query
stringlengths
10
3.85k
ru_query
stringlengths
9
3.76k
document
stringlengths
17
430k
metadata
dict
negatives
listlengths
97
100
negative_scores
listlengths
97
100
document_score
stringlengths
5
10
document_rank
stringclasses
2 values
Returns the value of the 'go_package' option of the first .proto file found in the same directory as projectFile
Возвращает значение опции 'go_package' первого файла .proto, найденного в той же директории, что и projectFile
func detectGoPackageForProject(projectFile string) (string, error) { var goPkg string projectDir := filepath.Dir(projectFile) if err := filepath.Walk(projectDir, func(protoFile string, info os.FileInfo, err error) error { // already set if goPkg != "" { return nil } if !strings.HasSuffix(protoFile, ".proto") { return nil } // search for go_package on protos in the same dir as the project.json if projectDir != filepath.Dir(protoFile) { return nil } content, err := ioutil.ReadFile(protoFile) if err != nil { return err } lines := strings.Split(string(content), "\n") for _, line := range lines { goPackage := goPackageStatementRegex.FindStringSubmatch(line) if len(goPackage) == 0 { continue } if len(goPackage) != 2 { return errors.Errorf("parsing go_package error: from %v found %v", line, goPackage) } goPkg = goPackage[1] break } return nil }); err != nil { return "", err } if goPkg == "" { return "", errors.Errorf("no go_package statement found in root dir of project %v", projectFile) } return goPkg, nil }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (g *Generator) GoFilePackage(depfile *fdep.DepFile) string {\n\treturn fproto_wrap.BaseName(g.GoWrapPackage(depfile))\n}", "func (g *Generator) GoPackage(depfile *fdep.DepFile) string {\n\tfor _, o := range depfile.ProtoFile.Options {\n\t\tif o.Name == \"go_package\" {\n\t\t\treturn o.Value.String()\n\t\t}\n\t}\n\treturn path.Dir(depfile.FilePath)\n}", "func (c *common) GetPackage() string { return c.file.GetPackage() }", "func (pkg *goPackage) firstGoFile() string {\n\tgoSrcs := []platformStringsBuilder{\n\t\tpkg.library.sources,\n\t\tpkg.binary.sources,\n\t\tpkg.test.sources,\n\t}\n\tfor _, sb := range goSrcs {\n\t\tif sb.strs != nil {\n\t\t\tfor s := range sb.strs {\n\t\t\t\tif strings.HasSuffix(s, \".go\") {\n\t\t\t\t\treturn s\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\treturn \"\"\n}", "func (fd *File) GoPackagePath() string {\n\treturn fd.builder.GoPackagePath\n}", "func GoPackage(packageName string) string {\n\tif packageName == \"\" {\n\t\treturn \"\"\n\t}\n\tsplit := strings.Split(packageName, \".\")\n\treturn split[len(split)-1] + \"pb\"\n}", "func goPackageName(d *descriptor.FileDescriptorProto) (name string, explicit bool) {\n\t// Does the file have a \"go_package\" option?\n\tif _, pkg, ok := goPackageOption(d); ok {\n\t\treturn pkg, true\n\t}\n\n\t// Does the file have a package clause?\n\tif pkg := d.GetPackage(); pkg != \"\" {\n\t\treturn pkg, false\n\t}\n\t// Use the file base name.\n\treturn baseName(d.GetName()), false\n}", "func (d *FileDescriptor) goPackageName() (name string, explicit bool) {\n\t// Does the file have a \"go_package\" option?\n\tif _, pkg, ok := d.goPackageOption(); ok {\n\t\treturn pkg, true\n\t}\n\n\t// Does the file have a package clause?\n\tif pkg := d.GetPackage(); pkg != \"\" {\n\t\treturn pkg, false\n\t}\n\t// Use the file base name.\n\treturn baseName(d.GetName()), false\n}", "func (d *FileDescriptor) PackageName() string { return uniquePackageOf(d.FileDescriptorProto) }", "func (c *common) PackageName() string { return 
uniquePackageOf(c.file) }", "func (d *FileDescriptor) goFileName(pathType pathType) string {\n\tname := *d.Name\n\tif ext := path.Ext(name); ext == \".proto\" || ext == \".protodevel\" {\n\t\tname = name[:len(name)-len(ext)]\n\t}\n\tname += \".cobra.pb.go\"\n\n\tif pathType == pathTypeSourceRelative {\n\t\treturn name\n\t}\n\n\t// Does the file have a \"go_package\" option?\n\t// If it does, it may override the filename.\n\tif impPath, _, ok := d.goPackageOption(); ok && impPath != \"\" {\n\t\t// Replace the existing dirname with the declared import path.\n\t\t_, name = path.Split(name)\n\t\tname = path.Join(impPath, name)\n\t\treturn name\n\t}\n\n\treturn name\n}", "func (*GetProjectRequest) Descriptor() ([]byte, []int) {\n\treturn file_user_proto_rawDescGZIP(), []int{25}\n}", "func goFileName(d *descriptor.FileDescriptorProto) string {\n\tname := *d.Name\n\tif ext := path.Ext(name); ext == \".proto\" || ext == \".protodevel\" {\n\t\tname = name[:len(name)-len(ext)]\n\t}\n\tname += \".nrpc.go\"\n\n\t// Does the file have a \"go_package\" option?\n\t// If it does, it may override the filename.\n\tif impPath, _, ok := goPackageOption(d); ok && impPath != \"\" {\n\t\t// Replace the existing dirname with the declared import path.\n\t\t_, name = path.Split(name)\n\t\tname = path.Join(impPath, name)\n\t\treturn name\n\t}\n\n\treturn name\n}", "func (pp *protoPackage) pkgPath() string {\n\treturn strings.Replace(pp.Pkg, \".\", \"/\", -1)\n}", "func goPkg(fileName string) (string, error) {\n\tcontent, err := os.ReadFile(fileName)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tvar pkgName string\n\tif match := goPkgOptRe.FindSubmatch(content); len(match) > 0 {\n\t\tpn, err := strconv.Unquote(string(match[1]))\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t\tpkgName = pn\n\t}\n\tif p := strings.IndexRune(pkgName, ';'); p > 0 {\n\t\tpkgName = pkgName[:p]\n\t}\n\treturn pkgName, nil\n}", "func (project Project) Package() (string, error) {\n\n\tif 
project.packageName != \"\" {\n\t\treturn project.packageName, nil\n\t}\n\n\tgoModPath := project.RelPath(GoModFileName)\n\tif !project.FileExists(goModPath) {\n\t\treturn \"\", errors.New(\"Failed to determine the package name for this project\")\n\t}\n\n\tb, err := ioutil.ReadFile(goModPath)\n\tif err != nil {\n\t\treturn \"\", errors.Wrap(err, \"Failed to read the go.mod file\")\n\t}\n\n\tmod, err := gomod.Parse(goModPath, b)\n\tif err != nil {\n\t\treturn \"\", errors.Wrap(err, \"Failed to parse the go.mod file\")\n\t}\n\n\tproject.packageName = strings.TrimSuffix(mod.Name, \"/\")\n\n\treturn project.packageName, nil\n\n}", "func (g *Generator) GoWrapFilePackage(depfile *fdep.DepFile) string {\n\tif g.PkgSource != nil {\n\t\tif p, ok := g.PkgSource.GetFilePkg(g, depfile); ok {\n\t\t\treturn p\n\t\t}\n\t}\n\n\treturn \"fw\" + fproto_wrap.BaseName(g.GoWrapPackage(depfile))\n}", "func (f *FileStruct) GetPersistPackageOption() string {\n\tif f.Desc == nil || f.Desc.GetOptions() == nil {\n\t\treturn \"\"\n\t}\n\tif proto.HasExtension(f.Desc.GetOptions(), persist.E_Package) {\n\t\tpkg, err := proto.GetExtension(f.Desc.GetOptions(), persist.E_Package)\n\t\tif err != nil {\n\t\t\tlogrus.WithError(err).Debug(\"Error\")\n\t\t\treturn \"\"\n\t\t}\n\t\t//logrus.WithField(\"pkg\", *pkg.(*string)).Info(\"Package\")\n\t\treturn *pkg.(*string)\n\t}\n\tlogrus.WithField(\"File Options\", f.Desc.GetOptions()).Debug(\"file options\")\n\treturn \"\"\n}", "func Which(s protoreflect.FullName) ProtoFile {\r\n\treturn wellKnownTypes[s]\r\n}", "func GetPackageName(source string) string {\n\tfileNode, err := parser.ParseFile(\"\", source, nil, parser.ImportsOnly)\n\tif err != nil {\n\t\treturn \"\"\n\t}\n\n\treturn fileNode.Name.Name()\n}", "func (*GetProjectResponse) Descriptor() ([]byte, []int) {\n\treturn file_user_proto_rawDescGZIP(), []int{26}\n}", "func (*Project) Descriptor() ([]byte, []int) {\n\treturn file_proto_carbon_proto_rawDescGZIP(), []int{0}\n}", "func 
ProtoFromFileDescriptor(d protoreflect.FileDescriptor) *descriptorpb.FileDescriptorProto {\n\tif imp, ok := d.(protoreflect.FileImport); ok {\n\t\td = imp.FileDescriptor\n\t}\n\ttype canProto interface {\n\t\tFileDescriptorProto() *descriptorpb.FileDescriptorProto\n\t}\n\tif res, ok := d.(canProto); ok {\n\t\treturn res.FileDescriptorProto()\n\t}\n\tif res, ok := d.(DescriptorProtoWrapper); ok {\n\t\tif fd, ok := res.AsProto().(*descriptorpb.FileDescriptorProto); ok {\n\t\t\treturn fd\n\t\t}\n\t}\n\treturn protodesc.ToFileDescriptorProto(d)\n}", "func goPackageOption(d *descriptor.FileDescriptorProto) (impPath, pkg string, ok bool) {\n\tpkg = d.GetOptions().GetGoPackage()\n\tif pkg == \"\" {\n\t\treturn\n\t}\n\tok = true\n\t// The presence of a slash implies there's an import path.\n\tslash := strings.LastIndex(pkg, \"/\")\n\tif slash < 0 {\n\t\treturn\n\t}\n\timpPath, pkg = pkg, pkg[slash+1:]\n\t// A semicolon-delimited suffix overrides the package name.\n\tsc := strings.IndexByte(impPath, ';')\n\tif sc < 0 {\n\t\treturn\n\t}\n\timpPath, pkg = impPath[:sc], impPath[sc+1:]\n\treturn\n}", "func (*CodeGeneratorResponse_File) Descriptor() ([]byte, []int) {\n\treturn file_google_protobuf_compiler_plugin_proto_rawDescGZIP(), []int{2, 0}\n}", "func ToFileDescriptorProto(file protoreflect.FileDescriptor) *descriptorpb.FileDescriptorProto {\n\tp := &descriptorpb.FileDescriptorProto{\n\t\tName: proto.String(file.Path()),\n\t\tOptions: proto.Clone(file.Options()).(*descriptorpb.FileOptions),\n\t}\n\tif file.Package() != \"\" {\n\t\tp.Package = proto.String(string(file.Package()))\n\t}\n\tfor i, imports := 0, file.Imports(); i < imports.Len(); i++ {\n\t\timp := imports.Get(i)\n\t\tp.Dependency = append(p.Dependency, imp.Path())\n\t\tif imp.IsPublic {\n\t\t\tp.PublicDependency = append(p.PublicDependency, int32(i))\n\t\t}\n\t\tif imp.IsWeak {\n\t\t\tp.WeakDependency = append(p.WeakDependency, int32(i))\n\t\t}\n\t}\n\tfor i, locs := 0, file.SourceLocations(); i < locs.Len(); 
i++ {\n\t\tloc := locs.Get(i)\n\t\tl := &descriptorpb.SourceCodeInfo_Location{}\n\t\tl.Path = append(l.Path, loc.Path...)\n\t\tif loc.StartLine == loc.EndLine {\n\t\t\tl.Span = []int32{int32(loc.StartLine), int32(loc.StartColumn), int32(loc.EndColumn)}\n\t\t} else {\n\t\t\tl.Span = []int32{int32(loc.StartLine), int32(loc.StartColumn), int32(loc.EndLine), int32(loc.EndColumn)}\n\t\t}\n\t\tl.LeadingDetachedComments = append([]string(nil), loc.LeadingDetachedComments...)\n\t\tif loc.LeadingComments != \"\" {\n\t\t\tl.LeadingComments = proto.String(loc.LeadingComments)\n\t\t}\n\t\tif loc.TrailingComments != \"\" {\n\t\t\tl.TrailingComments = proto.String(loc.TrailingComments)\n\t\t}\n\t\tif p.SourceCodeInfo == nil {\n\t\t\tp.SourceCodeInfo = &descriptorpb.SourceCodeInfo{}\n\t\t}\n\t\tp.SourceCodeInfo.Location = append(p.SourceCodeInfo.Location, l)\n\n\t}\n\tfor i, messages := 0, file.Messages(); i < messages.Len(); i++ {\n\t\tp.MessageType = append(p.MessageType, ToDescriptorProto(messages.Get(i)))\n\t}\n\tfor i, enums := 0, file.Enums(); i < enums.Len(); i++ {\n\t\tp.EnumType = append(p.EnumType, ToEnumDescriptorProto(enums.Get(i)))\n\t}\n\tfor i, services := 0, file.Services(); i < services.Len(); i++ {\n\t\tp.Service = append(p.Service, ToServiceDescriptorProto(services.Get(i)))\n\t}\n\tfor i, exts := 0, file.Extensions(); i < exts.Len(); i++ {\n\t\tp.Extension = append(p.Extension, ToFieldDescriptorProto(exts.Get(i)))\n\t}\n\tif syntax := file.Syntax(); syntax != protoreflect.Proto2 {\n\t\tp.Syntax = proto.String(file.Syntax().String())\n\t}\n\treturn p\n}", "func (*PatchProject) Descriptor() ([]byte, []int) {\n\treturn file_determined_project_v1_project_proto_rawDescGZIP(), []int{4}\n}", "func (*Project) Descriptor() ([]byte, []int) {\n\treturn file_determined_project_v1_project_proto_rawDescGZIP(), []int{2}\n}", "func (d *FileDescriptor) goPackageOption() (impPath, pkg string, ok bool) {\n\tpkg = d.GetOptions().GetGoPackage()\n\tif pkg == \"\" 
{\n\t\treturn\n\t}\n\tok = true\n\t// The presence of a slash implies there's an import path.\n\tslash := strings.LastIndex(pkg, \"/\")\n\tif slash < 0 {\n\t\treturn\n\t}\n\timpPath, pkg = pkg, pkg[slash+1:]\n\t// A semicolon-delimited suffix overrides the package name.\n\tsc := strings.IndexByte(impPath, ';')\n\tif sc < 0 {\n\t\treturn\n\t}\n\timpPath, pkg = impPath[:sc], impPath[sc+1:]\n\treturn\n}", "func (*ProjectSimple) Descriptor() ([]byte, []int) {\n\treturn file_user_proto_rawDescGZIP(), []int{29}\n}", "func (*GoPackageInfo) Descriptor() ([]byte, []int) {\n\treturn file_kythe_proto_go_proto_rawDescGZIP(), []int{1}\n}", "func deduceGenPkgName(genFiles []*descriptor.FileDescriptorProto) (string, error) {\n\tvar genPkgName string\n\tfor _, f := range genFiles {\n\t\tname, explicit := goPackageName(f)\n\t\tif explicit {\n\t\t\tname = stringutils.CleanIdentifier(name)\n\t\t\tif genPkgName != \"\" && genPkgName != name {\n\t\t\t\t// Make sure they're all set consistently.\n\t\t\t\treturn \"\", errors.Errorf(\"files have conflicting go_package settings, must be the same: %q and %q\", genPkgName, name)\n\t\t\t}\n\t\t\tgenPkgName = name\n\t\t}\n\t}\n\tif genPkgName != \"\" {\n\t\treturn genPkgName, nil\n\t}\n\n\t// If there is no explicit setting, then check the implicit package name\n\t// (derived from the protobuf package name) of the files and make sure it's\n\t// consistent.\n\tfor _, f := range genFiles {\n\t\tname, _ := goPackageName(f)\n\t\tname = stringutils.CleanIdentifier(name)\n\t\tif genPkgName != \"\" && genPkgName != name {\n\t\t\treturn \"\", errors.Errorf(\"files have conflicting package names, must be the same or overridden with go_package: %q and %q\", genPkgName, name)\n\t\t}\n\t\tgenPkgName = name\n\t}\n\n\t// All the files have the same name, so we're good.\n\treturn genPkgName, nil\n}", "func GetFirstGoPath() string {\n\treturn strings.Split(os.Getenv(\"GOPATH\"), \":\")[0]\n}", "func lookupProjPath(protoAbs string) (result string) 
{\n\tlastIndex := len(protoAbs)\n\tcurPath := protoAbs\n\n\tfor lastIndex > 0 {\n\t\tif fileExist(curPath+\"/cmd\") && fileExist(curPath+\"/api\") {\n\t\t\tresult = curPath\n\t\t\treturn\n\t\t}\n\t\tlastIndex = strings.LastIndex(curPath, string(os.PathSeparator))\n\t\tcurPath = protoAbs[:lastIndex]\n\t}\n\tresult = \"\"\n\treturn\n}", "func Namespace(file *descriptor.FileDescriptorProto) string {\n\toptions := file.GetOptions()\n\n\t// When there is a namespace option defined we use it\n\tif options.PhpNamespace != nil {\n\t\treturn options.GetPhpNamespace()\n\t}\n\n\treturn Name(file.GetPackage())\n}", "func protobufName(f *ast.Field) string {\n\tfor _, attr := range f.Attrs {\n\t\tif strings.HasPrefix(attr.Text, \"@protobuf\") {\n\t\t\tfor _, str := range strings.Split(attr.Text[10:len(attr.Text)-1], \",\") {\n\t\t\t\tif strings.HasPrefix(str, \"name=\") {\n\t\t\t\t\treturn str[5:]\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\treturn \"\"\n}", "func (*Project) Descriptor() ([]byte, []int) {\n\treturn file_management_proto_rawDescGZIP(), []int{79}\n}", "func (*Project) Descriptor() ([]byte, []int) {\n\treturn file_management_proto_rawDescGZIP(), []int{76}\n}", "func (*ExistingFile) Descriptor() ([]byte, []int) {\n\treturn file_protoconfig_go_kingpinv2_v1_extensions_proto_rawDescGZIP(), []int{1}\n}", "func getTmplFileDesc(fds []*descriptor.FileDescriptorProto) (string, *descriptor.FileDescriptorProto, error) {\n\tvar templateDescriptorProto *descriptor.FileDescriptorProto\n\tfor _, fd := range fds {\n\t\tif fd.GetOptions() == nil || !proto.HasExtension(fd.GetOptions(), tmpl.E_TemplateVariety) {\n\t\t\tcontinue\n\t\t}\n\t\tif templateDescriptorProto != nil {\n\t\t\treturn \"\", nil, fmt.Errorf(\n\t\t\t\t\"proto files %s and %s, both have the option %s. 
Only one proto file is allowed with this options\",\n\t\t\t\tfd.GetName(), templateDescriptorProto.GetName(), tmpl.E_TemplateVariety.Name)\n\t\t}\n\t\ttemplateDescriptorProto = fd\n\t}\n\n\tif templateDescriptorProto == nil {\n\t\treturn \"\", nil, fmt.Errorf(\"there has to be one proto file that has the extension %s\", tmpl.E_TemplateVariety.Name)\n\t}\n\n\tvar tmplName string\n\tif nameExt, err := proto.GetExtension(templateDescriptorProto.GetOptions(), tmpl.E_TemplateName); err != nil {\n\t\treturn \"\", nil, fmt.Errorf(\n\t\t\t\"proto files %s is missing required template_name option\", templateDescriptorProto.GetName())\n\t} else if err := validateTmplName(*(nameExt.(*string))); err != nil {\n\t\treturn \"\", nil, err\n\t} else {\n\t\ttmplName = *(nameExt.(*string))\n\t}\n\n\treturn tmplName, templateDescriptorProto, nil\n}", "func generateFile(gen *protogen.Plugin, file *protogen.File) {\n\tfilename := file.GeneratedFilenamePrefix + \"_message.pb.go\"\n\tg := gen.NewGeneratedFile(filename, file.GoImportPath)\n\n\tg.P(\"// Code generated by protoc-gen-message-validator. 
DO NOT EDIT.\")\n\tg.P()\n\tg.P(\"package \", file.GoPackageName)\n\tg.P()\n\n\tfor _, message := range file.Messages {\n\t\tstructName := string(message.Desc.Name())\n\t\tprefix := strings.ToLower(string(structName[0]))\n\n\t\tfor _, subMessage := range message.Messages {\n\t\t\tif subMessage.Desc.IsMapEntry() {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tsubStructName := string(subMessage.Desc.Name())\n\t\t\tgenerateMessage(fmt.Sprintf(\"%s_%s\", structName, subStructName), prefix, subMessage, g)\n\t\t}\n\n\t\tgenerateMessage(structName, prefix, message, g)\n\t}\n}", "func (*AppGroup) Descriptor() ([]byte, []int) {\n\treturn file_common_proto_rawDescGZIP(), []int{1}\n}", "func (*UpdateProjectRequest) Descriptor() ([]byte, []int) {\n\treturn file_user_proto_rawDescGZIP(), []int{21}\n}", "func (*ProjectModel) Descriptor() ([]byte, []int) {\n\treturn file_determined_project_v1_project_proto_rawDescGZIP(), []int{3}\n}", "func (*Project) Descriptor() ([]byte, []int) {\n\treturn file_api_ocp_project_api_ocp_project_api_proto_rawDescGZIP(), []int{12}\n}", "func (s *Stub) GetProject() string {\n\treturn \"\"\n}", "func (*UpdateProjectResponse) Descriptor() ([]byte, []int) {\n\treturn file_user_proto_rawDescGZIP(), []int{22}\n}", "func guessPackageName(b *util.BuildCtxt, base string) string {\n\tcwd, err := os.Getwd()\n\tif err != nil {\n\t\treturn \"main\"\n\t}\n\n\tpkg, err := b.Import(base, cwd, 0)\n\tif err != nil {\n\t\t// There may not be any top level Go source files but the project may\n\t\t// still be within the GOPATH.\n\t\tif strings.HasPrefix(base, b.GOPATH) {\n\t\t\tp := strings.TrimPrefix(base, b.GOPATH)\n\t\t\treturn strings.Trim(p, string(os.PathSeparator))\n\t\t}\n\t}\n\n\treturn pkg.ImportPath\n}", "func GetGoPackage(url string) string {\n\tswitch {\n\tcase strings.Contains(url, \";\"):\n\t\tidx := strings.LastIndex(url, \";\")\n\t\treturn url[idx+1:]\n\tcase strings.Contains(url, \"/\"):\n\t\tidx := strings.LastIndex(url, \"/\")\n\t\treturn 
url[idx+1:]\n\tdefault:\n\t\treturn url\n\t}\n}", "func (g *Generator) GoWrapPackage(depfile *fdep.DepFile) string {\n\tif g.PkgSource != nil {\n\t\tif p, ok := g.PkgSource.GetPkg(g, depfile); ok {\n\t\t\treturn p\n\t\t}\n\t}\n\n\tfor _, o := range depfile.ProtoFile.Options {\n\t\tif o.Name == \"gowrap_package\" {\n\t\t\treturn o.Value.String()\n\t\t}\n\t}\n\n\t// prepend \"fpwrap\"\n\tfor _, o := range depfile.ProtoFile.Options {\n\t\tif o.Name == \"go_package\" {\n\t\t\treturn path.Join(\"fpwrap\", o.Value.String())\n\t\t}\n\t}\n\treturn path.Join(\"fpwrap\", path.Dir(depfile.FilePath))\n}", "func getFileExtensionBySdk(precompiledObjectPath string) (string, error) {\n\tsdk := strings.Split(precompiledObjectPath, string(os.PathSeparator))[0]\n\tvar extension string\n\tswitch sdk {\n\tcase pb.Sdk_SDK_JAVA.String():\n\t\textension = javaExtension\n\tcase pb.Sdk_SDK_PYTHON.String():\n\t\textension = pyExtension\n\tcase pb.Sdk_SDK_GO.String():\n\t\textension = goExtension\n\tcase pb.Sdk_SDK_SCIO.String():\n\t\textension = scioExtension\n\tdefault:\n\t\treturn \"\", fmt.Errorf(\"\")\n\t}\n\treturn extension, nil\n}", "func (p *Provider) GetProject() string {\n\to := p.opts\n\tif len(o.projects) > 1 {\n\t\tpanic(fmt.Sprintf(\n\t\t\t\"multiple projects not supported (%d specified)\", len(o.projects)))\n\t}\n\treturn o.projects[0]\n}", "func (*ListProjectsResponse_Project) Descriptor() ([]byte, []int) {\n\treturn file_web_proto_rawDescGZIP(), []int{1, 0}\n}", "func (*ProjectMember) Descriptor() ([]byte, []int) {\n\treturn file_management_proto_rawDescGZIP(), []int{78}\n}", "func (*GetUserProjectsRequest) Descriptor() ([]byte, []int) {\n\treturn file_user_proto_rawDescGZIP(), []int{27}\n}", "func (t *Type) fullGoPackageName() string {\n\tif t.qname.namespace != t.Namespace {\n\t\treturn t.qname.namespace.fullGoPackageName\n\t}\n\treturn \"\"\n}", "func (i Import) Package() string {\n\tif v := i.Alias; len(v) != 0 {\n\t\treturn v\n\t}\n\n\tif v := i.Path; len(v) != 0 
{\n\t\tparts := strings.Split(v, \"/\")\n\t\tpkg := parts[len(parts)-1]\n\t\treturn pkg\n\t}\n\n\treturn \"\"\n}", "func (*CreateProjectResponse) Descriptor() ([]byte, []int) {\n\treturn file_user_proto_rawDescGZIP(), []int{18}\n}", "func (*CreateProjectRequest) Descriptor() ([]byte, []int) {\n\treturn file_user_proto_rawDescGZIP(), []int{17}\n}", "func (*Projects) Descriptor() ([]byte, []int) {\n\treturn file_management_proto_rawDescGZIP(), []int{78}\n}", "func (b *Buffer) FileType() string {\n\treturn b.Settings[\"filetype\"].(string)\n}", "func (*ProjectMember) Descriptor() ([]byte, []int) {\n\treturn file_management_proto_rawDescGZIP(), []int{81}\n}", "func (x *fastReflection_ModuleOptions) Get(descriptor protoreflect.FieldDescriptor) protoreflect.Value {\n\tswitch descriptor.FullName() {\n\tcase \"cosmos.autocli.v1.ModuleOptions.tx\":\n\t\tvalue := x.Tx\n\t\treturn protoreflect.ValueOfMessage(value.ProtoReflect())\n\tcase \"cosmos.autocli.v1.ModuleOptions.query\":\n\t\tvalue := x.Query\n\t\treturn protoreflect.ValueOfMessage(value.ProtoReflect())\n\tdefault:\n\t\tif descriptor.IsExtension() {\n\t\t\tpanic(fmt.Errorf(\"proto3 declared messages do not support extensions: cosmos.autocli.v1.ModuleOptions\"))\n\t\t}\n\t\tpanic(fmt.Errorf(\"message cosmos.autocli.v1.ModuleOptions does not contain field %s\", descriptor.FullName()))\n\t}\n}", "func (*DescribeProjectRequest) Descriptor() ([]byte, []int) {\n\treturn file_api_ocp_project_api_ocp_project_api_proto_rawDescGZIP(), []int{8}\n}", "func GetProto(src string) (string, error) {\n\tparsed, err := url.Parse(src)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tif len(parsed.Scheme) > 0 {\n\t\treturn parsed.Scheme, nil\n\t}\n\n\treturn \"\", nil\n}", "func (*NewProject) Descriptor() ([]byte, []int) {\n\treturn file_api_ocp_project_api_ocp_project_api_proto_rawDescGZIP(), []int{13}\n}", "func goPath() string {\n\tgpDefault := build.Default.GOPATH\n\tgps := filepath.SplitList(gpDefault)\n\n\treturn gps[0]\n}", "func 
(p *plugin) analyzeFile(f *descriptor.FileDescriptorProto) error {\n\tif f.GetSyntax() != \"proto3\" {\n\t\treturn fmt.Errorf(\"unsupported syntax '%s', must be 'proto3'\", f.GetSyntax())\n\t}\n\n\tfile := goFile{structs: map[string]goStruct{}}\n\n\tfor _, m := range f.GetMessageType() {\n\t\tif err := p.analyzeMessageType(file, []string{}, m); err != nil {\n\t\t\treturn fmt.Errorf(\"failed to analyze message type '%s': %s\", m.GetName(), err.Error())\n\t\t}\n\t}\n\n\tif len(file.structs) > 0 {\n\t\tn := filepath.Base(f.GetName())\n\t\tn = strings.TrimSuffix(n, filepath.Ext(n))\n\t\tp.targetFiles[n+\".pb.go\"] = file\n\t}\n\n\treturn nil\n}", "func (resolver *NpmResolver) ParsePkgFile(pkgFile string) (*Package, error) {\n\tcontent, err := ioutil.ReadFile(pkgFile)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tvar packageInfo Package\n\tif err := json.Unmarshal(content, &packageInfo); err != nil {\n\t\treturn nil, err\n\t}\n\treturn &packageInfo, nil\n}", "func getPackageName(datatypeName string) string {\n\tparts := strings.Split(datatypeName, \".\")\n\tif len(parts) == 1 {\n\t\treturn \"\" // no package name\n\t}\n\n\toffset := 0\n\tfor i, p := range parts {\n\t\tif unicode.IsUpper(rune(p[0])) {\n\t\t\tbreak\n\t\t}\n\n\t\toffset += len(p)\n\t\tif i > 0 {\n\t\t\toffset += 1 // also account for the '.'\n\t\t}\n\t}\n\n\treturn datatypeName[:offset]\n}", "func (pp *protoPackage) absPath() string {\n\treturn path.Join(pp.Path, pp.pkgPath())\n}", "func (f *tmplFuncs) resolvePkgPath(pkg string) string {\n\t// Test this proto file itself:\n\tif stripExt(filepath.Base(*f.f.Name)) == pkg {\n\t\treturn *f.f.Name\n\t}\n\n\t// Test each dependency:\n\tfor _, p := range f.f.Dependency {\n\t\tif stripExt(filepath.Base(p)) == pkg {\n\t\t\treturn p\n\t\t}\n\t}\n\treturn \"\"\n}", "func (*MyCompany) Descriptor() ([]byte, []int) {\n\treturn file_parser_company_proto_rawDescGZIP(), []int{21}\n}", "func IsCommonProto(f *desc.FileDescriptor) bool {\n\tp := f.GetPackage()\n\tfor _, 
prefix := range []string{\"google.api\", \"google.protobuf\", \"google.rpc\", \"google.longrunning\"} {\n\t\tif strings.HasPrefix(p, prefix) {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func (i *Import) GetTFVCProject() string {\n\tif i == nil || i.TFVCProject == nil {\n\t\treturn \"\"\n\t}\n\treturn *i.TFVCProject\n}", "func (*Program) Descriptor() ([]byte, []int) {\n\treturn file_proto_common_proto_rawDescGZIP(), []int{1}\n}", "func (*DescribeProjectResponse) Descriptor() ([]byte, []int) {\n\treturn file_api_ocp_project_api_ocp_project_api_proto_rawDescGZIP(), []int{9}\n}", "func (*DcsProject) Descriptor() ([]byte, []int) {\n\treturn file_dcs_model_proto_rawDescGZIP(), []int{2}\n}", "func (o *ProformaArray) GetProject() string {\n\tif o == nil || o.Project == nil {\n\t\tvar ret string\n\t\treturn ret\n\t}\n\treturn *o.Project\n}", "func (*CheckProjectTokenResponse) Descriptor() ([]byte, []int) {\n\treturn file_user_proto_rawDescGZIP(), []int{33}\n}", "func (*ProjectColumn) Descriptor() ([]byte, []int) {\n\treturn file_determined_project_v1_project_proto_rawDescGZIP(), []int{0}\n}", "func getCallerPackage() string {\n\tconst replayModeRecordModeCaller = 3\n\tc := caller.Get(replayModeRecordModeCaller)\n\tpkg := strings.SplitN(c, \".\", 2)[0]\n\treturn path.Base(pkg)\n}", "func (pr *PkgDecoder) PkgPath() string { return pr.pkgPath }", "func (b *baseBuilder) GetFile() *FileBuilder {\n\tp := b.parent\n\tfor p != nil {\n\t\tif fb, ok := p.(*FileBuilder); ok {\n\t\t\treturn fb\n\t\t}\n\t\tp = p.GetParent()\n\t}\n\treturn nil\n}", "func GoServicePackagePath(name string) string {\n\treturn filepath.Join(PaceBase, ServiceBase, name)\n}", "func (*UpdateProjectRequest) Descriptor() ([]byte, []int) {\n\treturn file_api_ocp_project_api_ocp_project_api_proto_rawDescGZIP(), []int{10}\n}", "func GetPkgName() string {\n\t_, filePath, _, _ := runtime.Caller(0)\n\tfile, _ := os.Open(filePath)\n\tr := bufio.NewReader(file)\n\tline, _, _ := r.ReadLine()\n\tpkgName := 
bytes.TrimPrefix(line, []byte(\"package \"))\n\n\treturn string(pkgName)\n}", "func (p *Parser) Package() string {\n\treturn p.asTree.Name.Name\n}", "func ProtoServiceName(fullname string) Option {\n\treturn func(a *appOptions) {\n\t\ta.protoService = fullname\n\t}\n}", "func (*Projects) Descriptor() ([]byte, []int) {\n\treturn file_management_proto_rawDescGZIP(), []int{75}\n}", "func packageFilename(pwd, relativePath string) string {\n\tfullPath := filepath.Join(pwd, relativePath)\n\treturn strings.TrimPrefix(strings.TrimPrefix(fullPath, filepath.Join(gopath(), \"src\")), \"/\")\n}", "func (*CheckProjectTokenRequest) Descriptor() ([]byte, []int) {\n\treturn file_user_proto_rawDescGZIP(), []int{32}\n}", "func (*ProjectID) Descriptor() ([]byte, []int) {\n\treturn file_management_proto_rawDescGZIP(), []int{6}\n}", "func (*ProjectID) Descriptor() ([]byte, []int) {\n\treturn file_management_proto_rawDescGZIP(), []int{6}\n}", "func (o *ProformaArray) GetProjectOk() (*string, bool) {\n\tif o == nil || o.Project == nil {\n\t\treturn nil, false\n\t}\n\treturn o.Project, true\n}", "func getPackageName(f string) string {\n\tfor {\n\t\tlastPeriod := strings.LastIndex(f, \".\")\n\t\tlastSlash := strings.LastIndex(f, \"/\")\n\t\tif lastPeriod > lastSlash {\n\t\t\tf = f[:lastPeriod]\n\t\t} else {\n\t\t\tbreak\n\t\t}\n\t}\n\n\treturn f\n}", "func Project(ctx context.Context, project string) (*configpb.ProjectConfig, error) {\n\tconfigs, err := Projects(ctx)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif c, ok := configs[project]; ok {\n\t\treturn c, nil\n\t}\n\treturn nil, ErrNotFoundProjectConfig\n}", "func (*GetMyRequest) Descriptor() ([]byte, []int) {\n\treturn file_parser_company_proto_rawDescGZIP(), []int{6}\n}", "func (*GlobalOptions) Descriptor() ([]byte, []int) {\n\treturn file_github_com_google_cloudprober_targets_gce_proto_config_proto_rawDescGZIP(), []int{3}\n}", "func (pi *PackageInfo) FileVName(file *ast.File) *spb.VName {\n\tif v := pi.fileVName[file]; v != nil 
{\n\t\treturn v\n\t}\n\tv := proto.Clone(pi.VName).(*spb.VName)\n\tv.Language = \"\"\n\tv.Signature = \"\"\n\tv.Path = pi.FileSet.Position(file.Pos()).Filename\n\treturn v\n}" ]
[ "0.6514599", "0.6418956", "0.62568474", "0.6100058", "0.6059389", "0.6033528", "0.5760218", "0.571393", "0.56766814", "0.56747204", "0.55886865", "0.55810106", "0.55710924", "0.5528127", "0.5502348", "0.55021805", "0.5456921", "0.5454185", "0.54367715", "0.54307157", "0.54025465", "0.53424656", "0.5329483", "0.5327733", "0.53020066", "0.52819157", "0.52795607", "0.52257407", "0.52240294", "0.51456493", "0.5137637", "0.5134006", "0.5133966", "0.51120484", "0.5107064", "0.50847185", "0.5082025", "0.507457", "0.50681937", "0.5033462", "0.50315183", "0.50245786", "0.5019538", "0.5017327", "0.50053775", "0.50039554", "0.49943617", "0.49909803", "0.49890676", "0.49804884", "0.49707755", "0.4931849", "0.49237567", "0.4921064", "0.4905122", "0.49045667", "0.49015555", "0.49012348", "0.4892581", "0.4889273", "0.48874435", "0.48817107", "0.48771524", "0.4876309", "0.48685285", "0.48641378", "0.4861054", "0.48610055", "0.48546317", "0.4849436", "0.48461854", "0.48250479", "0.4822479", "0.48205945", "0.48192737", "0.48135585", "0.48060772", "0.4804189", "0.48035768", "0.48003185", "0.47990838", "0.4787936", "0.47851935", "0.47817233", "0.4781146", "0.47775137", "0.47767115", "0.4776245", "0.47737917", "0.47712553", "0.476954", "0.47676048", "0.47668436", "0.47668436", "0.47661626", "0.47600132", "0.47533566", "0.47468054", "0.47438252", "0.4743572" ]
0.7273096
0
NewQueueManagerWithDefaults instantiates a new QueueManager object. This constructor will only assign default values to properties that have defaults defined, but it doesn't guarantee that properties required by the API are set
NewQueueManagerWithDefaults создает новый объект QueueManager. Этот конструктор присваивает значения по умолчанию только тем свойствам, которые определены, но он не гарантирует, что свойства, требуемые API, будут заданы.
func NewQueueManagerWithDefaults() *QueueManager { this := QueueManager{} return &this }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func NewQueueManager(logger log.Logger, cfg config.QueueConfig, externalLabels labels.Labels, relabelConfigs []*relabel.Config, client StorageClient, flushDeadline time.Duration) *QueueManager {\n\tif logger == nil {\n\t\tlogger = log.NewNopLogger()\n\t} else {\n\t\tlogger = log.With(logger, \"queue\", client.Name())\n\t}\n\tt := &QueueManager{\n\t\tlogger: logger,\n\t\tflushDeadline: flushDeadline,\n\t\tcfg: cfg,\n\t\texternalLabels: externalLabels,\n\t\trelabelConfigs: relabelConfigs,\n\t\tclient: client,\n\t\tqueueName: client.Name(),\n\n\t\tlogLimiter: rate.NewLimiter(logRateLimit, logBurst),\n\t\tnumShards: cfg.MinShards,\n\t\treshardChan: make(chan int),\n\t\tquit: make(chan struct{}),\n\n\t\tsamplesIn: newEWMARate(ewmaWeight, shardUpdateDuration),\n\t\tsamplesOut: newEWMARate(ewmaWeight, shardUpdateDuration),\n\t\tsamplesOutDuration: newEWMARate(ewmaWeight, shardUpdateDuration),\n\t}\n\tt.shards = t.newShards(t.numShards)\n\tnumShards.WithLabelValues(t.queueName).Set(float64(t.numShards))\n\tshardCapacity.WithLabelValues(t.queueName).Set(float64(t.cfg.Capacity))\n\n\t// Initialize counter labels to zero.\n\tsentBatchDuration.WithLabelValues(t.queueName)\n\tsucceededSamplesTotal.WithLabelValues(t.queueName)\n\tfailedSamplesTotal.WithLabelValues(t.queueName)\n\tdroppedSamplesTotal.WithLabelValues(t.queueName)\n\n\treturn t\n}", "func NewDefaultClient() QueueClient {\n\treturn &inMemoryQueue{queues: make(map[string][]string)}\n}", "func DefaultQueue(queue string) func(*Locker) error {\n\treturn func(l *Locker) error {\n\t\tl.DefaultQueue = queue\n\t\treturn nil\n\t}\n}", "func (m ManagedConsumerConfig) setDefaults() ManagedConsumerConfig {\n\tif m.NewConsumerTimeout <= 0 {\n\t\tm.NewConsumerTimeout = 5 * time.Second\n\t}\n\tif m.InitialReconnectDelay <= 0 {\n\t\tm.InitialReconnectDelay = 1 * time.Second\n\t}\n\tif m.MaxReconnectDelay <= 0 {\n\t\tm.MaxReconnectDelay = 5 * time.Minute\n\t}\n\t// unbuffered queue not allowed\n\tif m.QueueSize <= 0 
{\n\t\tm.QueueSize = 128\n\t}\n\n\treturn m\n}", "func Default() *JobManager {\n\tif _default == nil {\n\t\t_defaultLock.Lock()\n\t\tdefer _defaultLock.Unlock()\n\n\t\tif _default == nil {\n\t\t\t_default = New()\n\t\t}\n\t}\n\treturn _default\n}", "func NewDefault(db *bolt.DB) (q queue.WaitQueue, err error) {\n\treturn New(db, DefaultBucket, DefaultMemQueueSize, DefaultBufSize)\n}", "func DefaultQueue(queue string) func(*Config) error {\n\treturn func(c *Config) error {\n\t\tc.DefaultQueue = queue\n\t\treturn nil\n\t}\n}", "func (obj *RabbitQueue) Default() {\n\trabbitQueueLog.Info(\"default\", \"name\", obj.Name, \"namespace\", obj.Namespace)\n\n\tif obj.Spec.QueueName == \"\" {\n\t\tobj.Spec.QueueName = obj.Name\n\t}\n}", "func NewQueueManager(name string, clusters []string, aliasQueues []AliasQueue, remoteQueues []RemoteQueue, clusterQueues []ClusterQueue, ) *QueueManager {\n\tthis := QueueManager{}\n\tthis.Name = name\n\tthis.Clusters = clusters\n\tthis.AliasQueues = aliasQueues\n\tthis.RemoteQueues = remoteQueues\n\tthis.ClusterQueues = clusterQueues\n\treturn &this\n}", "func DefaultQueueSettings() QueueSettings {\n\treturn QueueSettings{\n\t\tEnabled: true,\n\t\tNumConsumers: 10,\n\t\t// For 5000 queue elements at 100 requests/sec gives about 50 sec of survival of destination outage.\n\t\t// This is a pretty decent value for production.\n\t\t// User should calculate this from the perspective of how many seconds to buffer in case of a backend outage,\n\t\t// multiply that by the number of requests per seconds.\n\t\tQueueSize: 5000,\n\t\tPersistentStorageEnabled: false,\n\t}\n}", "func NewQueueManager(q amboy.Queue) Manager {\n\treturn &queueManager{\n\t\tqueue: q,\n\t}\n}", "func NewDefault(m map[string]interface{}) (share.Manager, error) {\n\tc := &config{}\n\tif err := mapstructure.Decode(m, c); err != nil {\n\t\terr = errors.Wrap(err, \"error creating a new manager\")\n\t\treturn nil, err\n\t}\n\n\ts, err := metadata.NewCS3Storage(c.GatewayAddr, 
c.ProviderAddr, c.ServiceUserID, c.ServiceUserIdp, c.MachineAuthAPIKey)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tindexer := indexer.CreateIndexer(s)\n\n\tclient, err := pool.GetGatewayServiceClient(c.GatewayAddr)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn New(client, s, indexer)\n}", "func NewQueue(maxQueueSize, maxFlowSize uint64, helper Interface) *Queue {\n\tif maxFlowSize > maxQueueSize {\n\t\tpanic(\"MaxFlowSize > MaxQueueSize\")\n\t}\n\n\tif helper == nil {\n\t\tpanic(\"helper is nil\")\n\t}\n\n\tq := new(Queue)\n\tq.cond.L = &q.lock\n\tq.maxQueueSize = maxQueueSize\n\tq.maxFlowSize = maxFlowSize\n\tq.helper = helper\n\tq.flows = make(map[uint64]*flowInfo)\n\n\treturn q\n}", "func New(mqURL string) (models.MessageQueue, error) {\n\tmq, err := newmq(mqURL)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &metricMQ{mq}, nil\n}", "func NewDefaultMQService() *mqServiceImpl {\n\treturn &mqServiceImpl{}\n}", "func New() *queue {\n\treturn &queue{\n\t\titems: make([]item, DefaultCapacity),\n\t\tcapacity: DefaultCapacity,\n\t}\n}", "func newDefaultPodManager() *podManager {\n\treturn &podManager{\n\t\trunningPods: make(map[string]*runningPod),\n\t\trequests: make(chan *cniserver.PodRequest, 20),\n\t\treattachPods: make(map[string]*corev1.Pod),\n\t}\n}", "func NewDefaultManager() *Manager {\n\tm := NewManager()\n\n\t// default config\n\tm.SetAuthorizeCodeExp(time.Minute * 10)\n\tm.SetImplicitTokenCfg(&Config{AccessTokenExp: time.Hour * 1})\n\tm.SetClientTokenCfg(&Config{AccessTokenExp: time.Hour * 2})\n\tm.SetAuthorizeCodeTokenCfg(&Config{IsGenerateRefresh: true, AccessTokenExp: time.Hour * 2, RefreshTokenExp: time.Hour * 24 * 3})\n\tm.SetPasswordTokenCfg(&Config{IsGenerateRefresh: true, AccessTokenExp: time.Hour * 2, RefreshTokenExp: time.Hour * 24 * 7})\n\n\tm.MapTokenModel(models.NewToken())\n\tm.MapAuthorizeGenerate(generates.NewAuthorizeGenerate())\n\tm.MapAccessGenerate(generates.NewAccessGenerate())\n\n\treturn m\n}", "func 
NewQueue(maximumCapacity int, initialCapacity int, factory TokenFactory) *Queue {\n\tq := &Queue{\n\t\tmaxCapacity: maximumCapacity,\n\t\tavailableTokens: make(chan (Token), maximumCapacity),\n\t\tcommittedTokens: make(chan (Token), maximumCapacity),\n\t\tdiscardTokens: make(chan (Token), maximumCapacity),\n\t\tcloseTokens: make(chan (Token)),\n\t}\n\n\tfor i := 0; i < maximumCapacity; i++ {\n\t\ttoken := factory()\n\t\tif token == nil {\n\t\t\treturn nil\n\t\t}\n\t\tq.discardTokens <- token\n\t\tq.validTokens = append(q.validTokens, token)\n\t}\n\n\tq.EnableDisableTokens(initialCapacity)\n\n\treturn q\n}", "func NewBasicMockMessageQueue() lanternmq.MessageQueue {\n\tmq := BasicMockMessageQueue{}\n\tmq.Queue = make(chan []byte, 20)\n\n\tmq.ConnectFn = func(username string, password string, host string, port string) error {\n\t\treturn nil\n\t}\n\n\tmq.CreateChannelFn = func() (lanternmq.ChannelID, error) {\n\t\treturn 1, nil\n\t}\n\n\tmq.NumConcurrentMsgsFn = func(chID lanternmq.ChannelID, num int) error {\n\t\treturn nil\n\t}\n\n\tmq.QueueExistsFn = func(chId lanternmq.ChannelID, qName string) (bool, error) {\n\t\treturn true, nil\n\t}\n\n\tmq.DeclareQueueFn = func(chID lanternmq.ChannelID, name string) error {\n\t\treturn nil\n\t}\n\n\tmq.PublishToQueueFn = func(chID lanternmq.ChannelID, qName string, message string) error {\n\t\tif len(mq.Queue) < 20 {\n\t\t\tmq.Queue <- []byte(message)\n\t\t} else {\n\t\t\treturn errors.New(\"queue full - unable to add new message\")\n\t\t}\n\t\treturn nil\n\t}\n\n\tmq.ConsumeFromQueueFn = func(chID lanternmq.ChannelID, qName string) (lanternmq.Messages, error) {\n\t\treturn nil, nil\n\t}\n\n\tmq.ProcessMessagesFn = func(ctx context.Context, msgs lanternmq.Messages, handler lanternmq.MessageHandler, args *map[string]interface{}, errs chan<- error) {\n\t\tfor msg := range mq.Queue {\n\t\t\terr := handler(msg, args)\n\t\t\tif err != nil {\n\t\t\t\terrs <- err\n\t\t\t}\n\t\t}\n\t}\n\n\tmq.CloseFn = func() {}\n\treturn &mq\n}", 
"func New() *Queue {\r\n\treturn &Queue{nil,nil,0}\r\n}", "func New(mqURL string) (models.MessageQueue, error) {\n\t// Play with URL schemes here: https://play.golang.org/p/xWAf9SpCBW\n\tu, err := url.Parse(mqURL)\n\tif err != nil {\n\t\tlogrus.WithError(err).WithFields(logrus.Fields{\"url\": mqURL}).Fatal(\"bad MQ URL\")\n\t}\n\tlogrus.WithFields(logrus.Fields{\"mq\": u.Scheme}).Debug(\"selecting MQ\")\n\tswitch u.Scheme {\n\tcase \"memory\":\n\t\treturn NewMemoryMQ(), nil\n\tcase \"redis\":\n\t\treturn NewRedisMQ(u)\n\tcase \"bolt\":\n\t\treturn NewBoltMQ(u)\n\t}\n\tif strings.HasPrefix(u.Scheme, \"ironmq\") {\n\t\treturn NewIronMQ(u), nil\n\t}\n\n\treturn nil, fmt.Errorf(\"mq type not supported %v\", u.Scheme)\n}", "func NewQueue() *Queue {\n return &Queue{member: make([]interface{}, 0)}\n}", "func New() Manager {\n\treturn Manager{\n\t\tState: make(map[string]string),\n\t\tClientHolder: make(map[string]utils.Set),\n\t\tClientQueue: make(map[string]utils.Queue),\n\t}\n}", "func NewManager(logger logging.Logger) SessionManager {\n\treturn &defaultSessionManager{\n\t\tlogger: logger,\n\t\ttasks: make(map[string]exec.Execer),\n\n\t\tquit: make(chan struct{}),\n\t}\n}", "func (queue *Queue) Init() (err error) {\n\tclient := queue.GetClient()\n\n\tparams := &sqs.CreateQueueInput{\n\t\tQueueName: aws.String(queue.Name + deadLetterQueueSuffix),\n\t\tAttributes: map[string]*string{\n\t\t\t\"MessageRetentionPeriod\": aws.String(\"1209600\"),\n\t\t},\n\t}\n\tresp, err := client.CreateQueue(params)\n\tif err != nil {\n\t\tlog.WithFields(log.Fields{\n\t\t\t\"queueName\": queue.Name,\n\t\t\t\"error\": err,\n\t\t}).Error(\"Createing the dead letter queue\")\n\t\treturn\n\t}\n\n\tqueue.DeadLetterQueueURL = *resp.QueueUrl\n\tlog.WithFields(log.Fields{\n\t\t\"QueueUrl\": queue.DeadLetterQueueURL,\n\t}).Info(\"Dead Letter Queue initialized\")\n\n\tqueueArnAttributeName := \"QueueArn\"\n\tdeadLetterQueueAttributes, err := queue.GetAttributesByQueueURL(queue.DeadLetterQueueURL, 
[]*string{&queueArnAttributeName})\n\tif err != nil {\n\t\treturn\n\t}\n\tredrivePolicy := &RedrivePolicy{\n\t\tMaxReceiveCount: MaxReceiveCountBeforeDead,\n\t\tDeadLetterTargetArn: *deadLetterQueueAttributes.Attributes[queueArnAttributeName],\n\t}\n\tredrivePolicyString, err := redrivePolicy.GetAsAWSString()\n\tif err != nil {\n\t\treturn\n\t}\n\tparams = &sqs.CreateQueueInput{\n\t\tQueueName: aws.String(queue.Name),\n\t\tAttributes: map[string]*string{\n\t\t\t\"RedrivePolicy\": redrivePolicyString,\n\t\t\t\"MessageRetentionPeriod\": aws.String(\"1209600\"),\n\t\t},\n\t}\n\tresp, err = client.CreateQueue(params)\n\tif err != nil {\n\t\tlog.WithFields(log.Fields{\n\t\t\t\"queueName\": queue.Name,\n\t\t\t\"error\": err,\n\t\t}).Error(\"Createing the queue\")\n\t\treturn\n\t}\n\n\tqueue.URL = *resp.QueueUrl\n\tlog.WithFields(log.Fields{\n\t\t\"QueueUrl\": queue.URL,\n\t}).Info(\"Queue initialized\")\n\n\treturn\n}", "func New() *Queue {\n\treturn &Queue{nil, nil, 0}\n}", "func New() *Queue {\n\treturn &Queue{nil, nil, 0}\n}", "func MyQueueConstructor() MyQueue {\n\treturn MyQueue{}\n}", "func New() *JobManager {\n\tjm := JobManager{\n\t\theartbeatInterval: DefaultHeartbeatInterval,\n\t\tjobs: map[string]*JobMeta{},\n\t\ttasks: map[string]*TaskMeta{},\n\t}\n\tjm.schedulerWorker = async.NewInterval(jm.runDueJobs, DefaultHeartbeatInterval)\n\tjm.killHangingTasksWorker = async.NewInterval(jm.killHangingTasks, DefaultHeartbeatInterval)\n\treturn &jm\n}", "func WithDefaultMaxTries(n int) Option {\n\treturn func(q *Queue) {\n\t\tq.maxTries = n\n\t}\n}", "func NewDefaultRestrictionManager(maxValueLength int) *DefaultRestrictionManager {\n\tif maxValueLength == 0 {\n\t\tmaxValueLength = defaultMaxValueLength\n\t}\n\treturn &DefaultRestrictionManager{\n\t\tdefaultRestriction: &Restriction{keyAllowed: true, maxValueLength: maxValueLength},\n\t}\n}", "func (manager *Manager) SetDefaults() {\n\tmanager.viperConfig.SetDefault(workspace, \"No 
name\")\n\n\tmanager.viperConfig.SetDefault(tcpAddress, \"localhost:8888\")\n\tmanager.viperConfig.SetDefault(tcpConnectionType, \"tcp\")\n\n\tmanager.viperConfig.SetDefault(httpAddress, \":8080\")\n\n\tmanager.viperConfig.SetDefault(shutdownTimeout, 15*time.Second)\n\tmanager.viperConfig.SetDefault(readTimeout, 10*time.Second)\n\tmanager.viperConfig.SetDefault(writeTimeout, 10*time.Second)\n\n\tmanager.viperConfig.SetDefault(websocketReadBufferSize, 1024)\n\tmanager.viperConfig.SetDefault(websocketWriteBufferSize, 1024)\n\tmanager.viperConfig.SetDefault(websocketMaxMessageSize, 512)\n\tmanager.viperConfig.SetDefault(websocketWriteWait, 10*time.Second)\n\tmanager.viperConfig.SetDefault(websocketPongWait, 60*time.Second)\n\tmanager.viperConfig.SetDefault(websocketPingPeriod, 60*0.9*time.Second)\n\n\tmanager.viperConfig.SetDefault(httpTimeout, 1*time.Second)\n\n\tmanager.viperConfig.SetDefault(logLevel, \"debug\")\n}", "func NewQueue() Queue {\n\treturn Queue{}\n}", "func NewQueue(name string) *Queue {\n\tredisClient := GetRedisClientFromConfig()\n\tqueue := &Queue{Name: name, RedisClient: redisClient}\n\treturn queue\n}", "func NewQueue(name string, itemType reflect.Type, maxQueueSize uint32) Queue {\n\tq := queue{\n\t\tname: name,\n\t\titemType: itemType,\n\t\tchannel: make(chan interface{}, maxQueueSize),\n\t}\n\treturn &q\n}", "func setupManager(username string, password string, brokerIp string, brokerPort int, manager *Manager, exchange string, queueName string) error {\n\tamqpURI := getAmqpUri(username, password, brokerIp, brokerPort)\n\tmanager.logger.Debugf(\"dialing %s\", amqpURI)\n\tvar err error\n\tmanager.Connection, err = amqp.Dial(amqpURI)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tmanager.logger.Debugf(\"got Connection, getting Channel\")\n\tmanager.Channel, err = manager.Connection.Channel()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tmanager.logger.Debugf(\"got Channel, declaring Exchange (%q)\", exchange)\n\n\tmanager.logger.Debugf(\"declared 
Exchange, declaring Queue %q\", queueName)\n\tqueue, err := manager.Channel.QueueDeclare(\n\t\tqueueName,\n\t\ttrue,\n\t\ttrue,\n\t\tfalse,\n\t\tfalse,\n\t\tnil,\n\t)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tmanager.logger.Debugf(\"declared Queue (%q, %d messages, %d consumers), binding to Exchange\",\n\t\tqueue.Name, queue.Messages, queue.Consumers)\n\n\tif err = manager.Channel.QueueBind(\n\t\tqueue.Name, // name of the queue\n\t\tqueue.Name, // bindingKey\n\t\texchange, // sourceExchange\n\t\tfalse, // noWait\n\t\tnil, // arguments\n\t); err != nil {\n\t\treturn err\n\t}\n\n\tmanager.logger.Debug(\"Queue bound to Exchange, starting Consume\")\n\treturn nil\n}", "func NewQueue(maxWorkers int, maxQueue int) *Queue {\n\tq := make(chan Job, maxQueue)\n\treturn &Queue{\n\t\tq,\n\t\ttrue,\n\t\t&Dispatcher{\n\t\t\tjobQueue: q,\n\t\t\tworkerPool: make(chan chan Job, maxWorkers),\n\t\t\tMaxWorkers: maxWorkers,\n\t\t},\n\t}\n}", "func LocalQueueFactory(size int) Option {\n\treturn func(env Environment) {\n\t\tif size < 5 {\n\t\t\tsize = 5\n\t\t}\n\t\tQueueFactory(makeLocalEventQueueFactory(size))\n\t}\n}", "func NewFakeQueueDispatcher() (dispatcher *FakeQueueDispatcher) {\n\tdispatcher = &FakeQueueDispatcher{}\n\tdispatcher.Messages = make([]interface{}, 0)\n\treturn\n}", "func setupDefaults() {\n\tclient.DefaultClient = grpcCli.NewClient()\n\tserver.DefaultServer = grpcSvr.NewServer()\n\tnetwork.DefaultNetwork = mucpNet.NewNetwork()\n\tmetrics.DefaultMetricsReporter = noopMet.New()\n\n\t// setup rpc implementations after the client is configured\n\tauth.DefaultAuth = authSrv.NewAuth()\n\tbroker.DefaultBroker = brokerSrv.NewBroker()\n\tevents.DefaultStream = eventsSrv.NewStream()\n\tevents.DefaultStore = eventsSrv.NewStore()\n\tregistry.DefaultRegistry = registrySrv.NewRegistry()\n\trouter.DefaultRouter = routerSrv.NewRouter()\n\tstore.DefaultStore = storeSrv.NewStore()\n\tstore.DefaultBlobStore = storeSrv.NewBlobStore()\n\truntime.DefaultRuntime = 
runtimeSrv.NewRuntime()\n}", "func SetDefault(jm *JobManager) {\n\t_defaultLock.Lock()\n\t_default = jm\n\t_defaultLock.Unlock()\n}", "func NewDefaultPriorityQueue() *PriorityQueue {\n\treturn NewPriorityQueue(func(interface{}) bool { return false })\n}", "func NewManager(h Handler,\n\tusername string,\n\tpassword string,\n\tbrokerIp string,\n\tbrokerPort int,\n\texchange string,\n\tqueueName string,\n\tworkers int,\n\tallocate bool,\n\tmanagerName string,\n\thandleFunction handlerFunction,\n\tlogLevel string,\n\tnet catalogue.BaseNetworkInt,\n\timg catalogue.BaseImageInt) (*Manager, error) {\n\n\tmanager := &Manager{\n\t\tConnection: nil,\n\t\tChannel: nil,\n\t\tallocate: allocate,\n\t\tworkers: workers,\n\t\terrorChan: make(chan error),\n\t\tlogger: GetLogger(managerName, logLevel),\n\t\thandlerFunction: handleFunction,\n\t\thandler: h,\n\t\timage: img,\n\t\tnetwork: net,\n\t}\n\n\terr := setupManager(username, password, brokerIp, brokerPort, manager, exchange, queueName)\n\tif err != nil {\n\t\tmanager.logger.Errorf(\"Error while setup the amqp thing: %v\", err)\n\t\treturn nil, err\n\t}\n\tmanager.queueName = queueName\n\treturn manager, nil\n}", "func NewQueue(ctx context.Context, queueID string, db *sql.DB, conf QueueConfig) (*Queue, error) {\n\tq := &Queue{ID: queueID}\n\tq.repo = repo.NewRepository(db)\n\tq.PollRate = 100 * time.Millisecond // Default\n\tq.queueSize = 10000 // Default\n\tq.retries = 3 // Default\n\tq.IsMultiQueue = conf.IsMultiQueue\n\tq.baseDelay = 3 * time.Second // Default\n\n\tif conf.PollingRate > 0 {\n\t\tq.PollRate = conf.PollingRate\n\t}\n\tif conf.Qsize > 0 {\n\t\tq.queueSize = conf.Qsize\n\t}\n\tif conf.BaseDelay > 0 {\n\t\tq.baseDelay = conf.BaseDelay\n\t}\n\tif conf.Retries >= 0 {\n\t\tq.retries = conf.Retries\n\t}\n\t// Multilevel Queue/channel created\n\ttemp := mlQueue{}\n\ttemp.notifier = make([]chan JobChan, 1)\n\ttemp.notifier[0] = make(chan JobChan, q.queueSize)\n\ttemp.total = 1\n\tq.mq = temp\n\n\tm := 
make(map[string][]worker.Worker)\n\tq.workers = m\n\tvar wg sync.WaitGroup\n\tq.wg = &wg\n\n\t// resume stopped jobs\n\terr := q.ResumePendingJobs(ctx)\n\tif err != nil {\n\t\tlogger.Log.Error(\"Unable to resume jobs from bucket: %s\", zap.Error(err))\n\t\t// Don't fail out, this isn't really fatal. But maybe it should be?\n\t}\n\treturn q, nil\n}", "func New(name string, c config.Config) *Queue {\n\treturn &Queue{\n\t\tname: name,\n\t\tconf: c,\n\t}\n}", "func New() Queue {\n\treturn Queue{list: linkedlist.New()}\n}", "func NewDefaultManager(\n\tdemands *cache.SafeDemandCache,\n\tbinpacker *binpacker.Binpacker,\n\tinstanceGroupLabel string) Manager {\n\treturn &defaultManager{\n\t\tdemands: demands,\n\t\tbinpacker: binpacker,\n\t\tinstanceGroupLabel: instanceGroupLabel,\n\t}\n}", "func NewQueue(action WorkAction, options ...QueueOption) *Queue {\n\tq := Queue{\n\t\tLatch: NewLatch(),\n\t\tAction: action,\n\t\tContext: context.Background(),\n\t\tMaxWork: DefaultQueueMaxWork,\n\t\tParallelism: runtime.NumCPU(),\n\t}\n\tfor _, option := range options {\n\t\toption(&q)\n\t}\n\treturn &q\n}", "func NewDefaults() map[string]interface{} {\n\tdefaults := make(map[string]interface{})\n\n\tdefaults[authPostgresURI] = \"postgresql://postgres:postgres@localhost:5432/test?sslmode=disable\"\n\tdefaults[authMigrationVersion] = 0\n\n\tdefaults[gatewayAddr] = \":10000\"\n\tdefaults[gatewayEndpoint] = \"/graphql\"\n\tdefaults[gatewayServePlayground] = true\n\tdefaults[gatewayPlaygroundEndpoint] = \"/playground\"\n\tdefaults[gatewayEnableIntrospection] = true\n\n\tdefaults[seedUserLogin] = \"root\"\n\tdefaults[seedUserPassword] = \"root\"\n\tdefaults[seedRoleTitle] = \"ROOT\"\n\tdefaults[seedRoleSuper] = true\n\n\tdefaults[sessionAccessTokenTTL] = 1000000\n\tdefaults[sessionRefreshTokenTTl] = 5000000\n\n\treturn defaults\n}", "func New(opt *Options) *Queue {\n\tif client == nil {\n\t\tredisOpt := &redis.Options{\n\t\t\tAddr: opt.Connection.Addr,\n\t\t\tPassword: 
opt.Connection.Password,\n\t\t\tDB: opt.Connection.DB,\n\t\t\tMaxRetries: opt.Connection.MaxRetries,\n\t\t\tDialTimeout: opt.Connection.DialTimeout,\n\t\t\tReadTimeout: opt.Connection.ReadTimeout,\n\t\t\tWriteTimeout: opt.Connection.WriteTimeout,\n\t\t\tPoolSize: opt.Connection.PoolSize,\n\t\t\tPoolTimeout: opt.Connection.PoolTimeout,\n\t\t\tIdleTimeout: opt.Connection.IdleTimeout,\n\t\t}\n\t\tclient = redis.NewClient(redisOpt)\n\t}\n\n\treturn &Queue{\n\t\tjobChannel: make(chan string, 1000),\n\t\tconcurrency: opt.Concurrency,\n\t\tqueueName: opt.QueueName,\n\t\tprocessor: opt.Processor,\n\t\terrorHandler: opt.ErrorHandler,\n\t}\n}", "func (o *SendJobCommandParams) SetDefaults() {\n\t// no default values defined for this parameter\n}", "func New() *Queue {\n\tq := new(Queue)\n\tq.length = 0\n\tq.s1 = stack.New()\n\tq.s2 = stack.New()\n\n\treturn q\n}", "func NewQueue() *Queue {\n\treturn &Queue{nil, nil, 0}\n}", "func defaultConsumerOptions() *consumerOptions {\n\treturn &consumerOptions{\n\t\tqueueDepth: 10000,\n\t\tconcurrency: 10,\n\t\tStats: &NilConsumerStatsCollector{},\n\t}\n}", "func NewQueue(action func(interface{}) error) *QueueWorker {\n\treturn &QueueWorker{\n\t\taction: action,\n\t\tlatch: &Latch{},\n\t\tmaxWork: DefaultQueueWorkerMaxWork,\n\t}\n}", "func (o *GetGPUArchitectureParams) SetDefaults() {\n\tvar (\n\t\tallowUnstableDefault = bool(false)\n\t)\n\n\tval := GetGPUArchitectureParams{\n\t\tAllowUnstable: &allowUnstableDefault,\n\t}\n\n\tval.timeout = o.timeout\n\tval.Context = o.Context\n\tval.HTTPClient = o.HTTPClient\n\t*o = val\n}", "func NewQueue(l int) *Queue {\n\tif l == -1 {\n\t\treturn &Queue{\n\t\t\tQueue: make([]types.Event, 0),\n\t\t\tL: int(^uint(0) >> 1), // max integer value, architecture independent\n\t\t}\n\t}\n\tq := &Queue{\n\t\tQueue: make([]types.Event, 0, l),\n\t\tL: l,\n\t}\n\tlog.WithFields(log.Fields{\"Capacity\": q.L}).Debugf(\"Creating queue\")\n\treturn q\n}", "func NewDefaults() *Client {\n\treturn 
&Client{\n\t\tsigkil: make(chan os.Signal, 1),\n\t\tsighup: make(chan os.Signal, 1),\n\t\tmenu: make(map[string]ui.MenuItem),\n\t\tplex: &logs.Timer{},\n\t\talert: &logs.Cooler{},\n\t\tLogger: logs.New(),\n\t\tConfig: &configfile.Config{\n\t\t\tApps: &apps.Apps{\n\t\t\t\tURLBase: \"/\",\n\t\t\t},\n\t\t\tServices: &services.Config{\n\t\t\t\tInterval: cnfg.Duration{Duration: services.DefaultSendInterval},\n\t\t\t\tParallel: 1,\n\t\t\t},\n\t\t\tBindAddr: configfile.DefaultBindAddr,\n\t\t\tSnapshot: &snapshot.Config{\n\t\t\t\tTimeout: cnfg.Duration{Duration: snapshot.DefaultTimeout},\n\t\t\t},\n\t\t\tLogs: &logs.Logs{\n\t\t\t\tLogFiles: DefaultLogFiles,\n\t\t\t\tLogFileMb: DefaultLogFileMb,\n\t\t\t},\n\t\t\tTimeout: cnfg.Duration{Duration: configfile.DefaultTimeout},\n\t\t}, Flags: &Flags{\n\t\t\tFlagSet: flag.NewFlagSet(DefaultName, flag.ExitOnError),\n\t\t\tConfigFile: os.Getenv(DefaultEnvPrefix + \"_CONFIG_FILE\"),\n\t\t\tEnvPrefix: DefaultEnvPrefix,\n\t\t},\n\t}\n}", "func New(hint int) *Queue {\n\treturn &Queue{\n\t\titems: make([]interface{}, 0, hint),\n\t}\n}", "func initMailQueueProducer() (err error) {\n\tnsqCfg := nsq.NewConfig()\n\tnsqCfg.UserAgent = \"tmail.queue\"\n\tNsqQueueProducer, err = nsq.NewProducer(\"127.0.0.1:4150\", nsqCfg)\n\tif Cfg.GetDebugEnabled() {\n\t\tNsqQueueProducer.SetLogger(Log, 0)\n\t} else {\n\t\tNsqQueueProducer.SetLogger(Log, 4)\n\t}\n\treturn err\n}", "func New(name string) (*Queue, error) {\n\tqueue := Queue{Name: name}\n\terr := queue.Init()\n\n\treturn &queue, err\n}", "func New(maxSize int, dropBehavior DropBehavior) *Queue {\n\treturn &Queue{\n\t\tmaxSize: maxSize,\n\t\tdropBehavior: dropBehavior,\n\t}\n}", "func New(delegate Delegate, settings Settings) (*Queue, error) {\n\tconst op = \"pq/new\"\n\n\tif delegate == nil {\n\t\treturn nil, errOp(op).of(InvalidParam).report(\"delegate must not be nil\")\n\t}\n\n\taccessor, errKind := makeAccess(delegate)\n\tif errKind != NoError {\n\t\treturn nil, 
errOp(op).of(errKind)\n\t}\n\n\tpageSize := delegate.PageSize()\n\n\tq := &Queue{\n\t\taccessor: accessor,\n\t\tsettings: settings,\n\t\tpagePool: newPagePool(pageSize),\n\t}\n\n\t// use pointer address as ID for correlating error messages\n\tq.id = queueID(uintptr(unsafe.Pointer(q)))\n\taccessor.quID = q.id\n\n\trootBuf, err := q.accessor.ReadRoot()\n\tif err != nil {\n\t\treturn nil, wrapErr(op, err).of(InitFailed).\n\t\t\treport(\"failed to read queue header\")\n\t}\n\n\troot := castQueueRootPage(rootBuf[:])\n\tif root.version.Get() != queueVersion {\n\t\tcause := &Error{\n\t\t\tkind: InitFailed,\n\t\t\tmsg: fmt.Sprintf(\"queue version %v\", root.version.Get()),\n\t\t}\n\t\treturn nil, wrapErr(op, cause).of(InitFailed)\n\t}\n\n\ttracef(\"open queue: %p (pageSize: %v)\\n\", q, pageSize)\n\ttraceQueueHeader(root)\n\n\tq.version = root.version.Get()\n\tq.hdrOffset = q.accessor.RootFileOffset()\n\tq.onInit()\n\treturn q, nil\n}", "func NewPrinterDefaults()(*PrinterDefaults) {\n m := &PrinterDefaults{\n }\n m.backingStore = ie8677ce2c7e1b4c22e9c3827ecd078d41185424dd9eeb92b7d971ed2d49a392e.BackingStoreFactoryInstance();\n m.SetAdditionalData(make(map[string]any))\n return m\n}", "func (r *yandexMessageQueueReconciler) SetupWithManager(mgr ctrl.Manager) error {\n\treturn ctrl.NewControllerManagedBy(mgr).\n\t\tFor(&connectorsv1.YandexMessageQueue{}).\n\t\tComplete(r)\n}", "func (o *RTRCheckAdminCommandStatusParams) SetDefaults() {\n\tvar (\n\t\tsequenceIDDefault = int64(0)\n\t)\n\n\tval := RTRCheckAdminCommandStatusParams{\n\t\tSequenceID: sequenceIDDefault,\n\t}\n\n\tval.timeout = o.timeout\n\tval.Context = o.Context\n\tval.HTTPClient = o.HTTPClient\n\t*o = val\n}", "func New(cfg Config, pubSub pubSub, metrics metricsProvider) (*Queue, error) {\n\tmsgChan, err := pubSub.SubscribeWithOpts(context.Background(), topic, spi.WithPool(cfg.PoolSize))\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"subscribe to topic [%s]: %w\", topic, err)\n\t}\n\n\tq := &Queue{\n\t\tpubSub: 
pubSub,\n\t\tmsgChan: msgChan,\n\t\tjsonMarshal: json.Marshal,\n\t\tjsonUnmarshal: json.Unmarshal,\n\t\tmetrics: metrics,\n\t}\n\n\tq.Lifecycle = lifecycle.New(\"operation-queue\",\n\t\tlifecycle.WithStart(q.start),\n\t\tlifecycle.WithStop(q.stop),\n\t)\n\n\tq.Start()\n\n\treturn q, nil\n}", "func NewQueue() Queue {\r\n\tvar empty []int\r\n\treturn Queue{empty, len(empty)}\r\n}", "func New() *Queue {\n\titems := []*item.Item{}\n\tlock := &sync.Mutex{}\n\treturn &Queue{items, lock}\n}", "func NewQueue(\n\tservers []string,\n\topts QueueOptions,\n) (Queue, error) {\n\tq, err := newQueue(servers, opts)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tq.initConnections(servers)\n\tgo q.reportMetrics()\n\n\treturn q, nil\n}", "func NewQueue(storage Storage, reQueueTimeout time.Duration) Queue {\n\tif reQueueTimeout < 1 {\n\t\treQueueTimeout = time.Minute * 30\n\t}\n\n\tname := \"gocelery\"\n\tq := &queue{\n\t\tstorage: storage,\n\t\thead: 0,\n\t\ttail: 0,\n\t\trequeueTimeout: reQueueTimeout,\n\t\tqueuePrefix: fmt.Sprintf(\"%s-queue-\", name),\n\t\tqueueAckPrefix: fmt.Sprintf(\"%s-ack-\", name),\n\t}\n\n\t// restore the old state from the DB\n\tq.loadHeadTail()\n\treturn q\n}", "func (s *Store) CreateQueue(name string, overriddenSettings ...QueueSetting) (QueueMeta, QueueSettings, error) {\n\tif !isValidQueueName(name) {\n\t\treturn QueueMeta{}, QueueSettings{}, ErrInvalidQueueName\n\t}\n\n\tmeta := QueueMeta{Name: name, Created: time.Now()}\n\tsettings := defaultQueueSettings()\n\n\tfor _, setting := range overriddenSettings {\n\t\tif err := setting(&settings); err != nil {\n\t\t\treturn QueueMeta{}, QueueSettings{}, err\n\t\t}\n\t}\n\n\treturn meta, settings, s.db.Update(func(tx *bolt.Tx) error {\n\t\tqueues := tx.Bucket([]byte(\"Queues\"))\n\n\t\tbucket, err := queues.CreateBucket([]byte(name))\n\t\tif err != nil {\n\t\t\tif err == bolt.ErrBucketExists {\n\t\t\t\treturn ErrQueueExists\n\t\t\t}\n\t\t\treturn err\n\t\t}\n\n\t\t// Meta\n\n\t\tmetaBucket, err := 
bucket.CreateBucketIfNotExists([]byte(\"Meta\"))\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tif err = metaBucket.Put([]byte(\"Name\"), []byte(name)); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tif err = metaBucket.Put([]byte(\"Created\"), encodeTime(meta.Created)); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\t// Settings\n\n\t\tsettingsBucket, err := bucket.CreateBucketIfNotExists([]byte(\"Settings\"))\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tif err = settingsBucket.Put([]byte(\"LeaseDuration\"), encodeInt(settings.LeaseDuration)); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif err = settingsBucket.Put([]byte(\"MessageRetentionPeriod\"), encodeInt(settings.MessageRetentionPeriod)); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif err = settingsBucket.Put([]byte(\"DelaySeconds\"), encodeInt(settings.DelaySeconds)); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\t// Message Buckets\n\n\t\tmessages, err := bucket.CreateBucketIfNotExists([]byte(\"Messages\"))\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tif _, err := messages.CreateBucketIfNotExists([]byte(\"Visible\")); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tif _, err := messages.CreateBucketIfNotExists([]byte(\"Leased\")); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tif _, err := messages.CreateBucketIfNotExists([]byte(\"Delayed\")); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\treturn nil\n\t})\n}", "func DefaultQueueKeysFunc(_ runtime.Object) []string {\n\treturn []string{DefaultQueueKey}\n}", "func New(cfg config.Queue, n notifier) *Queue {\n\tq := &Queue{\n\t\taddCh: make(chan struct{}, cfg.QueueSize),\n\t\tpopCh: make(chan struct{}, cfg.GoRoutinesSize),\n\t\taddMessage: make(chan entity.NotifierMessage, 1),\n\t\tpopMessage: make(chan entity.NotifierMessage, 1),\n\t\tnotifier: n,\n\t}\n\n\tgo q.pop()\n\tgo q.add()\n\n\treturn q\n}", "func New(cb Done, transport http.RoundTripper) *Manager {\n\treturn &Manager{\n\t\tkeys: sets.NewString(),\n\t\tcb: cb,\n\t\ttransport: 
transport,\n\t}\n}", "func newDefaultContainerConfig() ContainerConfig {\n\treturn ContainerConfig{\n\t\tCPU: newMinMaxAllocation(),\n\t\tMemory: newMinMaxAllocation(),\n\t\tBlockRead: newMinMaxAllocation(),\n\t\tBlockWrite: newMinMaxAllocation(),\n\t\tNetworkRx: newMinMaxAllocation(),\n\t\tNetworkTx: newMinMaxAllocation(),\n\t}\n}", "func NewQueue(ctx *pulumi.Context,\n\tname string, args *QueueArgs, opts ...pulumi.ResourceOption) (*Queue, error) {\n\tif args == nil {\n\t\treturn nil, errors.New(\"missing one or more required arguments\")\n\t}\n\n\tif args.HoursOfOperationArn == nil {\n\t\treturn nil, errors.New(\"invalid value for required argument 'HoursOfOperationArn'\")\n\t}\n\tif args.InstanceArn == nil {\n\t\treturn nil, errors.New(\"invalid value for required argument 'InstanceArn'\")\n\t}\n\topts = internal.PkgResourceDefaultOpts(opts)\n\tvar resource Queue\n\terr := ctx.RegisterResource(\"aws-native:connect:Queue\", name, args, &resource, opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &resource, nil\n}", "func (o *ListHetznerSizesParams) SetDefaults() {\n\t// no default values defined for this parameter\n}", "func (t *OpenconfigQos_Qos_Queues) NewQueue(Name string) (*OpenconfigQos_Qos_Queues_Queue, error){\n\n\t// Initialise the list within the receiver struct if it has not already been\n\t// created.\n\tif t.Queue == nil {\n\t\tt.Queue = make(map[string]*OpenconfigQos_Qos_Queues_Queue)\n\t}\n\n\tkey := Name\n\n\t// Ensure that this key has not already been used in the\n\t// list. 
Keyed YANG lists do not allow duplicate keys to\n\t// be created.\n\tif _, ok := t.Queue[key]; ok {\n\t\treturn nil, fmt.Errorf(\"duplicate key %v for list Queue\", key)\n\t}\n\n\tt.Queue[key] = &OpenconfigQos_Qos_Queues_Queue{\n\t\tName: &Name,\n\t}\n\n\treturn t.Queue[key], nil\n}", "func GetDefaultManager() *Manager {\n\treturn defaultManager\n}", "func (o *GetFqdnCacheParams) SetDefaults() {\n\t// no default values defined for this parameter\n}", "func NewAPIRequestManager() *APIRequestManager {\n\treturn &APIRequestManager{\n\t\tqueue: make(chan *WorkerItem, 10),\n\t}\n}", "func New() *Queue {\r\n\treturn &Queue{\r\n\t\tdata: []int{},\r\n\t}\r\n}", "func setDefault(c *Config) {\n\tc.Token = \"\"\n\tc.GuildID = \"\"\n}", "func New(ctx context.Context, cfg models.Config) (*Queue, error) {\n\tconn, err := connect(ctx, cfg)\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"failed to connect to RabbitMQ \")\n\t}\n\n\tch, err := conn.Channel()\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"failed to open a channel \")\n\t}\n\n\t_, err = ch.QueueDeclare(\"ItemQueue\", false, false, false, false, nil)\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"failed to declare a queue \")\n\t}\n\n\treturn &Queue{ch, conn}, nil\n}", "func (c *InitManagementClusterInput) Defaults(ctx context.Context) {\n\tc.Config.Defaults()\n\tif c.Scheme == nil {\n\t\tc.Scheme = runtime.NewScheme()\n\t}\n\tif c.NewManagementClusterFn == nil {\n\t\tc.NewManagementClusterFn = func() (ManagementCluster, error) {\n\t\t\treturn kind.NewCluster(ctx, c.ManagementClusterName, c.Scheme)\n\t\t}\n\t}\n}", "func (c *InitManagementClusterInput) Defaults(ctx context.Context) {\n\tc.Config.Defaults()\n\tif c.Scheme == nil {\n\t\tc.Scheme = runtime.NewScheme()\n\t}\n\tif c.NewManagementClusterFn == nil {\n\t\tc.NewManagementClusterFn = func() (ManagementCluster, error) {\n\t\t\treturn kind.NewCluster(ctx, c.ManagementClusterName, c.Scheme)\n\t\t}\n\t}\n}", "func NewSmsTrackingWithDefaults() 
*SmsTracking {\n\tthis := SmsTracking{}\n\treturn &this\n}", "func (cc *ConstructionCreate) defaults() {\n\tif _, ok := cc.mutation.RawProduction(); !ok {\n\t\tv := construction.DefaultRawProduction\n\t\tcc.mutation.SetRawProduction(v)\n\t}\n\tif _, ok := cc.mutation.Production(); !ok {\n\t\tv := construction.DefaultProduction\n\t\tcc.mutation.SetProduction(v)\n\t}\n\tif _, ok := cc.mutation.GetType(); !ok {\n\t\tv := construction.DefaultType\n\t\tcc.mutation.SetType(v)\n\t}\n\tif _, ok := cc.mutation.Level(); !ok {\n\t\tv := construction.DefaultLevel\n\t\tcc.mutation.SetLevel(v)\n\t}\n\tif _, ok := cc.mutation.Modifier(); !ok {\n\t\tv := construction.DefaultModifier\n\t\tcc.mutation.SetModifier(v)\n\t}\n\tif _, ok := cc.mutation.LastUpdated(); !ok {\n\t\tv := construction.DefaultLastUpdated()\n\t\tcc.mutation.SetLastUpdated(v)\n\t}\n\tif _, ok := cc.mutation.NeedRefresh(); !ok {\n\t\tv := construction.DefaultNeedRefresh\n\t\tcc.mutation.SetNeedRefresh(v)\n\t}\n}", "func (t *OpenconfigQos_Qos_Interfaces_Interface_Input_Queues) NewQueue(Name string) (*OpenconfigQos_Qos_Interfaces_Interface_Input_Queues_Queue, error){\n\n\t// Initialise the list within the receiver struct if it has not already been\n\t// created.\n\tif t.Queue == nil {\n\t\tt.Queue = make(map[string]*OpenconfigQos_Qos_Interfaces_Interface_Input_Queues_Queue)\n\t}\n\n\tkey := Name\n\n\t// Ensure that this key has not already been used in the\n\t// list. 
Keyed YANG lists do not allow duplicate keys to\n\t// be created.\n\tif _, ok := t.Queue[key]; ok {\n\t\treturn nil, fmt.Errorf(\"duplicate key %v for list Queue\", key)\n\t}\n\n\tt.Queue[key] = &OpenconfigQos_Qos_Interfaces_Interface_Input_Queues_Queue{\n\t\tName: &Name,\n\t}\n\n\treturn t.Queue[key], nil\n}", "func (o *StorageServiceMetricsHintsInProgressGetParams) SetDefaults() {\n\t// no default values defined for this parameter\n}", "func NewQueue(args []func(http.ResponseWriter, *http.Request) (http.ResponseWriter, *http.Request)) *Queue {\n\tq := &Queue{}\n\tfor _, f := range args {\n\t\tq.list = append(q.list, f)\n\t}\n\treturn q\n}", "func Constructor() MyQueue {\n\treturn Myqueue{list: listNew()}\n}", "func NewQueue() *Queue {\n\treturn &Queue{}\n}", "func NewQueue() *Queue {\n\treturn &Queue{}\n}", "func (o *GetBundleByKeyParams) SetDefaults() {\n\tvar (\n\t\tauditDefault = string(\"NONE\")\n\n\t\tincludedDeletedDefault = bool(false)\n\t)\n\n\tval := GetBundleByKeyParams{\n\t\tAudit: &auditDefault,\n\t\tIncludedDeleted: &includedDeletedDefault,\n\t}\n\n\tval.timeout = o.timeout\n\tval.Context = o.Context\n\tval.HTTPClient = o.HTTPClient\n\t*o = val\n}", "func New(\n\tlogger *zap.SugaredLogger,\n\tflushFunc, closeFunc func(),\n\topts Options,\n) *Queue {\n\tif flushFunc == nil {\n\t\tflushFunc = func() {}\n\t}\n\tif closeFunc == nil {\n\t\tcloseFunc = func() {}\n\t}\n\tif opts.Rate == 0 {\n\t\topts.Rate = 5 * time.Second\n\t}\n\n\tvar counter = int32(0)\n\treturn &Queue{\n\t\tl: logger,\n\n\t\tcloseFunc: closeFunc,\n\t\tflushFunc: flushFunc,\n\n\t\tpendingC: make(chan func(), 3*opts.BatchSize),\n\t\tpending: &counter,\n\t\trate: opts.Rate,\n\t\tbatchSize: opts.BatchSize,\n\n\t\tstopC: make(chan bool, 1),\n\t\tstopped: false,\n\t}\n}", "func (o *QtreeCollectionGetParams) SetDefaults() {\n\tvar (\n\t\treturnRecordsDefault = bool(true)\n\n\t\treturnTimeoutDefault = int64(15)\n\t)\n\n\tval := QtreeCollectionGetParams{\n\t\tReturnRecords: 
&returnRecordsDefault,\n\t\tReturnTimeout: &returnTimeoutDefault,\n\t}\n\n\tval.timeout = o.timeout\n\tval.Context = o.Context\n\tval.HTTPClient = o.HTTPClient\n\t*o = val\n}", "func newQueue() *Queue {\n\tl := list.New()\n\treturn &Queue{Elements: l}\n}" ]
[ "0.6368561", "0.5929313", "0.5928396", "0.59103316", "0.5819523", "0.58130354", "0.5753316", "0.5739753", "0.568538", "0.5661701", "0.5606484", "0.5487868", "0.54643965", "0.5463153", "0.54630005", "0.5449085", "0.544472", "0.54301214", "0.540289", "0.5370558", "0.535047", "0.5341004", "0.5334277", "0.5295785", "0.52576846", "0.52551335", "0.52515256", "0.52515256", "0.52440447", "0.5240541", "0.52186775", "0.52153325", "0.5206354", "0.51808494", "0.5172658", "0.5166101", "0.5157915", "0.5146065", "0.5141454", "0.5139676", "0.5139047", "0.51379216", "0.51322454", "0.5129915", "0.5124572", "0.51156694", "0.5109505", "0.5105227", "0.5103686", "0.51033306", "0.5093908", "0.50899905", "0.5077126", "0.50655735", "0.5062785", "0.5044816", "0.50357693", "0.50288504", "0.5013021", "0.50087065", "0.50034255", "0.49959683", "0.49950668", "0.49800548", "0.4976274", "0.49653354", "0.49580646", "0.49579832", "0.49559307", "0.49503115", "0.49397472", "0.4929721", "0.49250063", "0.4920364", "0.49179143", "0.49174854", "0.49174672", "0.49137354", "0.4901583", "0.48928303", "0.48868117", "0.4878816", "0.48771858", "0.48752692", "0.48736435", "0.48721433", "0.48712355", "0.48712355", "0.48708367", "0.48632345", "0.4862766", "0.48562503", "0.4856162", "0.48510158", "0.4849206", "0.4849206", "0.48466817", "0.48451757", "0.48441622", "0.48435262" ]
0.7690877
0
GetClusters returns the Clusters field value
GetClusters возвращает значение поля Clusters
func (o *QueueManager) GetClusters() []string { if o == nil { var ret []string return ret } return o.Clusters }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (c *Config) GetClusters(ctx context.Context, quiet bool, filterMap map[string]string, clustersName ...string) (string, error) {\n\tc.Logger.Debugf(\"Sending parameters to server to get the clusters %q\", strings.Join(clustersName, \", \"))\n\n\tfilter := MapToSlice(filterMap)\n\n\treturn c.RunGRPCnRESTFunc(\"get\", true,\n\t\tfunc() (string, error) {\n\t\t\treturn c.getClustersGRPC(ctx, quiet, filter, clustersName...)\n\t\t},\n\t\tfunc() (string, error) {\n\t\t\treturn c.getClustersHTTP(quiet, filter, clustersName...)\n\t\t})\n}", "func (c *ClientImpl) GetClusters(ctx context.Context, hcpHostURL string) (models.ClusterResp, error) {\n\tspan, _ := opentracing.StartSpanFromContext(ctx, \"Get Clusters\")\n\tdefer span.Finish()\n\n\tsession, err := c.getSession(ctx, hcpHostURL, hcpUserName, hcpPassword)\n\tif err != nil {\n\t\treturn models.ClusterResp{}, err\n\t}\n\n\tstatus = Failure\n\tmonitor := metrics.StartExternalCall(externalSvcName, \"Fetch Clusters\")\n\tdefer func() { monitor.RecordWithStatus(status) }()\n\n\tresp, err := mlopsHttp.ExecuteHTTPRequest(\n\t\tctx,\n\t\tc.client,\n\t\thcpHostURL+clusterPathV2,\n\t\thttp.MethodGet,\n\t\tmap[string]string{sessionHeader: session},\n\t\tbytes.NewReader(nil),\n\t)\n\tif err != nil {\n\t\treturn models.ClusterResp{}, errors.Wrapf(err, \"while fetching clusters in MLOps controller platform.\")\n\t}\n\tresp.Body.Close()\n\n\tstatus = Success\n\n\terr = c.deleteSession(ctx, hcpHostURL, session)\n\tif err != nil {\n\t\treturn models.ClusterResp{}, err\n\t}\n\n\tclustersResp := models.ClusterResp{}\n\tjson.NewDecoder(resp.Body).Decode(&clustersResp)\n\n\treturn clustersResp, nil\n}", "func (a *Client) GetClusters(params *GetClustersParams, opts ...ClientOption) (*GetClustersOK, *GetClustersMultiStatus, error) {\n\t// TODO: Validate the params before sending\n\tif params == nil {\n\t\tparams = NewGetClustersParams()\n\t}\n\top := &runtime.ClientOperation{\n\t\tID: \"GetClusters\",\n\t\tMethod: 
\"GET\",\n\t\tPathPattern: \"/kubernetes-protection/entities/kubernetes/clusters/v1\",\n\t\tProducesMediaTypes: []string{\"application/json\"},\n\t\tConsumesMediaTypes: []string{\"application/json\", \"application/octet-stream\"},\n\t\tSchemes: []string{\"https\"},\n\t\tParams: params,\n\t\tReader: &GetClustersReader{formats: a.formats},\n\t\tContext: params.Context,\n\t\tClient: params.HTTPClient,\n\t}\n\tfor _, opt := range opts {\n\t\topt(op)\n\t}\n\n\tresult, err := a.transport.Submit(op)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\tswitch value := result.(type) {\n\tcase *GetClustersOK:\n\t\treturn value, nil, nil\n\tcase *GetClustersMultiStatus:\n\t\treturn nil, value, nil\n\t}\n\t// safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue\n\tmsg := fmt.Sprintf(\"unexpected success response for kubernetes_protection: API contract not enforced by server. Client expected to get an error, but got: %T\", result)\n\tpanic(msg)\n}", "func (s *RaftDatabase) Clusters() int {\n\treturn GetArg(s.name, \"clusters\").Int(s.clusters)\n}", "func (o AppProjectSpecSyncWindowsOutput) Clusters() pulumi.StringArrayOutput {\n\treturn o.ApplyT(func(v AppProjectSpecSyncWindows) []string { return v.Clusters }).(pulumi.StringArrayOutput)\n}", "func Clusters() (clusters map[string][]string) {\n\tclusters = make(map[string][]string)\n\tif addr := AccessConsulAddr(); addr != \"\" && Region() != \"\" {\n\t\treturn getClustersFromConsul(addr, Region())\n\t}\n\tcs := Get(\"Key-ClusterMgrCluster\").(map[string]string)\n\tfor key, value := range cs {\n\t\tclusters[key] = strings.Split(value, \" \")\n\t}\n\treturn\n}", "func (a ClustersAPI) Get(clusterID string) (httpmodels.GetResp, error) {\n\tvar clusterInfo httpmodels.GetResp\n\n\tdata := struct {\n\t\tClusterID string `json:\"cluster_id,omitempty\" url:\"cluster_id,omitempty\"`\n\t}{\n\t\tclusterID,\n\t}\n\tresp, err := a.Client.performQuery(http.MethodGet, 
\"/clusters/get\", data, nil)\n\tif err != nil {\n\t\treturn clusterInfo, err\n\t}\n\n\terr = json.Unmarshal(resp, &clusterInfo)\n\treturn clusterInfo, err\n}", "func (c *cloud) Clusters() (cloudprovider.Clusters, bool) {\n\treturn nil, false\n}", "func (e *ECS) ListClusters(req *ListClustersReq) (\n\t*ListClustersResp, error) {\n\tif req == nil {\n\t\treturn nil, fmt.Errorf(\"The req params cannot be nil\")\n\t}\n\n\tparams := makeParams(\"ListClusters\")\n\tif req.MaxResults > 0 {\n\t\tparams[\"maxResults\"] = strconv.Itoa(int(req.MaxResults))\n\t}\n\tif req.NextToken != \"\" {\n\t\tparams[\"nextToken\"] = req.NextToken\n\t}\n\n\tresp := new(ListClustersResp)\n\tif err := e.query(params, resp); err != nil {\n\t\treturn nil, err\n\t}\n\treturn resp, nil\n}", "func (h *httpCloud) Clusters() (cloudprovider.Clusters, bool) {\n\treturn nil, false\n}", "func (c *cloud) Clusters() (cloudprovider.Clusters, bool) {\n\tklog.V(4).Infof(\"Clusters called\")\n\treturn nil, false\n}", "func (a ClustersAPI) List() ([]httpmodels.GetResp, error) {\n\tvar clusterList = struct {\n\t\tClusters []httpmodels.GetResp `json:\"clusters,omitempty\" url:\"clusters,omitempty\"`\n\t}{}\n\n\tresp, err := a.Client.performQuery(http.MethodGet, \"/clusters/list\", nil, nil)\n\tif err != nil {\n\t\treturn clusterList.Clusters, err\n\t}\n\n\terr = json.Unmarshal(resp, &clusterList)\n\treturn clusterList.Clusters, err\n}", "func (ch *ClusterHandler) GetClusters() app.Adapter {\n\treturn func(h http.Handler) http.Handler {\n\t\treturn http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\t\tcontext := app.GetRequestContext(r)\n\n\t\t\tlogger := log.WithFields(log.Fields{\"package\": \"handlers\", \"event\": \"get_clusters\", \"request\": context.RequestId()})\n\n\t\t\tclusters, err := ch.service.GetClusters(context.RequestId())\n\t\t\tif err != nil {\n\t\t\t\tresponse := ErrorResponseAttributes{Title: \"get_clusters_error\", Detail: 
err.Error()}\n\t\t\t\tlogger.Error(err.Error())\n\t\t\t\trespondWithJson(w, newErrorResponse(&response, context.RequestId()), http.StatusInternalServerError)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\trespondWithJson(w, newClustersResponse(clusters, context.RequestId()), http.StatusOK)\n\t\t})\n\t}\n}", "func (c *Client) GetClusters(ctx context.Context) <-chan GetClusterResult {\n\t// TODO Make the concurrency configurable\n\tconcurrency := int(math.Min(5, float64(runtime.NumCPU())))\n\tresults := make(chan GetClusterResult, concurrency)\n\n\tclusterNames, err := c.GetClusterNames(ctx)\n\tif err != nil {\n\t\tclose(results)\n\t\treturn results\n\t}\n\n\tvar wg sync.WaitGroup\n\n\tgo func() {\n\t\tdefer close(results)\n\t\tfor _, clusterName := range clusterNames {\n\t\t\twg.Add(1)\n\t\t\tgo func(name string) {\n\t\t\t\tdefer wg.Done()\n\t\t\t\tcluster, err := c.GetCluster(ctx, name)\n\t\t\t\tresult := GetClusterResult{Cluster: cluster, Error: err}\n\t\t\t\tresults <- result\n\t\t\t}(clusterName)\n\t\t}\n\t\twg.Wait()\n\t}()\n\n\treturn results\n}", "func (cloud *Cloud) Clusters() (cloudprovider.Clusters, bool) {\n\treturn nil, false\n}", "func (o *QueueManager) GetClustersOk() (*[]string, bool) {\n\tif o == nil {\n\t\treturn nil, false\n\t}\n\treturn &o.Clusters, true\n}", "func (client OpenShiftManagedClustersClient) Get(ctx context.Context, resourceGroupName string, resourceName string) (result v20180930preview.OpenShiftManagedCluster, err error) {\n\treq, err := client.GetPreparer(ctx, resourceGroupName, resourceName)\n\tif err != nil {\n\t\terr = autorest.NewErrorWithError(err, \"containerservice.OpenShiftManagedClustersClient\", \"Get\", nil, \"Failure preparing request\")\n\t\treturn\n\t}\n\n\tresp, err := client.GetSender(req)\n\tif err != nil {\n\t\tresult.Response = autorest.Response{Response: resp}\n\t\terr = autorest.NewErrorWithError(err, \"containerservice.OpenShiftManagedClustersClient\", \"Get\", resp, \"Failure sending 
request\")\n\t\treturn\n\t}\n\n\tresult, err = client.GetResponder(resp)\n\tif err != nil {\n\t\terr = autorest.NewErrorWithError(err, \"containerservice.OpenShiftManagedClustersClient\", \"Get\", resp, \"Failure responding to request\")\n\t}\n\n\treturn\n}", "func FetchClusters(c *gin.Context) {\n\n\tbanzaiUtils.LogInfo(banzaiConstants.TagListClusters, \"Start listing clusters\")\n\n\tvar clusters []banzaiSimpleTypes.ClusterSimple\n\tvar response []*cloud.ClusterRepresentation\n\tdatabase.Find(&clusters)\n\n\tif len(clusters) <= 0 {\n\t\tbanzaiUtils.LogInfo(banzaiConstants.TagListClusters, \"No clusters found\")\n\t\tcloud.SetResponseBodyJson(c, http.StatusNotFound, gin.H{\n\t\t\tcloud.JsonKeyStatus: http.StatusNotFound,\n\t\t\tcloud.JsonKeyMessage: \"No clusters found!\",\n\t\t})\n\t\treturn\n\t}\n\n\tfor _, cl := range clusters {\n\t\tclust := cloud.GetClusterRepresentation(&cl)\n\t\tif clust != nil {\n\t\t\tbanzaiUtils.LogInfo(banzaiConstants.TagListClusters, fmt.Sprintf(\"Append %#v cluster representation to response\", clust))\n\t\t\tresponse = append(response, clust)\n\t\t}\n\n\t}\n\tcloud.SetResponseBodyJson(c, http.StatusOK, gin.H{\n\t\tcloud.JsonKeyStatus: http.StatusOK,\n\t\tcloud.JsonKeyData: response,\n\t})\n}", "func (a *DefaultApiService) ListClusters(ctx _context.Context, localVarOptionals *ListClustersOpts) (Clusters, *_nethttp.Response, error) {\n\tvar (\n\t\tlocalVarHTTPMethod = _nethttp.MethodGet\n\t\tlocalVarPostBody interface{}\n\t\tlocalVarFormFileName string\n\t\tlocalVarFileName string\n\t\tlocalVarFileBytes []byte\n\t\tlocalVarReturnValue Clusters\n\t)\n\n\t// create path and map variables\n\tlocalVarPath := a.client.cfg.BasePath + \"/clusters\"\n\tlocalVarHeaderParams := make(map[string]string)\n\tlocalVarQueryParams := _neturl.Values{}\n\tlocalVarFormParams := _neturl.Values{}\n\n\tif localVarOptionals != nil && localVarOptionals.Id.IsSet() {\n\t\tt:=localVarOptionals.Id.Value()\n\t\tif reflect.TypeOf(t).Kind() == reflect.Slice 
{\n\t\t\ts := reflect.ValueOf(t)\n\t\t\tfor i := 0; i < s.Len(); i++ {\n\t\t\t\tlocalVarQueryParams.Add(\"id[]\", parameterToString(s.Index(i), \"multi\"))\n\t\t\t}\n\t\t} else {\n\t\t\tlocalVarQueryParams.Add(\"id[]\", parameterToString(t, \"multi\"))\n\t\t}\n\t}\n\tif localVarOptionals != nil && localVarOptionals.NotId.IsSet() {\n\t\tt:=localVarOptionals.NotId.Value()\n\t\tif reflect.TypeOf(t).Kind() == reflect.Slice {\n\t\t\ts := reflect.ValueOf(t)\n\t\t\tfor i := 0; i < s.Len(); i++ {\n\t\t\t\tlocalVarQueryParams.Add(\"!id[]\", parameterToString(s.Index(i), \"multi\"))\n\t\t\t}\n\t\t} else {\n\t\t\tlocalVarQueryParams.Add(\"!id[]\", parameterToString(t, \"multi\"))\n\t\t}\n\t}\n\tif localVarOptionals != nil && localVarOptionals.StoryCountMin.IsSet() {\n\t\tlocalVarQueryParams.Add(\"story_count.min\", parameterToString(localVarOptionals.StoryCountMin.Value(), \"\"))\n\t}\n\tif localVarOptionals != nil && localVarOptionals.StoryCountMax.IsSet() {\n\t\tlocalVarQueryParams.Add(\"story_count.max\", parameterToString(localVarOptionals.StoryCountMax.Value(), \"\"))\n\t}\n\tif localVarOptionals != nil && localVarOptionals.TimeStart.IsSet() {\n\t\tlocalVarQueryParams.Add(\"time.start\", parameterToString(localVarOptionals.TimeStart.Value(), \"\"))\n\t}\n\tif localVarOptionals != nil && localVarOptionals.TimeEnd.IsSet() {\n\t\tlocalVarQueryParams.Add(\"time.end\", parameterToString(localVarOptionals.TimeEnd.Value(), \"\"))\n\t}\n\tif localVarOptionals != nil && localVarOptionals.EarliestStoryStart.IsSet() {\n\t\tlocalVarQueryParams.Add(\"earliest_story.start\", parameterToString(localVarOptionals.EarliestStoryStart.Value(), \"\"))\n\t}\n\tif localVarOptionals != nil && localVarOptionals.EarliestStoryEnd.IsSet() {\n\t\tlocalVarQueryParams.Add(\"earliest_story.end\", parameterToString(localVarOptionals.EarliestStoryEnd.Value(), \"\"))\n\t}\n\tif localVarOptionals != nil && localVarOptionals.LatestStoryStart.IsSet() {\n\t\tlocalVarQueryParams.Add(\"latest_story.start\", 
parameterToString(localVarOptionals.LatestStoryStart.Value(), \"\"))\n\t}\n\tif localVarOptionals != nil && localVarOptionals.LatestStoryEnd.IsSet() {\n\t\tlocalVarQueryParams.Add(\"latest_story.end\", parameterToString(localVarOptionals.LatestStoryEnd.Value(), \"\"))\n\t}\n\tif localVarOptionals != nil && localVarOptionals.LocationCountry.IsSet() {\n\t\tt:=localVarOptionals.LocationCountry.Value()\n\t\tif reflect.TypeOf(t).Kind() == reflect.Slice {\n\t\t\ts := reflect.ValueOf(t)\n\t\t\tfor i := 0; i < s.Len(); i++ {\n\t\t\t\tlocalVarQueryParams.Add(\"location.country\", parameterToString(s.Index(i), \"multi\"))\n\t\t\t}\n\t\t} else {\n\t\t\tlocalVarQueryParams.Add(\"location.country\", parameterToString(t, \"multi\"))\n\t\t}\n\t}\n\tif localVarOptionals != nil && localVarOptionals.NotLocationCountry.IsSet() {\n\t\tt:=localVarOptionals.NotLocationCountry.Value()\n\t\tif reflect.TypeOf(t).Kind() == reflect.Slice {\n\t\t\ts := reflect.ValueOf(t)\n\t\t\tfor i := 0; i < s.Len(); i++ {\n\t\t\t\tlocalVarQueryParams.Add(\"!location.country\", parameterToString(s.Index(i), \"multi\"))\n\t\t\t}\n\t\t} else {\n\t\t\tlocalVarQueryParams.Add(\"!location.country\", parameterToString(t, \"multi\"))\n\t\t}\n\t}\n\tif localVarOptionals != nil && localVarOptionals.Return_.IsSet() {\n\t\tt:=localVarOptionals.Return_.Value()\n\t\tif reflect.TypeOf(t).Kind() == reflect.Slice {\n\t\t\ts := reflect.ValueOf(t)\n\t\t\tfor i := 0; i < s.Len(); i++ {\n\t\t\t\tlocalVarQueryParams.Add(\"return[]\", parameterToString(s.Index(i), \"multi\"))\n\t\t\t}\n\t\t} else {\n\t\t\tlocalVarQueryParams.Add(\"return[]\", parameterToString(t, \"multi\"))\n\t\t}\n\t}\n\tif localVarOptionals != nil && localVarOptionals.SortBy.IsSet() {\n\t\tlocalVarQueryParams.Add(\"sort_by\", parameterToString(localVarOptionals.SortBy.Value(), \"\"))\n\t}\n\tif localVarOptionals != nil && localVarOptionals.SortDirection.IsSet() {\n\t\tlocalVarQueryParams.Add(\"sort_direction\", 
parameterToString(localVarOptionals.SortDirection.Value(), \"\"))\n\t}\n\tif localVarOptionals != nil && localVarOptionals.Cursor.IsSet() {\n\t\tlocalVarQueryParams.Add(\"cursor\", parameterToString(localVarOptionals.Cursor.Value(), \"\"))\n\t}\n\tif localVarOptionals != nil && localVarOptionals.PerPage.IsSet() {\n\t\tlocalVarQueryParams.Add(\"per_page\", parameterToString(localVarOptionals.PerPage.Value(), \"\"))\n\t}\n\t// to determine the Content-Type header\n\tlocalVarHTTPContentTypes := []string{}\n\n\t// set Content-Type header\n\tlocalVarHTTPContentType := selectHeaderContentType(localVarHTTPContentTypes)\n\tif localVarHTTPContentType != \"\" {\n\t\tlocalVarHeaderParams[\"Content-Type\"] = localVarHTTPContentType\n\t}\n\n\t// to determine the Accept header\n\tlocalVarHTTPHeaderAccepts := []string{\"application/json\", \"text/xml\"}\n\n\t// set Accept header\n\tlocalVarHTTPHeaderAccept := selectHeaderAccept(localVarHTTPHeaderAccepts)\n\tif localVarHTTPHeaderAccept != \"\" {\n\t\tlocalVarHeaderParams[\"Accept\"] = localVarHTTPHeaderAccept\n\t}\n\tif ctx != nil {\n\t\t// API Key Authentication\n\t\tif auth, ok := ctx.Value(ContextAPIKey).(APIKey); ok {\n\t\t\tvar key string\n\t\t\tif auth.Prefix != \"\" {\n\t\t\t\tkey = auth.Prefix + \" \" + auth.Key\n\t\t\t} else {\n\t\t\t\tkey = auth.Key\n\t\t\t}\n\t\t\tlocalVarHeaderParams[\"X-AYLIEN-NewsAPI-Application-ID\"] = key\n\t\t}\n\t}\n\tif ctx != nil {\n\t\t// API Key Authentication\n\t\tif auth, ok := ctx.Value(ContextAPIKey).(APIKey); ok {\n\t\t\tvar key string\n\t\t\tif auth.Prefix != \"\" {\n\t\t\t\tkey = auth.Prefix + \" \" + auth.Key\n\t\t\t} else {\n\t\t\t\tkey = auth.Key\n\t\t\t}\n\t\t\tlocalVarHeaderParams[\"X-AYLIEN-NewsAPI-Application-Key\"] = key\n\t\t}\n\t}\n\tr, err := a.client.prepareRequest(ctx, localVarPath, localVarHTTPMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFormFileName, localVarFileName, localVarFileBytes)\n\tif err != nil {\n\t\treturn 
localVarReturnValue, nil, err\n\t}\n\n\tlocalVarHTTPResponse, err := a.client.callAPI(r)\n\tif err != nil || localVarHTTPResponse == nil {\n\t\treturn localVarReturnValue, localVarHTTPResponse, err\n\t}\n\n\tlocalVarBody, err := _ioutil.ReadAll(localVarHTTPResponse.Body)\n\tlocalVarHTTPResponse.Body.Close()\n\tif err != nil {\n\t\treturn localVarReturnValue, localVarHTTPResponse, err\n\t}\n\n\tif localVarHTTPResponse.StatusCode >= 300 {\n\t\tnewErr := GenericOpenAPIError{\n\t\t\tbody: localVarBody,\n\t\t\terror: localVarHTTPResponse.Status,\n\t\t}\n\t\tif localVarHTTPResponse.StatusCode == 401 {\n\t\t\tvar v Errors\n\t\t\terr = a.client.decode(&v, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t\t\t}\n\t\t\tnewErr.model = v\n\t\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t\t}\n\t\tif localVarHTTPResponse.StatusCode == 404 {\n\t\t\tvar v Errors\n\t\t\terr = a.client.decode(&v, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t\t\t}\n\t\t\tnewErr.model = v\n\t\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t\t}\n\t\tif localVarHTTPResponse.StatusCode == 422 {\n\t\t\tvar v Errors\n\t\t\terr = a.client.decode(&v, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t\t\t}\n\t\t\tnewErr.model = v\n\t\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t\t}\n\t\tif localVarHTTPResponse.StatusCode == 429 {\n\t\t\tvar v Errors\n\t\t\terr = a.client.decode(&v, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn 
localVarReturnValue, localVarHTTPResponse, newErr\n\t\t\t}\n\t\t\tnewErr.model = v\n\t\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t\t}\n\t\tif localVarHTTPResponse.StatusCode == 500 {\n\t\t\tvar v Errors\n\t\t\terr = a.client.decode(&v, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t\t\t}\n\t\t\tnewErr.model = v\n\t\t}\n\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t}\n\n\terr = a.client.decode(&localVarReturnValue, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\tif err != nil {\n\t\tnewErr := GenericOpenAPIError{\n\t\t\tbody: localVarBody,\n\t\t\terror: err.Error(),\n\t\t}\n\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t}\n\n\treturn localVarReturnValue, localVarHTTPResponse, nil\n}", "func (az *Cloud) Clusters() (cloudprovider.Clusters, bool) {\n\treturn nil, false\n}", "func ExampleClustersClient_Get() {\n\tcred, err := azidentity.NewDefaultAzureCredential(nil)\n\tif err != nil {\n\t\tlog.Fatalf(\"failed to obtain a credential: %v\", err)\n\t}\n\tctx := context.Background()\n\tclientFactory, err := armservicefabric.NewClientFactory(\"<subscription-id>\", cred, nil)\n\tif err != nil {\n\t\tlog.Fatalf(\"failed to create client: %v\", err)\n\t}\n\tres, err := clientFactory.NewClustersClient().Get(ctx, \"resRg\", \"myCluster\", nil)\n\tif err != nil {\n\t\tlog.Fatalf(\"failed to finish the request: %v\", err)\n\t}\n\t// You could use response here. We use blank identifier for just demo purposes.\n\t_ = res\n\t// If the HTTP response code is 200 as defined in example definition, your response structure would look as follows. 
Please pay attention that all the values in the output are fake values for just demo purposes.\n\t// res.Cluster = armservicefabric.Cluster{\n\t// \tName: to.Ptr(\"myCluster\"),\n\t// \tType: to.Ptr(\"Microsoft.ServiceFabric/clusters\"),\n\t// \tEtag: to.Ptr(\"W/\\\"636462502169240745\\\"\"),\n\t// \tID: to.Ptr(\"/subscriptions/00000000-0000-0000-0000-000000000000/resourcegroups/resRg/providers/Microsoft.ServiceFabric/clusters/myCluster\"),\n\t// \tLocation: to.Ptr(\"eastus\"),\n\t// \tTags: map[string]*string{\n\t// \t},\n\t// \tProperties: &armservicefabric.ClusterProperties{\n\t// \t\tAddOnFeatures: []*armservicefabric.AddOnFeatures{\n\t// \t\t\tto.Ptr(armservicefabric.AddOnFeaturesRepairManager),\n\t// \t\t\tto.Ptr(armservicefabric.AddOnFeaturesDNSService),\n\t// \t\t\tto.Ptr(armservicefabric.AddOnFeaturesBackupRestoreService),\n\t// \t\t\tto.Ptr(armservicefabric.AddOnFeaturesResourceMonitorService)},\n\t// \t\t\tAvailableClusterVersions: []*armservicefabric.ClusterVersionDetails{\n\t// \t\t\t\t{\n\t// \t\t\t\t\tCodeVersion: to.Ptr(\"6.1.480.9494\"),\n\t// \t\t\t\t\tEnvironment: to.Ptr(armservicefabric.ClusterEnvironmentWindows),\n\t// \t\t\t\t\tSupportExpiryUTC: to.Ptr(\"2018-06-15T23:59:59.9999999\"),\n\t// \t\t\t}},\n\t// \t\t\tAzureActiveDirectory: &armservicefabric.AzureActiveDirectory{\n\t// \t\t\t\tClientApplication: to.Ptr(\"d151ad89-4bce-4ae8-b3d1-1dc79679fa75\"),\n\t// \t\t\t\tClusterApplication: to.Ptr(\"5886372e-7bf4-4878-a497-8098aba608ae\"),\n\t// \t\t\t\tTenantID: to.Ptr(\"6abcc6a0-8666-43f1-87b8-172cf86a9f9c\"),\n\t// \t\t\t},\n\t// \t\t\tCertificateCommonNames: &armservicefabric.ServerCertificateCommonNames{\n\t// \t\t\t\tCommonNames: []*armservicefabric.ServerCertificateCommonName{\n\t// \t\t\t\t\t{\n\t// \t\t\t\t\t\tCertificateCommonName: to.Ptr(\"abc.com\"),\n\t// \t\t\t\t\t\tCertificateIssuerThumbprint: to.Ptr(\"12599211F8F14C90AFA9532AD79A6F2CA1C00622\"),\n\t// \t\t\t\t}},\n\t// \t\t\t\tX509StoreName: 
to.Ptr(armservicefabric.StoreNameMy),\n\t// \t\t\t},\n\t// \t\t\tClientCertificateCommonNames: []*armservicefabric.ClientCertificateCommonName{\n\t// \t\t\t\t{\n\t// \t\t\t\t\tCertificateCommonName: to.Ptr(\"abc.com\"),\n\t// \t\t\t\t\tCertificateIssuerThumbprint: to.Ptr(\"5F3660C715EBBDA31DB1FFDCF508302348DE8E7A\"),\n\t// \t\t\t\t\tIsAdmin: to.Ptr(true),\n\t// \t\t\t}},\n\t// \t\t\tClientCertificateThumbprints: []*armservicefabric.ClientCertificateThumbprint{\n\t// \t\t\t\t{\n\t// \t\t\t\t\tCertificateThumbprint: to.Ptr(\"5F3660C715EBBDA31DB1FFDCF508302348DE8E7A\"),\n\t// \t\t\t\t\tIsAdmin: to.Ptr(true),\n\t// \t\t\t}},\n\t// \t\t\tClusterCodeVersion: to.Ptr(\"6.1.480.9494\"),\n\t// \t\t\tClusterEndpoint: to.Ptr(\"https://eastus.servicefabric.azure.com\"),\n\t// \t\t\tClusterID: to.Ptr(\"92584666-9889-4ae8-8d02-91902923d37f\"),\n\t// \t\t\tClusterState: to.Ptr(armservicefabric.ClusterStateWaitingForNodes),\n\t// \t\t\tDiagnosticsStorageAccountConfig: &armservicefabric.DiagnosticsStorageAccountConfig{\n\t// \t\t\t\tBlobEndpoint: to.Ptr(\"https://diag.blob.core.windows.net/\"),\n\t// \t\t\t\tProtectedAccountKeyName: to.Ptr(\"StorageAccountKey1\"),\n\t// \t\t\t\tQueueEndpoint: to.Ptr(\"https://diag.queue.core.windows.net/\"),\n\t// \t\t\t\tStorageAccountName: to.Ptr(\"diag\"),\n\t// \t\t\t\tTableEndpoint: to.Ptr(\"https://diag.table.core.windows.net/\"),\n\t// \t\t\t},\n\t// \t\t\tFabricSettings: []*armservicefabric.SettingsSectionDescription{\n\t// \t\t\t\t{\n\t// \t\t\t\t\tName: to.Ptr(\"UpgradeService\"),\n\t// \t\t\t\t\tParameters: []*armservicefabric.SettingsParameterDescription{\n\t// \t\t\t\t\t\t{\n\t// \t\t\t\t\t\t\tName: to.Ptr(\"AppPollIntervalInSeconds\"),\n\t// \t\t\t\t\t\t\tValue: to.Ptr(\"60\"),\n\t// \t\t\t\t\t}},\n\t// \t\t\t}},\n\t// \t\t\tManagementEndpoint: to.Ptr(\"https://myCluster.eastus.cloudapp.azure.com:19080\"),\n\t// \t\t\tNodeTypes: []*armservicefabric.NodeTypeDescription{\n\t// \t\t\t\t{\n\t// \t\t\t\t\tName: to.Ptr(\"nt1vm\"),\n\t// 
\t\t\t\t\tApplicationPorts: &armservicefabric.EndpointRangeDescription{\n\t// \t\t\t\t\t\tEndPort: to.Ptr[int32](30000),\n\t// \t\t\t\t\t\tStartPort: to.Ptr[int32](20000),\n\t// \t\t\t\t\t},\n\t// \t\t\t\t\tClientConnectionEndpointPort: to.Ptr[int32](19000),\n\t// \t\t\t\t\tDurabilityLevel: to.Ptr(armservicefabric.DurabilityLevelBronze),\n\t// \t\t\t\t\tEphemeralPorts: &armservicefabric.EndpointRangeDescription{\n\t// \t\t\t\t\t\tEndPort: to.Ptr[int32](64000),\n\t// \t\t\t\t\t\tStartPort: to.Ptr[int32](49000),\n\t// \t\t\t\t\t},\n\t// \t\t\t\t\tHTTPGatewayEndpointPort: to.Ptr[int32](19007),\n\t// \t\t\t\t\tIsPrimary: to.Ptr(true),\n\t// \t\t\t\t\tVMInstanceCount: to.Ptr[int32](5),\n\t// \t\t\t}},\n\t// \t\t\tProvisioningState: to.Ptr(armservicefabric.ProvisioningStateSucceeded),\n\t// \t\t\tReliabilityLevel: to.Ptr(armservicefabric.ReliabilityLevelSilver),\n\t// \t\t\tReverseProxyCertificateCommonNames: &armservicefabric.ServerCertificateCommonNames{\n\t// \t\t\t\tCommonNames: []*armservicefabric.ServerCertificateCommonName{\n\t// \t\t\t\t\t{\n\t// \t\t\t\t\t\tCertificateCommonName: to.Ptr(\"abc.com\"),\n\t// \t\t\t\t\t\tCertificateIssuerThumbprint: to.Ptr(\"12599211F8F14C90AFA9532AD79A6F2CA1C00622\"),\n\t// \t\t\t\t}},\n\t// \t\t\t\tX509StoreName: to.Ptr(armservicefabric.StoreNameMy),\n\t// \t\t\t},\n\t// \t\t\tUpgradeDescription: &armservicefabric.ClusterUpgradePolicy{\n\t// \t\t\t\tDeltaHealthPolicy: &armservicefabric.ClusterUpgradeDeltaHealthPolicy{\n\t// \t\t\t\t\tApplicationDeltaHealthPolicies: map[string]*armservicefabric.ApplicationDeltaHealthPolicy{\n\t// \t\t\t\t\t\t\"fabric:/myApp1\": &armservicefabric.ApplicationDeltaHealthPolicy{\n\t// \t\t\t\t\t\t\tDefaultServiceTypeDeltaHealthPolicy: &armservicefabric.ServiceTypeDeltaHealthPolicy{\n\t// \t\t\t\t\t\t\t\tMaxPercentDeltaUnhealthyServices: to.Ptr[int32](0),\n\t// \t\t\t\t\t\t\t},\n\t// \t\t\t\t\t\t\tServiceTypeDeltaHealthPolicies: map[string]*armservicefabric.ServiceTypeDeltaHealthPolicy{\n\t// 
\t\t\t\t\t\t\t\t\"myServiceType1\": &armservicefabric.ServiceTypeDeltaHealthPolicy{\n\t// \t\t\t\t\t\t\t\t\tMaxPercentDeltaUnhealthyServices: to.Ptr[int32](0),\n\t// \t\t\t\t\t\t\t\t},\n\t// \t\t\t\t\t\t\t},\n\t// \t\t\t\t\t\t},\n\t// \t\t\t\t\t},\n\t// \t\t\t\t\tMaxPercentDeltaUnhealthyApplications: to.Ptr[int32](0),\n\t// \t\t\t\t\tMaxPercentDeltaUnhealthyNodes: to.Ptr[int32](0),\n\t// \t\t\t\t\tMaxPercentUpgradeDomainDeltaUnhealthyNodes: to.Ptr[int32](0),\n\t// \t\t\t\t},\n\t// \t\t\t\tForceRestart: to.Ptr(false),\n\t// \t\t\t\tHealthCheckRetryTimeout: to.Ptr(\"00:05:00\"),\n\t// \t\t\t\tHealthCheckStableDuration: to.Ptr(\"00:00:30\"),\n\t// \t\t\t\tHealthCheckWaitDuration: to.Ptr(\"00:00:30\"),\n\t// \t\t\t\tHealthPolicy: &armservicefabric.ClusterHealthPolicy{\n\t// \t\t\t\t\tApplicationHealthPolicies: map[string]*armservicefabric.ApplicationHealthPolicy{\n\t// \t\t\t\t\t\t\"fabric:/myApp1\": &armservicefabric.ApplicationHealthPolicy{\n\t// \t\t\t\t\t\t\tDefaultServiceTypeHealthPolicy: &armservicefabric.ServiceTypeHealthPolicy{\n\t// \t\t\t\t\t\t\t\tMaxPercentUnhealthyServices: to.Ptr[int32](0),\n\t// \t\t\t\t\t\t\t},\n\t// \t\t\t\t\t\t\tServiceTypeHealthPolicies: map[string]*armservicefabric.ServiceTypeHealthPolicy{\n\t// \t\t\t\t\t\t\t\t\"myServiceType1\": &armservicefabric.ServiceTypeHealthPolicy{\n\t// \t\t\t\t\t\t\t\t\tMaxPercentUnhealthyServices: to.Ptr[int32](100),\n\t// \t\t\t\t\t\t\t\t},\n\t// \t\t\t\t\t\t\t},\n\t// \t\t\t\t\t\t},\n\t// \t\t\t\t\t},\n\t// \t\t\t\t\tMaxPercentUnhealthyApplications: to.Ptr[int32](0),\n\t// \t\t\t\t\tMaxPercentUnhealthyNodes: to.Ptr[int32](0),\n\t// \t\t\t\t},\n\t// \t\t\t\tUpgradeDomainTimeout: to.Ptr(\"00:15:00\"),\n\t// \t\t\t\tUpgradeReplicaSetCheckTimeout: to.Ptr(\"00:10:00\"),\n\t// \t\t\t\tUpgradeTimeout: to.Ptr(\"01:00:00\"),\n\t// \t\t\t},\n\t// \t\t\tUpgradeMode: to.Ptr(armservicefabric.UpgradeModeManual),\n\t// \t\t\tVMImage: to.Ptr(\"Windows\"),\n\t// \t\t},\n\t// \t}\n}", "func (a *Client) ListClusters(params 
*ListClustersParams, authInfo runtime.ClientAuthInfoWriter) (*ListClustersOK, error) {\n\t// TODO: Validate the params before sending\n\tif params == nil {\n\t\tparams = NewListClustersParams()\n\t}\n\n\tresult, err := a.transport.Submit(&runtime.ClientOperation{\n\t\tID: \"ListClusters\",\n\t\tMethod: \"GET\",\n\t\tPathPattern: \"/api/v1/clusters\",\n\t\tProducesMediaTypes: []string{\"application/json\"},\n\t\tConsumesMediaTypes: []string{\"application/json\"},\n\t\tSchemes: []string{\"https\"},\n\t\tParams: params,\n\t\tReader: &ListClustersReader{formats: a.formats},\n\t\tAuthInfo: authInfo,\n\t\tContext: params.Context,\n\t\tClient: params.HTTPClient,\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn result.(*ListClustersOK), nil\n\n}", "func (s *Server) GetClusters() []*api.Cluster {\n\tinstances := s.doGetClusters()\n\tclusters := make([]*api.Cluster, len(instances))\n\tfor i, instance := range instances {\n\t\tclusters[i] = convertClusterToAPI(instance)\n\t}\n\treturn clusters\n}", "func (bc *Baiducloud) Clusters() (cloudprovider.Clusters, bool) {\n\treturn nil, false\n}", "func (adm Admin) ListClusters() (string, error) {\n\tconn := newConnection(adm.ZkSvr)\n\terr := conn.Connect()\n\tif err != nil {\n\t\tfmt.Println(\"Failed to connect to zookeeper.\")\n\t\treturn \"\", err\n\t}\n\tdefer conn.Disconnect()\n\n\tvar clusters []string\n\n\tchildren, err := conn.Children(\"/\")\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tfor _, cluster := range children {\n\t\tif ok, err := conn.IsClusterSetup(cluster); ok && err == nil {\n\t\t\tclusters = append(clusters, cluster)\n\t\t}\n\t}\n\n\tvar buffer bytes.Buffer\n\tbuffer.WriteString(\"Existing clusters: \\n\")\n\n\tfor _, cluster := range clusters {\n\t\tbuffer.WriteString(\" \" + cluster + \"\\n\")\n\t}\n\treturn buffer.String(), nil\n}", "func (q *QueryResolver) Clusters(ctx context.Context) ([]*ClusterInfoResolver, error) {\n\tgrpcAPI := q.Env.VizierClusterInfo\n\tresp, err := 
grpcAPI.GetClusterInfo(ctx, &cloudpb.GetClusterInfoRequest{})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar res []*ClusterInfoResolver\n\tfor _, cluster := range resp.Clusters {\n\t\tresolver, err := clusterInfoToResolver(cluster)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tres = append(res, resolver)\n\t}\n\treturn res, nil\n}", "func (r *ProjectsInstancesClustersService) Get(name string) *ProjectsInstancesClustersGetCall {\n\tc := &ProjectsInstancesClustersGetCall{s: r.s, urlParams_: make(gensupport.URLParams)}\n\tc.name = name\n\treturn c\n}", "func (a *Client) GetCombinedCloudClusters(params *GetCombinedCloudClustersParams, opts ...ClientOption) (*GetCombinedCloudClustersOK, *GetCombinedCloudClustersMultiStatus, error) {\n\t// TODO: Validate the params before sending\n\tif params == nil {\n\t\tparams = NewGetCombinedCloudClustersParams()\n\t}\n\top := &runtime.ClientOperation{\n\t\tID: \"GetCombinedCloudClusters\",\n\t\tMethod: \"GET\",\n\t\tPathPattern: \"/kubernetes-protection/entities/cloud_cluster/v1\",\n\t\tProducesMediaTypes: []string{\"application/json\"},\n\t\tConsumesMediaTypes: []string{\"application/json\", \"application/octet-stream\"},\n\t\tSchemes: []string{\"https\"},\n\t\tParams: params,\n\t\tReader: &GetCombinedCloudClustersReader{formats: a.formats},\n\t\tContext: params.Context,\n\t\tClient: params.HTTPClient,\n\t}\n\tfor _, opt := range opts {\n\t\topt(op)\n\t}\n\n\tresult, err := a.transport.Submit(op)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\tswitch value := result.(type) {\n\tcase *GetCombinedCloudClustersOK:\n\t\treturn value, nil, nil\n\tcase *GetCombinedCloudClustersMultiStatus:\n\t\treturn nil, value, nil\n\t}\n\t// safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue\n\tmsg := fmt.Sprintf(\"unexpected success response for kubernetes_protection: API contract not enforced by server. 
Client expected to get an error, but got: %T\", result)\n\tpanic(msg)\n}", "func (c *Client) GetClustersSync(ctx context.Context) ([]*Cluster, error) {\n\tclusters := make([]*Cluster, 0)\n\n\tfor result := range c.GetClusters(ctx) {\n\t\tif result.Error != nil {\n\t\t\treturn nil, result.Error\n\t\t}\n\t\tclusters = append(clusters, result.Cluster)\n\t}\n\n\treturn clusters, nil\n}", "func (adm Admin) ListClusters() (string, error) {\n\tvar clusters []string\n\n\tchildren, err := adm.zkClient.Children(\"/\")\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tfor _, cluster := range children {\n\t\tif ok, err := adm.isClusterSetup(cluster); ok && err == nil {\n\t\t\tclusters = append(clusters, cluster)\n\t\t}\n\t}\n\n\tvar buffer bytes.Buffer\n\tbuffer.WriteString(\"Existing clusters: \\n\")\n\n\tfor _, cluster := range clusters {\n\t\tbuffer.WriteString(\" \" + cluster + \"\\n\")\n\t}\n\treturn buffer.String(), nil\n}", "func handleGetClusters(c *Context, w http.ResponseWriter, r *http.Request) {\n\tpaging, err := parsePaging(r.URL)\n\tif err != nil {\n\t\tc.Logger.WithError(err).Error(\"failed to parse paging parameters\")\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\treturn\n\t}\n\n\tfilter := &model.ClusterFilter{\n\t\tPaging: paging,\n\t}\n\n\tclusters, err := c.Store.GetClusterDTOs(filter)\n\tif err != nil {\n\t\tc.Logger.WithError(err).Error(\"failed to query clusters\")\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\treturn\n\t}\n\tif clusters == nil {\n\t\tclusters = []*model.ClusterDTO{}\n\t}\n\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\tw.WriteHeader(http.StatusOK)\n\toutputJSON(c, w, clusters)\n}", "func (s *clusterService) Clusters(ctx context.Context, options ...rest.HTTPClientOption) ([]cluster.Cluster, error) {\n\t_, err := Start(ctx, s.Factories().ClusterCacheFactory(), options...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tclusterCache.RLock()\n\tdefer clusterCache.RUnlock()\n\n\treturn 
Clusters(clusterCache.Clusters()), nil\n}", "func (c Client) ListClusters() (ClusterList, error) {\n\tbody, err := c.watsonClient.MakeRequest(\"GET\", c.version+\"/solr_clusters\", nil, nil)\n\tif err != nil {\n\t\treturn ClusterList{}, err\n\t}\n\tvar response ClusterList\n\terr = json.Unmarshal(body, &response)\n\treturn response, err\n}", "func (c *krakenClusters) Get(name string, options v1.GetOptions) (result *v1alpha1.KrakenCluster, err error) {\n\tresult = &v1alpha1.KrakenCluster{}\n\terr = c.client.Get().\n\t\tNamespace(c.ns).\n\t\tResource(\"krakenclusters\").\n\t\tName(name).\n\t\tVersionedParams(&options, scheme.ParameterCodec).\n\t\tDo().\n\t\tInto(result)\n\treturn\n}", "func (c starterClusterServiceOp) List(ctx context.Context) (*[]models.Cluster, *Response, error) {\n\tvar clusterList []models.Cluster\n\tgraphqlRequest := models.GraphqlRequest{\n\t\tName: \"clusters\",\n\t\tOperation: models.Query,\n\t\tInput: clusterList,\n\t\tArgs: models.ClusterListInput{\n\t\t\tProductType: models.Starter,\n\t\t},\n\t\tResponse: clusterList,\n\t}\n\treq, err := c.client.NewRequest(&graphqlRequest)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tresp, err := c.client.Do(ctx, req, &clusterList)\n\tif err != nil {\n\t\treturn nil, resp, err\n\t}\n\n\treturn &clusterList, resp, err\n}", "func getClusters(kubeconfig string) ([]string, error) {\n\tkubectlArgs := []string{\"kubectl\"}\n\tif kubeconfig != \"\" {\n\t\tkubectlArgs = append(kubectlArgs, fmt.Sprintf(\"--kubeconfig=%s\", kubeconfig))\n\t}\n\tcontextArgs := append(kubectlArgs, []string{\"config\", \"get-contexts\", \"-o=name\"}...)\n\toutput, err := runCommand(contextArgs)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"error in getting contexts from kubeconfig: %s\", err)\n\t}\n\treturn strings.Split(output, \"\\n\"), nil\n}", "func (nh *NodeHost) Clusters() []*node {\n\tresult := make([]*node, 0)\n\tnh.clusterMu.RLock()\n\tnh.clusterMu.clusters.Range(func(k, v interface{}) bool {\n\t\tresult = 
append(result, v.(*node))\n\t\treturn true\n\t})\n\tnh.clusterMu.RUnlock()\n\n\treturn result\n}", "func (a *ClustersApiService) ListClusters(ctx _context.Context, space string) ApiListClustersRequest {\n\treturn ApiListClustersRequest{\n\t\tApiService: a,\n\t\tctx: ctx,\n\t\tspace: space,\n\t}\n}", "func GetMultipleClustersName(cmd *cobra.Command, args []string) ([]string, error) {\n\tif len(args) == 0 {\n\t\treturn nil, UserErrorf(\"requires a cluster name\")\n\t}\n\treturn args, nil\n}", "func (bc *Baiducloud) ListClusters(ctx context.Context) ([]string, error) {\n\treturn nil, fmt.Errorf(\"ListClusters unimplemented\")\n}", "func (a *ClustersApiService) ListClustersExecute(r ApiListClustersRequest) (ListClustersResponse, *_nethttp.Response, error) {\n\tvar (\n\t\tlocalVarHTTPMethod = _nethttp.MethodGet\n\t\tlocalVarPostBody interface{}\n\t\tlocalVarFormFileName string\n\t\tlocalVarFileName string\n\t\tlocalVarFileBytes []byte\n\t\tlocalVarReturnValue ListClustersResponse\n\t)\n\n\tlocalBasePath, err := a.client.cfg.ServerURLWithContext(r.ctx, \"ClustersApiService.ListClusters\")\n\tif err != nil {\n\t\treturn localVarReturnValue, nil, GenericOpenAPIError{error: err.Error()}\n\t}\n\n\tlocalVarPath := localBasePath + \"/spaces/{space}/clusters\"\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"space\"+\"}\", _neturl.PathEscape(parameterToString(r.space, \"\")), -1)\n\n\tlocalVarHeaderParams := make(map[string]string)\n\tlocalVarQueryParams := _neturl.Values{}\n\tlocalVarFormParams := _neturl.Values{}\n\n\t// to determine the Content-Type header\n\tlocalVarHTTPContentTypes := []string{}\n\n\t// set Content-Type header\n\tlocalVarHTTPContentType := selectHeaderContentType(localVarHTTPContentTypes)\n\tif localVarHTTPContentType != \"\" {\n\t\tlocalVarHeaderParams[\"Content-Type\"] = localVarHTTPContentType\n\t}\n\n\t// to determine the Accept header\n\tlocalVarHTTPHeaderAccepts := []string{\"application/json\"}\n\n\t// set Accept 
header\n\tlocalVarHTTPHeaderAccept := selectHeaderAccept(localVarHTTPHeaderAccepts)\n\tif localVarHTTPHeaderAccept != \"\" {\n\t\tlocalVarHeaderParams[\"Accept\"] = localVarHTTPHeaderAccept\n\t}\n\treq, err := a.client.prepareRequest(r.ctx, localVarPath, localVarHTTPMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFormFileName, localVarFileName, localVarFileBytes)\n\tif err != nil {\n\t\treturn localVarReturnValue, nil, err\n\t}\n\n\tlocalVarHTTPResponse, err := a.client.callAPI(req)\n\tif err != nil || localVarHTTPResponse == nil {\n\t\treturn localVarReturnValue, localVarHTTPResponse, err\n\t}\n\n\tlocalVarBody, err := _ioutil.ReadAll(localVarHTTPResponse.Body)\n\tlocalVarHTTPResponse.Body.Close()\n\tlocalVarHTTPResponse.Body = _ioutil.NopCloser(bytes.NewBuffer(localVarBody))\n\tif err != nil {\n\t\treturn localVarReturnValue, localVarHTTPResponse, err\n\t}\n\n\tif localVarHTTPResponse.StatusCode >= 300 {\n\t\tnewErr := GenericOpenAPIError{\n\t\t\tbody: localVarBody,\n\t\t\terror: localVarHTTPResponse.Status,\n\t\t}\n\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t}\n\n\terr = a.client.decode(&localVarReturnValue, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\tif err != nil {\n\t\tnewErr := GenericOpenAPIError{\n\t\t\tbody: localVarBody,\n\t\t\terror: err.Error(),\n\t\t}\n\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t}\n\n\treturn localVarReturnValue, localVarHTTPResponse, nil\n}", "func (svc ServerlessClusterService) List(ctx context.Context) (*[]models.Cluster, *Response, error) {\n\tvar clusterList []models.Cluster\n\tgraphqlRequest := models.GraphqlRequest{\n\t\tName: \"clusters\",\n\t\tOperation: models.Query,\n\t\tInput: nil,\n\t\tArgs: models.ClusterListInput{\n\t\t\tProductType: models.Starter,\n\t\t},\n\t\tResponse: clusterList,\n\t}\n\treq, err := svc.client.NewRequest(&graphqlRequest)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tresp, err := 
svc.client.Do(ctx, req, &clusterList)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\treturn &clusterList, resp, err\n}", "func (m *MockBuilder) Clusters() []string {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"Clusters\")\n\tret0, _ := ret[0].([]string)\n\treturn ret0\n}", "func (cc *CloudComb) GetClustersImages() (string, error) {\n\tresult, _, err := cc.doRESTRequest(\"GET\", \"/api/v1/apps/images\", \"\", nil, nil)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\treturn result, nil\n}", "func (ds *DiscoveryService) ListClusters(request *restful.Request, response *restful.Response) {\n\tkey := request.Request.URL.String()\n\tout, cached := ds.cdsCache.cachedDiscoveryResponse(key)\n\tif !cached {\n\t\tif sc := request.PathParameter(ServiceCluster); sc != ds.mesh.IstioServiceCluster {\n\t\t\terrorResponse(response, http.StatusNotFound,\n\t\t\t\tfmt.Sprintf(\"Unexpected %s %q\", ServiceCluster, sc))\n\t\t\treturn\n\t\t}\n\n\t\t// service-node holds the IP address\n\t\tip := request.PathParameter(ServiceNode)\n\t\t// CDS computes clusters that are referenced by RDS routes for a particular proxy node\n\t\t// TODO: this implementation is inefficient as it is recomputing all the routes for all proxies\n\t\t// There is a lot of potential to cache and reuse cluster definitions across proxies and also\n\t\t// skip computing the actual HTTP routes\n\t\tinstances := ds.services.HostInstances(map[string]bool{ip: true})\n\t\tservices := ds.services.Services()\n\t\thttpRouteConfigs := buildOutboundHTTPRoutes(instances, services, &ProxyContext{\n\t\t\tDiscovery: ds.services,\n\t\t\tConfig: ds.config,\n\t\t\tMeshConfig: ds.mesh,\n\t\t\tIPAddress: ip,\n\t\t})\n\n\t\t// de-duplicate and canonicalize clusters\n\t\tclusters := httpRouteConfigs.clusters().normalize()\n\n\t\t// apply custom policies for HTTP clusters\n\t\tfor _, cluster := range clusters {\n\t\t\tinsertDestinationPolicy(ds.config, cluster)\n\t\t}\n\n\t\tvar err error\n\t\tif out, err = 
json.MarshalIndent(ClusterManager{Clusters: clusters}, \" \", \" \"); err != nil {\n\t\t\terrorResponse(response, http.StatusInternalServerError, err.Error())\n\t\t\treturn\n\t\t}\n\t\tds.cdsCache.updateCachedDiscoveryResponse(key, out)\n\t}\n\twriteResponse(response, out)\n}", "func (e *ECS) DescribeClusters(req *DescribeClustersReq) (*DescribeClustersResp, error) {\n\tif req == nil {\n\t\treturn nil, fmt.Errorf(\"The req params cannot be nil\")\n\t}\n\n\tparams := makeParams(\"DescribeClusters\")\n\tif len(req.Clusters) > 0 {\n\t\taddParamsList(params, \"clusters.member\", req.Clusters)\n\t}\n\n\tresp := new(DescribeClustersResp)\n\tif err := e.query(params, resp); err != nil {\n\t\treturn nil, err\n\t}\n\treturn resp, nil\n}", "func (c *ClustersController) List(ctx *app.ListClustersContext) error {\n\t// return a single cluster given its URL\n\tif ctx.ClusterURL != nil {\n\t\t// authorization is checked at the service level for more consistency accross the codebase.\n\t\tclustr, err := c.app.ClusterService().FindByURL(ctx, *ctx.ClusterURL)\n\t\tif err != nil {\n\t\t\tif ok, _ := errors.IsNotFoundError(err); ok {\n\t\t\t\t// no result found, return an empty array\n\t\t\t\treturn ctx.OK(&app.ClusterList{\n\t\t\t\t\tData: []*app.ClusterData{},\n\t\t\t\t})\n\t\t\t}\n\t\t\t// something wrong happened, return the error\n\t\t\treturn app.JSONErrorResponse(ctx, err)\n\t\t}\n\t\treturn ctx.OK(&app.ClusterList{\n\t\t\tData: []*app.ClusterData{convertToClusterData(*clustr)},\n\t\t})\n\t}\n\t// otherwise, list all clusters\n\tclusters, err := c.app.ClusterService().List(ctx, ctx.Type)\n\tif err != nil {\n\t\treturn app.JSONErrorResponse(ctx, err)\n\t}\n\tvar data []*app.ClusterData\n\tfor _, clustr := range clusters {\n\t\tdata = append(data, convertToClusterData(clustr))\n\t}\n\treturn ctx.OK(&app.ClusterList{\n\t\tData: data,\n\t})\n}", "func ListClusters(c *cli.Context) error {\n\tif err := printClusters(); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}", "func 
GetClusterNodes(cs *framework.ClientSet) (int, error) {\n\tnodes, err := getNodesByLabel(cs, \"\")\n\tif err != nil {\n\t\treturn 0, fmt.Errorf(\"unable to get the number of cluster nodes: %v\", err)\n\t}\n\n\treturn len(nodes), nil\n}", "func (c *ClientIMPL) GetCluster(ctx context.Context) (resp Cluster, err error) {\n\tvar systemList []Cluster\n\tcluster := Cluster{}\n\tqp := c.APIClient().QueryParamsWithFields(&cluster)\n\n\tmajorMinorVersion, err := c.GetSoftwareMajorMinorVersion(ctx)\n\tif err != nil {\n\t\tlog.Errorf(\"Couldn't find the array version %s\", err.Error())\n\t} else {\n\t\tif majorMinorVersion >= 3.0 {\n\t\t\tqp.Select(\"nvm_subsystem_nqn\")\n\t\t}\n\t}\n\t_, err = c.APIClient().Query(\n\t\tctx,\n\t\tRequestConfig{\n\t\t\tMethod: \"GET\",\n\t\t\tEndpoint: clusterURL,\n\t\t\tQueryParams: qp,\n\t\t},\n\t\t&systemList)\n\terr = WrapErr(err)\n\tif err != nil {\n\t\treturn resp, err\n\t}\n\treturn systemList[0], err\n}", "func (c starterClusterServiceOp) Get(ctx context.Context, input *models.GetStarterClusterInput) (*models.Cluster, *Response, error) {\n\tvar cluster models.Cluster\n\tvar graphqlRequest = models.GraphqlRequest{\n\t\tName: \"cluster\",\n\t\tOperation: models.Query,\n\t\tInput: nil,\n\t\tArgs: *input,\n\t\tResponse: cluster,\n\t}\n\treq, err := c.client.NewRequest(&graphqlRequest)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tresp, err := c.client.Do(ctx, req, &cluster)\n\tif err != nil {\n\t\treturn nil, resp, err\n\t}\n\n\treturn &cluster, resp, err\n}", "func ExampleClustersClient_List() {\n\tcred, err := azidentity.NewDefaultAzureCredential(nil)\n\tif err != nil {\n\t\tlog.Fatalf(\"failed to obtain a credential: %v\", err)\n\t}\n\tctx := context.Background()\n\tclientFactory, err := armservicefabric.NewClientFactory(\"<subscription-id>\", cred, nil)\n\tif err != nil {\n\t\tlog.Fatalf(\"failed to create client: %v\", err)\n\t}\n\tres, err := clientFactory.NewClustersClient().List(ctx, nil)\n\tif err != nil 
{\n\t\tlog.Fatalf(\"failed to finish the request: %v\", err)\n\t}\n\t// You could use response here. We use blank identifier for just demo purposes.\n\t_ = res\n\t// If the HTTP response code is 200 as defined in example definition, your response structure would look as follows. Please pay attention that all the values in the output are fake values for just demo purposes.\n\t// res.ClusterListResult = armservicefabric.ClusterListResult{\n\t// \tValue: []*armservicefabric.Cluster{\n\t// \t\t{\n\t// \t\t\tName: to.Ptr(\"myCluster\"),\n\t// \t\t\tType: to.Ptr(\"Microsoft.ServiceFabric/clusters\"),\n\t// \t\t\tEtag: to.Ptr(\"W/\\\"636462502169240745\\\"\"),\n\t// \t\t\tID: to.Ptr(\"/subscriptions/00000000-0000-0000-0000-000000000000/resourcegroups/resRg/providers/Microsoft.ServiceFabric/clusters/myCluster\"),\n\t// \t\t\tLocation: to.Ptr(\"eastus\"),\n\t// \t\t\tTags: map[string]*string{\n\t// \t\t\t},\n\t// \t\t\tProperties: &armservicefabric.ClusterProperties{\n\t// \t\t\t\tAddOnFeatures: []*armservicefabric.AddOnFeatures{\n\t// \t\t\t\t\tto.Ptr(armservicefabric.AddOnFeaturesRepairManager),\n\t// \t\t\t\t\tto.Ptr(armservicefabric.AddOnFeaturesDNSService),\n\t// \t\t\t\t\tto.Ptr(armservicefabric.AddOnFeaturesBackupRestoreService),\n\t// \t\t\t\t\tto.Ptr(armservicefabric.AddOnFeaturesResourceMonitorService)},\n\t// \t\t\t\t\tAvailableClusterVersions: []*armservicefabric.ClusterVersionDetails{\n\t// \t\t\t\t\t\t{\n\t// \t\t\t\t\t\t\tCodeVersion: to.Ptr(\"6.1.480.9494\"),\n\t// \t\t\t\t\t\t\tEnvironment: to.Ptr(armservicefabric.ClusterEnvironmentWindows),\n\t// \t\t\t\t\t\t\tSupportExpiryUTC: to.Ptr(\"2018-06-15T23:59:59.9999999\"),\n\t// \t\t\t\t\t}},\n\t// \t\t\t\t\tAzureActiveDirectory: &armservicefabric.AzureActiveDirectory{\n\t// \t\t\t\t\t\tClientApplication: to.Ptr(\"d151ad89-4bce-4ae8-b3d1-1dc79679fa75\"),\n\t// \t\t\t\t\t\tClusterApplication: to.Ptr(\"5886372e-7bf4-4878-a497-8098aba608ae\"),\n\t// \t\t\t\t\t\tTenantID: 
to.Ptr(\"6abcc6a0-8666-43f1-87b8-172cf86a9f9c\"),\n\t// \t\t\t\t\t},\n\t// \t\t\t\t\tCertificateCommonNames: &armservicefabric.ServerCertificateCommonNames{\n\t// \t\t\t\t\t\tCommonNames: []*armservicefabric.ServerCertificateCommonName{\n\t// \t\t\t\t\t\t\t{\n\t// \t\t\t\t\t\t\t\tCertificateCommonName: to.Ptr(\"abc.com\"),\n\t// \t\t\t\t\t\t\t\tCertificateIssuerThumbprint: to.Ptr(\"12599211F8F14C90AFA9532AD79A6F2CA1C00622\"),\n\t// \t\t\t\t\t\t}},\n\t// \t\t\t\t\t\tX509StoreName: to.Ptr(armservicefabric.StoreNameMy),\n\t// \t\t\t\t\t},\n\t// \t\t\t\t\tClientCertificateCommonNames: []*armservicefabric.ClientCertificateCommonName{\n\t// \t\t\t\t\t\t{\n\t// \t\t\t\t\t\t\tCertificateCommonName: to.Ptr(\"abc.com\"),\n\t// \t\t\t\t\t\t\tCertificateIssuerThumbprint: to.Ptr(\"5F3660C715EBBDA31DB1FFDCF508302348DE8E7A\"),\n\t// \t\t\t\t\t\t\tIsAdmin: to.Ptr(true),\n\t// \t\t\t\t\t}},\n\t// \t\t\t\t\tClientCertificateThumbprints: []*armservicefabric.ClientCertificateThumbprint{\n\t// \t\t\t\t\t\t{\n\t// \t\t\t\t\t\t\tCertificateThumbprint: to.Ptr(\"5F3660C715EBBDA31DB1FFDCF508302348DE8E7A\"),\n\t// \t\t\t\t\t\t\tIsAdmin: to.Ptr(false),\n\t// \t\t\t\t\t}},\n\t// \t\t\t\t\tClusterCodeVersion: to.Ptr(\"6.1.480.9494\"),\n\t// \t\t\t\t\tClusterEndpoint: to.Ptr(\"https://eastus.servicefabric.azure.com\"),\n\t// \t\t\t\t\tClusterID: to.Ptr(\"92584666-9889-4ae8-8d02-91902923d37f\"),\n\t// \t\t\t\t\tClusterState: to.Ptr(armservicefabric.ClusterStateWaitingForNodes),\n\t// \t\t\t\t\tDiagnosticsStorageAccountConfig: &armservicefabric.DiagnosticsStorageAccountConfig{\n\t// \t\t\t\t\t\tBlobEndpoint: to.Ptr(\"https://diag.blob.core.windows.net/\"),\n\t// \t\t\t\t\t\tProtectedAccountKeyName: to.Ptr(\"StorageAccountKey1\"),\n\t// \t\t\t\t\t\tQueueEndpoint: to.Ptr(\"https://diag.queue.core.windows.net/\"),\n\t// \t\t\t\t\t\tStorageAccountName: to.Ptr(\"diag\"),\n\t// \t\t\t\t\t\tTableEndpoint: to.Ptr(\"https://diag.table.core.windows.net/\"),\n\t// \t\t\t\t\t},\n\t// \t\t\t\t\tFabricSettings: 
[]*armservicefabric.SettingsSectionDescription{\n\t// \t\t\t\t\t\t{\n\t// \t\t\t\t\t\t\tName: to.Ptr(\"UpgradeService\"),\n\t// \t\t\t\t\t\t\tParameters: []*armservicefabric.SettingsParameterDescription{\n\t// \t\t\t\t\t\t\t\t{\n\t// \t\t\t\t\t\t\t\t\tName: to.Ptr(\"AppPollIntervalInSeconds\"),\n\t// \t\t\t\t\t\t\t\t\tValue: to.Ptr(\"60\"),\n\t// \t\t\t\t\t\t\t}},\n\t// \t\t\t\t\t}},\n\t// \t\t\t\t\tManagementEndpoint: to.Ptr(\"https://myCluster.eastus.cloudapp.azure.com:19080\"),\n\t// \t\t\t\t\tNodeTypes: []*armservicefabric.NodeTypeDescription{\n\t// \t\t\t\t\t\t{\n\t// \t\t\t\t\t\t\tName: to.Ptr(\"nt1vm\"),\n\t// \t\t\t\t\t\t\tApplicationPorts: &armservicefabric.EndpointRangeDescription{\n\t// \t\t\t\t\t\t\t\tEndPort: to.Ptr[int32](30000),\n\t// \t\t\t\t\t\t\t\tStartPort: to.Ptr[int32](20000),\n\t// \t\t\t\t\t\t\t},\n\t// \t\t\t\t\t\t\tClientConnectionEndpointPort: to.Ptr[int32](19000),\n\t// \t\t\t\t\t\t\tDurabilityLevel: to.Ptr(armservicefabric.DurabilityLevelBronze),\n\t// \t\t\t\t\t\t\tEphemeralPorts: &armservicefabric.EndpointRangeDescription{\n\t// \t\t\t\t\t\t\t\tEndPort: to.Ptr[int32](64000),\n\t// \t\t\t\t\t\t\t\tStartPort: to.Ptr[int32](49000),\n\t// \t\t\t\t\t\t\t},\n\t// \t\t\t\t\t\t\tHTTPGatewayEndpointPort: to.Ptr[int32](19007),\n\t// \t\t\t\t\t\t\tIsPrimary: to.Ptr(true),\n\t// \t\t\t\t\t\t\tVMInstanceCount: to.Ptr[int32](5),\n\t// \t\t\t\t\t}},\n\t// \t\t\t\t\tProvisioningState: to.Ptr(armservicefabric.ProvisioningStateSucceeded),\n\t// \t\t\t\t\tReliabilityLevel: to.Ptr(armservicefabric.ReliabilityLevelSilver),\n\t// \t\t\t\t\tReverseProxyCertificateCommonNames: &armservicefabric.ServerCertificateCommonNames{\n\t// \t\t\t\t\t\tCommonNames: []*armservicefabric.ServerCertificateCommonName{\n\t// \t\t\t\t\t\t\t{\n\t// \t\t\t\t\t\t\t\tCertificateCommonName: to.Ptr(\"abc.com\"),\n\t// \t\t\t\t\t\t\t\tCertificateIssuerThumbprint: to.Ptr(\"12599211F8F14C90AFA9532AD79A6F2CA1C00622\"),\n\t// \t\t\t\t\t\t}},\n\t// \t\t\t\t\t\tX509StoreName: 
to.Ptr(armservicefabric.StoreNameMy),\n\t// \t\t\t\t\t},\n\t// \t\t\t\t\tUpgradeDescription: &armservicefabric.ClusterUpgradePolicy{\n\t// \t\t\t\t\t\tDeltaHealthPolicy: &armservicefabric.ClusterUpgradeDeltaHealthPolicy{\n\t// \t\t\t\t\t\t\tApplicationDeltaHealthPolicies: map[string]*armservicefabric.ApplicationDeltaHealthPolicy{\n\t// \t\t\t\t\t\t\t\t\"fabric:/myApp1\": &armservicefabric.ApplicationDeltaHealthPolicy{\n\t// \t\t\t\t\t\t\t\t\tDefaultServiceTypeDeltaHealthPolicy: &armservicefabric.ServiceTypeDeltaHealthPolicy{\n\t// \t\t\t\t\t\t\t\t\t\tMaxPercentDeltaUnhealthyServices: to.Ptr[int32](0),\n\t// \t\t\t\t\t\t\t\t\t},\n\t// \t\t\t\t\t\t\t\t\tServiceTypeDeltaHealthPolicies: map[string]*armservicefabric.ServiceTypeDeltaHealthPolicy{\n\t// \t\t\t\t\t\t\t\t\t\t\"myServiceType1\": &armservicefabric.ServiceTypeDeltaHealthPolicy{\n\t// \t\t\t\t\t\t\t\t\t\t\tMaxPercentDeltaUnhealthyServices: to.Ptr[int32](0),\n\t// \t\t\t\t\t\t\t\t\t\t},\n\t// \t\t\t\t\t\t\t\t\t},\n\t// \t\t\t\t\t\t\t\t},\n\t// \t\t\t\t\t\t\t},\n\t// \t\t\t\t\t\t\tMaxPercentDeltaUnhealthyApplications: to.Ptr[int32](0),\n\t// \t\t\t\t\t\t\tMaxPercentDeltaUnhealthyNodes: to.Ptr[int32](0),\n\t// \t\t\t\t\t\t\tMaxPercentUpgradeDomainDeltaUnhealthyNodes: to.Ptr[int32](0),\n\t// \t\t\t\t\t\t},\n\t// \t\t\t\t\t\tForceRestart: to.Ptr(false),\n\t// \t\t\t\t\t\tHealthCheckRetryTimeout: to.Ptr(\"00:05:00\"),\n\t// \t\t\t\t\t\tHealthCheckStableDuration: to.Ptr(\"00:00:30\"),\n\t// \t\t\t\t\t\tHealthCheckWaitDuration: to.Ptr(\"00:00:30\"),\n\t// \t\t\t\t\t\tHealthPolicy: &armservicefabric.ClusterHealthPolicy{\n\t// \t\t\t\t\t\t\tApplicationHealthPolicies: map[string]*armservicefabric.ApplicationHealthPolicy{\n\t// \t\t\t\t\t\t\t\t\"fabric:/myApp1\": &armservicefabric.ApplicationHealthPolicy{\n\t// \t\t\t\t\t\t\t\t\tDefaultServiceTypeHealthPolicy: &armservicefabric.ServiceTypeHealthPolicy{\n\t// \t\t\t\t\t\t\t\t\t\tMaxPercentUnhealthyServices: to.Ptr[int32](0),\n\t// \t\t\t\t\t\t\t\t\t},\n\t// 
\t\t\t\t\t\t\t\t\tServiceTypeHealthPolicies: map[string]*armservicefabric.ServiceTypeHealthPolicy{\n\t// \t\t\t\t\t\t\t\t\t\t\"myServiceType1\": &armservicefabric.ServiceTypeHealthPolicy{\n\t// \t\t\t\t\t\t\t\t\t\t\tMaxPercentUnhealthyServices: to.Ptr[int32](100),\n\t// \t\t\t\t\t\t\t\t\t\t},\n\t// \t\t\t\t\t\t\t\t\t},\n\t// \t\t\t\t\t\t\t\t},\n\t// \t\t\t\t\t\t\t},\n\t// \t\t\t\t\t\t\tMaxPercentUnhealthyApplications: to.Ptr[int32](0),\n\t// \t\t\t\t\t\t\tMaxPercentUnhealthyNodes: to.Ptr[int32](0),\n\t// \t\t\t\t\t\t},\n\t// \t\t\t\t\t\tUpgradeDomainTimeout: to.Ptr(\"00:15:00\"),\n\t// \t\t\t\t\t\tUpgradeReplicaSetCheckTimeout: to.Ptr(\"00:10:00\"),\n\t// \t\t\t\t\t\tUpgradeTimeout: to.Ptr(\"01:00:00\"),\n\t// \t\t\t\t\t},\n\t// \t\t\t\t\tUpgradeMode: to.Ptr(armservicefabric.UpgradeModeManual),\n\t// \t\t\t\t\tVMImage: to.Ptr(\"Windows\"),\n\t// \t\t\t\t},\n\t// \t\t\t},\n\t// \t\t\t{\n\t// \t\t\t\tName: to.Ptr(\"myCluster2\"),\n\t// \t\t\t\tType: to.Ptr(\"Microsoft.ServiceFabric/clusters\"),\n\t// \t\t\t\tEtag: to.Ptr(\"W/\\\"636462502164040075\\\"\"),\n\t// \t\t\t\tID: to.Ptr(\"/subscriptions/00000000-0000-0000-0000-000000000000/resourcegroups/resRg/providers/Microsoft.ServiceFabric/clusters/myCluster2\"),\n\t// \t\t\t\tLocation: to.Ptr(\"eastus\"),\n\t// \t\t\t\tTags: map[string]*string{\n\t// \t\t\t\t},\n\t// \t\t\t\tProperties: &armservicefabric.ClusterProperties{\n\t// \t\t\t\t\tAddOnFeatures: []*armservicefabric.AddOnFeatures{\n\t// \t\t\t\t\t\tto.Ptr(armservicefabric.AddOnFeaturesRepairManager)},\n\t// \t\t\t\t\t\tAvailableClusterVersions: []*armservicefabric.ClusterVersionDetails{\n\t// \t\t\t\t\t\t\t{\n\t// \t\t\t\t\t\t\t\tCodeVersion: to.Ptr(\"6.1.187.1\"),\n\t// \t\t\t\t\t\t\t\tEnvironment: to.Ptr(armservicefabric.ClusterEnvironmentLinux),\n\t// \t\t\t\t\t\t\t\tSupportExpiryUTC: to.Ptr(\"2018-06-15T23:59:59.9999999\"),\n\t// \t\t\t\t\t\t}},\n\t// \t\t\t\t\t\tClientCertificateCommonNames: []*armservicefabric.ClientCertificateCommonName{\n\t// 
\t\t\t\t\t\t},\n\t// \t\t\t\t\t\tClientCertificateThumbprints: []*armservicefabric.ClientCertificateThumbprint{\n\t// \t\t\t\t\t\t},\n\t// \t\t\t\t\t\tClusterCodeVersion: to.Ptr(\"6.1.187.1\"),\n\t// \t\t\t\t\t\tClusterEndpoint: to.Ptr(\"https://eastus.servicefabric.azure.com\"),\n\t// \t\t\t\t\t\tClusterID: to.Ptr(\"2747e469-b24e-4039-8a0a-46151419523f\"),\n\t// \t\t\t\t\t\tClusterState: to.Ptr(armservicefabric.ClusterStateWaitingForNodes),\n\t// \t\t\t\t\t\tDiagnosticsStorageAccountConfig: &armservicefabric.DiagnosticsStorageAccountConfig{\n\t// \t\t\t\t\t\t\tBlobEndpoint: to.Ptr(\"https://diag.blob.core.windows.net/\"),\n\t// \t\t\t\t\t\t\tProtectedAccountKeyName: to.Ptr(\"StorageAccountKey1\"),\n\t// \t\t\t\t\t\t\tQueueEndpoint: to.Ptr(\"https://diag.queue.core.windows.net/\"),\n\t// \t\t\t\t\t\t\tStorageAccountName: to.Ptr(\"diag\"),\n\t// \t\t\t\t\t\t\tTableEndpoint: to.Ptr(\"https://diag.table.core.windows.net/\"),\n\t// \t\t\t\t\t\t},\n\t// \t\t\t\t\t\tFabricSettings: []*armservicefabric.SettingsSectionDescription{\n\t// \t\t\t\t\t\t\t{\n\t// \t\t\t\t\t\t\t\tName: to.Ptr(\"UpgradeService\"),\n\t// \t\t\t\t\t\t\t\tParameters: []*armservicefabric.SettingsParameterDescription{\n\t// \t\t\t\t\t\t\t\t\t{\n\t// \t\t\t\t\t\t\t\t\t\tName: to.Ptr(\"AppPollIntervalInSeconds\"),\n\t// \t\t\t\t\t\t\t\t\t\tValue: to.Ptr(\"60\"),\n\t// \t\t\t\t\t\t\t\t}},\n\t// \t\t\t\t\t\t}},\n\t// \t\t\t\t\t\tManagementEndpoint: to.Ptr(\"http://myCluster2.eastus.cloudapp.azure.com:19080\"),\n\t// \t\t\t\t\t\tNodeTypes: []*armservicefabric.NodeTypeDescription{\n\t// \t\t\t\t\t\t\t{\n\t// \t\t\t\t\t\t\t\tName: to.Ptr(\"nt1vm\"),\n\t// \t\t\t\t\t\t\t\tApplicationPorts: &armservicefabric.EndpointRangeDescription{\n\t// \t\t\t\t\t\t\t\t\tEndPort: to.Ptr[int32](30000),\n\t// \t\t\t\t\t\t\t\t\tStartPort: to.Ptr[int32](20000),\n\t// \t\t\t\t\t\t\t\t},\n\t// \t\t\t\t\t\t\t\tClientConnectionEndpointPort: to.Ptr[int32](19000),\n\t// \t\t\t\t\t\t\t\tDurabilityLevel: 
to.Ptr(armservicefabric.DurabilityLevelBronze),\n\t// \t\t\t\t\t\t\t\tEphemeralPorts: &armservicefabric.EndpointRangeDescription{\n\t// \t\t\t\t\t\t\t\t\tEndPort: to.Ptr[int32](64000),\n\t// \t\t\t\t\t\t\t\t\tStartPort: to.Ptr[int32](49000),\n\t// \t\t\t\t\t\t\t\t},\n\t// \t\t\t\t\t\t\t\tHTTPGatewayEndpointPort: to.Ptr[int32](19007),\n\t// \t\t\t\t\t\t\t\tIsPrimary: to.Ptr(true),\n\t// \t\t\t\t\t\t\t\tVMInstanceCount: to.Ptr[int32](5),\n\t// \t\t\t\t\t\t}},\n\t// \t\t\t\t\t\tProvisioningState: to.Ptr(armservicefabric.ProvisioningStateSucceeded),\n\t// \t\t\t\t\t\tReliabilityLevel: to.Ptr(armservicefabric.ReliabilityLevelSilver),\n\t// \t\t\t\t\t\tUpgradeDescription: &armservicefabric.ClusterUpgradePolicy{\n\t// \t\t\t\t\t\t\tDeltaHealthPolicy: &armservicefabric.ClusterUpgradeDeltaHealthPolicy{\n\t// \t\t\t\t\t\t\t\tMaxPercentDeltaUnhealthyApplications: to.Ptr[int32](0),\n\t// \t\t\t\t\t\t\t\tMaxPercentDeltaUnhealthyNodes: to.Ptr[int32](0),\n\t// \t\t\t\t\t\t\t\tMaxPercentUpgradeDomainDeltaUnhealthyNodes: to.Ptr[int32](0),\n\t// \t\t\t\t\t\t\t},\n\t// \t\t\t\t\t\t\tForceRestart: to.Ptr(false),\n\t// \t\t\t\t\t\t\tHealthCheckRetryTimeout: to.Ptr(\"00:05:00\"),\n\t// \t\t\t\t\t\t\tHealthCheckStableDuration: to.Ptr(\"00:00:30\"),\n\t// \t\t\t\t\t\t\tHealthCheckWaitDuration: to.Ptr(\"00:00:30\"),\n\t// \t\t\t\t\t\t\tHealthPolicy: &armservicefabric.ClusterHealthPolicy{\n\t// \t\t\t\t\t\t\t\tMaxPercentUnhealthyApplications: to.Ptr[int32](0),\n\t// \t\t\t\t\t\t\t\tMaxPercentUnhealthyNodes: to.Ptr[int32](0),\n\t// \t\t\t\t\t\t\t},\n\t// \t\t\t\t\t\t\tUpgradeDomainTimeout: to.Ptr(\"00:15:00\"),\n\t// \t\t\t\t\t\t\tUpgradeReplicaSetCheckTimeout: to.Ptr(\"00:10:00\"),\n\t// \t\t\t\t\t\t\tUpgradeTimeout: to.Ptr(\"01:00:00\"),\n\t// \t\t\t\t\t\t},\n\t// \t\t\t\t\t\tUpgradeMode: to.Ptr(armservicefabric.UpgradeModeManual),\n\t// \t\t\t\t\t\tVMImage: to.Ptr(\"Ubuntu\"),\n\t// \t\t\t\t\t},\n\t// \t\t\t}},\n\t// \t\t}\n}", "func List() ([]clusterapi.Cluster, error) {\n\tvar 
clusterList []clusterapi.Cluster\n\terr := utils.BrowseMetadataContent(clusterapi.ClusterMetadataPrefix, func(buf *bytes.Buffer) error {\n\t\tvar c clusterapi.Cluster\n\t\terr := gob.NewDecoder(buf).Decode(&c)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tclusterList = append(clusterList, c)\n\t\treturn nil\n\t})\n\treturn clusterList, err\n}", "func (store *CenterStore) GetCenters(data []core.Elemt, space core.Space, k int, clust core.Clust) (core.Clust, error) {\n\tvar centers, ok = store.centers[k]\n\n\tif !ok {\n\t\treturn store.genCenters(data, space, k, clust)\n\t}\n\n\treturn centers, nil\n}", "func (s *ocmClient) GetCluster() (*ClusterInfo, error) {\n\n\t// fetch the clusterversion, which contains the internal ID\n\tcv := &configv1.ClusterVersion{}\n\terr := s.client.Get(context.TODO(), types.NamespacedName{Name: \"version\"}, cv)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"can't get clusterversion: %v\", err)\n\t}\n\texternalID := cv.Spec.ClusterID\n\n\tcsUrl, err := url.Parse(s.ocmBaseUrl.String())\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"can't parse OCM API url: %v\", err)\n\t}\n\tcsUrl.Path = path.Join(csUrl.Path, CLUSTERS_V1_PATH)\n\n\tresponse, err := s.httpClient.R().\n\t\tSetQueryParams(map[string]string{\n\t\t\t\"page\": \"1\",\n\t\t\t\"size\": \"1\",\n\t\t\t\"search\": fmt.Sprintf(\"external_id = '%s'\", externalID),\n\t\t}).\n\t\tSetResult(&ClusterList{}).\n\t\tExpectContentType(\"application/json\").\n\t\tGet(csUrl.String())\n\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"can't query OCM cluster service: request to '%v' returned error '%v'\", csUrl.String(), err)\n\t}\n\n\toperationId := response.Header().Get(OPERATION_ID_HEADER)\n\tif response.IsError() {\n\t\treturn nil, fmt.Errorf(\"request to '%v' received error code %v, operation id '%v'\", csUrl.String(), response.StatusCode(), operationId)\n\t}\n\n\tlog.Info(fmt.Sprintf(\"request to '%v' received response code %v, operation id: '%v'\", csUrl.String(), 
response.StatusCode(), operationId))\n\n\tlistResponse := response.Result().(*ClusterList)\n\tif listResponse.Size != 1 || len(listResponse.Items) != 1 {\n\t\treturn nil, ErrClusterIdNotFound\n\t}\n\n\treturn &listResponse.Items[0], nil\n}", "func RetrieveClusters(manifests string) cluster.Map {\n\tklog.V(1).Info(\"retrieving clusters from manifests\")\n\tclusters := cluster.Map{}\n\tdocuments := yamlutils.SplitDocuments(manifests)\n\tscheme := runtime.NewScheme()\n\tif err := clusterv1alpha1.AddToScheme(scheme); err != nil {\n\t\treturn cluster.Map{}\n\t}\n\tserializer := json.NewSerializerWithOptions(json.DefaultMetaFactory, scheme, scheme, json.SerializerOptions{Yaml: true})\n\tfor _, document := range documents {\n\t\tclusterObj := clusterv1alpha1.Cluster{}\n\t\tif _, _, err := serializer.Decode([]byte(document), nil, &clusterObj); err != nil || clusterObj.TypeMeta.Kind != \"Cluster\" {\n\t\t\tcontinue\n\t\t}\n\t\tinternalCluster, err := cluster.NewClusterFromv1alpha1(&clusterObj)\n\t\tif err != nil {\n\t\t\tcontinue\n\t\t}\n\t\tclusters[internalCluster.Name] = internalCluster\n\t}\n\treturn clusters\n}", "func (p *Provider) List() ([]string, error) {\n\treturn p.provider.ListClusters()\n}", "func Clusters(api API) http.HandlerFunc {\n\treturn func(w http.ResponseWriter, r *http.Request) {\n\t\tclusters := api.Clusters()\n\t\tm := make(map[string]map[string]any, len(clusters))\n\t\tfor _, c := range clusters {\n\t\t\tm[c.ID] = c.Debug()\n\t\t}\n\n\t\tdata, err := json.Marshal(m)\n\t\tif err != nil {\n\t\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\t\tfmt.Fprintf(w, \"could not marshal cluster debug map: %s\\n\", err)\n\t\t\treturn\n\t\t}\n\n\t\tw.Write(data)\n\t\tw.Write([]byte(\"\\n\"))\n\t}\n}", "func (svc ServerlessClusterService) Get(ctx context.Context,\n\tinput *models.GetServerlessClusterInput) (*models.Cluster, *Response, error) {\n\tvar cluster models.Cluster\n\tvar graphqlRequest = models.GraphqlRequest{\n\t\tName: \"cluster\",\n\t\tOperation: 
models.Query,\n\t\tInput: nil,\n\t\tArgs: *input,\n\t\tResponse: cluster,\n\t}\n\treq, err := svc.client.NewRequest(&graphqlRequest)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tresp, err := svc.client.Do(ctx, req, &cluster)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\treturn &cluster, resp, err\n}", "func Clusters(clusters map[string]cluster.Cluster) []cluster.Cluster {\n\tcs := make([]cluster.Cluster, 0, len(clusters))\n\tfor _, cls := range clusters {\n\t\tcs = append(cs, cls)\n\t}\n\treturn cs\n}", "func (p *v1Provider) GetCluster(w http.ResponseWriter, r *http.Request) {\n\thttpapi.IdentifyEndpoint(r, \"/v1/clusters/current\")\n\ttoken := p.CheckToken(r)\n\tif !token.Require(w, \"cluster:show_basic\") {\n\t\treturn\n\t}\n\tshowBasic := !token.Check(\"cluster:show\")\n\n\tfilter := reports.ReadFilter(r, p.Cluster.GetServiceTypesForArea)\n\tif showBasic {\n\t\tfilter.IsSubcapacityAllowed = func(serviceType, resourceName string) bool {\n\t\t\ttoken.Context.Request[\"service\"] = serviceType\n\t\t\ttoken.Context.Request[\"resource\"] = resourceName\n\t\t\treturn token.Check(\"cluster:show_subcapacity\")\n\t\t}\n\t}\n\n\tcluster, err := reports.GetClusterResources(p.Cluster, p.DB, filter)\n\tif respondwith.ErrorText(w, err) {\n\t\treturn\n\t}\n\trespondwith.JSON(w, 200, map[string]interface{}{\"cluster\": cluster})\n}", "func listClusters(w http.ResponseWriter, r *http.Request, t auth.Token) (err error) {\n\tctx := r.Context()\n\tallowed := permission.Check(t, permission.PermClusterRead)\n\tif !allowed {\n\t\treturn permission.ErrUnauthorized\n\t}\n\tclusters, err := servicemanager.Cluster.List(ctx)\n\tif err != nil {\n\t\tif err == provTypes.ErrNoCluster {\n\t\t\tw.WriteHeader(http.StatusNoContent)\n\t\t\treturn nil\n\t\t}\n\t\treturn err\n\t}\n\tadmin := permission.Check(t, permission.PermClusterAdmin)\n\tif !admin {\n\t\tfor i := range clusters {\n\t\t\tclusters[i].CleanUpSensitive()\n\t\t}\n\t}\n\tw.Header().Set(\"Content-Type\", 
\"application/json\")\n\treturn json.NewEncoder(w).Encode(clusters)\n}", "func AzureGetClusters(subscriptionID, clientID, clientSecret, tenantID, resourceGroupName string, admin bool) (string, error) {\n\tctx := context.Background()\n\tclient := containerservice.NewManagedClustersClient(subscriptionID)\n\n\tauthorizer, err := getAzureAuthorizer(clientID, clientSecret, tenantID)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tclient.Authorizer = authorizer\n\n\tvar clusters []string\n\n\tfor list, err := client.ListComplete(ctx); list.NotDone(); err = list.Next() {\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\n\t\tvar res containerservice.CredentialResults\n\t\tname := *list.Value().Name\n\n\t\tif admin {\n\t\t\tres, err = client.ListClusterAdminCredentials(ctx, resourceGroupName, name)\n\t\t\tif err != nil {\n\t\t\t\treturn \"\", err\n\t\t\t}\n\t\t} else {\n\t\t\tres, err = client.ListClusterUserCredentials(ctx, resourceGroupName, name)\n\t\t\tif err != nil {\n\t\t\t\treturn \"\", err\n\t\t\t}\n\t\t}\n\n\t\tfor _, kubeconfig := range *res.Kubeconfigs {\n\t\t\tvar kubeconfigJSON interface{}\n\t\t\terr := yaml.Unmarshal(*kubeconfig.Value, &kubeconfigJSON)\n\t\t\tif err != nil {\n\t\t\t\treturn \"\", err\n\t\t\t}\n\n\t\t\tkubeconfigJSON = convert(kubeconfigJSON)\n\t\t\tkubeconfigJSONString, err := json.Marshal(kubeconfigJSON)\n\t\t\tif err != nil {\n\t\t\t\treturn \"\", err\n\t\t\t}\n\n\t\t\tclusters = append(clusters, fmt.Sprintf(\"{\\\"name\\\": \\\"%s_%s_%s\\\", \\\"kubeconfig\\\": %s}\", *kubeconfig.Name, resourceGroupName, name, kubeconfigJSONString))\n\t\t}\n\t}\n\n\treturn fmt.Sprintf(\"[%s]\", strings.Join(clusters, \",\")), nil\n}", "func (a *Client) VirtualizationClustersRead(params *VirtualizationClustersReadParams) (*VirtualizationClustersReadOK, error) {\n\t// TODO: Validate the params before sending\n\tif params == nil {\n\t\tparams = NewVirtualizationClustersReadParams()\n\t}\n\n\tresult, err := 
a.transport.Submit(&runtime.ClientOperation{\n\t\tID: \"virtualization_clusters_read\",\n\t\tMethod: \"GET\",\n\t\tPathPattern: \"/api/virtualization/clusters/{id}/\",\n\t\tProducesMediaTypes: []string{\"application/json\"},\n\t\tConsumesMediaTypes: []string{\"application/json\"},\n\t\tSchemes: []string{\"http\"},\n\t\tParams: params,\n\t\tReader: &VirtualizationClustersReadReader{formats: a.formats},\n\t\tContext: params.Context,\n\t\tClient: params.HTTPClient,\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn result.(*VirtualizationClustersReadOK), nil\n\n}", "func GetClusterCIDRs(lister configlistersv1.NetworkLister, recorder events.Recorder) ([]string, error) {\n\tnetwork, err := lister.Get(\"cluster\")\n\tif errors.IsNotFound(err) {\n\t\trecorder.Warningf(\"ObserveRestrictedCIDRFailed\", \"Required networks.%s/cluster not found\", configv1.GroupName)\n\t\treturn nil, nil\n\t}\n\tif err != nil {\n\t\trecorder.Warningf(\"ObserveRestrictedCIDRFailed\", \"error getting networks.%s/cluster: %v\", configv1.GroupName, err)\n\t\treturn nil, err\n\t}\n\n\tif len(network.Status.ClusterNetwork) == 0 {\n\t\trecorder.Warningf(\"ObserveClusterCIDRFailed\", \"Required status.clusterNetwork field is not set in networks.%s/cluster\", configv1.GroupName)\n\t\treturn nil, fmt.Errorf(\"networks.%s/cluster: status.clusterNetwork not found\", configv1.GroupName)\n\t}\n\n\tvar clusterCIDRs []string\n\tfor i, clusterNetwork := range network.Status.ClusterNetwork {\n\t\tif len(clusterNetwork.CIDR) == 0 {\n\t\t\trecorder.Warningf(\"ObserveRestrictedCIDRFailed\", \"Required status.clusterNetwork[%d].cidr field is not set in networks.%s/cluster\", i, configv1.GroupName)\n\t\t\treturn nil, fmt.Errorf(\"networks.%s/cluster: status.clusterNetwork[%d].cidr not found\", configv1.GroupName, i)\n\t\t}\n\t\tclusterCIDRs = append(clusterCIDRs, clusterNetwork.CIDR)\n\t}\n\t// TODO fallback to podCIDR? 
is that still a thing?\n\treturn clusterCIDRs, nil\n}", "func (a *ClusterControllerApiService) GetClustersUsingGET(ctx _context.Context, account string, application string, clusterName string) apiGetClustersUsingGETRequest {\n\treturn apiGetClustersUsingGETRequest{\n\t\tapiService: a,\n\t\tctx: ctx,\n\t\taccount: account,\n\t\tapplication: application,\n\t\tclusterName: clusterName,\n\t}\n}", "func NewClusters(db *gorm.DB) *Clusters {\n\treturn &Clusters{db: db}\n}", "func (o *ResourceLimits) GetK8sClustersProvisioned() *int32 {\n\tif o == nil {\n\t\treturn nil\n\t}\n\n\treturn o.K8sClustersProvisioned\n}", "func ExampleSnowball_ListClusters_shared00() {\n\tsvc := snowball.New(session.New())\n\tinput := &snowball.ListClustersInput{}\n\n\tresult, err := svc.ListClusters(input)\n\tif err != nil {\n\t\tif aerr, ok := err.(awserr.Error); ok {\n\t\t\tswitch aerr.Code() {\n\t\t\tcase snowball.ErrCodeInvalidNextTokenException:\n\t\t\t\tfmt.Println(snowball.ErrCodeInvalidNextTokenException, aerr.Error())\n\t\t\tdefault:\n\t\t\t\tfmt.Println(aerr.Error())\n\t\t\t}\n\t\t} else {\n\t\t\t// Print the error, cast err to awserr.Error to get the Code and\n\t\t\t// Message from an error.\n\t\t\tfmt.Println(err.Error())\n\t\t}\n\t\treturn\n\t}\n\n\tfmt.Println(result)\n}", "func (a *ClustersApiService) ClusterServiceListClusters(ctx context.Context, body Servicev1ClusterQuery) (V1Clusterlist, *http.Response, error) {\n\tvar (\n\t\tlocalVarHttpMethod = strings.ToUpper(\"Post\")\n\t\tlocalVarPostBody interface{}\n\t\tlocalVarFileName string\n\t\tlocalVarFileBytes []byte\n\t\tlocalVarReturnValue V1Clusterlist\n\t)\n\n\t// create path and map variables\n\tlocalVarPath := a.client.cfg.BasePath + \"/gitops/api/v1/clusters\"\n\n\tlocalVarHeaderParams := make(map[string]string)\n\tlocalVarQueryParams := url.Values{}\n\tlocalVarFormParams := url.Values{}\n\n\t// to determine the Content-Type header\n\tlocalVarHttpContentTypes := []string{\"application/json\"}\n\n\t// set Content-Type 
header\n\tlocalVarHttpContentType := selectHeaderContentType(localVarHttpContentTypes)\n\tif localVarHttpContentType != \"\" {\n\t\tlocalVarHeaderParams[\"Content-Type\"] = localVarHttpContentType\n\t}\n\n\t// to determine the Accept header\n\tlocalVarHttpHeaderAccepts := []string{\"application/json\"}\n\n\t// set Accept header\n\tlocalVarHttpHeaderAccept := selectHeaderAccept(localVarHttpHeaderAccepts)\n\tif localVarHttpHeaderAccept != \"\" {\n\t\tlocalVarHeaderParams[\"Accept\"] = localVarHttpHeaderAccept\n\t}\n\tlocalVarQueryParams.Add(\"routingId\", body.AccountIdentifier)\n\t// body params\n\tlocalVarPostBody = &body\n\tif ctx != nil {\n\t\t// API Key Authentication\n\t\tif auth, ok := ctx.Value(ContextAPIKey).(APIKey); ok {\n\t\t\tvar key string\n\t\t\tif auth.Prefix != \"\" {\n\t\t\t\tkey = auth.Prefix + \" \" + auth.Key\n\t\t\t} else {\n\t\t\t\tkey = auth.Key\n\t\t\t}\n\t\t\tlocalVarHeaderParams[\"x-api-key\"] = key\n\n\t\t}\n\t}\n\tr, err := a.client.prepareRequest(ctx, localVarPath, localVarHttpMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFileName, localVarFileBytes)\n\tif err != nil {\n\t\treturn localVarReturnValue, nil, err\n\t}\n\n\tlocalVarHttpResponse, err := a.client.callAPI(r)\n\tif err != nil || localVarHttpResponse == nil {\n\t\treturn localVarReturnValue, localVarHttpResponse, err\n\t}\n\n\tlocalVarBody, err := ioutil.ReadAll(localVarHttpResponse.Body)\n\tlocalVarHttpResponse.Body.Close()\n\tif err != nil {\n\t\treturn localVarReturnValue, localVarHttpResponse, err\n\t}\n\n\tif localVarHttpResponse.StatusCode < 300 {\n\t\t// If we succeed, return the data, otherwise pass on to decode error.\n\t\terr = a.client.decode(&localVarReturnValue, localVarBody, localVarHttpResponse.Header.Get(\"Content-Type\"))\n\t\tif err == nil {\n\t\t\treturn localVarReturnValue, localVarHttpResponse, err\n\t\t}\n\t}\n\n\tif localVarHttpResponse.StatusCode >= 300 {\n\t\tnewErr := GenericSwaggerError{\n\t\t\tbody: 
localVarBody,\n\t\t\terror: localVarHttpResponse.Status,\n\t\t}\n\t\tif localVarHttpResponse.StatusCode == 200 {\n\t\t\tvar v V1Clusterlist\n\t\t\terr = a.client.decode(&v, localVarBody, localVarHttpResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn localVarReturnValue, localVarHttpResponse, newErr\n\t\t\t}\n\t\t\tnewErr.model = v\n\t\t\treturn localVarReturnValue, localVarHttpResponse, newErr\n\t\t}\n\t\tif localVarHttpResponse.StatusCode == 0 {\n\t\t\tvar v GatewayruntimeError\n\t\t\terr = a.client.decode(&v, localVarBody, localVarHttpResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn localVarReturnValue, localVarHttpResponse, newErr\n\t\t\t}\n\t\t\tnewErr.model = v\n\t\t\treturn localVarReturnValue, localVarHttpResponse, newErr\n\t\t}\n\t\treturn localVarReturnValue, localVarHttpResponse, newErr\n\t}\n\n\treturn localVarReturnValue, localVarHttpResponse, nil\n}", "func AWSGetClusters(accessKeyId, secretAccessKey, region string) (string, error) {\n\tvar clusters []*eks.Cluster\n\tvar names []*string\n\tvar nextToken *string\n\n\tcred := credentials.NewStaticCredentials(accessKeyId, secretAccessKey, \"\")\n\n\tsess, err := session.NewSession(&aws.Config{Region: aws.String(region), Credentials: cred})\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\teksClient := eks.New(sess)\n\n\tfor {\n\t\tc, err := eksClient.ListClusters(&eks.ListClustersInput{NextToken: nextToken})\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\n\t\tnames = append(names, c.Clusters...)\n\n\t\tif c.NextToken == nil {\n\t\t\tbreak\n\t\t}\n\n\t\tnextToken = c.NextToken\n\t}\n\n\tfor _, name := range names {\n\t\tcluster, err := eksClient.DescribeCluster(&eks.DescribeClusterInput{Name: name})\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\n\t\tif *cluster.Cluster.Status == eks.ClusterStatusActive {\n\t\t\tclusters = append(clusters, 
cluster.Cluster)\n\t\t}\n\t}\n\n\tif clusters != nil {\n\t\tb, err := json.Marshal(clusters)\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\n\t\treturn string(b), nil\n\t}\n\n\treturn \"\", nil\n}", "func (a *Client) VirtualizationClustersList(params *VirtualizationClustersListParams) (*VirtualizationClustersListOK, error) {\n\t// TODO: Validate the params before sending\n\tif params == nil {\n\t\tparams = NewVirtualizationClustersListParams()\n\t}\n\n\tresult, err := a.transport.Submit(&runtime.ClientOperation{\n\t\tID: \"virtualization_clusters_list\",\n\t\tMethod: \"GET\",\n\t\tPathPattern: \"/api/virtualization/clusters/\",\n\t\tProducesMediaTypes: []string{\"application/json\"},\n\t\tConsumesMediaTypes: []string{\"application/json\"},\n\t\tSchemes: []string{\"http\"},\n\t\tParams: params,\n\t\tReader: &VirtualizationClustersListReader{formats: a.formats},\n\t\tContext: params.Context,\n\t\tClient: params.HTTPClient,\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn result.(*VirtualizationClustersListOK), nil\n\n}", "func NewGetClustersOK() *GetClustersOK {\n\treturn &GetClustersOK{}\n}", "func getClusterNameForMultiVC(ctx context.Context, vs *multiVCvSphere,\n\tclientIndex int) ([]*object.ClusterComputeResource,\n\t*VsanClient, error) {\n\n\tvar vsanHealthClient *VsanClient\n\tvar err error\n\tc := newClientForMultiVC(ctx, vs)\n\n\tdatacenter := strings.Split(multiVCe2eVSphere.multivcConfig.Global.Datacenters, \",\")\n\n\tfor i, client := range c {\n\t\tif clientIndex == i {\n\t\t\tvsanHealthClient, err = newVsanHealthSvcClient(ctx, client.Client)\n\t\t\tgomega.Expect(err).NotTo(gomega.HaveOccurred())\n\t\t}\n\t}\n\n\tfinder := find.NewFinder(vsanHealthClient.vim25Client, false)\n\tdc, err := finder.Datacenter(ctx, datacenter[0])\n\tgomega.Expect(err).NotTo(gomega.HaveOccurred())\n\tfinder.SetDatacenter(dc)\n\n\tclusterComputeResource, err := finder.ClusterComputeResourceList(ctx, \"*\")\n\tframework.Logf(\"clusterComputeResource %v\", 
clusterComputeResource)\n\tgomega.Expect(err).NotTo(gomega.HaveOccurred())\n\n\treturn clusterComputeResource, vsanHealthClient, err\n}", "func (o GetClustersResultOutput) ClusterIdentifiers() pulumi.StringArrayOutput {\n\treturn o.ApplyT(func(v GetClustersResult) []string { return v.ClusterIdentifiers }).(pulumi.StringArrayOutput)\n}", "func (elementConfiguration *ElementConfiguration) ListClusters() ([]string, error) {\n\t// collect names\n\tclusterConfigurations := []string{}\n\n elementConfiguration.ClustersX.RLock()\n\tfor clusterConfiguration := range elementConfiguration.Clusters {\n\t\tclusterConfigurations = append(clusterConfigurations, clusterConfiguration)\n\t}\n\telementConfiguration.ClustersX.RUnlock()\n\n\t// success\n\treturn clusterConfigurations, nil\n}", "func (a *Client) GetMsgVpnDistributedCacheClusters(params *GetMsgVpnDistributedCacheClustersParams, authInfo runtime.ClientAuthInfoWriter) (*GetMsgVpnDistributedCacheClustersOK, error) {\n\t// TODO: Validate the params before sending\n\tif params == nil {\n\t\tparams = NewGetMsgVpnDistributedCacheClustersParams()\n\t}\n\n\tresult, err := a.transport.Submit(&runtime.ClientOperation{\n\t\tID: \"getMsgVpnDistributedCacheClusters\",\n\t\tMethod: \"GET\",\n\t\tPathPattern: \"/msgVpns/{msgVpnName}/distributedCaches/{cacheName}/clusters\",\n\t\tProducesMediaTypes: []string{\"application/json\"},\n\t\tConsumesMediaTypes: []string{\"application/json\"},\n\t\tSchemes: []string{\"http\", \"https\"},\n\t\tParams: params,\n\t\tReader: &GetMsgVpnDistributedCacheClustersReader{formats: a.formats},\n\t\tAuthInfo: authInfo,\n\t\tContext: params.Context,\n\t\tClient: params.HTTPClient,\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn result.(*GetMsgVpnDistributedCacheClustersOK), nil\n\n}", "func (cb *clientBase) GetCluster() string {\n\treturn cb.cluster\n}", "func (s *RpcClient) GetClusterNodes(ctx context.Context) ([]GetClusterNodesResponse, error) {\n\tres := struct 
{\n\t\tGeneralResponse\n\t\tResult []GetClusterNodesResponse `json:\"result\"`\n\t}{}\n\terr := s.request(ctx, \"getClusterNodes\", []interface{}{}, &res)\n\tif err != nil {\n\t\treturn []GetClusterNodesResponse{}, err\n\t}\n\tif res.Error != nil {\n\t\treturn []GetClusterNodesResponse{}, errors.New(res.Error.Message)\n\t}\n\treturn res.Result, nil\n}", "func (op *outputProvider) GetRemoteClusters(opts ...services.MarshalOption) ([]types.RemoteCluster, error) {\n\treturn op.impersonatedClient.GetRemoteClusters(opts...)\n}", "func (page ClusterListResultPage) Values() []Cluster {\n\tif page.clr.IsEmpty() {\n\t\treturn nil\n\t}\n\treturn *page.clr.Value\n}", "func (m *RedisProxy) GetCluster() string {\n\tif m != nil {\n\t\treturn m.Cluster\n\t}\n\treturn \"\"\n}", "func (connection *Connection) GetClusterNodes() []*URL {\n\tif connection.IsCluster() {\n\t\treturn connection.adabasToData.transactions.clusterNodes\n\t}\n\treturn make([]*URL, 0)\n}", "func GetClusterId() string {\n\treturn axClusterId\n}", "func (a *Client) V2ListClusters(ctx context.Context, params *V2ListClustersParams) (*V2ListClustersOK, error) {\n\n\tresult, err := a.transport.Submit(&runtime.ClientOperation{\n\t\tID: \"v2ListClusters\",\n\t\tMethod: \"GET\",\n\t\tPathPattern: \"/v2/clusters\",\n\t\tProducesMediaTypes: []string{\"application/json\"},\n\t\tConsumesMediaTypes: []string{\"application/json\"},\n\t\tSchemes: []string{\"http\", \"https\"},\n\t\tParams: params,\n\t\tReader: &V2ListClustersReader{formats: a.formats},\n\t\tAuthInfo: a.authInfo,\n\t\tContext: ctx,\n\t\tClient: params.HTTPClient,\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn result.(*V2ListClustersOK), nil\n\n}", "func ListAllClusters(response *JsonListClustersMap) *JsonListClustersMap {\n\tvar SIDCluster int\n\tvar SName string\n\tvar SAWSAccount int64\n\tvar SAWSRegion string\n\tvar SAWSEnvironment string\n\tvar SK8sVersion string\n\n\tvar SNodeType string\n\tvar SNodeInstance string\n\tvar STotalInstances 
int\n\n\tvar totalInstances int\n\n\tdescription := make(DescriptionMap)\n\n\tdb, err := sql.Open(\"mysql\", UserDB+\":\"+PassDB+\"@tcp(\"+HostDB+\":\"+PortDB+\")/\"+DatabaseDB+\"?charset=utf8\")\n\tcheckErr(err)\n\n\tdefer db.Close()\n\n\trows, err := db.Query(\"SELECT id_cluster, nome, aws_account, aws_region, aws_env, k8s_version FROM clusters ORDER BY nome\")\n\tcheckErr(err)\n\n\tfor rows.Next() {\n\t\terr = rows.Scan(&SIDCluster, &SName, &SAWSAccount, &SAWSRegion, &SAWSEnvironment, &SK8sVersion)\n\t\tcheckErr(err)\n\n\t\tdescription = DescriptionMap{}\n\t\ttotalInstances = 0\n\n\t\trows1, err := db.Query(\"SELECT node_type, node_instance, total_instances FROM nodes WHERE id_cluster=?\", SIDCluster)\n\t\tcheckErr(err)\n\n\t\tfor rows1.Next() {\n\t\t\terr = rows1.Scan(&SNodeType, &SNodeInstance, &STotalInstances)\n\t\t\tcheckErr(err)\n\n\t\t\tdescription[SNodeType] = append(\n\t\t\t\tdescription[SNodeType],\n\t\t\t\tDescriptionStruct{\n\t\t\t\t\tDescription{\n\t\t\t\t\t\tType: SNodeInstance,\n\t\t\t\t\t\tTotalTypeInstances: STotalInstances,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t)\n\n\t\t\ttotalInstances = totalInstances + STotalInstances\n\t\t}\n\n\t\t*response = append(\n\t\t\t*response,\n\t\t\tjsonListClusters{\n\t\t\t\tClusterName: SName,\n\t\t\t\tAws: AWS{\n\t\t\t\t\tAccount: SAWSAccount,\n\t\t\t\t\tRegion: SAWSRegion,\n\t\t\t\t\tEnvironment: SAWSEnvironment,\n\t\t\t\t},\n\t\t\t\tK8SVersion: SK8sVersion,\n\t\t\t\tInstances: Instances{\n\t\t\t\t\tTotalInstances: totalInstances,\n\t\t\t\t\tDescription: description,\n\t\t\t\t},\n\t\t\t},\n\t\t)\n\t}\n\n\treturn response\n}", "func (m *Manager) GetClusterList() ([]Cluster, error) {\n\tnames, err := m.specManager.List()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar clusters = []Cluster{}\n\n\tfor _, name := range names {\n\t\tmetadata, err := m.meta(name)\n\t\tif err != nil && !errors.Is(perrs.Cause(err), meta.ErrValidate) &&\n\t\t\t!errors.Is(perrs.Cause(err), spec.ErrNoTiSparkMaster) {\n\t\t\treturn nil, 
perrs.Trace(err)\n\t\t}\n\n\t\tbase := metadata.GetBaseMeta()\n\n\t\tclusters = append(clusters, Cluster{\n\t\t\tName: name,\n\t\t\tUser: base.User,\n\t\t\tVersion: base.Version,\n\t\t\tPath: m.specManager.Path(name),\n\t\t\tPrivateKey: m.specManager.Path(name, \"ssh\", \"id_rsa\"),\n\t\t})\n\t}\n\n\treturn clusters, nil\n}", "func (r *ProjectsInstancesClustersService) List(parent string) *ProjectsInstancesClustersListCall {\n\tc := &ProjectsInstancesClustersListCall{s: r.s, urlParams_: make(gensupport.URLParams)}\n\tc.parent = parent\n\treturn c\n}", "func GetClusterMode() string {\n\treturn masterRTCfg.clusterMode\n}", "func (p PGSQLConnection) GetAllClusters() ([]ClusterModel, error) {\n\tclusters := []ClusterModel{}\n\tif err := p.connection.Select(&clusters, \"SELECT * FROM clusters\"); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn clusters, nil\n}", "func (o GetClustersClusterOutput) ClusterId() pulumi.StringOutput {\n\treturn o.ApplyT(func(v GetClustersCluster) string { return v.ClusterId }).(pulumi.StringOutput)\n}", "func clusterList() []string {\n\tif c := envy.String(\"DQLITED_CLUSTER\"); c != \"\" {\n\t\treturn strings.Split(c, \",\")\n\t}\n\treturn defaultCluster\n}", "func (d *Dao) OverlordClusters(c context.Context, zone, appid string) (ocs []*model.OverlordCluster, err error) {\n\tvar res struct {\n\t\tData []*model.OverlordApiserver `json:\"grouped_clusters\"`\n\t}\n\tif err = d.client.RESTfulGet(c, apiserverURI, \"\", nil, &res, appid); err != nil {\n\t\tlog.Error(\"overlord cluster url(%s) appid(%s) error(%v)\", apiserverURI, appid, err)\n\t\treturn\n\t}\nGETALL:\n\tfor _, oa := range res.Data {\n\t\tif zone == \"\" || oa.Group == zone {\n\t\t\tfor _, oc := range oa.Clusters {\n\t\t\t\tcluster := &model.OverlordCluster{\n\t\t\t\t\tName: oc.Name,\n\t\t\t\t\tType: oc.Type,\n\t\t\t\t\tZone: zone,\n\t\t\t\t\tHashMethod: \"fnv1a_64\",\n\t\t\t\t\tHashDistribution: \"ketama\",\n\t\t\t\t\tHashTag: \"{}\",\n\t\t\t\t\tListenProto: 
\"tcp\",\n\t\t\t\t\tListenAddr: net.JoinHostPort(\"0.0.0.0\", strconv.Itoa(oc.FrontEndPort)),\n\t\t\t\t\tDailTimeout: 1000,\n\t\t\t\t\tReadTimeout: 1000,\n\t\t\t\t\tWriteTimeout: 1000,\n\t\t\t\t\tNodeConn: 2,\n\t\t\t\t\tPingFailLimit: 3,\n\t\t\t\t\tPingAutoEject: true,\n\t\t\t\t}\n\t\t\t\tfor _, oci := range oc.Instances {\n\t\t\t\t\tif oc.Type == \"redis_cluster\" && oci.Role != \"master\" {\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t}\n\t\t\t\t\ton := &model.OverlordNode{\n\t\t\t\t\t\tAlias: oci.Alias,\n\t\t\t\t\t\tAddr: net.JoinHostPort(oci.IP, strconv.Itoa(oci.Port)),\n\t\t\t\t\t\tWeight: oci.Weight,\n\t\t\t\t\t}\n\t\t\t\t\tcluster.Nodes = append(cluster.Nodes, on)\n\t\t\t\t}\n\t\t\t\tocs = append(ocs, cluster)\n\t\t\t}\n\t\t}\n\t}\n\tif len(ocs) == 0 && zone != \"\" {\n\t\tzone = \"\"\n\t\tgoto GETALL\n\t}\n\treturn\n}", "func fetchCluster(c *gin.Context) string {\n\tconst key = \"cluster\"\n\n\tswitch {\n\tcase len(c.Param(key)) > 0:\n\t\treturn c.Param(key)\n\tcase len(c.Query(key)) > 0:\n\t\treturn c.Query(key)\n\tcase len(c.PostForm(key)) > 0:\n\t\treturn c.PostForm(key)\n\tdefault:\n\t\treturn \"\"\n\t}\n}", "func (a *Client) GetClusterCredentials(params *GetClusterCredentialsParams, authInfo runtime.ClientAuthInfoWriter) (*GetClusterCredentialsOK, error) {\n\t// TODO: Validate the params before sending\n\tif params == nil {\n\t\tparams = NewGetClusterCredentialsParams()\n\t}\n\n\tresult, err := a.transport.Submit(&runtime.ClientOperation{\n\t\tID: \"GetClusterCredentials\",\n\t\tMethod: \"GET\",\n\t\tPathPattern: \"/api/v1/clusters/{name}/credentials\",\n\t\tProducesMediaTypes: []string{\"application/json\"},\n\t\tConsumesMediaTypes: []string{\"application/json\"},\n\t\tSchemes: []string{\"https\"},\n\t\tParams: params,\n\t\tReader: &GetClusterCredentialsReader{formats: a.formats},\n\t\tAuthInfo: authInfo,\n\t\tContext: params.Context,\n\t\tClient: params.HTTPClient,\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn result.(*GetClusterCredentialsOK), 
nil\n\n}", "func (o LookupResponsePolicyResultOutput) GkeClusters() ResponsePolicyGKEClusterResponseArrayOutput {\n\treturn o.ApplyT(func(v LookupResponsePolicyResult) []ResponsePolicyGKEClusterResponse { return v.GkeClusters }).(ResponsePolicyGKEClusterResponseArrayOutput)\n}", "func GetManegementCluster(version, capiImage, capdImage string) ([]runtime.Object, error) {\n\tcapiObjects, err := GetCAPI(version, capiImage)\n\tif err != nil {\n\t\treturn []runtime.Object{}, err\n\t}\n\n\tnamespaceObj := GetNamespace()\n\tstatefulSet := GetStatefulSet(capdImage)\n\tclusterRole := GetClusterRole()\n\tclusterRoleBinding := GetClusterRoleBinding()\n\n\treturn append(capiObjects,\n\t\t&namespaceObj,\n\t\t&statefulSet,\n\t\t&clusterRole,\n\t\t&clusterRoleBinding,\n\t), nil\n}", "func (c *Client) Cluster(ctx context.Context) ([]NodeInfo, error) {\n\trequest := protocol.Message{}\n\trequest.Init(16)\n\tresponse := protocol.Message{}\n\tresponse.Init(512)\n\n\tprotocol.EncodeCluster(&request, protocol.ClusterFormatV1)\n\n\tif err := c.protocol.Call(ctx, &request, &response); err != nil {\n\t\treturn nil, errors.Wrap(err, \"failed to send Cluster request\")\n\t}\n\n\tservers, err := protocol.DecodeNodes(&response)\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"failed to parse Node response\")\n\t}\n\n\treturn servers, nil\n}", "func GetClusterIPs(service *corev1.Service) []string {\n\tclusterIPs := []string{service.Spec.ClusterIP}\n\tif len(service.Spec.ClusterIPs) > 0 {\n\t\tclusterIPs = service.Spec.ClusterIPs\n\t}\n\n\t// Same IPv6 could be represented differently (as from rfc5952):\n\t// 2001:db8:0:0:aaaa::1\n\t// 2001:db8::aaaa:0:0:1\n\t// 2001:db8:0::aaaa:0:0:1\n\t// net.ParseIP(ip).String() output is used as a normalization form\n\t// for all cases above it returns 2001:db8::aaaa:0:0:1\n\t// without the normalization there could be mismatches in key lookups e.g. 
for PTR\n\tnormalized := make([]string, 0, len(clusterIPs))\n\tfor _, ip := range clusterIPs {\n\t\tnormalized = append(normalized, net.ParseIP(ip).String())\n\t}\n\n\treturn normalized\n}", "func (a *Client) ListAvailableClusters(ctx context.Context, params *ListAvailableClustersParams) (*ListAvailableClustersOK, error) {\n\n\tresult, err := a.transport.Submit(&runtime.ClientOperation{\n\t\tID: \"listAvailableClusters\",\n\t\tMethod: \"GET\",\n\t\tPathPattern: \"/heappe/ClusterInformation/ListAvailableClusters\",\n\t\tProducesMediaTypes: []string{\"application/json\"},\n\t\tConsumesMediaTypes: []string{\"application/json\"},\n\t\tSchemes: []string{\"http\"},\n\t\tParams: params,\n\t\tReader: &ListAvailableClustersReader{formats: a.formats},\n\t\tAuthInfo: a.authInfo,\n\t\tContext: ctx,\n\t\tClient: params.HTTPClient,\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn result.(*ListAvailableClustersOK), nil\n\n}" ]
[ "0.7150111", "0.70848936", "0.7050372", "0.69830257", "0.69812804", "0.68225586", "0.6785897", "0.67075115", "0.67008924", "0.66717374", "0.6653548", "0.6628853", "0.65589386", "0.6548702", "0.6535888", "0.6530604", "0.6489139", "0.6475645", "0.6451163", "0.6437961", "0.64292145", "0.6420646", "0.64115655", "0.6351999", "0.6342829", "0.63368356", "0.63260955", "0.63172734", "0.6294134", "0.6274878", "0.62505454", "0.62502795", "0.6240707", "0.6152818", "0.61403", "0.61298174", "0.6111065", "0.6086631", "0.60699946", "0.605888", "0.6055227", "0.6039949", "0.60248417", "0.60127914", "0.5990848", "0.5971463", "0.5963538", "0.5935251", "0.59316564", "0.59266365", "0.59207803", "0.59110713", "0.5910659", "0.590048", "0.5897609", "0.58610535", "0.58484685", "0.5844702", "0.58280903", "0.5825456", "0.58106434", "0.5751794", "0.5746291", "0.5745103", "0.57434946", "0.5741837", "0.5721784", "0.57198244", "0.57118505", "0.5692283", "0.567878", "0.5669264", "0.56662285", "0.5651515", "0.56448567", "0.563336", "0.56233066", "0.56175965", "0.5593433", "0.55922747", "0.5577536", "0.55719817", "0.55662704", "0.5562364", "0.5552811", "0.55524254", "0.5536599", "0.55298865", "0.5524156", "0.5522188", "0.5513524", "0.55062175", "0.55043143", "0.5485468", "0.5481368", "0.5479709", "0.5474633", "0.54723483", "0.5467098", "0.5448341" ]
0.76226926
0
GetClustersOk returns a tuple with the Clusters field value and a boolean to check if the value has been set.
GetClustersOk возвращает кортеж с значением поля Clusters и булевым значением для проверки, было ли значение задано.
func (o *QueueManager) GetClustersOk() (*[]string, bool) { if o == nil { return nil, false } return &o.Clusters, true }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func NewGetClustersOK() *GetClustersOK {\n\treturn &GetClustersOK{}\n}", "func (o *ResourceLimits) GetK8sClustersProvisionedOk() (*int32, bool) {\n\tif o == nil {\n\t\treturn nil, false\n\t}\n\treturn o.K8sClustersProvisioned, true\n}", "func (o *VirtualizationVmwareVirtualMachineAllOf) GetClusterOk() (*VirtualizationVmwareClusterRelationship, bool) {\n\tif o == nil || o.Cluster == nil {\n\t\treturn nil, false\n\t}\n\treturn o.Cluster, true\n}", "func NewDescribeClustersOK() *DescribeClustersOK {\n\n\treturn &DescribeClustersOK{}\n}", "func (c *cloud) Clusters() (cloudprovider.Clusters, bool) {\n\tklog.V(4).Infof(\"Clusters called\")\n\treturn nil, false\n}", "func (c *Config) GetClusters(ctx context.Context, quiet bool, filterMap map[string]string, clustersName ...string) (string, error) {\n\tc.Logger.Debugf(\"Sending parameters to server to get the clusters %q\", strings.Join(clustersName, \", \"))\n\n\tfilter := MapToSlice(filterMap)\n\n\treturn c.RunGRPCnRESTFunc(\"get\", true,\n\t\tfunc() (string, error) {\n\t\t\treturn c.getClustersGRPC(ctx, quiet, filter, clustersName...)\n\t\t},\n\t\tfunc() (string, error) {\n\t\t\treturn c.getClustersHTTP(quiet, filter, clustersName...)\n\t\t})\n}", "func (o *ListClustersOnEndpointUsingGETOK) IsSuccess() bool {\n\treturn true\n}", "func (c *cloud) Clusters() (cloudprovider.Clusters, bool) {\n\treturn nil, false\n}", "func (o AppProjectSpecSyncWindowsOutput) Clusters() pulumi.StringArrayOutput {\n\treturn o.ApplyT(func(v AppProjectSpecSyncWindows) []string { return v.Clusters }).(pulumi.StringArrayOutput)\n}", "func (a *Client) GetClusters(params *GetClustersParams, opts ...ClientOption) (*GetClustersOK, *GetClustersMultiStatus, error) {\n\t// TODO: Validate the params before sending\n\tif params == nil {\n\t\tparams = NewGetClustersParams()\n\t}\n\top := &runtime.ClientOperation{\n\t\tID: \"GetClusters\",\n\t\tMethod: \"GET\",\n\t\tPathPattern: 
\"/kubernetes-protection/entities/kubernetes/clusters/v1\",\n\t\tProducesMediaTypes: []string{\"application/json\"},\n\t\tConsumesMediaTypes: []string{\"application/json\", \"application/octet-stream\"},\n\t\tSchemes: []string{\"https\"},\n\t\tParams: params,\n\t\tReader: &GetClustersReader{formats: a.formats},\n\t\tContext: params.Context,\n\t\tClient: params.HTTPClient,\n\t}\n\tfor _, opt := range opts {\n\t\topt(op)\n\t}\n\n\tresult, err := a.transport.Submit(op)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\tswitch value := result.(type) {\n\tcase *GetClustersOK:\n\t\treturn value, nil, nil\n\tcase *GetClustersMultiStatus:\n\t\treturn nil, value, nil\n\t}\n\t// safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue\n\tmsg := fmt.Sprintf(\"unexpected success response for kubernetes_protection: API contract not enforced by server. Client expected to get an error, but got: %T\", result)\n\tpanic(msg)\n}", "func (o *NiatelemetryNexusDashboardsAllOf) GetClusterNameOk() (*string, bool) {\n\tif o == nil || o.ClusterName == nil {\n\t\treturn nil, false\n\t}\n\treturn o.ClusterName, true\n}", "func (o *ProjectDeploymentRuleResponse) GetClusterOk() (*string, bool) {\n\tif o == nil {\n\t\treturn nil, false\n\t}\n\treturn &o.Cluster, true\n}", "func (h *httpCloud) Clusters() (cloudprovider.Clusters, bool) {\n\treturn nil, false\n}", "func (cloud *Cloud) Clusters() (cloudprovider.Clusters, bool) {\n\treturn nil, false\n}", "func (o *VirtualizationIweClusterAllOf) GetClusterNameOk() (*string, bool) {\n\tif o == nil || o.ClusterName == nil {\n\t\treturn nil, false\n\t}\n\treturn o.ClusterName, true\n}", "func (o *ClusterSummaryDTO) GetClusteredOk() (*bool, bool) {\n\tif o == nil || o.Clustered == nil {\n\t\treturn nil, false\n\t}\n\treturn o.Clustered, true\n}", "func (az *Cloud) Clusters() (cloudprovider.Clusters, bool) {\n\treturn nil, false\n}", "func compareClusters(got, want *types.Cluster) bool 
{\n\tresult := false\n\tif reflect.DeepEqual(got.Status, want.Status) {\n\t\tresult = true\n\t}\n\n\treturn result\n}", "func (bc *Baiducloud) Clusters() (cloudprovider.Clusters, bool) {\n\treturn nil, false\n}", "func (o *VirtualizationBaseHostPciDeviceAllOf) GetClusterOk() (*VirtualizationBaseClusterRelationship, bool) {\n\tif o == nil || o.Cluster == nil {\n\t\treturn nil, false\n\t}\n\treturn o.Cluster, true\n}", "func NewListClustersOK() *ListClustersOK {\n\treturn &ListClustersOK{}\n}", "func (o *VirtualizationIweVirtualMachine) GetClusterOk() (*VirtualizationIweClusterRelationship, bool) {\n\tif o == nil || o.Cluster == nil {\n\t\treturn nil, false\n\t}\n\treturn o.Cluster, true\n}", "func (o *QueueManager) GetClusters() []string {\n\tif o == nil {\n\t\tvar ret []string\n\t\treturn ret\n\t}\n\n\treturn o.Clusters\n}", "func (o *HyperflexEncryption) GetClusterOk() (*HyperflexClusterRelationship, bool) {\n\tif o == nil || o.Cluster == nil {\n\t\treturn nil, false\n\t}\n\treturn o.Cluster, true\n}", "func (o *VirtualizationIweHost) GetClusterOk() (*VirtualizationIweClusterRelationship, bool) {\n\tif o == nil || o.Cluster == nil {\n\t\treturn nil, false\n\t}\n\treturn o.Cluster, true\n}", "func (o *StorageHyperFlexStorageContainer) GetClusterOk() (*HyperflexClusterRelationship, bool) {\n\tif o == nil || o.Cluster == nil {\n\t\treturn nil, false\n\t}\n\treturn o.Cluster, true\n}", "func (o *ListClustersOnEndpointUsingGETForbidden) IsSuccess() bool {\n\treturn false\n}", "func (o *HyperflexMapClusterIdToProtectionInfoAllOf) GetClusterIdOk() (*string, bool) {\n\tif o == nil || o.ClusterId == nil {\n\t\treturn nil, false\n\t}\n\treturn o.ClusterId, true\n}", "func (o *ClusterSummaryDTO) GetConnectedToClusterOk() (*bool, bool) {\n\tif o == nil || o.ConnectedToCluster == nil {\n\t\treturn nil, false\n\t}\n\treturn o.ConnectedToCluster, true\n}", "func (o *MoveClustersAccepted) IsSuccess() bool {\n\treturn true\n}", "func (s *RaftDatabase) Clusters() int {\n\treturn 
GetArg(s.name, \"clusters\").Int(s.clusters)\n}", "func (c *ClientImpl) GetClusters(ctx context.Context, hcpHostURL string) (models.ClusterResp, error) {\n\tspan, _ := opentracing.StartSpanFromContext(ctx, \"Get Clusters\")\n\tdefer span.Finish()\n\n\tsession, err := c.getSession(ctx, hcpHostURL, hcpUserName, hcpPassword)\n\tif err != nil {\n\t\treturn models.ClusterResp{}, err\n\t}\n\n\tstatus = Failure\n\tmonitor := metrics.StartExternalCall(externalSvcName, \"Fetch Clusters\")\n\tdefer func() { monitor.RecordWithStatus(status) }()\n\n\tresp, err := mlopsHttp.ExecuteHTTPRequest(\n\t\tctx,\n\t\tc.client,\n\t\thcpHostURL+clusterPathV2,\n\t\thttp.MethodGet,\n\t\tmap[string]string{sessionHeader: session},\n\t\tbytes.NewReader(nil),\n\t)\n\tif err != nil {\n\t\treturn models.ClusterResp{}, errors.Wrapf(err, \"while fetching clusters in MLOps controller platform.\")\n\t}\n\tresp.Body.Close()\n\n\tstatus = Success\n\n\terr = c.deleteSession(ctx, hcpHostURL, session)\n\tif err != nil {\n\t\treturn models.ClusterResp{}, err\n\t}\n\n\tclustersResp := models.ClusterResp{}\n\tjson.NewDecoder(resp.Body).Decode(&clustersResp)\n\n\treturn clustersResp, nil\n}", "func (a *Client) ListClusters(params *ListClustersParams, authInfo runtime.ClientAuthInfoWriter) (*ListClustersOK, error) {\n\t// TODO: Validate the params before sending\n\tif params == nil {\n\t\tparams = NewListClustersParams()\n\t}\n\n\tresult, err := a.transport.Submit(&runtime.ClientOperation{\n\t\tID: \"ListClusters\",\n\t\tMethod: \"GET\",\n\t\tPathPattern: \"/api/v1/clusters\",\n\t\tProducesMediaTypes: []string{\"application/json\"},\n\t\tConsumesMediaTypes: []string{\"application/json\"},\n\t\tSchemes: []string{\"https\"},\n\t\tParams: params,\n\t\tReader: &ListClustersReader{formats: a.formats},\n\t\tAuthInfo: authInfo,\n\t\tContext: params.Context,\n\t\tClient: params.HTTPClient,\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn result.(*ListClustersOK), nil\n\n}", "func (o *QueueManager) 
GetClusterQueuesOk() (*[]ClusterQueue, bool) {\n\tif o == nil {\n\t\treturn nil, false\n\t}\n\treturn &o.ClusterQueues, true\n}", "func (o *V0037Node) GetCoresOk() (*int32, bool) {\n\tif o == nil || o.Cores == nil {\n\t\treturn nil, false\n\t}\n\treturn o.Cores, true\n}", "func (o *ClusterSummaryDTO) GetConnectedNodesOk() (*string, bool) {\n\tif o == nil || o.ConnectedNodes == nil {\n\t\treturn nil, false\n\t}\n\treturn o.ConnectedNodes, true\n}", "func (e *ECS) ListClusters(req *ListClustersReq) (\n\t*ListClustersResp, error) {\n\tif req == nil {\n\t\treturn nil, fmt.Errorf(\"The req params cannot be nil\")\n\t}\n\n\tparams := makeParams(\"ListClusters\")\n\tif req.MaxResults > 0 {\n\t\tparams[\"maxResults\"] = strconv.Itoa(int(req.MaxResults))\n\t}\n\tif req.NextToken != \"\" {\n\t\tparams[\"nextToken\"] = req.NextToken\n\t}\n\n\tresp := new(ListClustersResp)\n\tif err := e.query(params, resp); err != nil {\n\t\treturn nil, err\n\t}\n\treturn resp, nil\n}", "func (o *V0037JobProperties) GetClusterConstraintsOk() (*string, bool) {\n\tif o == nil || o.ClusterConstraints == nil {\n\t\treturn nil, false\n\t}\n\treturn o.ClusterConstraints, true\n}", "func (o *NiatelemetryNexusDashboardsAllOf) GetIsClusterHealthyOk() (*string, bool) {\n\tif o == nil || o.IsClusterHealthy == nil {\n\t\treturn nil, false\n\t}\n\treturn o.IsClusterHealthy, true\n}", "func (m *MockBuilder) Clusters() []string {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"Clusters\")\n\tret0, _ := ret[0].([]string)\n\treturn ret0\n}", "func (o *MoveClustersForbidden) IsSuccess() bool {\n\treturn false\n}", "func (o *NiatelemetryNexusDashboardsAllOf) GetClusterUuidOk() (*string, bool) {\n\tif o == nil || o.ClusterUuid == nil {\n\t\treturn nil, false\n\t}\n\treturn o.ClusterUuid, true\n}", "func (o *VirtualizationIweClusterAllOf) GetHxClusterOk() (*StorageBaseClusterRelationship, bool) {\n\tif o == nil || o.HxCluster == nil {\n\t\treturn nil, false\n\t}\n\treturn o.HxCluster, true\n}", "func (o 
*V2GetClusterDefaultConfigOK) IsSuccess() bool {\n\treturn true\n}", "func (o *MetroclusterSvmGetOK) IsSuccess() bool {\n\treturn true\n}", "func (s *Snapshot) NumClusters(ns core.Namespace) int {\n\tif val, ok := s.clusters[ns]; ok && val != nil {\n\t\treturn len(val)\n\t\t//return val.Len()\n\t}\n\treturn 0\n}", "func (adm Admin) ListClusters() (string, error) {\n\tvar clusters []string\n\n\tchildren, err := adm.zkClient.Children(\"/\")\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tfor _, cluster := range children {\n\t\tif ok, err := adm.isClusterSetup(cluster); ok && err == nil {\n\t\t\tclusters = append(clusters, cluster)\n\t\t}\n\t}\n\n\tvar buffer bytes.Buffer\n\tbuffer.WriteString(\"Existing clusters: \\n\")\n\n\tfor _, cluster := range clusters {\n\t\tbuffer.WriteString(\" \" + cluster + \"\\n\")\n\t}\n\treturn buffer.String(), nil\n}", "func (o *NiatelemetryNexusDashboardsAllOf) GetNdClusterSizeOk() (*int64, bool) {\n\tif o == nil || o.NdClusterSize == nil {\n\t\treturn nil, false\n\t}\n\treturn o.NdClusterSize, true\n}", "func (a *DefaultApiService) ListClusters(ctx _context.Context, localVarOptionals *ListClustersOpts) (Clusters, *_nethttp.Response, error) {\n\tvar (\n\t\tlocalVarHTTPMethod = _nethttp.MethodGet\n\t\tlocalVarPostBody interface{}\n\t\tlocalVarFormFileName string\n\t\tlocalVarFileName string\n\t\tlocalVarFileBytes []byte\n\t\tlocalVarReturnValue Clusters\n\t)\n\n\t// create path and map variables\n\tlocalVarPath := a.client.cfg.BasePath + \"/clusters\"\n\tlocalVarHeaderParams := make(map[string]string)\n\tlocalVarQueryParams := _neturl.Values{}\n\tlocalVarFormParams := _neturl.Values{}\n\n\tif localVarOptionals != nil && localVarOptionals.Id.IsSet() {\n\t\tt:=localVarOptionals.Id.Value()\n\t\tif reflect.TypeOf(t).Kind() == reflect.Slice {\n\t\t\ts := reflect.ValueOf(t)\n\t\t\tfor i := 0; i < s.Len(); i++ {\n\t\t\t\tlocalVarQueryParams.Add(\"id[]\", parameterToString(s.Index(i), \"multi\"))\n\t\t\t}\n\t\t} else 
{\n\t\t\tlocalVarQueryParams.Add(\"id[]\", parameterToString(t, \"multi\"))\n\t\t}\n\t}\n\tif localVarOptionals != nil && localVarOptionals.NotId.IsSet() {\n\t\tt:=localVarOptionals.NotId.Value()\n\t\tif reflect.TypeOf(t).Kind() == reflect.Slice {\n\t\t\ts := reflect.ValueOf(t)\n\t\t\tfor i := 0; i < s.Len(); i++ {\n\t\t\t\tlocalVarQueryParams.Add(\"!id[]\", parameterToString(s.Index(i), \"multi\"))\n\t\t\t}\n\t\t} else {\n\t\t\tlocalVarQueryParams.Add(\"!id[]\", parameterToString(t, \"multi\"))\n\t\t}\n\t}\n\tif localVarOptionals != nil && localVarOptionals.StoryCountMin.IsSet() {\n\t\tlocalVarQueryParams.Add(\"story_count.min\", parameterToString(localVarOptionals.StoryCountMin.Value(), \"\"))\n\t}\n\tif localVarOptionals != nil && localVarOptionals.StoryCountMax.IsSet() {\n\t\tlocalVarQueryParams.Add(\"story_count.max\", parameterToString(localVarOptionals.StoryCountMax.Value(), \"\"))\n\t}\n\tif localVarOptionals != nil && localVarOptionals.TimeStart.IsSet() {\n\t\tlocalVarQueryParams.Add(\"time.start\", parameterToString(localVarOptionals.TimeStart.Value(), \"\"))\n\t}\n\tif localVarOptionals != nil && localVarOptionals.TimeEnd.IsSet() {\n\t\tlocalVarQueryParams.Add(\"time.end\", parameterToString(localVarOptionals.TimeEnd.Value(), \"\"))\n\t}\n\tif localVarOptionals != nil && localVarOptionals.EarliestStoryStart.IsSet() {\n\t\tlocalVarQueryParams.Add(\"earliest_story.start\", parameterToString(localVarOptionals.EarliestStoryStart.Value(), \"\"))\n\t}\n\tif localVarOptionals != nil && localVarOptionals.EarliestStoryEnd.IsSet() {\n\t\tlocalVarQueryParams.Add(\"earliest_story.end\", parameterToString(localVarOptionals.EarliestStoryEnd.Value(), \"\"))\n\t}\n\tif localVarOptionals != nil && localVarOptionals.LatestStoryStart.IsSet() {\n\t\tlocalVarQueryParams.Add(\"latest_story.start\", parameterToString(localVarOptionals.LatestStoryStart.Value(), \"\"))\n\t}\n\tif localVarOptionals != nil && localVarOptionals.LatestStoryEnd.IsSet() 
{\n\t\tlocalVarQueryParams.Add(\"latest_story.end\", parameterToString(localVarOptionals.LatestStoryEnd.Value(), \"\"))\n\t}\n\tif localVarOptionals != nil && localVarOptionals.LocationCountry.IsSet() {\n\t\tt:=localVarOptionals.LocationCountry.Value()\n\t\tif reflect.TypeOf(t).Kind() == reflect.Slice {\n\t\t\ts := reflect.ValueOf(t)\n\t\t\tfor i := 0; i < s.Len(); i++ {\n\t\t\t\tlocalVarQueryParams.Add(\"location.country\", parameterToString(s.Index(i), \"multi\"))\n\t\t\t}\n\t\t} else {\n\t\t\tlocalVarQueryParams.Add(\"location.country\", parameterToString(t, \"multi\"))\n\t\t}\n\t}\n\tif localVarOptionals != nil && localVarOptionals.NotLocationCountry.IsSet() {\n\t\tt:=localVarOptionals.NotLocationCountry.Value()\n\t\tif reflect.TypeOf(t).Kind() == reflect.Slice {\n\t\t\ts := reflect.ValueOf(t)\n\t\t\tfor i := 0; i < s.Len(); i++ {\n\t\t\t\tlocalVarQueryParams.Add(\"!location.country\", parameterToString(s.Index(i), \"multi\"))\n\t\t\t}\n\t\t} else {\n\t\t\tlocalVarQueryParams.Add(\"!location.country\", parameterToString(t, \"multi\"))\n\t\t}\n\t}\n\tif localVarOptionals != nil && localVarOptionals.Return_.IsSet() {\n\t\tt:=localVarOptionals.Return_.Value()\n\t\tif reflect.TypeOf(t).Kind() == reflect.Slice {\n\t\t\ts := reflect.ValueOf(t)\n\t\t\tfor i := 0; i < s.Len(); i++ {\n\t\t\t\tlocalVarQueryParams.Add(\"return[]\", parameterToString(s.Index(i), \"multi\"))\n\t\t\t}\n\t\t} else {\n\t\t\tlocalVarQueryParams.Add(\"return[]\", parameterToString(t, \"multi\"))\n\t\t}\n\t}\n\tif localVarOptionals != nil && localVarOptionals.SortBy.IsSet() {\n\t\tlocalVarQueryParams.Add(\"sort_by\", parameterToString(localVarOptionals.SortBy.Value(), \"\"))\n\t}\n\tif localVarOptionals != nil && localVarOptionals.SortDirection.IsSet() {\n\t\tlocalVarQueryParams.Add(\"sort_direction\", parameterToString(localVarOptionals.SortDirection.Value(), \"\"))\n\t}\n\tif localVarOptionals != nil && localVarOptionals.Cursor.IsSet() {\n\t\tlocalVarQueryParams.Add(\"cursor\", 
parameterToString(localVarOptionals.Cursor.Value(), \"\"))\n\t}\n\tif localVarOptionals != nil && localVarOptionals.PerPage.IsSet() {\n\t\tlocalVarQueryParams.Add(\"per_page\", parameterToString(localVarOptionals.PerPage.Value(), \"\"))\n\t}\n\t// to determine the Content-Type header\n\tlocalVarHTTPContentTypes := []string{}\n\n\t// set Content-Type header\n\tlocalVarHTTPContentType := selectHeaderContentType(localVarHTTPContentTypes)\n\tif localVarHTTPContentType != \"\" {\n\t\tlocalVarHeaderParams[\"Content-Type\"] = localVarHTTPContentType\n\t}\n\n\t// to determine the Accept header\n\tlocalVarHTTPHeaderAccepts := []string{\"application/json\", \"text/xml\"}\n\n\t// set Accept header\n\tlocalVarHTTPHeaderAccept := selectHeaderAccept(localVarHTTPHeaderAccepts)\n\tif localVarHTTPHeaderAccept != \"\" {\n\t\tlocalVarHeaderParams[\"Accept\"] = localVarHTTPHeaderAccept\n\t}\n\tif ctx != nil {\n\t\t// API Key Authentication\n\t\tif auth, ok := ctx.Value(ContextAPIKey).(APIKey); ok {\n\t\t\tvar key string\n\t\t\tif auth.Prefix != \"\" {\n\t\t\t\tkey = auth.Prefix + \" \" + auth.Key\n\t\t\t} else {\n\t\t\t\tkey = auth.Key\n\t\t\t}\n\t\t\tlocalVarHeaderParams[\"X-AYLIEN-NewsAPI-Application-ID\"] = key\n\t\t}\n\t}\n\tif ctx != nil {\n\t\t// API Key Authentication\n\t\tif auth, ok := ctx.Value(ContextAPIKey).(APIKey); ok {\n\t\t\tvar key string\n\t\t\tif auth.Prefix != \"\" {\n\t\t\t\tkey = auth.Prefix + \" \" + auth.Key\n\t\t\t} else {\n\t\t\t\tkey = auth.Key\n\t\t\t}\n\t\t\tlocalVarHeaderParams[\"X-AYLIEN-NewsAPI-Application-Key\"] = key\n\t\t}\n\t}\n\tr, err := a.client.prepareRequest(ctx, localVarPath, localVarHTTPMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFormFileName, localVarFileName, localVarFileBytes)\n\tif err != nil {\n\t\treturn localVarReturnValue, nil, err\n\t}\n\n\tlocalVarHTTPResponse, err := a.client.callAPI(r)\n\tif err != nil || localVarHTTPResponse == nil {\n\t\treturn localVarReturnValue, 
localVarHTTPResponse, err\n\t}\n\n\tlocalVarBody, err := _ioutil.ReadAll(localVarHTTPResponse.Body)\n\tlocalVarHTTPResponse.Body.Close()\n\tif err != nil {\n\t\treturn localVarReturnValue, localVarHTTPResponse, err\n\t}\n\n\tif localVarHTTPResponse.StatusCode >= 300 {\n\t\tnewErr := GenericOpenAPIError{\n\t\t\tbody: localVarBody,\n\t\t\terror: localVarHTTPResponse.Status,\n\t\t}\n\t\tif localVarHTTPResponse.StatusCode == 401 {\n\t\t\tvar v Errors\n\t\t\terr = a.client.decode(&v, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t\t\t}\n\t\t\tnewErr.model = v\n\t\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t\t}\n\t\tif localVarHTTPResponse.StatusCode == 404 {\n\t\t\tvar v Errors\n\t\t\terr = a.client.decode(&v, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t\t\t}\n\t\t\tnewErr.model = v\n\t\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t\t}\n\t\tif localVarHTTPResponse.StatusCode == 422 {\n\t\t\tvar v Errors\n\t\t\terr = a.client.decode(&v, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t\t\t}\n\t\t\tnewErr.model = v\n\t\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t\t}\n\t\tif localVarHTTPResponse.StatusCode == 429 {\n\t\t\tvar v Errors\n\t\t\terr = a.client.decode(&v, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t\t\t}\n\t\t\tnewErr.model = v\n\t\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t\t}\n\t\tif 
localVarHTTPResponse.StatusCode == 500 {\n\t\t\tvar v Errors\n\t\t\terr = a.client.decode(&v, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t\t\t}\n\t\t\tnewErr.model = v\n\t\t}\n\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t}\n\n\terr = a.client.decode(&localVarReturnValue, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\tif err != nil {\n\t\tnewErr := GenericOpenAPIError{\n\t\t\tbody: localVarBody,\n\t\t\terror: err.Error(),\n\t\t}\n\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t}\n\n\treturn localVarReturnValue, localVarHTTPResponse, nil\n}", "func (o *ClusterNtpKeysGetOK) IsSuccess() bool {\n\treturn true\n}", "func ExampleSnowball_ListClusters_shared00() {\n\tsvc := snowball.New(session.New())\n\tinput := &snowball.ListClustersInput{}\n\n\tresult, err := svc.ListClusters(input)\n\tif err != nil {\n\t\tif aerr, ok := err.(awserr.Error); ok {\n\t\t\tswitch aerr.Code() {\n\t\t\tcase snowball.ErrCodeInvalidNextTokenException:\n\t\t\t\tfmt.Println(snowball.ErrCodeInvalidNextTokenException, aerr.Error())\n\t\t\tdefault:\n\t\t\t\tfmt.Println(aerr.Error())\n\t\t\t}\n\t\t} else {\n\t\t\t// Print the error, cast err to awserr.Error to get the Code and\n\t\t\t// Message from an error.\n\t\t\tfmt.Println(err.Error())\n\t\t}\n\t\treturn\n\t}\n\n\tfmt.Println(result)\n}", "func (a *ClustersApiService) ListClusters(ctx _context.Context, space string) ApiListClustersRequest {\n\treturn ApiListClustersRequest{\n\t\tApiService: a,\n\t\tctx: ctx,\n\t\tspace: space,\n\t}\n}", "func (o *VirtualizationIweClusterAllOf) GetComputeNodeCountOk() (*int64, bool) {\n\tif o == nil || o.ComputeNodeCount == nil {\n\t\treturn nil, false\n\t}\n\treturn o.ComputeNodeCount, true\n}", "func (adm Admin) ListClusters() (string, error) {\n\tconn := newConnection(adm.ZkSvr)\n\terr := conn.Connect()\n\tif 
err != nil {\n\t\tfmt.Println(\"Failed to connect to zookeeper.\")\n\t\treturn \"\", err\n\t}\n\tdefer conn.Disconnect()\n\n\tvar clusters []string\n\n\tchildren, err := conn.Children(\"/\")\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tfor _, cluster := range children {\n\t\tif ok, err := conn.IsClusterSetup(cluster); ok && err == nil {\n\t\t\tclusters = append(clusters, cluster)\n\t\t}\n\t}\n\n\tvar buffer bytes.Buffer\n\tbuffer.WriteString(\"Existing clusters: \\n\")\n\n\tfor _, cluster := range clusters {\n\t\tbuffer.WriteString(\" \" + cluster + \"\\n\")\n\t}\n\treturn buffer.String(), nil\n}", "func (o *V2ResetClusterNotFound) IsSuccess() bool {\n\treturn false\n}", "func (o *HyperflexSoftwareVersionPolicy) GetClusterProfilesOk() ([]HyperflexClusterProfileRelationship, bool) {\n\tif o == nil || o.ClusterProfiles == nil {\n\t\treturn nil, false\n\t}\n\treturn o.ClusterProfiles, true\n}", "func (o *V2ImportClusterCreated) IsSuccess() bool {\n\treturn true\n}", "func (o *V2GetClusterDefaultConfigUnauthorized) IsSuccess() bool {\n\treturn false\n}", "func (o *V2ResetClusterAccepted) IsSuccess() bool {\n\treturn true\n}", "func (o *HyperflexHxapDvUplink) GetClusterOk() (*HyperflexHxapClusterRelationship, bool) {\n\tif o == nil || o.Cluster == nil {\n\t\treturn nil, false\n\t}\n\treturn o.Cluster, true\n}", "func (o *MoveClustersBadRequest) IsSuccess() bool {\n\treturn false\n}", "func (m *MockEKSServiceInterface) ListClusters(input *eks.ListClustersInput) (*eks.ListClustersOutput, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"ListClusters\", input)\n\tret0, _ := ret[0].(*eks.ListClustersOutput)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func (mr *MockBuilderMockRecorder) Clusters() *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"Clusters\", reflect.TypeOf((*MockBuilder)(nil).Clusters))\n}", "func NewGetClusterOK() *GetClusterOK {\n\n\treturn &GetClusterOK{}\n}", "func (o 
*VirtualizationIweClusterAllOf) GetConvergedNodeCountOk() (*int64, bool) {\n\tif o == nil || o.ConvergedNodeCount == nil {\n\t\treturn nil, false\n\t}\n\treturn o.ConvergedNodeCount, true\n}", "func (o *Cluster) GetCommunicationUrisOk() (*[]string, bool) {\n\tif o == nil || o.CommunicationUris == nil {\n\t\treturn nil, false\n\t}\n\treturn o.CommunicationUris, true\n}", "func (o *ResourceLimits) HasK8sClustersProvisioned() bool {\n\tif o != nil && o.K8sClustersProvisioned != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func (bc *Baiducloud) ListClusters(ctx context.Context) ([]string, error) {\n\treturn nil, fmt.Errorf(\"ListClusters unimplemented\")\n}", "func (o *V2GetPresignedForClusterCredentialsOK) IsSuccess() bool {\n\treturn true\n}", "func (c *client) ClusterExists() (bool, error) {\n\tclusterJSON, err := c.runCmd(\"cluster\", \"list\", \"-o\", \"json\")\n\tif err != nil {\n\t\treturn false, err\n\t}\n\n\tclusterList := &ClusterList{}\n\tif err := clusterList.Unmarshal([]byte(clusterJSON)); err != nil {\n\t\treturn false, err\n\t}\n\n\tfor _, cluster := range clusterList.Clusters {\n\t\tif cluster.Name == c.clusterName {\n\t\t\tif c.verbose {\n\t\t\t\tfmt.Printf(\"k3d cluster '%s' exists\", c.clusterName)\n\t\t\t}\n\t\t\treturn true, nil\n\t\t}\n\t}\n\n\tif c.verbose {\n\t\tfmt.Printf(\"k3d cluster '%s' does not exist\", c.clusterName)\n\t}\n\treturn false, nil\n}", "func ExampleClustersClient_Get() {\n\tcred, err := azidentity.NewDefaultAzureCredential(nil)\n\tif err != nil {\n\t\tlog.Fatalf(\"failed to obtain a credential: %v\", err)\n\t}\n\tctx := context.Background()\n\tclientFactory, err := armservicefabric.NewClientFactory(\"<subscription-id>\", cred, nil)\n\tif err != nil {\n\t\tlog.Fatalf(\"failed to create client: %v\", err)\n\t}\n\tres, err := clientFactory.NewClustersClient().Get(ctx, \"resRg\", \"myCluster\", nil)\n\tif err != nil {\n\t\tlog.Fatalf(\"failed to finish the request: %v\", err)\n\t}\n\t// You could use response here. 
We use blank identifier for just demo purposes.\n\t_ = res\n\t// If the HTTP response code is 200 as defined in example definition, your response structure would look as follows. Please pay attention that all the values in the output are fake values for just demo purposes.\n\t// res.Cluster = armservicefabric.Cluster{\n\t// \tName: to.Ptr(\"myCluster\"),\n\t// \tType: to.Ptr(\"Microsoft.ServiceFabric/clusters\"),\n\t// \tEtag: to.Ptr(\"W/\\\"636462502169240745\\\"\"),\n\t// \tID: to.Ptr(\"/subscriptions/00000000-0000-0000-0000-000000000000/resourcegroups/resRg/providers/Microsoft.ServiceFabric/clusters/myCluster\"),\n\t// \tLocation: to.Ptr(\"eastus\"),\n\t// \tTags: map[string]*string{\n\t// \t},\n\t// \tProperties: &armservicefabric.ClusterProperties{\n\t// \t\tAddOnFeatures: []*armservicefabric.AddOnFeatures{\n\t// \t\t\tto.Ptr(armservicefabric.AddOnFeaturesRepairManager),\n\t// \t\t\tto.Ptr(armservicefabric.AddOnFeaturesDNSService),\n\t// \t\t\tto.Ptr(armservicefabric.AddOnFeaturesBackupRestoreService),\n\t// \t\t\tto.Ptr(armservicefabric.AddOnFeaturesResourceMonitorService)},\n\t// \t\t\tAvailableClusterVersions: []*armservicefabric.ClusterVersionDetails{\n\t// \t\t\t\t{\n\t// \t\t\t\t\tCodeVersion: to.Ptr(\"6.1.480.9494\"),\n\t// \t\t\t\t\tEnvironment: to.Ptr(armservicefabric.ClusterEnvironmentWindows),\n\t// \t\t\t\t\tSupportExpiryUTC: to.Ptr(\"2018-06-15T23:59:59.9999999\"),\n\t// \t\t\t}},\n\t// \t\t\tAzureActiveDirectory: &armservicefabric.AzureActiveDirectory{\n\t// \t\t\t\tClientApplication: to.Ptr(\"d151ad89-4bce-4ae8-b3d1-1dc79679fa75\"),\n\t// \t\t\t\tClusterApplication: to.Ptr(\"5886372e-7bf4-4878-a497-8098aba608ae\"),\n\t// \t\t\t\tTenantID: to.Ptr(\"6abcc6a0-8666-43f1-87b8-172cf86a9f9c\"),\n\t// \t\t\t},\n\t// \t\t\tCertificateCommonNames: &armservicefabric.ServerCertificateCommonNames{\n\t// \t\t\t\tCommonNames: []*armservicefabric.ServerCertificateCommonName{\n\t// \t\t\t\t\t{\n\t// \t\t\t\t\t\tCertificateCommonName: to.Ptr(\"abc.com\"),\n\t// 
\t\t\t\t\t\tCertificateIssuerThumbprint: to.Ptr(\"12599211F8F14C90AFA9532AD79A6F2CA1C00622\"),\n\t// \t\t\t\t}},\n\t// \t\t\t\tX509StoreName: to.Ptr(armservicefabric.StoreNameMy),\n\t// \t\t\t},\n\t// \t\t\tClientCertificateCommonNames: []*armservicefabric.ClientCertificateCommonName{\n\t// \t\t\t\t{\n\t// \t\t\t\t\tCertificateCommonName: to.Ptr(\"abc.com\"),\n\t// \t\t\t\t\tCertificateIssuerThumbprint: to.Ptr(\"5F3660C715EBBDA31DB1FFDCF508302348DE8E7A\"),\n\t// \t\t\t\t\tIsAdmin: to.Ptr(true),\n\t// \t\t\t}},\n\t// \t\t\tClientCertificateThumbprints: []*armservicefabric.ClientCertificateThumbprint{\n\t// \t\t\t\t{\n\t// \t\t\t\t\tCertificateThumbprint: to.Ptr(\"5F3660C715EBBDA31DB1FFDCF508302348DE8E7A\"),\n\t// \t\t\t\t\tIsAdmin: to.Ptr(true),\n\t// \t\t\t}},\n\t// \t\t\tClusterCodeVersion: to.Ptr(\"6.1.480.9494\"),\n\t// \t\t\tClusterEndpoint: to.Ptr(\"https://eastus.servicefabric.azure.com\"),\n\t// \t\t\tClusterID: to.Ptr(\"92584666-9889-4ae8-8d02-91902923d37f\"),\n\t// \t\t\tClusterState: to.Ptr(armservicefabric.ClusterStateWaitingForNodes),\n\t// \t\t\tDiagnosticsStorageAccountConfig: &armservicefabric.DiagnosticsStorageAccountConfig{\n\t// \t\t\t\tBlobEndpoint: to.Ptr(\"https://diag.blob.core.windows.net/\"),\n\t// \t\t\t\tProtectedAccountKeyName: to.Ptr(\"StorageAccountKey1\"),\n\t// \t\t\t\tQueueEndpoint: to.Ptr(\"https://diag.queue.core.windows.net/\"),\n\t// \t\t\t\tStorageAccountName: to.Ptr(\"diag\"),\n\t// \t\t\t\tTableEndpoint: to.Ptr(\"https://diag.table.core.windows.net/\"),\n\t// \t\t\t},\n\t// \t\t\tFabricSettings: []*armservicefabric.SettingsSectionDescription{\n\t// \t\t\t\t{\n\t// \t\t\t\t\tName: to.Ptr(\"UpgradeService\"),\n\t// \t\t\t\t\tParameters: []*armservicefabric.SettingsParameterDescription{\n\t// \t\t\t\t\t\t{\n\t// \t\t\t\t\t\t\tName: to.Ptr(\"AppPollIntervalInSeconds\"),\n\t// \t\t\t\t\t\t\tValue: to.Ptr(\"60\"),\n\t// \t\t\t\t\t}},\n\t// \t\t\t}},\n\t// \t\t\tManagementEndpoint: 
to.Ptr(\"https://myCluster.eastus.cloudapp.azure.com:19080\"),\n\t// \t\t\tNodeTypes: []*armservicefabric.NodeTypeDescription{\n\t// \t\t\t\t{\n\t// \t\t\t\t\tName: to.Ptr(\"nt1vm\"),\n\t// \t\t\t\t\tApplicationPorts: &armservicefabric.EndpointRangeDescription{\n\t// \t\t\t\t\t\tEndPort: to.Ptr[int32](30000),\n\t// \t\t\t\t\t\tStartPort: to.Ptr[int32](20000),\n\t// \t\t\t\t\t},\n\t// \t\t\t\t\tClientConnectionEndpointPort: to.Ptr[int32](19000),\n\t// \t\t\t\t\tDurabilityLevel: to.Ptr(armservicefabric.DurabilityLevelBronze),\n\t// \t\t\t\t\tEphemeralPorts: &armservicefabric.EndpointRangeDescription{\n\t// \t\t\t\t\t\tEndPort: to.Ptr[int32](64000),\n\t// \t\t\t\t\t\tStartPort: to.Ptr[int32](49000),\n\t// \t\t\t\t\t},\n\t// \t\t\t\t\tHTTPGatewayEndpointPort: to.Ptr[int32](19007),\n\t// \t\t\t\t\tIsPrimary: to.Ptr(true),\n\t// \t\t\t\t\tVMInstanceCount: to.Ptr[int32](5),\n\t// \t\t\t}},\n\t// \t\t\tProvisioningState: to.Ptr(armservicefabric.ProvisioningStateSucceeded),\n\t// \t\t\tReliabilityLevel: to.Ptr(armservicefabric.ReliabilityLevelSilver),\n\t// \t\t\tReverseProxyCertificateCommonNames: &armservicefabric.ServerCertificateCommonNames{\n\t// \t\t\t\tCommonNames: []*armservicefabric.ServerCertificateCommonName{\n\t// \t\t\t\t\t{\n\t// \t\t\t\t\t\tCertificateCommonName: to.Ptr(\"abc.com\"),\n\t// \t\t\t\t\t\tCertificateIssuerThumbprint: to.Ptr(\"12599211F8F14C90AFA9532AD79A6F2CA1C00622\"),\n\t// \t\t\t\t}},\n\t// \t\t\t\tX509StoreName: to.Ptr(armservicefabric.StoreNameMy),\n\t// \t\t\t},\n\t// \t\t\tUpgradeDescription: &armservicefabric.ClusterUpgradePolicy{\n\t// \t\t\t\tDeltaHealthPolicy: &armservicefabric.ClusterUpgradeDeltaHealthPolicy{\n\t// \t\t\t\t\tApplicationDeltaHealthPolicies: map[string]*armservicefabric.ApplicationDeltaHealthPolicy{\n\t// \t\t\t\t\t\t\"fabric:/myApp1\": &armservicefabric.ApplicationDeltaHealthPolicy{\n\t// \t\t\t\t\t\t\tDefaultServiceTypeDeltaHealthPolicy: &armservicefabric.ServiceTypeDeltaHealthPolicy{\n\t// 
\t\t\t\t\t\t\t\tMaxPercentDeltaUnhealthyServices: to.Ptr[int32](0),\n\t// \t\t\t\t\t\t\t},\n\t// \t\t\t\t\t\t\tServiceTypeDeltaHealthPolicies: map[string]*armservicefabric.ServiceTypeDeltaHealthPolicy{\n\t// \t\t\t\t\t\t\t\t\"myServiceType1\": &armservicefabric.ServiceTypeDeltaHealthPolicy{\n\t// \t\t\t\t\t\t\t\t\tMaxPercentDeltaUnhealthyServices: to.Ptr[int32](0),\n\t// \t\t\t\t\t\t\t\t},\n\t// \t\t\t\t\t\t\t},\n\t// \t\t\t\t\t\t},\n\t// \t\t\t\t\t},\n\t// \t\t\t\t\tMaxPercentDeltaUnhealthyApplications: to.Ptr[int32](0),\n\t// \t\t\t\t\tMaxPercentDeltaUnhealthyNodes: to.Ptr[int32](0),\n\t// \t\t\t\t\tMaxPercentUpgradeDomainDeltaUnhealthyNodes: to.Ptr[int32](0),\n\t// \t\t\t\t},\n\t// \t\t\t\tForceRestart: to.Ptr(false),\n\t// \t\t\t\tHealthCheckRetryTimeout: to.Ptr(\"00:05:00\"),\n\t// \t\t\t\tHealthCheckStableDuration: to.Ptr(\"00:00:30\"),\n\t// \t\t\t\tHealthCheckWaitDuration: to.Ptr(\"00:00:30\"),\n\t// \t\t\t\tHealthPolicy: &armservicefabric.ClusterHealthPolicy{\n\t// \t\t\t\t\tApplicationHealthPolicies: map[string]*armservicefabric.ApplicationHealthPolicy{\n\t// \t\t\t\t\t\t\"fabric:/myApp1\": &armservicefabric.ApplicationHealthPolicy{\n\t// \t\t\t\t\t\t\tDefaultServiceTypeHealthPolicy: &armservicefabric.ServiceTypeHealthPolicy{\n\t// \t\t\t\t\t\t\t\tMaxPercentUnhealthyServices: to.Ptr[int32](0),\n\t// \t\t\t\t\t\t\t},\n\t// \t\t\t\t\t\t\tServiceTypeHealthPolicies: map[string]*armservicefabric.ServiceTypeHealthPolicy{\n\t// \t\t\t\t\t\t\t\t\"myServiceType1\": &armservicefabric.ServiceTypeHealthPolicy{\n\t// \t\t\t\t\t\t\t\t\tMaxPercentUnhealthyServices: to.Ptr[int32](100),\n\t// \t\t\t\t\t\t\t\t},\n\t// \t\t\t\t\t\t\t},\n\t// \t\t\t\t\t\t},\n\t// \t\t\t\t\t},\n\t// \t\t\t\t\tMaxPercentUnhealthyApplications: to.Ptr[int32](0),\n\t// \t\t\t\t\tMaxPercentUnhealthyNodes: to.Ptr[int32](0),\n\t// \t\t\t\t},\n\t// \t\t\t\tUpgradeDomainTimeout: to.Ptr(\"00:15:00\"),\n\t// \t\t\t\tUpgradeReplicaSetCheckTimeout: to.Ptr(\"00:10:00\"),\n\t// \t\t\t\tUpgradeTimeout: 
to.Ptr(\"01:00:00\"),\n\t// \t\t\t},\n\t// \t\t\tUpgradeMode: to.Ptr(armservicefabric.UpgradeModeManual),\n\t// \t\t\tVMImage: to.Ptr(\"Windows\"),\n\t// \t\t},\n\t// \t}\n}", "func (o *ClusteSummaryEntity) GetClusterSummaryOk() (*ClusterSummaryDTO, bool) {\n\tif o == nil || o.ClusterSummary == nil {\n\t\treturn nil, false\n\t}\n\treturn o.ClusterSummary, true\n}", "func (c Client) ListClusters() (ClusterList, error) {\n\tbody, err := c.watsonClient.MakeRequest(\"GET\", c.version+\"/solr_clusters\", nil, nil)\n\tif err != nil {\n\t\treturn ClusterList{}, err\n\t}\n\tvar response ClusterList\n\terr = json.Unmarshal(body, &response)\n\treturn response, err\n}", "func NewPostClustersMulticlusterConfigOK() *PostClustersMulticlusterConfigOK {\n\treturn &PostClustersMulticlusterConfigOK{}\n}", "func (o *SubmitReplayRequestEntity) GetClusterNodeIdOk() (*string, bool) {\n\tif o == nil || o.ClusterNodeId == nil {\n\t\treturn nil, false\n\t}\n\treturn o.ClusterNodeId, true\n}", "func (o *V2UpdateClusterUISettingsOK) IsSuccess() bool {\n\treturn true\n}", "func (o *KubernetesContainerRuntimePolicy) GetClusterProfilesOk() ([]KubernetesClusterProfileRelationship, bool) {\n\tif o == nil || o.ClusterProfiles == nil {\n\t\treturn nil, false\n\t}\n\treturn o.ClusterProfiles, true\n}", "func (a ClustersAPI) Get(clusterID string) (httpmodels.GetResp, error) {\n\tvar clusterInfo httpmodels.GetResp\n\n\tdata := struct {\n\t\tClusterID string `json:\"cluster_id,omitempty\" url:\"cluster_id,omitempty\"`\n\t}{\n\t\tclusterID,\n\t}\n\tresp, err := a.Client.performQuery(http.MethodGet, \"/clusters/get\", data, nil)\n\tif err != nil {\n\t\treturn clusterInfo, err\n\t}\n\n\terr = json.Unmarshal(resp, &clusterInfo)\n\treturn clusterInfo, err\n}", "func (o *NetgroupsSettingsCollectionGetOK) IsSuccess() bool {\n\treturn true\n}", "func NewGetVSphereDatacentersOK() *GetVSphereDatacentersOK {\n\treturn &GetVSphereDatacentersOK{}\n}", "func (_m *ComputeAPI) LookupClusters(project string) 
([]*container.Cluster, error) {\n\tret := _m.Called(project)\n\n\tvar r0 []*container.Cluster\n\tif rf, ok := ret.Get(0).(func(string) []*container.Cluster); ok {\n\t\tr0 = rf(project)\n\t} else {\n\t\tif ret.Get(0) != nil {\n\t\t\tr0 = ret.Get(0).([]*container.Cluster)\n\t\t}\n\t}\n\n\tvar r1 error\n\tif rf, ok := ret.Get(1).(func(string) error); ok {\n\t\tr1 = rf(project)\n\t} else {\n\t\tr1 = ret.Error(1)\n\t}\n\n\treturn r0, r1\n}", "func (o *UcsdBackupInfoAllOf) GetConnectorsOk() ([]UcsdConnectorPack, bool) {\n\tif o == nil || o.Connectors == nil {\n\t\treturn nil, false\n\t}\n\treturn o.Connectors, true\n}", "func (k Kind) ClusterExists() (bool, error) {\n\tcmd := kindCommand(\"kind get clusters\")\n\tcmd.Stdout = nil\n\tcmd.Stderr = nil\n\toutput, err := cmd.Output()\n\n\tif err != nil {\n\t\treturn false, err\n\t}\n\n\treturn strings.Contains(string(output), \"kind\"), nil\n}", "func (c *Client) GetClusters(ctx context.Context) <-chan GetClusterResult {\n\t// TODO Make the concurrency configurable\n\tconcurrency := int(math.Min(5, float64(runtime.NumCPU())))\n\tresults := make(chan GetClusterResult, concurrency)\n\n\tclusterNames, err := c.GetClusterNames(ctx)\n\tif err != nil {\n\t\tclose(results)\n\t\treturn results\n\t}\n\n\tvar wg sync.WaitGroup\n\n\tgo func() {\n\t\tdefer close(results)\n\t\tfor _, clusterName := range clusterNames {\n\t\t\twg.Add(1)\n\t\t\tgo func(name string) {\n\t\t\t\tdefer wg.Done()\n\t\t\t\tcluster, err := c.GetCluster(ctx, name)\n\t\t\t\tresult := GetClusterResult{Cluster: cluster, Error: err}\n\t\t\t\tresults <- result\n\t\t\t}(clusterName)\n\t\t}\n\t\twg.Wait()\n\t}()\n\n\treturn results\n}", "func (o *V2ImportClusterForbidden) IsSuccess() bool {\n\treturn false\n}", "func (a ClustersAPI) List() ([]httpmodels.GetResp, error) {\n\tvar clusterList = struct {\n\t\tClusters []httpmodels.GetResp `json:\"clusters,omitempty\" url:\"clusters,omitempty\"`\n\t}{}\n\n\tresp, err := a.Client.performQuery(http.MethodGet, 
\"/clusters/list\", nil, nil)\n\tif err != nil {\n\t\treturn clusterList.Clusters, err\n\t}\n\n\terr = json.Unmarshal(resp, &clusterList)\n\treturn clusterList.Clusters, err\n}", "func NewGetClusterInfoOK() *GetClusterInfoOK {\n\treturn &GetClusterInfoOK{}\n}", "func (o *ClusterNtpKeysGetDefault) IsSuccess() bool {\n\treturn o._statusCode/100 == 2\n}", "func (o *ClusterRequest) GetMaxRunningNodesOk() (*int32, bool) {\n\tif o == nil || o.MaxRunningNodes == nil {\n\t\treturn nil, false\n\t}\n\treturn o.MaxRunningNodes, true\n}", "func (o *NvmeServiceCollectionGetOK) IsSuccess() bool {\n\treturn true\n}", "func (o *VirtualizationVmwareVirtualMachineAllOf) HasCluster() bool {\n\tif o != nil && o.Cluster != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func (o *PcloudVpnconnectionsNetworksGetOK) IsSuccess() bool {\n\treturn true\n}", "func (a *ClustersApiService) ListClustersExecute(r ApiListClustersRequest) (ListClustersResponse, *_nethttp.Response, error) {\n\tvar (\n\t\tlocalVarHTTPMethod = _nethttp.MethodGet\n\t\tlocalVarPostBody interface{}\n\t\tlocalVarFormFileName string\n\t\tlocalVarFileName string\n\t\tlocalVarFileBytes []byte\n\t\tlocalVarReturnValue ListClustersResponse\n\t)\n\n\tlocalBasePath, err := a.client.cfg.ServerURLWithContext(r.ctx, \"ClustersApiService.ListClusters\")\n\tif err != nil {\n\t\treturn localVarReturnValue, nil, GenericOpenAPIError{error: err.Error()}\n\t}\n\n\tlocalVarPath := localBasePath + \"/spaces/{space}/clusters\"\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"space\"+\"}\", _neturl.PathEscape(parameterToString(r.space, \"\")), -1)\n\n\tlocalVarHeaderParams := make(map[string]string)\n\tlocalVarQueryParams := _neturl.Values{}\n\tlocalVarFormParams := _neturl.Values{}\n\n\t// to determine the Content-Type header\n\tlocalVarHTTPContentTypes := []string{}\n\n\t// set Content-Type header\n\tlocalVarHTTPContentType := selectHeaderContentType(localVarHTTPContentTypes)\n\tif localVarHTTPContentType != \"\" 
{\n\t\tlocalVarHeaderParams[\"Content-Type\"] = localVarHTTPContentType\n\t}\n\n\t// to determine the Accept header\n\tlocalVarHTTPHeaderAccepts := []string{\"application/json\"}\n\n\t// set Accept header\n\tlocalVarHTTPHeaderAccept := selectHeaderAccept(localVarHTTPHeaderAccepts)\n\tif localVarHTTPHeaderAccept != \"\" {\n\t\tlocalVarHeaderParams[\"Accept\"] = localVarHTTPHeaderAccept\n\t}\n\treq, err := a.client.prepareRequest(r.ctx, localVarPath, localVarHTTPMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFormFileName, localVarFileName, localVarFileBytes)\n\tif err != nil {\n\t\treturn localVarReturnValue, nil, err\n\t}\n\n\tlocalVarHTTPResponse, err := a.client.callAPI(req)\n\tif err != nil || localVarHTTPResponse == nil {\n\t\treturn localVarReturnValue, localVarHTTPResponse, err\n\t}\n\n\tlocalVarBody, err := _ioutil.ReadAll(localVarHTTPResponse.Body)\n\tlocalVarHTTPResponse.Body.Close()\n\tlocalVarHTTPResponse.Body = _ioutil.NopCloser(bytes.NewBuffer(localVarBody))\n\tif err != nil {\n\t\treturn localVarReturnValue, localVarHTTPResponse, err\n\t}\n\n\tif localVarHTTPResponse.StatusCode >= 300 {\n\t\tnewErr := GenericOpenAPIError{\n\t\t\tbody: localVarBody,\n\t\t\terror: localVarHTTPResponse.Status,\n\t\t}\n\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t}\n\n\terr = a.client.decode(&localVarReturnValue, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\tif err != nil {\n\t\tnewErr := GenericOpenAPIError{\n\t\t\tbody: localVarBody,\n\t\t\terror: err.Error(),\n\t\t}\n\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t}\n\n\treturn localVarReturnValue, localVarHTTPResponse, nil\n}", "func FetchClusters(c *gin.Context) {\n\n\tbanzaiUtils.LogInfo(banzaiConstants.TagListClusters, \"Start listing clusters\")\n\n\tvar clusters []banzaiSimpleTypes.ClusterSimple\n\tvar response []*cloud.ClusterRepresentation\n\tdatabase.Find(&clusters)\n\n\tif len(clusters) <= 0 
{\n\t\tbanzaiUtils.LogInfo(banzaiConstants.TagListClusters, \"No clusters found\")\n\t\tcloud.SetResponseBodyJson(c, http.StatusNotFound, gin.H{\n\t\t\tcloud.JsonKeyStatus: http.StatusNotFound,\n\t\t\tcloud.JsonKeyMessage: \"No clusters found!\",\n\t\t})\n\t\treturn\n\t}\n\n\tfor _, cl := range clusters {\n\t\tclust := cloud.GetClusterRepresentation(&cl)\n\t\tif clust != nil {\n\t\t\tbanzaiUtils.LogInfo(banzaiConstants.TagListClusters, fmt.Sprintf(\"Append %#v cluster representation to response\", clust))\n\t\t\tresponse = append(response, clust)\n\t\t}\n\n\t}\n\tcloud.SetResponseBodyJson(c, http.StatusOK, gin.H{\n\t\tcloud.JsonKeyStatus: http.StatusOK,\n\t\tcloud.JsonKeyData: response,\n\t})\n}", "func (p KopsProvisioner) clusterConfigExists(sc *kapp.StackConfig, providerImpl provider.Provider) (bool, error) {\n\n\tproviderVars := provider.GetVars(providerImpl)\n\tlog.Debugf(\"Checking if Kops cluster config exists for values: %#v\", providerVars)\n\n\tprovisionerValues := providerVars[PROVISIONER_KEY].(map[interface{}]interface{})\n\tkopsConfig, err := getKopsConfig(provisionerValues)\n\tif err != nil {\n\t\treturn false, errors.WithStack(err)\n\t}\n\n\tctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)\n\tdefer cancel() // The cancel should be deferred so resources are cleaned up\n\n\targs := []string{\n\t\t\"get\",\n\t\t\"clusters\",\n\t}\n\n\targs = parameteriseValues(args, kopsConfig.Params.Global)\n\n\tcmd := exec.CommandContext(ctx, KOPS_PATH, args...)\n\tcmd.Env = os.Environ()\n\n\terr = cmd.Run()\n\tif ctx.Err() == context.DeadlineExceeded {\n\t\treturn false, errors.New(\"Timed out trying to retrieve kops cluster config. 
Check your credentials.\")\n\t}\n\tif err != nil {\n\t\tif _, ok := err.(*exec.ExitError); ok {\n\t\t\tlog.Debug(\"Cluster config doesn't exist\")\n\t\t\treturn false, nil\n\t\t} else {\n\t\t\treturn false, errors.Wrap(err, \"Error fetching kops clusters\")\n\t\t}\n\t}\n\n\treturn true, nil\n}", "func (o *TechsupportmanagementEndPointAllOf) GetClusterMemberOk() (*AssetClusterMemberRelationship, bool) {\n\tif o == nil || o.ClusterMember == nil {\n\t\treturn nil, false\n\t}\n\treturn o.ClusterMember, true\n}", "func (o *ClusterMetricsNodes) GetComputeOk() (*float64, bool) {\n\tif o == nil || o.Compute == nil {\n\t\treturn nil, false\n\t}\n\treturn o.Compute, true\n}", "func (j *Juju) ClusterReady() (bool, error) {\n\ttmp := \"JUJU_DATA=\" + JujuDataPrefix + j.Name\n\tcmd := exec.Command(\"juju\", \"status\", \"--format=json\")\n\tcmd.Env = append(os.Environ(), tmp)\n\tout, err := cmd.Output()\n\tif err != nil {\n\t\treturn false, fmt.Errorf(\"ClusterReady error: %v: %s\", err, err.(*exec.ExitError).Stderr)\n\t}\n\n\terr = json.Unmarshal([]byte(out), &jStats)\n\tif err != nil {\n\t\treturn false, fmt.Errorf(\"ClusterReady error: %v: %s\", err, err.(*exec.ExitError).Stderr)\n\t}\n\n\tfor k := range jStats.Machines {\n\t\tmachineStatus := jStats.Machines[k].MachStatus[\"current\"]\n\t\tif machineStatus != \"started\" {\n\t\t\tlog.WithFields(logrus.Fields{\"name\": j.Name}).Info(\"Cluster Not Ready\")\n\t\t\treturn false, nil\n\t\t}\n\t}\n\n\tfor k := range jStats.ApplicationResults {\n\t\tappStatus := jStats.ApplicationResults[k].AppStatus[\"current\"]\n\t\tif appStatus != \"active\" {\n\t\t\tlog.WithFields(logrus.Fields{\"name\": j.Name}).Info(\"Cluster Not Ready\")\n\t\t\treturn false, nil\n\t\t}\n\t}\n\n\tlog.WithFields(logrus.Fields{\"name\": j.Name}).Info(\"Cluster Ready\")\n\treturn true, nil\n}", "func Clusters() (clusters map[string][]string) {\n\tclusters = make(map[string][]string)\n\tif addr := AccessConsulAddr(); addr != \"\" && Region() != \"\" 
{\n\t\treturn getClustersFromConsul(addr, Region())\n\t}\n\tcs := Get(\"Key-ClusterMgrCluster\").(map[string]string)\n\tfor key, value := range cs {\n\t\tclusters[key] = strings.Split(value, \" \")\n\t}\n\treturn\n}", "func (c *Client) GetClustersSync(ctx context.Context) ([]*Cluster, error) {\n\tclusters := make([]*Cluster, 0)\n\n\tfor result := range c.GetClusters(ctx) {\n\t\tif result.Error != nil {\n\t\t\treturn nil, result.Error\n\t\t}\n\t\tclusters = append(clusters, result.Cluster)\n\t}\n\n\treturn clusters, nil\n}" ]
[ "0.6719827", "0.65017885", "0.6465093", "0.646219", "0.6398727", "0.63660103", "0.63552487", "0.63049424", "0.627333", "0.62654936", "0.62586135", "0.6258082", "0.6212132", "0.6196596", "0.6170194", "0.61492944", "0.611987", "0.60981774", "0.60886943", "0.6081035", "0.6077892", "0.6050239", "0.6047554", "0.60216284", "0.59879977", "0.59566593", "0.5946033", "0.59301275", "0.5927767", "0.5911564", "0.58934337", "0.5866069", "0.58345604", "0.5772485", "0.5769356", "0.5660747", "0.56599987", "0.56461686", "0.5645801", "0.56294334", "0.55949396", "0.5581085", "0.55757517", "0.5563501", "0.5559378", "0.55457735", "0.5543135", "0.5541259", "0.5540796", "0.5539327", "0.55194145", "0.55074257", "0.5496589", "0.5492242", "0.5481603", "0.5480428", "0.5463602", "0.545469", "0.54539853", "0.54432994", "0.54390264", "0.54226196", "0.54190487", "0.5414241", "0.539737", "0.5394293", "0.5378104", "0.536012", "0.53583074", "0.5351078", "0.5350199", "0.5337369", "0.53363717", "0.5322175", "0.5313674", "0.53130996", "0.5307607", "0.53044075", "0.5302095", "0.5289414", "0.52863485", "0.5278594", "0.5277239", "0.5264858", "0.5254316", "0.5239916", "0.52364874", "0.5236181", "0.5233157", "0.52274984", "0.5223034", "0.5221388", "0.5219198", "0.52090365", "0.5195012", "0.51941526", "0.51929855", "0.51912737", "0.5186529", "0.5186496" ]
0.8224033
0
GetAliasQueues returns the AliasQueues field value
GetAliasQueues возвращает значение поля AliasQueues
func (o *QueueManager) GetAliasQueues() []AliasQueue { if o == nil { var ret []AliasQueue return ret } return o.AliasQueues }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (o *QueueManager) GetAliasQueuesOk() (*[]AliasQueue, bool) {\n\tif o == nil {\n\t\treturn nil, false\n\t}\n\treturn &o.AliasQueues, true\n}", "func (o *QueueManager) SetAliasQueues(v []AliasQueue) {\n\to.AliasQueues = v\n}", "func (b *backend) Queues(ctx context.Context, qq *entroq.QueuesQuery) (map[string]int, error) {\n\tdefer un(lock(b))\n\n\tqs := make(map[string]int)\n\tfor q, items := range b.heaps {\n\t\tif len(qq.MatchPrefix) != 0 || len(qq.MatchExact) != 0 {\n\t\t\tif !matchesPrefix(q, qq.MatchPrefix...) && !matchesExact(q, qq.MatchExact...) {\n\t\t\t\t// no match\n\t\t\t\tcontinue\n\t\t\t}\n\t\t}\n\t\tqs[q] = items.Len()\n\t\tif qq.Limit > 0 && len(qs) >= qq.Limit {\n\t\t\tbreak\n\t\t}\n\t}\n\treturn qs, nil\n}", "func (b *backend) Queues(ctx context.Context, qq *entroq.QueuesQuery) (map[string]int, error) {\n\tresp, err := pb.NewEntroQClient(b.conn).Queues(ctx, &pb.QueuesRequest{\n\t\tMatchPrefix: qq.MatchPrefix,\n\t\tMatchExact: qq.MatchExact,\n\t\tLimit: int32(qq.Limit),\n\t})\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"grpc queues: %w\", unpackGRPCError(err))\n\t}\n\tqs := make(map[string]int)\n\tfor _, q := range resp.Queues {\n\t\tqs[q.Name] = int(q.NumTasks)\n\t}\n\treturn qs, nil\n}", "func GetQueues(c *gin.Context) {\n\t//TODO: create a while both back and front until value is != nil\n\tsize := len(queue)\n\tlog.Printf(\"squeue: %v\", queue)\n\tif size == 0 {\n\t\tc.JSON(http.StatusNotFound, gin.H{\n\t\t\t\"msg\": \"queue don't have any item!\",\n\t\t})\n\t\treturn\n\t}\n\tc.JSON(http.StatusOK, gin.H{\n\t\t\"queues\": queue,\n\t})\n\tlog.Printf(\"equeue: %v\", queue)\n}", "func (gores *Gores) Queues() []string {\n\tqueues := make([]string, 0)\n\n\tconn := gores.pool.Get()\n\tdefer conn.Close()\n\n\tdata, _ := conn.Do(\"SMEMBERS\", watchedQueues)\n\tfor _, q := range data.([]interface{}) {\n\t\tqueues = append(queues, string(q.([]byte)))\n\t}\n\n\treturn queues\n}", "func listQueues(ENV string) []string {\n \t// Using the SDK's 
default configuration, loading additional config\n\t// and credentials values from the environment variables, shared\n\t// credentials, and shared configuration files\n\n\tsess, err := session.NewSession(&aws.Config{\n\t Region: aws.String(\"us-east-1\")},\n\t)\n\n // Create a SQS service client.\n svc := sqs.New(sess)\n\n\t//have to create a session object first\n\toutput, err := svc.ListQueues(&sqs.ListQueuesInput{\n\t QueueNamePrefix: aws.String(ENV),\n })\n\tif err != nil { panic(err) }\n\n\tqueues := output.QueueUrls\n\tfinal_queues := []string{}\n\n\tfor _, i := range queues {\n\t fmt.Println(string(*i))\n\t final_queues = append(final_queues, *i)\n }\n\treturn final_queues\n}", "func (i *Inspector) Queues() ([]string, error) {\n\treturn i.rdb.AllQueues()\n}", "func (s *API) ListQueues(w http.ResponseWriter, req *http.Request) {\n\tlog.Debug(\"ListQueues\")\n\n\tqueueNamePrefix := req.FormValue(\"QueueNamePrefix\")\n\tvar queues []string\n\tfor k, v := range s.sqs.queues {\n\t\tif strings.HasPrefix(k, queueNamePrefix) {\n\t\t\tqueues = append(queues, v.url)\n\t\t}\n\t}\n\n\tresponse := ListQueuesResponse{\n\t\tResult: ListQueuesResult{queues},\n\t\tMetaData: ResponseMetaData{\"00000000-0000-0000-0000-000000000000\"},\n\t}\n\n\tw.Header().Set(\"Content-Type\", \"application/xml\")\n\tenc := xml.NewEncoder(w)\n\tenc.Indent(\" \", \" \")\n\tif err := enc.Encode(response); err != nil {\n\t\tlog.Errorf(\"error: %s\", err)\n\t}\n}", "func (t *TopicCache) GetQueue(projectName, serviceName string) []string {\n\tt.RLock()\n\tdefer t.RUnlock()\n\n\tif len(t.inQueue[projectName+serviceName]) >= 100 {\n\t\treturn t.inQueue[projectName+serviceName][:99]\n\t}\n\n\treturn t.inQueue[projectName+serviceName]\n}", "func GetAvailableQueues() ([]string, error) {\n\tclient := &http.Client{}\n\n\tres, err := client.Get(Host + \"/queues\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\trespBody := res.Body\n\tdefer respBody.Close()\n\n\tavailableQueues := struct {\n\t\tQueues 
[]string `json:\"queues\"`\n\t}{}\n\tif err := json.NewDecoder(respBody).Decode(&availableQueues); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn availableQueues.Queues, nil\n}", "func (h *Hospital) ConsumeQueues(ctx context.Context, t *testing.T) (events int, messages []string) {\n\tt.Helper()\n\treturn h.ConsumeQueuesWithLimit(ctx, t, -1, true)\n}", "func (a *adapter) queueLookup(queueName string) (*sqs.GetQueueUrlOutput, error) {\n\treturn a.sqsClient.GetQueueUrl(&sqs.GetQueueUrlInput{\n\t\tQueueName: &queueName,\n\t})\n}", "func (bs *BeanstalkdConnectionPool) ListQueues() (queueNames []string, err error) {\n\tqueueNames, err = bs.getGlobalConnect().ListTubes()\n\treturn\n}", "func QueuedAsinsGet(c *gin.Context) {\n\tvar asins []models.QueuedAsin\n\tmodels.DB.Order(\"id asc\").Preload(\"Feed\").Find(&asins)\n\n\tH := DefaultH(c)\n\tH[\"Title\"] = \"Queued Asins\"\n\tH[\"Asins\"] = asins\n\tc.HTML(200, \"admin/asins/queued_index\", H)\n}", "func (db *BotDB) GetAliases(user uint64) []string {\n\tq, err := db.sqlGetAliases.Query(user)\n\tif db.CheckError(\"GetAliases\", err) != nil {\n\t\treturn []string{}\n\t}\n\tdefer q.Close()\n\treturn db.parseStringResults(q)\n}", "func (nd *Node) GetAliases() []*Host {\n\tnd.mutex.RLock()\n\taliases := nd.aliases\n\tnd.mutex.RUnlock()\n\n\treturn aliases\n}", "func (m *Endpoint) GetAliases() []string {\n\tif m != nil {\n\t\treturn m.Aliases\n\t}\n\treturn nil\n}", "func (q VariadicQuery) GetAlias() string {\n\treturn q.Alias\n}", "func (p *Project) Queues() (*[]Queue, error) {\n qs := make([]Queue, 0)\n err := Mongo.Get(\"queue\", bson.M{\"project\": p.ID}, MaxQueuesPerProject, &qs)\n return &qs, err\n}", "func (a AliasedName) GetAlias() string { return a.Alias }", "func (q *Queue) GetQueue() []types.Event {\n\treturn q.Queue\n}", "func (a *Admin) GetChainAliases(_ *http.Request, args *GetChainAliasesArgs, reply *GetChainAliasesReply) error {\n\ta.Log.Debug(\"API called\",\n\t\tzap.String(\"service\", 
\"admin\"),\n\t\tzap.String(\"method\", \"getChainAliases\"),\n\t\tlogging.UserString(\"chain\", args.Chain),\n\t)\n\n\tid, err := ids.FromString(args.Chain)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treply.Aliases, err = a.ChainManager.Aliases(id)\n\treturn err\n}", "func (o EndpointsResponseOutput) Queue() pulumi.StringOutput {\n\treturn o.ApplyT(func(v EndpointsResponse) string { return v.Queue }).(pulumi.StringOutput)\n}", "func (s *Store) GetQueueNames() ([]string, error) {\n\tvar names []string\n\treturn names, s.db.View(func(tx *bolt.Tx) error {\n\t\treturn s.queues(tx).ForEach(func(key, value []byte) error {\n\t\t\tnames = append(names, string(key))\n\t\t\treturn nil\n\t\t})\n\t})\n}", "func (b *backend) QueueStats(ctx context.Context, qq *entroq.QueuesQuery) (map[string]*entroq.QueueStat, error) {\n\tresp, err := pb.NewEntroQClient(b.conn).QueueStats(ctx, &pb.QueuesRequest{\n\t\tMatchPrefix: qq.MatchPrefix,\n\t\tMatchExact: qq.MatchExact,\n\t\tLimit: int32(qq.Limit),\n\t})\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to get queue stats over gRPC: %w\", err)\n\t}\n\tqs := make(map[string]*entroq.QueueStat)\n\tfor _, q := range resp.Queues {\n\t\tqs[q.Name] = &entroq.QueueStat{\n\t\t\tName: q.Name,\n\t\t\tSize: int(q.NumTasks),\n\t\t\tClaimed: int(q.NumClaimed),\n\t\t\tAvailable: int(q.NumAvailable),\n\t\t\tMaxClaims: int(q.MaxClaims),\n\t\t}\n\t}\n\treturn qs, nil\n}", "func (o *Project) GetAlias() []ProjectAlias {\n\tif o == nil || o.Alias == nil {\n\t\tvar ret []ProjectAlias\n\t\treturn ret\n\t}\n\treturn *o.Alias\n}", "func groomQueues(queues *Queues) (err kv.Error) {\n\tfor qName, qDetails := range *queues {\n\t\t// If we have enough runners drop the queue as it needs nothing done to it\n\t\tif len(qDetails.NodeGroup) == 0 || qDetails.Running >= qDetails.Ready+qDetails.NotVisible {\n\t\t\tif logger.IsTrace() {\n\t\t\t\tlogger.Trace(\"queue already handled\", \"queue\", qName, \"stack\", 
stack.Trace().TrimRuntime())\n\t\t\t}\n\t\t\tdelete(*queues, qName)\n\t\t}\n\t}\n\treturn nil\n}", "func (o EndpointsResponsePtrOutput) Queue() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v *EndpointsResponse) *string {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn &v.Queue\n\t}).(pulumi.StringPtrOutput)\n}", "func (s QueueSetSpy) Queues() map[DeploymentID]*R11nQueue {\n\tres := s.Called()\n\treturn res.Get(0).(map[DeploymentID]*R11nQueue)\n}", "func (o TopicRuleSqsOutput) QueueUrl() pulumi.StringOutput {\n\treturn o.ApplyT(func(v TopicRuleSqs) string { return v.QueueUrl }).(pulumi.StringOutput)\n}", "func (p *Process) CmdGetQueue(pac teoapi.Packet) (err error) {\n\tdata := pac.RemoveTrailingZero(pac.Data())\n\trequest := cdb.KeyValue{Cmd: pac.Cmd()}\n\tif err = request.UnmarshalText(data); err != nil {\n\t\treturn\n\t}\n\t// Return only Value for text requests and all fields for json\n\tresponce := request\n\tif responce.Value, err = p.tcdb.GetQueue(request.Key); err != nil {\n\t\treturn\n\t} else if !request.RequestInJSON {\n\t\t_, err = p.tcdb.con.SendAnswer(pac, pac.Cmd(), responce.Value)\n\t} else if retdata, err := responce.MarshalText(); err == nil {\n\t\t_, err = p.tcdb.con.SendAnswer(pac, pac.Cmd(), retdata)\n\t}\n\treturn\n}", "func QueueARN() reference.ExtractValueFn {\n\treturn func(mg resource.Managed) string {\n\t\tcr, ok := mg.(*Queue)\n\t\tif !ok {\n\t\t\treturn \"\"\n\t\t}\n\t\treturn cr.Status.AtProvider.ARN\n\t}\n}", "func (q *queueImp) Lookup(queue string, group string) ([]*model.QueueInfo, error) {\n\n\tqueueInfos := make([]*model.QueueInfo, 0)\n\tswitch {\n\tcase queue == \"\":\n\t\t//Get all queue's information\n\t\tqueueMap, err := q.extendManager.GetQueueMap()\n\t\tif err != nil {\n\t\t\treturn queueInfos, errors.Trace(err)\n\t\t}\n\t\tfor queueName, groupNames := range queueMap {\n\t\t\tgroupConfigs := make([]*model.GroupConfig, 0)\n\t\t\tfor _, groupName := range groupNames {\n\t\t\t\tconfig, err := 
q.extendManager.GetGroupConfig(groupName, queueName)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn queueInfos, errors.Trace(err)\n\t\t\t\t}\n\t\t\t\tif config != nil {\n\t\t\t\t\tgroupConfigs = append(groupConfigs, &model.GroupConfig{\n\t\t\t\t\t\tGroup: config.Group,\n\t\t\t\t\t\tWrite: config.Write,\n\t\t\t\t\t\tRead: config.Read,\n\t\t\t\t\t\tUrl: config.Url,\n\t\t\t\t\t\tIps: config.Ips,\n\t\t\t\t\t})\n\t\t\t\t} else {\n\t\t\t\t\tlog.Warnf(\"config is nil queue:%s, group:%s\", queueName, groupName)\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tctime, _ := q.extendManager.QueueCreateTime(queueName)\n\t\t\tqueueInfos = append(queueInfos, &model.QueueInfo{\n\t\t\t\tQueue: queueName,\n\t\t\t\tCtime: ctime,\n\t\t\t\tLength: 0,\n\t\t\t\tGroups: groupConfigs,\n\t\t\t})\n\t\t}\n\tcase queue != \"\" && group == \"\":\n\t\t//Get a queue's all groups information\n\t\tqueueMap, err := q.extendManager.GetQueueMap()\n\t\tif err != nil {\n\t\t\treturn queueInfos, errors.Trace(err)\n\t\t}\n\t\tgroupNames, exists := queueMap[queue]\n\t\tif !exists {\n\t\t\tbreak\n\t\t}\n\t\tgroupConfigs := make([]*model.GroupConfig, 0)\n\t\tfor _, gName := range groupNames {\n\t\t\tconfig, err := q.extendManager.GetGroupConfig(gName, queue)\n\t\t\tif err != nil {\n\t\t\t\treturn queueInfos, errors.Trace(err)\n\t\t\t}\n\t\t\tif config != nil {\n\t\t\t\tgroupConfigs = append(groupConfigs, &model.GroupConfig{\n\t\t\t\t\tGroup: config.Group,\n\t\t\t\t\tWrite: config.Write,\n\t\t\t\t\tRead: config.Read,\n\t\t\t\t\tUrl: config.Url,\n\t\t\t\t\tIps: config.Ips,\n\t\t\t\t})\n\t\t\t} else {\n\t\t\t\tlog.Warnf(\"config is nil queue:%s, group:%s\", queue, gName)\n\t\t\t}\n\t\t}\n\n\t\tctime, _ := q.extendManager.QueueCreateTime(queue)\n\t\tqueueInfos = append(queueInfos, &model.QueueInfo{\n\t\t\tQueue: queue,\n\t\t\tCtime: ctime,\n\t\t\tLength: 0,\n\t\t\tGroups: groupConfigs,\n\t\t})\n\tdefault:\n\t\t//Get group's information by queue and group's name\n\t\tconfig, err := q.extendManager.GetGroupConfig(group, queue)\n\t\tif 
err != nil {\n\t\t\treturn queueInfos, errors.Trace(err)\n\t\t}\n\t\tgroupConfigs := make([]*model.GroupConfig, 0)\n\t\tif config != nil {\n\t\t\tgroupConfigs = append(groupConfigs, &model.GroupConfig{\n\t\t\t\tGroup: config.Group,\n\t\t\t\tWrite: config.Write,\n\t\t\t\tRead: config.Read,\n\t\t\t\tUrl: config.Url,\n\t\t\t\tIps: config.Ips,\n\t\t\t})\n\t\t}\n\n\t\tctime, _ := q.extendManager.QueueCreateTime(queue)\n\t\tqueueInfos = append(queueInfos, &model.QueueInfo{\n\t\t\tQueue: queue,\n\t\t\tCtime: ctime,\n\t\t\tLength: 0,\n\t\t\tGroups: groupConfigs,\n\t\t})\n\t}\n\treturn queueInfos, nil\n}", "func (s *SQSServer) pollQueues(pollctx, taskctx context.Context, queues []QueueConf) error {\n\tfor _, qconf := range queues {\n\t\tq, err := s.getQueue(pollctx, qconf)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treq := &sqs.GetQueueAttributesInput{\n\t\t\tAttributeNames: []types.QueueAttributeName{(\"VisibilityTimeout\")},\n\t\t\tQueueUrl: &q.url,\n\t\t}\n\t\tresp, err := s.sqsSrv(q.QueueConf).GetQueueAttributes(pollctx, req)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"Failed to get queue attributes for '%s' - %s\", q.Name, err.Error())\n\t\t}\n\t\tto := resp.Attributes[\"VisibilityTimeout\"]\n\t\tif to == \"\" {\n\t\t\treturn fmt.Errorf(\"No visibility timeout returned by SQS for queue '%s'\", q.Name)\n\t\t}\n\t\tvisTimeout, err := strconv.Atoi(to)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"Failed to convert visibility timeout from '%s' to int - '%s'\", to, err.Error())\n\t\t}\n\t\t// Each queue runs in a dedicated go routine.\n\t\tgo func(vt int32) {\n\t\t\ts.queuePollers.Add(1)\n\t\t\tdefer s.queuePollers.Done()\n\t\t\ts.run(pollctx, taskctx, q, vt)\n\t\t}(int32(visTimeout))\n\t}\n\n\treturn nil\n}", "func (client *Client) GetQueueURL(name string) (string, error) {\n\tvar parsedResponse GetQueueURLResult\n\turl := NewGetQueueURLRequest(client.EndPointURL, name).URL()\n\n\tresp, err := client.Get(url)\n\tif err != nil {\n\t\treturn \"\", 
err\n\t}\n\tdefer resp.Body.Close()\n\n\tbody, err := ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tif resp.StatusCode != 200 {\n\t\treturn \"\", errors.New(string(body))\n\t}\n\n\terr = xml.Unmarshal(body, &parsedResponse)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\treturn parsedResponse.QueueURL, nil\n}", "func (o QueueOutput) QueueArn() pulumi.StringOutput {\n\treturn o.ApplyT(func(v *Queue) pulumi.StringOutput { return v.QueueArn }).(pulumi.StringOutput)\n}", "func (ClearTrans) GetQueue() string {\n\treturn \"cy_rubik_clearTrans\"\n}", "func GetQueueEndpoint(baseUri string, accountName string) string {\n\treturn fmt.Sprintf(\"https://%s.queue.%s\", accountName, baseUri)\n}", "func (svc *AdminBuildService) GetQueue(opt *GetQueueOptions) (*[]library.BuildQueue, *Response, error) {\n\t// set the API endpoint path we send the request to\n\tu := \"/api/v1/admin/builds/queue\"\n\n\t// add optional arguments if supplied\n\tu, err := addOptions(u, opt)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\t// BuildQueue type we want to return\n\tv := new([]library.BuildQueue)\n\n\tresp, err := svc.client.Call(\"GET\", u, nil, v)\n\n\treturn v, resp, err\n}", "func (s *Filters) SetQueues(v []*string) *Filters {\n\ts.Queues = v\n\treturn s\n}", "func (a *Client) GetMsgVpnJndiQueues(params *GetMsgVpnJndiQueuesParams, authInfo runtime.ClientAuthInfoWriter) (*GetMsgVpnJndiQueuesOK, error) {\n\t// TODO: Validate the params before sending\n\tif params == nil {\n\t\tparams = NewGetMsgVpnJndiQueuesParams()\n\t}\n\n\tresult, err := a.transport.Submit(&runtime.ClientOperation{\n\t\tID: \"getMsgVpnJndiQueues\",\n\t\tMethod: \"GET\",\n\t\tPathPattern: \"/msgVpns/{msgVpnName}/jndiQueues\",\n\t\tProducesMediaTypes: []string{\"application/json\"},\n\t\tConsumesMediaTypes: []string{\"application/json\"},\n\t\tSchemes: []string{\"http\", \"https\"},\n\t\tParams: params,\n\t\tReader: &GetMsgVpnJndiQueuesReader{formats: a.formats},\n\t\tAuthInfo: 
authInfo,\n\t\tContext: params.Context,\n\t\tClient: params.HTTPClient,\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn result.(*GetMsgVpnJndiQueuesOK), nil\n\n}", "func (o DotnetSettingsOutput) ForcedNamespaceAliases() pulumi.StringArrayOutput {\n\treturn o.ApplyT(func(v DotnetSettings) []string { return v.ForcedNamespaceAliases }).(pulumi.StringArrayOutput)\n}", "func (c *Client) GetBotAliases(ctx context.Context, params *GetBotAliasesInput, optFns ...func(*Options)) (*GetBotAliasesOutput, error) {\n\tif params == nil {\n\t\tparams = &GetBotAliasesInput{}\n\t}\n\n\tresult, metadata, err := c.invokeOperation(ctx, \"GetBotAliases\", params, optFns, addOperationGetBotAliasesMiddlewares)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tout := result.(*GetBotAliasesOutput)\n\tout.ResultMetadata = metadata\n\treturn out, nil\n}", "func (s *UserDataFilters) SetQueues(v []*string) *UserDataFilters {\n\ts.Queues = v\n\treturn s\n}", "func (player *musicPlayer) getQueueInfo() ([]string, error) {\n\tplayer.Lock()\n\tdefer player.Unlock()\n\tif len(player.state.queue) == 0 {\n\t\treturn nil, errors.New(cannot_get_queue_info_msg)\n\t}\n\t//make a copy to the queue\n\tcopy := make([]string, 0, len(player.state.queue))\n\tfor _, el := range player.state.queue {\n\t\tcopy = append(copy, el)\n\t}\n\treturn copy, nil\n}", "func (o ServiceBusQueueOutputDataSourceOutput) QueueName() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v ServiceBusQueueOutputDataSource) *string { return v.QueueName }).(pulumi.StringPtrOutput)\n}", "func (o DotnetSettingsPtrOutput) ForcedNamespaceAliases() pulumi.StringArrayOutput {\n\treturn o.ApplyT(func(v *DotnetSettings) []string {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn v.ForcedNamespaceAliases\n\t}).(pulumi.StringArrayOutput)\n}", "func (o DiagnosticsStorageAccountConfigOutput) QueueEndpoint() pulumi.StringOutput {\n\treturn o.ApplyT(func(v DiagnosticsStorageAccountConfig) string { return v.QueueEndpoint 
}).(pulumi.StringOutput)\n}", "func (o DotnetSettingsResponseOutput) ForcedNamespaceAliases() pulumi.StringArrayOutput {\n\treturn o.ApplyT(func(v DotnetSettingsResponse) []string { return v.ForcedNamespaceAliases }).(pulumi.StringArrayOutput)\n}", "func GetHostAliases(ctx context.Context) ([]string, error) {\n\tname, err := GetHostname(ctx)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"couldn't extract a host alias from the kubelet: %w\", err)\n\t}\n\tif err := validate.ValidHostname(name); err != nil {\n\t\treturn nil, fmt.Errorf(\"host alias from kubelet is not valid: %w\", err)\n\t}\n\treturn []string{name}, nil\n}", "func PopulateQueues(c *gin.Context) {\n\tif queue == nil {\n\t\tc.JSON(http.StatusNotFound, gin.H{\n\t\t\t\"msg\": \"queue doesn't exist, please create it!!!\",\n\t\t})\n\t\treturn\n\t}\n\tqueue = enqueue(queue, qMessage{\n\t\tUSER: \"roberto\",\n\t\tEMAIL: \"roberto@rr.com\",\n\t\tUUID: \"1\",\n\t\tMSG: \"lindo\",\n\t})\n\tqueue = enqueue(queue, qMessage{\n\t\tUSER: \"alex\",\n\t\tEMAIL: \"alex@rr.com\",\n\t\tUUID: \"2\",\n\t\tMSG: \"lindox\",\n\t})\n\tqueue = enqueue(queue, qMessage{\n\t\tUSER: \"ale\",\n\t\tEMAIL: \"ale@rr.com\",\n\t\tUUID: \"3\",\n\t\tMSG: \"linduxo\",\n\t})\n\tc.JSON(http.StatusOK, gin.H{\n\t\t\"msg\": queue,\n\t})\n}", "func (s *Store) GetQueueSettings(name string) (QueueSettings, error) {\n\tvar settings QueueSettings\n\treturn settings, s.db.View(func(tx *bolt.Tx) error {\n\t\ts, err := s.getQueueSettings(tx, name)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tsettings = s\n\t\treturn nil\n\t})\n}", "func (o LookupQueueResultOutput) AppEngineHttpQueue() AppEngineHttpQueueResponseOutput {\n\treturn o.ApplyT(func(v LookupQueueResult) AppEngineHttpQueueResponse { return v.AppEngineHttpQueue }).(AppEngineHttpQueueResponseOutput)\n}", "func (psc *PartitionSchedulingContext) GetQueue(queueName string) *SchedulingQueue {\n psc.lock.RLock()\n defer psc.lock.RUnlock()\n\n return psc.queues[queueName]\n}", "func 
(_PlasmaFramework *PlasmaFrameworkCaller) ExitsQueues(opts *bind.CallOpts, arg0 [32]byte) (common.Address, error) {\n\tvar (\n\t\tret0 = new(common.Address)\n\t)\n\tout := ret0\n\terr := _PlasmaFramework.contract.Call(opts, out, \"exitsQueues\", arg0)\n\treturn *ret0, err\n}", "func (this *Queue) GetQueue() (val Mensaje, err error) {\n\t// Primero determina si la cola está vacía\n\tif this.rear == this.front {\n\t\treturn Mensaje{0, \"0\", \"0\"}, errors.New(\"Cola de Mensajes Vacia\")\n\t}\n\tthis.front++\n\tval = this.array[this.front]\n\treturn val, err\n}", "func (storage *SrvStorage) GetVhostQueues(vhost string) []*queue.Queue {\n\tvar queues []*queue.Queue\n\tstorage.db.Iterate(\n\t\tfunc(key []byte, value []byte) {\n\t\t\tif !bytes.HasPrefix(key, []byte(queuePrefix)) || getVhostFromKey(string(key)) != vhost {\n\t\t\t\treturn\n\t\t\t}\n\t\t\tq := &queue.Queue{}\n\t\t\tq.Unmarshal(value, storage.protoVersion)\n\t\t\tqueues = append(queues, q)\n\t\t},\n\t)\n\n\treturn queues\n}", "func (o DiagnosticsStorageAccountConfigResponseOutput) QueueEndpoint() pulumi.StringOutput {\n\treturn o.ApplyT(func(v DiagnosticsStorageAccountConfigResponse) string { return v.QueueEndpoint }).(pulumi.StringOutput)\n}", "func DeclareQueues(ch *amqp.Channel, queueName string) (amqp.Queue, amqp.Queue) {\n\treturn declareQueue(ch, queueName), declareResponseQueue(ch, queueName)\n}", "func (o ServiceBusQueueOutputDataSourceResponseOutput) QueueName() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v ServiceBusQueueOutputDataSourceResponse) *string { return v.QueueName }).(pulumi.StringPtrOutput)\n}", "func (c *apiConsumers) TeamsQueue() <-chan *TeamDTO {\n\treturn c.queue\n}", "func (o TopicRuleErrorActionSqsOutput) QueueUrl() pulumi.StringOutput {\n\treturn o.ApplyT(func(v TopicRuleErrorActionSqs) string { return v.QueueUrl }).(pulumi.StringOutput)\n}", "func (_Rootchain *RootchainCaller) ExitsQueues(opts *bind.CallOpts, arg0 common.Address) (common.Address, error) {\n\tvar 
(\n\t\tret0 = new(common.Address)\n\t)\n\tout := ret0\n\terr := _Rootchain.contract.Call(opts, out, \"exitsQueues\", arg0)\n\treturn *ret0, err\n}", "func GetHostAliases(ctx context.Context) ([]string, error) {\n\treturn nil, fmt.Errorf(\"Kubernetes support not build: couldn't extract a host alias from the kubelet\")\n}", "func (b *backend) QueueStats(ctx context.Context, qq *entroq.QueuesQuery) (map[string]*entroq.QueueStat, error) {\n\tdefer un(lock(b))\n\n\tnow, err := b.Time(ctx)\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"tasks current time\")\n\t}\n\n\tqs := make(map[string]*entroq.QueueStat)\n\tfor q, heap := range b.heaps {\n\t\tif qq.Limit > 0 && len(qs) >= qq.Limit {\n\t\t\tbreak\n\t\t}\n\n\t\tif len(qq.MatchPrefix) != 0 || len(qq.MatchExact) != 0 {\n\t\t\tif !matchesPrefix(q, qq.MatchPrefix...) && !matchesExact(q, qq.MatchExact...) {\n\t\t\t\t// no match\n\t\t\t\tcontinue\n\t\t\t}\n\t\t}\n\t\t// Find out how many tasks are claimed. This means they both have a\n\t\t// claimant and their arrival time is in the future.\n\t\tclaimed := 0\n\t\tavailable := 0\n\t\tmaxClaims := 0\n\t\tfor _, item := range heap.Items() {\n\t\t\tif item.task.At.After(now) {\n\t\t\t\tif item.task.Claimant != uuid.Nil {\n\t\t\t\t\tclaimed++\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tavailable++\n\t\t\t}\n\t\t\tif int(item.task.Claims) > maxClaims {\n\t\t\t\tmaxClaims = int(item.task.Claims)\n\t\t\t}\n\t\t}\n\t\tqs[q] = &entroq.QueueStat{\n\t\t\tName: q,\n\t\t\tSize: heap.Len(),\n\t\t\tClaimed: claimed,\n\t\t\tAvailable: available,\n\t\t\tMaxClaims: maxClaims,\n\t\t}\n\t}\n\treturn qs, nil\n}", "func (s *API) GetQueueURL(w http.ResponseWriter, req *http.Request) {\n\tlog.Debug(\"GetQueueURL\")\n\n\tqueueName := req.FormValue(\"QueueName\")\n\tqueue, ok := s.sqs.queues[queueName]\n\tif !ok {\n\t\tw.Header().Set(\"Content-Type\", \"application/xml\")\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\terror := ErrorResponse{\n\t\t\tError: ErrorResult{\n\t\t\t\tType: \"Not 
Found\",\n\t\t\t\tCode: \"AWS.SimpleQueueService.NonExistentQueue\",\n\t\t\t\tMessage: \"The specified queue does not exist for this wsdl version.\",\n\t\t\t},\n\t\t\tRequestId: \"00000000-0000-0000-0000-000000000000\",\n\t\t}\n\t\tenc := xml.NewEncoder(w)\n\t\tenc.Indent(\" \", \" \")\n\t\tif err := enc.Encode(error); err != nil {\n\t\t\tlog.Errorf(\"error: %s\", err)\n\t\t}\n\t\treturn\n\t}\n\n\tresponse := GetQueueURLResponse{\n\t\tResult: GetQueueURLResult{queue.url},\n\t\tMetaData: ResponseMetaData{\"00000000-0000-0000-0000-000000000000\"},\n\t}\n\n\tw.Header().Set(\"Content-Type\", \"application/xml\")\n\tenc := xml.NewEncoder(w)\n\tenc.Indent(\" \", \" \")\n\tif err := enc.Encode(response); err != nil {\n\t\tlog.Errorf(\"error: %s\", err)\n\t}\n\n}", "func (_PlasmaFramework *PlasmaFrameworkSession) ExitsQueues(arg0 [32]byte) (common.Address, error) {\n\treturn _PlasmaFramework.Contract.ExitsQueues(&_PlasmaFramework.CallOpts, arg0)\n}", "func DeleteTorrentFromQueues(torrentHash string, db *storm.DB) {\n\ttorrentQueues := Storage.FetchQueues(db)\n\tfor x, torrentHashActive := range torrentQueues.ActiveTorrents { //FOR EXTRA CAUTION deleting it from both queues in case a mistake occurred.\n\t\tif torrentHash == torrentHashActive {\n\t\t\ttorrentQueues.ActiveTorrents = append(torrentQueues.ActiveTorrents[:x], torrentQueues.ActiveTorrents[x+1:]...)\n\t\t\tLogger.Info(\"Removing Torrent from Active: \", torrentHash)\n\t\t}\n\t}\n\tfor x, torrentHashQueued := range torrentQueues.QueuedTorrents { //FOR EXTRA CAUTION deleting it from both queues in case a mistake occurred.\n\t\tif torrentHash == torrentHashQueued {\n\t\t\ttorrentQueues.QueuedTorrents = append(torrentQueues.QueuedTorrents[:x], torrentQueues.QueuedTorrents[x+1:]...)\n\t\t\tLogger.Info(\"Removing Torrent from Queued\", torrentHash)\n\t\t}\n\t}\n\tfor x, torrentHashActive := range torrentQueues.ForcedTorrents { //FOR EXTRA CAUTION deleting it from all queues in case a mistake occurred.\n\t\tif 
torrentHash == torrentHashActive {\n\t\t\ttorrentQueues.ForcedTorrents = append(torrentQueues.ForcedTorrents[:x], torrentQueues.ForcedTorrents[x+1:]...)\n\t\t\tLogger.Info(\"Removing Torrent from Forced: \", torrentHash)\n\t\t}\n\t}\n\tStorage.UpdateQueues(db, torrentQueues)\n\tLogger.WithFields(logrus.Fields{\"Torrent Hash\": torrentHash, \"TorrentQueues\": torrentQueues}).Info(\"Removing Torrent from all Queues\")\n}", "func GetInfoFromQueue(q amqp.Queue) QueueInfo {\n\n\treturn QueueInfo{\n\t\tName: q.Name,\n\t\tConsumers: q.Consumers,\n\t\tMessages: q.Messages,\n\t}\n}", "func (a *Client) GetMsgVpnQueues(params *GetMsgVpnQueuesParams, authInfo runtime.ClientAuthInfoWriter) (*GetMsgVpnQueuesOK, error) {\n\t// TODO: Validate the params before sending\n\tif params == nil {\n\t\tparams = NewGetMsgVpnQueuesParams()\n\t}\n\n\tresult, err := a.transport.Submit(&runtime.ClientOperation{\n\t\tID: \"getMsgVpnQueues\",\n\t\tMethod: \"GET\",\n\t\tPathPattern: \"/msgVpns/{msgVpnName}/queues\",\n\t\tProducesMediaTypes: []string{\"application/json\"},\n\t\tConsumesMediaTypes: []string{\"application/json\"},\n\t\tSchemes: []string{\"http\", \"https\"},\n\t\tParams: params,\n\t\tReader: &GetMsgVpnQueuesReader{formats: a.formats},\n\t\tAuthInfo: authInfo,\n\t\tContext: params.Context,\n\t\tClient: params.HTTPClient,\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn result.(*GetMsgVpnQueuesOK), nil\n\n}", "func (b *Backend) GetLeagueByQueue(league string, queue string) (*riotclient.LeagueListDTO, error) {\n\treturn nil, fmt.Errorf(\"Not implemented\")\n}", "func RemoveDuplicatesFromQueues(db *storm.DB) {\n\ttorrentQueues := Storage.FetchQueues(db)\n\tfor _, torrentHash := range torrentQueues.ActiveTorrents {\n\t\tfor i, queuedHash := range torrentQueues.QueuedTorrents {\n\t\t\tif torrentHash == queuedHash {\n\t\t\t\ttorrentQueues.QueuedTorrents = append(torrentQueues.QueuedTorrents[:i], 
torrentQueues.QueuedTorrents[i+1:]...)\n\t\t\t}\n\t\t}\n\t}\n\tStorage.UpdateQueues(db, torrentQueues)\n}", "func (_PlasmaFramework *PlasmaFrameworkCallerSession) ExitsQueues(arg0 [32]byte) (common.Address, error) {\n\treturn _PlasmaFramework.Contract.ExitsQueues(&_PlasmaFramework.CallOpts, arg0)\n}", "func getQueueUrl(id string) (queueUrl string, retErr error) {\n\n\t//Creazione client DynamoDB\n\tsvc := dynamodb.New(common.Sess)\n\n\tresult, err := svc.GetItem(&dynamodb.GetItemInput{\n\t\tTableName: aws.String(subTableName),\n\t\tKey: map[string]*dynamodb.AttributeValue{\n\t\t\t\"SubID\": {\n\t\t\t\tS: aws.String(id),\n\t\t\t},\n\t\t},\n\t})\n\tif err != nil {\n\t\tcommon.Warning(\"[BROKER] Errore nel retreive del subscriber con ID: \" + id + \".\\n\" + err.Error())\n\t\treturn \"\", err\n\t}\n\n\titem := common.SubscriberEntry{}\n\n\terr = dynamodbattribute.UnmarshalMap(result.Item, &item)\n\tif err != nil {\n\t\tcommon.Warning(\"[BROKER] Errore nell'unmarshaling del risultato\")\n\t\treturn \"\", err\n\t}\n\tif item.SubID == \"\" {\n\t\tcommon.Warning(\"[BROKER] Nessun subscriber trovato con id \" + id)\n\t\treturn \"\", errors.New(\"no item found\")\n\t}\n\n\tcommon.Info(\"[BROKER] Subscriber trovato: \" + item.SubID + \"\\n\\t\" + item.QueueURL)\n\n\treturn item.QueueURL, nil\n}", "func ListMatchmakingQueues(settings *playfab.Settings, postData *ListMatchmakingQueuesRequestModel, entityToken string) (*ListMatchmakingQueuesResultModel, error) {\n if entityToken == \"\" {\n return nil, playfab.NewCustomError(\"entityToken should not be an empty string\", playfab.ErrorGeneric)\n }\n b, errMarshal := json.Marshal(postData)\n if errMarshal != nil {\n return nil, playfab.NewCustomError(errMarshal.Error(), playfab.ErrorMarshal)\n }\n\n sourceMap, err := playfab.Request(settings, b, \"/Match/ListMatchmakingQueues\", \"X-EntityToken\", entityToken)\n if err != nil {\n return nil, err\n }\n \n result := &ListMatchmakingQueuesResultModel{}\n\n config := 
mapstructure.DecoderConfig{\n DecodeHook: playfab.StringToDateTimeHook,\n Result: result,\n }\n \n decoder, errDecoding := mapstructure.NewDecoder(&config)\n if errDecoding != nil {\n return nil, playfab.NewCustomError(errDecoding.Error(), playfab.ErrorDecoding)\n }\n \n errDecoding = decoder.Decode(sourceMap)\n if errDecoding != nil {\n return nil, playfab.NewCustomError(errDecoding.Error(), playfab.ErrorDecoding)\n }\n\n return result, nil\n}", "func queueName(name string, withoutPrefix bool) string {\n\t// Allow using a nameless queue\n\tif name == \"\" || withoutPrefix {\n\t\treturn name\n\t}\n\n\treturn \"relay.\" + name\n}", "func (m *MatchInfo) GetQueue(client *static.Client) (static.Queue, error) {\n\treturn client.GetQueue(m.QueueID)\n}", "func (o DiagnosticsStorageAccountConfigResponsePtrOutput) QueueEndpoint() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v *DiagnosticsStorageAccountConfigResponse) *string {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn &v.QueueEndpoint\n\t}).(pulumi.StringPtrOutput)\n}", "func (o *QueueManager) GetClusterQueues() []ClusterQueue {\n\tif o == nil {\n\t\tvar ret []ClusterQueue\n\t\treturn ret\n\t}\n\n\treturn o.ClusterQueues\n}", "func (taskBolt *TaskBolt) ReadQueue(n int) []*ga4gh_task_exec.Job {\n\tjobs := make([]*ga4gh_task_exec.Job, 0)\n\ttaskBolt.db.View(func(tx *bolt.Tx) error {\n\n\t\t// Iterate over the JobsQueued bucket, reading the first `n` jobs\n\t\tc := tx.Bucket(JobsQueued).Cursor()\n\t\tfor k, _ := c.First(); k != nil && len(jobs) < n; k, _ = c.Next() {\n\t\t\tid := string(k)\n\t\t\tjob := getJob(tx, id)\n\t\t\tjobs = append(jobs, job)\n\t\t}\n\t\treturn nil\n\t})\n\treturn jobs\n}", "func queueName(src *v1alpha1.AWSS3Source) string {\n\treturn \"s3-events_\" + src.Spec.ARN.Resource\n}", "func (s *Service) Queue() amboy.Queue {\n\treturn s.queue\n}", "func (svc *SQS) XGetQueueURL(ctx context.Context, queueName string) (queueURL string, err error) {\n\tresp, err := svc.GetQueueURL(ctx, 
GetQueueURLRequest{\n\t\tQueueName: queueName,\n\t})\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn resp.QueueURL, nil\n}", "func (o DiagnosticsStorageAccountConfigPtrOutput) QueueEndpoint() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v *DiagnosticsStorageAccountConfig) *string {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn &v.QueueEndpoint\n\t}).(pulumi.StringPtrOutput)\n}", "func (a *Alias) ToString() string {\n\tif len(a.AdvancedAliases) != 0 {\n\t\taliases := make([]string, len(a.AdvancedAliases))\n\t\tfor i, advancedAlias := range a.AdvancedAliases {\n\t\t\taliases[i] = aws.StringValue(advancedAlias.Alias)\n\t\t}\n\t\treturn strings.Join(aliases, \",\")\n\t}\n\tif a.StringSliceOrString.String != nil {\n\t\treturn aws.StringValue(a.StringSliceOrString.String)\n\t}\n\treturn strings.Join(a.StringSliceOrString.StringSlice, \",\")\n}", "func (mb *client) ReadFIFOQueue(address uint16) (results []byte, err error) {\n\trequest := ProtocolDataUnit{\n\t\tFunctionCode: FuncCodeReadFIFOQueue,\n\t\tData: dataBlock(address),\n\t}\n\tresponse, err := mb.send(&request)\n\tif err != nil {\n\t\treturn\n\t}\n\tif len(response.Data) < 4 {\n\t\terr = fmt.Errorf(\"modbus: response data size '%v' is less than expected '%v'\", len(response.Data), 4)\n\t\treturn\n\t}\n\tcount := int(binary.BigEndian.Uint16(response.Data))\n\tif count != (len(response.Data) - 1) {\n\t\terr = fmt.Errorf(\"modbus: response data size '%v' does not match count '%v'\", len(response.Data)-1, count)\n\t\treturn\n\t}\n\tcount = int(binary.BigEndian.Uint16(response.Data[2:]))\n\tif count > 31 {\n\t\terr = fmt.Errorf(\"modbus: fifo count '%v' is greater than expected '%v'\", count, 31)\n\t\treturn\n\t}\n\tresults = response.Data[4:]\n\treturn\n}", "func (s *Store) GetQueueStatistics(name string) (QueueStatistics, error) {\n\treturn QueueStatistics{}, nil\n}", "func (i *aliasTarget) getAliasTargetHostedZoneID() string {\n\tif i == nil {\n\t\treturn \"\"\n\t}\n\treturn i.HostedZoneID\n}", 
"func (o RuleTargetDeadLetterQueueOutput) Arn() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v RuleTargetDeadLetterQueue) *string { return v.Arn }).(pulumi.StringPtrOutput)\n}", "func (i *aliasTarget) getAliasDNSName() string {\n\tif i == nil {\n\t\treturn \"\"\n\t}\n\treturn i.DNSName\n}", "func UConverterGetAliases(arg1 string, arg2 *UErrorCode) (_swig_ret []string)", "func (c *EmployeeClient) QueryQueue(e *Employee) *QueueQuery {\n\tquery := &QueueQuery{config: c.config}\n\tquery.path = func(ctx context.Context) (fromV *sql.Selector, _ error) {\n\t\tid := e.ID\n\t\tstep := sqlgraph.NewStep(\n\t\t\tsqlgraph.From(employee.Table, employee.FieldID, id),\n\t\t\tsqlgraph.To(queue.Table, queue.FieldID),\n\t\t\tsqlgraph.Edge(sqlgraph.O2M, false, employee.QueueTable, employee.QueueColumn),\n\t\t)\n\t\tfromV = sqlgraph.Neighbors(e.driver.Dialect(), step)\n\t\treturn fromV, nil\n\t}\n\treturn query\n}", "func deleteQueues(ctx *TestContext) {\n\tfor _, q := range ctx.Queues {\n\t\tDeleteQueue(ctx, q)\n\t}\n}", "func (s *RedisDeviceStore) DownlinkQueue(appID, devID string) (DownlinkQueue, error) {\n\treturn &RedisDownlinkQueue{\n\t\tappID: appID,\n\t\tdevID: devID,\n\t\tqueues: s.queues,\n\t}, nil\n}", "func (o *SamlConfigurationProperties) GetSpPrivateKeyAlias() SamlConfigurationPropertyItemsString {\n\tif o == nil || o.SpPrivateKeyAlias == nil {\n\t\tvar ret SamlConfigurationPropertyItemsString\n\t\treturn ret\n\t}\n\treturn *o.SpPrivateKeyAlias\n}", "func (s *azureServiceBusScaler) GetMetrics(ctx context.Context, metricName string, metricSelector labels.Selector) ([]external_metrics.ExternalMetricValue, error) {\n\tqueuelen, err := s.GetAzureServiceBusLength(ctx)\n\n\tif err != nil {\n\t\tazureServiceBusLog.Error(err, \"error getting service bus entity length\")\n\t\treturn []external_metrics.ExternalMetricValue{}, err\n\t}\n\n\tmetric := external_metrics.ExternalMetricValue{\n\t\tMetricName: metricName,\n\t\tValue: *resource.NewQuantity(int64(queuelen), 
resource.DecimalSI),\n\t\tTimestamp: metav1.Now(),\n\t}\n\n\treturn append([]external_metrics.ExternalMetricValue{}, metric), nil\n}", "func (q *queue) GetName() string {\n\treturn q.name\n}", "func (pub *Publisher) QueueName() string {\r\n\treturn pub.queueName\r\n}", "func (c *restClient) ListQueues(ctx context.Context, req *cloudtaskspb.ListQueuesRequest, opts ...gax.CallOption) *QueueIterator {\n\tit := &QueueIterator{}\n\treq = proto.Clone(req).(*cloudtaskspb.ListQueuesRequest)\n\tunm := protojson.UnmarshalOptions{AllowPartial: true, DiscardUnknown: true}\n\tit.InternalFetch = func(pageSize int, pageToken string) ([]*cloudtaskspb.Queue, string, error) {\n\t\tresp := &cloudtaskspb.ListQueuesResponse{}\n\t\tif pageToken != \"\" {\n\t\t\treq.PageToken = pageToken\n\t\t}\n\t\tif pageSize > math.MaxInt32 {\n\t\t\treq.PageSize = math.MaxInt32\n\t\t} else if pageSize != 0 {\n\t\t\treq.PageSize = int32(pageSize)\n\t\t}\n\t\tbaseUrl, err := url.Parse(c.endpoint)\n\t\tif err != nil {\n\t\t\treturn nil, \"\", err\n\t\t}\n\t\tbaseUrl.Path += fmt.Sprintf(\"/v2beta3/%v/queues\", req.GetParent())\n\n\t\tparams := url.Values{}\n\t\tparams.Add(\"$alt\", \"json;enum-encoding=int\")\n\t\tif req.GetFilter() != \"\" {\n\t\t\tparams.Add(\"filter\", fmt.Sprintf(\"%v\", req.GetFilter()))\n\t\t}\n\t\tif req.GetPageSize() != 0 {\n\t\t\tparams.Add(\"pageSize\", fmt.Sprintf(\"%v\", req.GetPageSize()))\n\t\t}\n\t\tif req.GetPageToken() != \"\" {\n\t\t\tparams.Add(\"pageToken\", fmt.Sprintf(\"%v\", req.GetPageToken()))\n\t\t}\n\t\tif req.GetReadMask() != nil {\n\t\t\treadMask, err := protojson.Marshal(req.GetReadMask())\n\t\t\tif err != nil {\n\t\t\t\treturn nil, \"\", err\n\t\t\t}\n\t\t\tparams.Add(\"readMask\", string(readMask[1:len(readMask)-1]))\n\t\t}\n\n\t\tbaseUrl.RawQuery = params.Encode()\n\n\t\t// Build HTTP headers from client and context metadata.\n\t\thds := append(c.xGoogHeaders, \"Content-Type\", \"application/json\")\n\t\theaders := gax.BuildHeaders(ctx, hds...)\n\t\te := 
gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {\n\t\t\tif settings.Path != \"\" {\n\t\t\t\tbaseUrl.Path = settings.Path\n\t\t\t}\n\t\t\thttpReq, err := http.NewRequest(\"GET\", baseUrl.String(), nil)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\thttpReq.Header = headers\n\n\t\t\thttpRsp, err := c.httpClient.Do(httpReq)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tdefer httpRsp.Body.Close()\n\n\t\t\tif err = googleapi.CheckResponse(httpRsp); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\tbuf, err := io.ReadAll(httpRsp.Body)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\tif err := unm.Unmarshal(buf, resp); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\treturn nil\n\t\t}, opts...)\n\t\tif e != nil {\n\t\t\treturn nil, \"\", e\n\t\t}\n\t\tit.Response = resp\n\t\treturn resp.GetQueues(), resp.GetNextPageToken(), nil\n\t}\n\n\tfetch := func(pageSize int, pageToken string) (string, error) {\n\t\titems, nextPageToken, err := it.InternalFetch(pageSize, pageToken)\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t\tit.items = append(it.items, items...)\n\t\treturn nextPageToken, nil\n\t}\n\n\tit.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf)\n\tit.pageInfo.MaxSize = int(req.GetPageSize())\n\tit.pageInfo.Token = req.GetPageToken()\n\n\treturn it\n}" ]
[ "0.69151425", "0.6435573", "0.6247431", "0.6227677", "0.6098082", "0.59897524", "0.58581173", "0.5692911", "0.56441885", "0.5639317", "0.5635707", "0.54951936", "0.54639095", "0.5432769", "0.53856283", "0.53819096", "0.53484887", "0.5290492", "0.5287443", "0.5241385", "0.52201736", "0.5193116", "0.51760435", "0.5118323", "0.51103204", "0.5089046", "0.5078399", "0.50702375", "0.5046697", "0.5034584", "0.50287783", "0.5027905", "0.5007089", "0.49887496", "0.49882758", "0.4979757", "0.49775475", "0.49771214", "0.4973581", "0.49435443", "0.49400744", "0.49381882", "0.49256247", "0.49200523", "0.4919063", "0.48988461", "0.48888874", "0.4886248", "0.4876699", "0.4871946", "0.48709017", "0.48704988", "0.48689255", "0.48380634", "0.48321846", "0.48276863", "0.48178735", "0.48153856", "0.48122102", "0.48113808", "0.48036042", "0.4803099", "0.47833392", "0.47506607", "0.47491416", "0.47476757", "0.4719639", "0.47042498", "0.4702077", "0.46949622", "0.46943215", "0.46889198", "0.46811938", "0.4680082", "0.4679647", "0.4675511", "0.46735367", "0.46548513", "0.46544385", "0.46489146", "0.46485028", "0.46439692", "0.46436137", "0.46430463", "0.46403083", "0.4640157", "0.46350938", "0.46334526", "0.4632484", "0.46324128", "0.46283746", "0.46224973", "0.46193987", "0.46156996", "0.4608924", "0.4605449", "0.46037555", "0.46032417", "0.4595018", "0.45928654" ]
0.7781025
0
GetAliasQueuesOk returns a tuple with the AliasQueues field value and a boolean to check if the value has been set.
GetAliasQueuesOk возвращает кортеж с значением поля AliasQueues и булевым значением для проверки, было ли значение задано.
func (o *QueueManager) GetAliasQueuesOk() (*[]AliasQueue, bool) { if o == nil { return nil, false } return &o.AliasQueues, true }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (o *QueueManager) GetAliasQueues() []AliasQueue {\n\tif o == nil {\n\t\tvar ret []AliasQueue\n\t\treturn ret\n\t}\n\n\treturn o.AliasQueues\n}", "func (o *QueueManager) SetAliasQueues(v []AliasQueue) {\n\to.AliasQueues = v\n}", "func (o *QueueManager) GetClusterQueuesOk() (*[]ClusterQueue, bool) {\n\tif o == nil {\n\t\treturn nil, false\n\t}\n\treturn &o.ClusterQueues, true\n}", "func (o *QueueManager) GetRemoteQueuesOk() (*[]RemoteQueue, bool) {\n\tif o == nil {\n\t\treturn nil, false\n\t}\n\treturn &o.RemoteQueues, true\n}", "func (*OpenconfigQos_Qos_Interfaces_Interface_Output_Queues) IsYANGGoStruct() {}", "func (*OpenconfigQos_Qos_Interfaces_Interface_Input_VirtualOutputQueues_VoqInterface_Queues) IsYANGGoStruct() {}", "func (b *backend) Queues(ctx context.Context, qq *entroq.QueuesQuery) (map[string]int, error) {\n\tdefer un(lock(b))\n\n\tqs := make(map[string]int)\n\tfor q, items := range b.heaps {\n\t\tif len(qq.MatchPrefix) != 0 || len(qq.MatchExact) != 0 {\n\t\t\tif !matchesPrefix(q, qq.MatchPrefix...) && !matchesExact(q, qq.MatchExact...) 
{\n\t\t\t\t// no match\n\t\t\t\tcontinue\n\t\t\t}\n\t\t}\n\t\tqs[q] = items.Len()\n\t\tif qq.Limit > 0 && len(qs) >= qq.Limit {\n\t\t\tbreak\n\t\t}\n\t}\n\treturn qs, nil\n}", "func (*OpenconfigQos_Qos_Queues) IsYANGGoStruct() {}", "func (b *backend) Queues(ctx context.Context, qq *entroq.QueuesQuery) (map[string]int, error) {\n\tresp, err := pb.NewEntroQClient(b.conn).Queues(ctx, &pb.QueuesRequest{\n\t\tMatchPrefix: qq.MatchPrefix,\n\t\tMatchExact: qq.MatchExact,\n\t\tLimit: int32(qq.Limit),\n\t})\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"grpc queues: %w\", unpackGRPCError(err))\n\t}\n\tqs := make(map[string]int)\n\tfor _, q := range resp.Queues {\n\t\tqs[q.Name] = int(q.NumTasks)\n\t}\n\treturn qs, nil\n}", "func (c *Context) HasQueuesMap(key string) bool {\n\treturn c.makross.HasQueuesMap(key)\n}", "func (*OpenconfigQos_Qos_Interfaces_Interface_Input_VirtualOutputQueues_VoqInterface_Queues_Queue) IsYANGGoStruct() {}", "func (m *Makross) HasQueuesMap(key string) bool {\n\tif value, okay := m.QueuesMap.Load(key); okay {\n\t\tif pqueue, okay := value.(*prior.PriorityQueue); okay {\n\t\t\tif pqueue.Length() > 0 {\n\t\t\t\treturn true\n\t\t\t}\n\t\t}\n\t}\n\treturn false\n}", "func (o *VnicEthAdapterPolicyInventory) GetTxQueueSettingsOk() (*VnicEthTxQueueSettings, bool) {\n\tif o == nil {\n\t\treturn nil, false\n\t}\n\treturn o.TxQueueSettings.Get(), o.TxQueueSettings.IsSet()\n}", "func (*OpenconfigQos_Qos_Interfaces_Interface_Input_Queues) IsYANGGoStruct() {}", "func (o *VnicEthAdapterPolicyInventory) GetRxQueueSettingsOk() (*VnicEthRxQueueSettings, bool) {\n\tif o == nil {\n\t\treturn nil, false\n\t}\n\treturn o.RxQueueSettings.Get(), o.RxQueueSettings.IsSet()\n}", "func (*OpenconfigQos_Qos_Interfaces_Interface_Output_Queues_Queue) IsYANGGoStruct() {}", "func (t *OpenconfigQos_Qos_Interfaces_Interface_Output_Queues) Validate(opts ...ygot.ValidationOption) error {\n\tif err := 
ytypes.Validate(SchemaTree[\"OpenconfigQos_Qos_Interfaces_Interface_Output_Queues\"], t, opts...); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}", "func (*OpenconfigQos_Qos_Interfaces_Interface_Input_VirtualOutputQueues_VoqInterface_Queues_Queue_Config) IsYANGGoStruct() {}", "func (o *Platform) GetAliasOk() (*string, bool) {\n\tif o == nil {\n\t\treturn nil, false\n\t}\n\treturn &o.Alias, true\n}", "func (u *Unpackerr) haveSonarrQitem(name string) bool {\n\tfor _, server := range u.Sonarr {\n\t\tfor _, q := range server.Queue.Records {\n\t\t\tif q.Title == name {\n\t\t\t\treturn true\n\t\t\t}\n\t\t}\n\t}\n\n\treturn false\n}", "func (_PlasmaFramework *PlasmaFrameworkCaller) ExitsQueues(opts *bind.CallOpts, arg0 [32]byte) (common.Address, error) {\n\tvar (\n\t\tret0 = new(common.Address)\n\t)\n\tout := ret0\n\terr := _PlasmaFramework.contract.Call(opts, out, \"exitsQueues\", arg0)\n\treturn *ret0, err\n}", "func (o *VnicEthAdapterPolicyInventory) GetCompletionQueueSettingsOk() (*VnicCompletionQueueSettings, bool) {\n\tif o == nil {\n\t\treturn nil, false\n\t}\n\treturn o.CompletionQueueSettings.Get(), o.CompletionQueueSettings.IsSet()\n}", "func (s *API) ListQueues(w http.ResponseWriter, req *http.Request) {\n\tlog.Debug(\"ListQueues\")\n\n\tqueueNamePrefix := req.FormValue(\"QueueNamePrefix\")\n\tvar queues []string\n\tfor k, v := range s.sqs.queues {\n\t\tif strings.HasPrefix(k, queueNamePrefix) {\n\t\t\tqueues = append(queues, v.url)\n\t\t}\n\t}\n\n\tresponse := ListQueuesResponse{\n\t\tResult: ListQueuesResult{queues},\n\t\tMetaData: ResponseMetaData{\"00000000-0000-0000-0000-000000000000\"},\n\t}\n\n\tw.Header().Set(\"Content-Type\", \"application/xml\")\n\tenc := xml.NewEncoder(w)\n\tenc.Indent(\" \", \" \")\n\tif err := enc.Encode(response); err != nil {\n\t\tlog.Errorf(\"error: %s\", err)\n\t}\n}", "func (o *VulnUpdateNotification) GetQueueIdOk() (*string, bool) {\n\tif o == nil || o.QueueId == nil {\n\t\treturn nil, false\n\t}\n\treturn o.QueueId, 
true\n}", "func (o *Project) GetAliasOk() (*[]ProjectAlias, bool) {\n\tif o == nil || o.Alias == nil {\n\t\treturn nil, false\n\t}\n\treturn o.Alias, true\n}", "func (*OpenconfigQos_Qos_Interfaces_Interface_Output_Queues_Queue_Config) IsYANGGoStruct() {}", "func (_PlasmaFramework *PlasmaFrameworkSession) ExitsQueues(arg0 [32]byte) (common.Address, error) {\n\treturn _PlasmaFramework.Contract.ExitsQueues(&_PlasmaFramework.CallOpts, arg0)\n}", "func (_Rootchain *RootchainCaller) ExitsQueues(opts *bind.CallOpts, arg0 common.Address) (common.Address, error) {\n\tvar (\n\t\tret0 = new(common.Address)\n\t)\n\tout := ret0\n\terr := _Rootchain.contract.Call(opts, out, \"exitsQueues\", arg0)\n\treturn *ret0, err\n}", "func (*OpenconfigQos_Qos_Interfaces_Interface_Input_VirtualOutputQueues_VoqInterface_Queues_Queue_State) IsYANGGoStruct() {}", "func (h *Hospital) ConsumeQueues(ctx context.Context, t *testing.T) (events int, messages []string) {\n\tt.Helper()\n\treturn h.ConsumeQueuesWithLimit(ctx, t, -1, true)\n}", "func groomQueues(queues *Queues) (err kv.Error) {\n\tfor qName, qDetails := range *queues {\n\t\t// If we have enough runners drop the queue as it needs nothing done to it\n\t\tif len(qDetails.NodeGroup) == 0 || qDetails.Running >= qDetails.Ready+qDetails.NotVisible {\n\t\t\tif logger.IsTrace() {\n\t\t\t\tlogger.Trace(\"queue already handled\", \"queue\", qName, \"stack\", stack.Trace().TrimRuntime())\n\t\t\t}\n\t\t\tdelete(*queues, qName)\n\t\t}\n\t}\n\treturn nil\n}", "func (_PlasmaFramework *PlasmaFrameworkCallerSession) ExitsQueues(arg0 [32]byte) (common.Address, error) {\n\treturn _PlasmaFramework.Contract.ExitsQueues(&_PlasmaFramework.CallOpts, arg0)\n}", "func (*OpenconfigQos_Qos_Interfaces_Interface_Output_Queues_Queue_State) IsYANGGoStruct() {}", "func (q *QueueWatcher) checkQueueEmpty(jobName string) (bool, error) {\n\tqueues, err := q.client.Queues()\n\tif err != nil {\n\t\treturn false, err\n\t}\n\n\tfor _, queue := range queues {\n\t\tif 
queue.JobName == jobName {\n\t\t\treturn queue.Count == 0, nil\n\t\t}\n\t}\n\n\t// If queue does not exist consider it empty.\n\t// QueueWatcher is not active in the initial phase during which no items\n\t// have been enqueued yet.\n\t// E.g. when active checks start, the ProductionExhausted channel has been\n\t// closed.\n\treturn true, nil\n}", "func (t *OpenconfigQos_Qos_Interfaces_Interface_Output_Queues_Queue) Validate(opts ...ygot.ValidationOption) error {\n\tif err := ytypes.Validate(SchemaTree[\"OpenconfigQos_Qos_Interfaces_Interface_Output_Queues_Queue\"], t, opts...); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}", "func (s *UserDataFilters) SetQueues(v []*string) *UserDataFilters {\n\ts.Queues = v\n\treturn s\n}", "func (o *Environment) GetQuotasOk() (*EnvironmentQuotas, bool) {\n\tif o == nil || o.Quotas == nil {\n\t\treturn nil, false\n\t}\n\treturn o.Quotas, true\n}", "func (s *Filters) SetQueues(v []*string) *Filters {\n\ts.Queues = v\n\treturn s\n}", "func (o *SamlConfigurationProperties) GetSpPrivateKeyAliasOk() (*SamlConfigurationPropertyItemsString, bool) {\n\tif o == nil || o.SpPrivateKeyAlias == nil {\n\t\treturn nil, false\n\t}\n\treturn o.SpPrivateKeyAlias, true\n}", "func GetAvailableQueues() ([]string, error) {\n\tclient := &http.Client{}\n\n\tres, err := client.Get(Host + \"/queues\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\trespBody := res.Body\n\tdefer respBody.Close()\n\n\tavailableQueues := struct {\n\t\tQueues []string `json:\"queues\"`\n\t}{}\n\tif err := json.NewDecoder(respBody).Decode(&availableQueues); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn availableQueues.Queues, nil\n}", "func (gores *Gores) Queues() []string {\n\tqueues := make([]string, 0)\n\n\tconn := gores.pool.Get()\n\tdefer conn.Close()\n\n\tdata, _ := conn.Do(\"SMEMBERS\", watchedQueues)\n\tfor _, q := range data.([]interface{}) {\n\t\tqueues = append(queues, string(q.([]byte)))\n\t}\n\n\treturn queues\n}", "func (ss *SqsService) 
IsQueueEmpty(ctx context.Context) (isEmpty bool) {\n\tisEmpty = false\n\tinput := &sqs.GetQueueAttributesInput{\n\t\tQueueUrl: &ss.queueURL,\n\t\tAttributeNames: []types.QueueAttributeName{\n\t\t\t\"ApproximateNumberOfMessages\",\n\t\t\t\"ApproximateNumberOfMessagesNotVisible\",\n\t\t},\n\t}\n\toutput, err := ss.client.GetQueueAttributes(ctx, input)\n\n\tif err != nil {\n\t\tlog.Printf(\"Faided to get queue attributes from Queue %s, please try again later - %s\", ss.queueName, err.Error())\n\t\treturn\n\t}\n\n\tvisible, _ := strconv.Atoi(output.Attributes[\"ApproximateNumberOfMessages\"])\n\tnotVisible, _ := strconv.Atoi(output.Attributes[\"ApproximateNumberOfMessagesNotVisible\"])\n\n\tlog.Printf(\"Queue %s has %d not visible message(s) and %d visable message(s)\\n\", ss.queueName, notVisible, visible)\n\n\tif visible+notVisible <= 1 {\n\t\tisEmpty = true\n\t}\n\treturn\n}", "func (stats *APTQueueStats) HasErrors() bool {\n\treturn len(stats.Errors) > 0\n}", "func (i *Inspector) Queues() ([]string, error) {\n\treturn i.rdb.AllQueues()\n}", "func (*OpenconfigQos_Qos_Interfaces_Interface_Input_Queues_Queue) IsYANGGoStruct() {}", "func (o *FiltersApiLog) GetQueryApiNamesOk() ([]string, bool) {\n\tif o == nil || o.QueryApiNames == nil {\n\t\tvar ret []string\n\t\treturn ret, false\n\t}\n\treturn *o.QueryApiNames, true\n}", "func (_Rootchain *RootchainSession) ExitsQueues(arg0 common.Address) (common.Address, error) {\n\treturn _Rootchain.Contract.ExitsQueues(&_Rootchain.CallOpts, arg0)\n}", "func (o *FiltersApiLog) GetQueryCallNamesOk() ([]string, bool) {\n\tif o == nil || o.QueryCallNames == nil {\n\t\tvar ret []string\n\t\treturn ret, false\n\t}\n\treturn *o.QueryCallNames, true\n}", "func IsQueueExist(name string, ch *amqp.Channel) bool {\n\tvar exist bool\n\t_, err := ch.QueueInspect(name)\n\tif err == nil {\n\t\texist = true\n\t}\n\n\treturn exist\n}", "func (_Rootchain *RootchainCallerSession) ExitsQueues(arg0 common.Address) (common.Address, error) {\n\treturn 
_Rootchain.Contract.ExitsQueues(&_Rootchain.CallOpts, arg0)\n}", "func (t *OpenconfigQos_Qos_Interfaces_Interface_Input_VirtualOutputQueues_VoqInterface_Queues) Validate(opts ...ygot.ValidationOption) error {\n\tif err := ytypes.Validate(SchemaTree[\"OpenconfigQos_Qos_Interfaces_Interface_Input_VirtualOutputQueues_VoqInterface_Queues\"], t, opts...); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}", "func (o *QueueManager) GetNameOk() (*string, bool) {\n\tif o == nil {\n\t\treturn nil, false\n\t}\n\treturn &o.Name, true\n}", "func (s *SQSServer) pollQueues(pollctx, taskctx context.Context, queues []QueueConf) error {\n\tfor _, qconf := range queues {\n\t\tq, err := s.getQueue(pollctx, qconf)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treq := &sqs.GetQueueAttributesInput{\n\t\t\tAttributeNames: []types.QueueAttributeName{(\"VisibilityTimeout\")},\n\t\t\tQueueUrl: &q.url,\n\t\t}\n\t\tresp, err := s.sqsSrv(q.QueueConf).GetQueueAttributes(pollctx, req)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"Failed to get queue attributes for '%s' - %s\", q.Name, err.Error())\n\t\t}\n\t\tto := resp.Attributes[\"VisibilityTimeout\"]\n\t\tif to == \"\" {\n\t\t\treturn fmt.Errorf(\"No visibility timeout returned by SQS for queue '%s'\", q.Name)\n\t\t}\n\t\tvisTimeout, err := strconv.Atoi(to)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"Failed to convert visibility timeout from '%s' to int - '%s'\", to, err.Error())\n\t\t}\n\t\t// Each queue runs in a dedicated go routine.\n\t\tgo func(vt int32) {\n\t\t\ts.queuePollers.Add(1)\n\t\t\tdefer s.queuePollers.Done()\n\t\t\ts.run(pollctx, taskctx, q, vt)\n\t\t}(int32(visTimeout))\n\t}\n\n\treturn nil\n}", "func (*OpenconfigQos_Qos_Queues_Queue) IsYANGGoStruct() {}", "func (c *checkQueueAttributeImpl) CheckQueueAttributeQuery(options CheckQueueAttributeOptions) icinga.Result {\n\tname := \"Queue.Attributes\"\n\n\tstatusCheck, err := icinga.NewStatusCheck(options.ThresholdWarning, options.ThresholdCritical)\n\tif err != nil 
{\n\t\treturn icinga.NewResult(name, icinga.ServiceStatusUnknown, fmt.Sprintf(\"can't check status: %v\", err))\n\t}\n\n\tif len(options.OkIfQueueIsMissing) > 0 {\n\t\tproperty := \"broker=\\\"0.0.0.0\\\"\"\n\t\tattribute := \"QueueNames\"\n\t\tqueueSearchResult, err := c.JolokiaClient.GetAttr(options.Domain, []string{property}, attribute)\n\t\tif err != nil {\n\t\t\treturn icinga.NewResult(name, icinga.ServiceStatusUnknown, fmt.Sprintf(\"can't query QueueNames in Jolokia: %v\", err))\n\t\t}\n\t\tif queueSearchResult == nil {\n\t\t\tif (options.Verbose > 0) {\n\t\t\t\tlog.Printf(\"No queues found: [%v]\", queueSearchResult)\n\t\t\t}\n\t\t\treturn icinga.NewResult(name, icinga.ServiceStatusUnknown, fmt.Sprintf(\"can't find QueueNames for [%v]\", property))\n\t\t}\n\n\t\tif !queueExists(queueSearchResult.([] interface{}), options.OkIfQueueIsMissing) {\n\t\t\tif (options.Verbose > 0) {\n\t\t\t\tlog.Printf(\"Queue [%v] not in queue list [%v]\", options.OkIfQueueIsMissing, queueSearchResult.([] interface{}))\n\t\t\t}\n\t\t\treturn icinga.NewResult(name, icinga.ServiceStatusOk, fmt.Sprintf(\"queue [%v] does not exist\", options.OkIfQueueIsMissing))\n\t\t}\n\t}\n\n\tsearchResult, err := c.JolokiaClient.GetAttr(options.Domain, []string{options.Queue}, options.Attribute)\n\tif err != nil {\n\t\treturn icinga.NewResult(name, icinga.ServiceStatusUnknown, fmt.Sprintf(\"can't query Jolokia: %v\", err))\n\t}\n\n\tresult, err := utils.ToFloat(searchResult)\n\tif err != nil {\n\t\tif (options.Verbose > 0) {\n\t\t\tlog.Printf(\"An error occured with result [%v]\", searchResult)\n\t\t}\n\t\treturn icinga.NewResult(name, icinga.ServiceStatusUnknown, fmt.Sprintf(\"query result is invalid: %v\", err))\n\t}\n\n\tmessage := fmt.Sprintf(\"Search produced: %v\", searchResult)\n\tstatus := statusCheck.Check(result)\n\n\treturn icinga.NewResult(name, status, message)\n}", "func GetQueues(c *gin.Context) {\n\t//TODO: create a while both back and front until value is != nil\n\tsize := 
len(queue)\n\tlog.Printf(\"squeue: %v\", queue)\n\tif size == 0 {\n\t\tc.JSON(http.StatusNotFound, gin.H{\n\t\t\t\"msg\": \"queue don't have any item!\",\n\t\t})\n\t\treturn\n\t}\n\tc.JSON(http.StatusOK, gin.H{\n\t\t\"queues\": queue,\n\t})\n\tlog.Printf(\"equeue: %v\", queue)\n}", "func (o *V0037JobProperties) GetRequeueOk() (*bool, bool) {\n\tif o == nil || o.Requeue == nil {\n\t\treturn nil, false\n\t}\n\treturn o.Requeue, true\n}", "func (t *OpenconfigQos_Qos_Queues) Validate(opts ...ygot.ValidationOption) error {\n\tif err := ytypes.Validate(SchemaTree[\"OpenconfigQos_Qos_Queues\"], t, opts...); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}", "func ValidateQueues(db *storm.DB, config Settings.FullClientSettings, tclient *torrent.Client) {\n\ttorrentQueues := Storage.FetchQueues(db)\n\tfor len(torrentQueues.ActiveTorrents) > config.MaxActiveTorrents {\n\t\tremoveTorrent := torrentQueues.ActiveTorrents[:1]\n\t\tfor _, singleTorrent := range tclient.Torrents() {\n\t\t\tif singleTorrent.InfoHash().String() == removeTorrent[0] {\n\t\t\t\tsingleTorrentFromStorage := Storage.FetchTorrentFromStorage(db, removeTorrent[0])\n\t\t\t\tRemoveTorrentFromActive(&singleTorrentFromStorage, singleTorrent, db)\n\t\t\t}\n\t\t}\n\t}\n\ttorrentQueues = Storage.FetchQueues(db)\n\tfor _, singleTorrent := range tclient.Torrents() {\n\t\tsingleTorrentFromStorage := Storage.FetchTorrentFromStorage(db, singleTorrent.InfoHash().String())\n\t\tif singleTorrentFromStorage.TorrentStatus == \"Stopped\" {\n\t\t\tcontinue\n\t\t}\n\t\tfor _, queuedTorrent := range torrentQueues.QueuedTorrents { //If we have a queued torrent that is missing data, and an active torrent that is seeding, then prioritize the missing data one\n\t\t\tif singleTorrent.InfoHash().String() == queuedTorrent {\n\t\t\t\tif singleTorrent.BytesMissing() > 0 {\n\t\t\t\t\tfor _, activeTorrent := range torrentQueues.ActiveTorrents {\n\t\t\t\t\t\tfor _, singleActiveTorrent := range tclient.Torrents() {\n\t\t\t\t\t\t\tif 
activeTorrent == singleActiveTorrent.InfoHash().String() {\n\t\t\t\t\t\t\t\tif singleActiveTorrent.Seeding() == true {\n\t\t\t\t\t\t\t\t\tsingleActiveTFS := Storage.FetchTorrentFromStorage(db, activeTorrent)\n\t\t\t\t\t\t\t\t\tLogger.WithFields(logrus.Fields{\"TorrentName\": singleActiveTFS.TorrentName}).Info(\"Seeding, Removing from active to add queued\")\n\t\t\t\t\t\t\t\t\tRemoveTorrentFromActive(&singleActiveTFS, singleActiveTorrent, db)\n\t\t\t\t\t\t\t\t\tsingleQueuedTFS := Storage.FetchTorrentFromStorage(db, queuedTorrent)\n\t\t\t\t\t\t\t\t\tLogger.WithFields(logrus.Fields{\"TorrentName\": singleQueuedTFS.TorrentName}).Info(\"Adding torrent to the queue, not active\")\n\t\t\t\t\t\t\t\t\tAddTorrentToActive(&singleQueuedTFS, singleTorrent, db)\n\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n}", "func (*OpenconfigQos_Qos_Interfaces_Interface_Input_Queues_Queue_Config) IsYANGGoStruct() {}", "func (t *OpenconfigQos_Qos_Interfaces_Interface_Output_Queues_Queue_Config) Validate(opts ...ygot.ValidationOption) error {\n\tif err := ytypes.Validate(SchemaTree[\"OpenconfigQos_Qos_Interfaces_Interface_Output_Queues_Queue_Config\"], t, opts...); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}", "func (mr *MockSQSAPIMockRecorder) ListQueues(arg0 interface{}) *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"ListQueues\", reflect.TypeOf((*MockSQSAPI)(nil).ListQueues), arg0)\n}", "func (o *NetworkDns) GetNameServersOk() ([]string, bool) {\n\tif o == nil || o.NameServers == nil {\n\t\treturn nil, false\n\t}\n\treturn o.NameServers, true\n}", "func (sub *subState) isQueueSubscriber() bool {\n\treturn sub.QGroup != \"\"\n}", "func (o *URL) GetAliasOk() (*string, bool) {\n\tif o == nil {\n\t\treturn nil, false\n\t}\n\treturn &o.Alias, true\n}", "func (t *OpenconfigQos_Qos_Interfaces_Interface_Input_VirtualOutputQueues_VoqInterface_Queues_Queue) Validate(opts 
...ygot.ValidationOption) error {\n\tif err := ytypes.Validate(SchemaTree[\"OpenconfigQos_Qos_Interfaces_Interface_Input_VirtualOutputQueues_VoqInterface_Queues_Queue\"], t, opts...); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}", "func (*OpenconfigQos_Qos_Interfaces_Interface_Input_VirtualOutputQueues) IsYANGGoStruct() {}", "func (*OpenconfigQos_Qos_Interfaces_Interface_Input_Queues_Queue_State) IsYANGGoStruct() {}", "func (target *ElasticsearchTarget) HasQueueStore() bool {\n\treturn target.store != nil\n}", "func (q *execQueue) canQueue() bool {\n\tq.mu.Lock()\n\tok := !q.isClosed() && len(q.funcs) < cap(q.funcs)\n\tq.mu.Unlock()\n\treturn ok\n}", "func (_PlasmaFramework *PlasmaFrameworkCaller) HasExitQueue(opts *bind.CallOpts, vaultId *big.Int, token common.Address) (bool, error) {\n\tvar (\n\t\tret0 = new(bool)\n\t)\n\tout := ret0\n\terr := _PlasmaFramework.contract.Call(opts, out, \"hasExitQueue\", vaultId, token)\n\treturn *ret0, err\n}", "func (*OpenconfigQos_Qos_Queues_Queue_Config) IsYANGGoStruct() {}", "func (stats *APTQueueStats) HasWarnings() bool {\n\treturn len(stats.Warnings) > 0\n}", "func (t *OpenconfigQos_Qos_Interfaces_Interface_Input_VirtualOutputQueues_VoqInterface_Queues_Queue_Config) Validate(opts ...ygot.ValidationOption) error {\n\tif err := ytypes.Validate(SchemaTree[\"OpenconfigQos_Qos_Interfaces_Interface_Input_VirtualOutputQueues_VoqInterface_Queues_Queue_Config\"], t, opts...); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}", "func (*OpenconfigQos_Qos_Queues_Queue_State) IsYANGGoStruct() {}", "func (*OpenconfigQos_Qos_Queues_Queue_Red) IsYANGGoStruct() {}", "func (o DotnetSettingsPtrOutput) ForcedNamespaceAliases() pulumi.StringArrayOutput {\n\treturn o.ApplyT(func(v *DotnetSettings) []string {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn v.ForcedNamespaceAliases\n\t}).(pulumi.StringArrayOutput)\n}", "func (qc *QueueConfig) Exists() bool {\n\treturn qc._exists\n}", "func (t *TopicCache) IsQueueEmpty(projectName, 
serviceName string) bool {\n\tt.RLock()\n\tdefer t.RUnlock()\n\n\t_, ok := t.inQueue[projectName+serviceName]\n\n\treturn !ok\n}", "func (o *VulnUpdateNotification) HasQueueId() bool {\n\tif o != nil && o.QueueId != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func (o *IpamAliasEditInput) GetAliasNameOk() (*string, bool) {\n\tif o == nil || o.AliasName == nil {\n\t\treturn nil, false\n\t}\n\treturn o.AliasName, true\n}", "func DeclareQueues(ch *amqp.Channel, queueName string) (amqp.Queue, amqp.Queue) {\n\treturn declareQueue(ch, queueName), declareResponseQueue(ch, queueName)\n}", "func setupValidQueueNames() {\n\tfor _, jType := range models.ValidJobTypes {\n\t\tvar jt = string(jType)\n\t\tvalidQueues[jt] = true\n\t\tvalidQueueList = append(validQueueList, jt)\n\t}\n}", "func (o DotnetSettingsResponseOutput) ForcedNamespaceAliases() pulumi.StringArrayOutput {\n\treturn o.ApplyT(func(v DotnetSettingsResponse) []string { return v.ForcedNamespaceAliases }).(pulumi.StringArrayOutput)\n}", "func (o *LocalDatabaseProvider) GetDnsServersOk() ([]string, bool) {\n\tif o == nil || o.DnsServers == nil {\n\t\treturn nil, false\n\t}\n\treturn o.DnsServers, true\n}", "func (o *VnicEthAdapterPolicyInventory) HasTxQueueSettings() bool {\n\tif o != nil && o.TxQueueSettings.IsSet() {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func (s QueueSetSpy) Queues() map[DeploymentID]*R11nQueue {\n\tres := s.Called()\n\treturn res.Get(0).(map[DeploymentID]*R11nQueue)\n}", "func NewGetCallQueueitemsOK() *GetCallQueueitemsOK {\n\treturn &GetCallQueueitemsOK{}\n}", "func (o *DnsEventAllOf) GetQtypeOk() (*string, bool) {\n\tif o == nil || o.Qtype == nil {\n\t\treturn nil, false\n\t}\n\treturn o.Qtype, true\n}", "func IsAlias(name, alias string) bool {\n\td := registry.Driver(name)\n\tfor _, da := range d.Alias {\n\t\tif da == alias {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func (o *Replication) GetMaxQueueSizeBytesOk() (*int64, bool) {\n\tif o == nil {\n\t\treturn nil, 
false\n\t}\n\treturn &o.MaxQueueSizeBytes, true\n}", "func (o *User) GetMailboxSettingsOk() (AnyOfmicrosoftGraphMailboxSettings, bool) {\n\tif o == nil || o.MailboxSettings == nil {\n\t\tvar ret AnyOfmicrosoftGraphMailboxSettings\n\t\treturn ret, false\n\t}\n\treturn *o.MailboxSettings, true\n}", "func (o *W2) GetAllocatedTipsOk() (*string, bool) {\n\tif o == nil {\n\t\treturn nil, false\n\t}\n\treturn o.AllocatedTips.Get(), o.AllocatedTips.IsSet()\n}", "func QueueMatch(ctx context.Context, t *testing.T, client *entroq.EntroQ, qPrefix string) {\n\tqueue1 := path.Join(qPrefix, \"queue-1\")\n\tqueue2 := path.Join(qPrefix, \"queue-2\")\n\tqueue3 := path.Join(qPrefix, \"queue-3\")\n\tquirkyQueue := path.Join(qPrefix, \"quirky=queue\")\n\n\twantQueues := map[string]int{\n\t\tqueue1: 1,\n\t\tqueue2: 2,\n\t\tqueue3: 3,\n\t\tquirkyQueue: 1,\n\t}\n\n\t// Add tasks so that queues have a certain number of things in them, as above.\n\tvar toInsert []entroq.ModifyArg\n\tfor q, n := range wantQueues {\n\t\tfor i := 0; i < n; i++ {\n\t\t\ttoInsert = append(toInsert, entroq.InsertingInto(q))\n\t\t}\n\t}\n\tinserted, _, err := client.Modify(ctx, toInsert...)\n\tif err != nil {\n\t\tt.Fatalf(\"in QueueMatch - inserting empty tasks: %v\", err)\n\t}\n\n\t// Check that we got everything inserted.\n\tif want, got := len(inserted), len(toInsert); want != got {\n\t\tt.Fatalf(\"in QueueMatch - want %d inserted, got %d\", want, got)\n\t}\n\n\t// Check that we can get exact numbers for all of the above using MatchExact.\n\tfor q, n := range wantQueues {\n\t\tqs, err := client.Queues(ctx, entroq.MatchExact(q))\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"QueueMatch single - getting queue: %v\", err)\n\t\t}\n\t\tif len(qs) != 1 {\n\t\t\tt.Errorf(\"QueueMatch single - expected 1 entry, got %d\", len(qs))\n\t\t}\n\t\tif want, got := n, qs[q]; want != got {\n\t\t\tt.Errorf(\"QueueMatch single - expected %d values in queue %q, got %d\", want, q, got)\n\t\t}\n\t}\n\n\t// Check that passing multiple exact 
matches works properly.\n\tmultiExactCases := []struct {\n\t\tq1 string\n\t\tq2 string\n\t}{\n\t\t{queue1, queue2},\n\t\t{queue1, queue3},\n\t\t{quirkyQueue, queue2},\n\t\t{\"bogus\", queue3},\n\t}\n\n\tfor _, c := range multiExactCases {\n\t\tqs, err := client.Queues(ctx, entroq.MatchExact(c.q1), entroq.MatchExact(c.q2))\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"QueueMatch multi - getting multiple queues: %v\", err)\n\t\t}\n\t\tif len(qs) > 2 {\n\t\t\tt.Errorf(\"QueueMatch multi - expected no more than 2 entries, got %d\", len(qs))\n\t\t}\n\t\twant1, want2 := wantQueues[c.q1], wantQueues[c.q2]\n\t\tif got1, got2 := qs[c.q1], qs[c.q2]; want1 != got1 || want2 != got2 {\n\t\t\tt.Errorf(\"QueueMatch multi - wanted %q:%d, %q:%d, got %q:%d, %q:%d\", c.q1, want1, c.q2, want2, c.q1, got1, c.q2, got2)\n\t\t}\n\t}\n\n\t// Check prefix matching.\n\tprefixCases := []struct {\n\t\tprefix string\n\t\tqn int\n\t\tn int\n\t}{\n\t\t{path.Join(qPrefix, \"queue-\"), 3, 6},\n\t\t{path.Join(qPrefix, \"qu\"), 4, 7},\n\t\t{path.Join(qPrefix, \"qui\"), 1, 1},\n\t}\n\n\tfor _, c := range prefixCases {\n\t\tqs, err := client.Queues(ctx, entroq.MatchPrefix(c.prefix))\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"QueueMatch prefix - queues error: %v\", err)\n\t\t}\n\t\tif want, got := c.qn, len(qs); want != got {\n\t\t\tt.Errorf(\"QueueMatch prefix - want %d queues, got %d\", want, got)\n\t\t}\n\t\ttot := 0\n\t\tfor _, n := range qs {\n\t\t\ttot += n\n\t\t}\n\t\tif want, got := c.n, tot; want != got {\n\t\t\tt.Errorf(\"QueueMatch prefix - want %d total items, got %d\", want, got)\n\t\t}\n\t}\n}", "func (o DotnetSettingsOutput) ForcedNamespaceAliases() pulumi.StringArrayOutput {\n\treturn o.ApplyT(func(v DotnetSettings) []string { return v.ForcedNamespaceAliases }).(pulumi.StringArrayOutput)\n}", "func (o *Project) HasAlias() bool {\n\tif o != nil && o.Alias != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func (bs *BeanstalkdConnectionPool) ListQueues() (queueNames []string, err error) 
{\n\tqueueNames, err = bs.getGlobalConnect().ListTubes()\n\treturn\n}", "func (j *Job) DestinationMQ() bool {\n\treturn j.Publish != \"\" && (j.Destination == \"mq\" || j.Destination == \"both\" || j.Destination == \"\")\n}", "func (_PlasmaFramework *PlasmaFrameworkCallerSession) HasExitQueue(vaultId *big.Int, token common.Address) (bool, error) {\n\treturn _PlasmaFramework.Contract.HasExitQueue(&_PlasmaFramework.CallOpts, vaultId, token)\n}", "func (*OpenconfigQos_Qos_Queues_Queue_Wred) IsYANGGoStruct() {}" ]
[ "0.62684894", "0.6182187", "0.6036427", "0.5732854", "0.55811864", "0.5505796", "0.5485279", "0.54428786", "0.535559", "0.5352854", "0.52992433", "0.5274927", "0.5233722", "0.5213937", "0.5182979", "0.51760995", "0.51753753", "0.51179737", "0.510792", "0.5093728", "0.50886714", "0.5077195", "0.5077019", "0.5073604", "0.5070931", "0.5055009", "0.50076914", "0.49961957", "0.49958527", "0.4994109", "0.49924216", "0.4980261", "0.4964561", "0.4950706", "0.49246693", "0.49225292", "0.49120596", "0.48996034", "0.48967153", "0.48941848", "0.48781726", "0.48721504", "0.4869644", "0.48496425", "0.48418292", "0.48400497", "0.48394054", "0.483484", "0.48331857", "0.48250756", "0.48206323", "0.4805135", "0.47896424", "0.47882307", "0.47721645", "0.47683075", "0.47668794", "0.47585508", "0.47560215", "0.474742", "0.47449997", "0.4740495", "0.4736876", "0.4723869", "0.46963224", "0.46940932", "0.46837297", "0.4681356", "0.46779615", "0.4663454", "0.4651283", "0.46473032", "0.46464393", "0.46454248", "0.46380576", "0.46341732", "0.46036628", "0.45887274", "0.45851693", "0.45845893", "0.45833793", "0.45824075", "0.4581199", "0.45500588", "0.45257246", "0.450709", "0.45051086", "0.45019165", "0.44997787", "0.44928822", "0.44921127", "0.44908398", "0.44847196", "0.44809794", "0.44780758", "0.44757226", "0.4475278", "0.44647294", "0.44577336", "0.44524524" ]
0.8438502
0
GetRemoteQueues returns the RemoteQueues field value
GetRemoteQueues возвращает значение поля RemoteQueues
func (o *QueueManager) GetRemoteQueues() []RemoteQueue { if o == nil { var ret []RemoteQueue return ret } return o.RemoteQueues }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (o *QueueManager) GetRemoteQueuesOk() (*[]RemoteQueue, bool) {\n\tif o == nil {\n\t\treturn nil, false\n\t}\n\treturn &o.RemoteQueues, true\n}", "func (o *QueueManager) SetRemoteQueues(v []RemoteQueue) {\n\to.RemoteQueues = v\n}", "func GetQueues(c *gin.Context) {\n\t//TODO: create a while both back and front until value is != nil\n\tsize := len(queue)\n\tlog.Printf(\"squeue: %v\", queue)\n\tif size == 0 {\n\t\tc.JSON(http.StatusNotFound, gin.H{\n\t\t\t\"msg\": \"queue don't have any item!\",\n\t\t})\n\t\treturn\n\t}\n\tc.JSON(http.StatusOK, gin.H{\n\t\t\"queues\": queue,\n\t})\n\tlog.Printf(\"equeue: %v\", queue)\n}", "func (gores *Gores) Queues() []string {\n\tqueues := make([]string, 0)\n\n\tconn := gores.pool.Get()\n\tdefer conn.Close()\n\n\tdata, _ := conn.Do(\"SMEMBERS\", watchedQueues)\n\tfor _, q := range data.([]interface{}) {\n\t\tqueues = append(queues, string(q.([]byte)))\n\t}\n\n\treturn queues\n}", "func GetRemoteHosts() []string {\r\n\tret := make([]string, 0)\r\n\r\n\tmutex.RLock()\r\n\tdefer mutex.RUnlock()\r\n\r\n\tnodeKey := hex.EncodeToString(GetNodePubKey())\r\n\tfor pubKey, item := range nodes {\r\n\t\tif pubKey != nodeKey && !item.Stopped {\r\n\t\t\tret = append(ret, item.TCPAddress)\r\n\t\t}\r\n\t}\r\n\treturn ret\r\n}", "func (i *Inspector) Queues() ([]string, error) {\n\treturn i.rdb.AllQueues()\n}", "func GetAvailableQueues() ([]string, error) {\n\tclient := &http.Client{}\n\n\tres, err := client.Get(Host + \"/queues\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\trespBody := res.Body\n\tdefer respBody.Close()\n\n\tavailableQueues := struct {\n\t\tQueues []string `json:\"queues\"`\n\t}{}\n\tif err := json.NewDecoder(respBody).Decode(&availableQueues); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn availableQueues.Queues, nil\n}", "func (p *Project) Queues() (*[]Queue, error) {\n qs := make([]Queue, 0)\n err := Mongo.Get(\"queue\", bson.M{\"project\": p.ID}, MaxQueuesPerProject, &qs)\n return &qs, err\n}", "func (a 
*Client) GetMsgVpnQueues(params *GetMsgVpnQueuesParams, authInfo runtime.ClientAuthInfoWriter) (*GetMsgVpnQueuesOK, error) {\n\t// TODO: Validate the params before sending\n\tif params == nil {\n\t\tparams = NewGetMsgVpnQueuesParams()\n\t}\n\n\tresult, err := a.transport.Submit(&runtime.ClientOperation{\n\t\tID: \"getMsgVpnQueues\",\n\t\tMethod: \"GET\",\n\t\tPathPattern: \"/msgVpns/{msgVpnName}/queues\",\n\t\tProducesMediaTypes: []string{\"application/json\"},\n\t\tConsumesMediaTypes: []string{\"application/json\"},\n\t\tSchemes: []string{\"http\", \"https\"},\n\t\tParams: params,\n\t\tReader: &GetMsgVpnQueuesReader{formats: a.formats},\n\t\tAuthInfo: authInfo,\n\t\tContext: params.Context,\n\t\tClient: params.HTTPClient,\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn result.(*GetMsgVpnQueuesOK), nil\n\n}", "func GetRemoteServers() ([]*remoteServer, error) {\n\ts, err := getStorage()\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif s.RemoteServers == nil {\n\t\treturn make([]*remoteServer, 0), nil\n\t}\n\n\treturn s.RemoteServers, nil\n}", "func (storage *SrvStorage) GetVhostQueues(vhost string) []*queue.Queue {\n\tvar queues []*queue.Queue\n\tstorage.db.Iterate(\n\t\tfunc(key []byte, value []byte) {\n\t\t\tif !bytes.HasPrefix(key, []byte(queuePrefix)) || getVhostFromKey(string(key)) != vhost {\n\t\t\t\treturn\n\t\t\t}\n\t\t\tq := &queue.Queue{}\n\t\t\tq.Unmarshal(value, storage.protoVersion)\n\t\t\tqueues = append(queues, q)\n\t\t},\n\t)\n\n\treturn queues\n}", "func (a *Client) GetMsgVpnJndiQueues(params *GetMsgVpnJndiQueuesParams, authInfo runtime.ClientAuthInfoWriter) (*GetMsgVpnJndiQueuesOK, error) {\n\t// TODO: Validate the params before sending\n\tif params == nil {\n\t\tparams = NewGetMsgVpnJndiQueuesParams()\n\t}\n\n\tresult, err := a.transport.Submit(&runtime.ClientOperation{\n\t\tID: \"getMsgVpnJndiQueues\",\n\t\tMethod: \"GET\",\n\t\tPathPattern: \"/msgVpns/{msgVpnName}/jndiQueues\",\n\t\tProducesMediaTypes: 
[]string{\"application/json\"},\n\t\tConsumesMediaTypes: []string{\"application/json\"},\n\t\tSchemes: []string{\"http\", \"https\"},\n\t\tParams: params,\n\t\tReader: &GetMsgVpnJndiQueuesReader{formats: a.formats},\n\t\tAuthInfo: authInfo,\n\t\tContext: params.Context,\n\t\tClient: params.HTTPClient,\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn result.(*GetMsgVpnJndiQueuesOK), nil\n\n}", "func (bs *BeanstalkdConnectionPool) ListQueues() (queueNames []string, err error) {\n\tqueueNames, err = bs.getGlobalConnect().ListTubes()\n\treturn\n}", "func listQueues(ENV string) []string {\n \t// Using the SDK's default configuration, loading additional config\n\t// and credentials values from the environment variables, shared\n\t// credentials, and shared configuration files\n\n\tsess, err := session.NewSession(&aws.Config{\n\t Region: aws.String(\"us-east-1\")},\n\t)\n\n // Create a SQS service client.\n svc := sqs.New(sess)\n\n\t//have to create a session object first\n\toutput, err := svc.ListQueues(&sqs.ListQueuesInput{\n\t QueueNamePrefix: aws.String(ENV),\n })\n\tif err != nil { panic(err) }\n\n\tqueues := output.QueueUrls\n\tfinal_queues := []string{}\n\n\tfor _, i := range queues {\n\t fmt.Println(string(*i))\n\t final_queues = append(final_queues, *i)\n }\n\treturn final_queues\n}", "func (b *backend) Queues(ctx context.Context, qq *entroq.QueuesQuery) (map[string]int, error) {\n\tresp, err := pb.NewEntroQClient(b.conn).Queues(ctx, &pb.QueuesRequest{\n\t\tMatchPrefix: qq.MatchPrefix,\n\t\tMatchExact: qq.MatchExact,\n\t\tLimit: int32(qq.Limit),\n\t})\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"grpc queues: %w\", unpackGRPCError(err))\n\t}\n\tqs := make(map[string]int)\n\tfor _, q := range resp.Queues {\n\t\tqs[q.Name] = int(q.NumTasks)\n\t}\n\treturn qs, nil\n}", "func (t *TopicCache) GetQueue(projectName, serviceName string) []string {\n\tt.RLock()\n\tdefer t.RUnlock()\n\n\tif len(t.inQueue[projectName+serviceName]) >= 100 {\n\t\treturn 
t.inQueue[projectName+serviceName][:99]\n\t}\n\n\treturn t.inQueue[projectName+serviceName]\n}", "func (s *API) ListQueues(w http.ResponseWriter, req *http.Request) {\n\tlog.Debug(\"ListQueues\")\n\n\tqueueNamePrefix := req.FormValue(\"QueueNamePrefix\")\n\tvar queues []string\n\tfor k, v := range s.sqs.queues {\n\t\tif strings.HasPrefix(k, queueNamePrefix) {\n\t\t\tqueues = append(queues, v.url)\n\t\t}\n\t}\n\n\tresponse := ListQueuesResponse{\n\t\tResult: ListQueuesResult{queues},\n\t\tMetaData: ResponseMetaData{\"00000000-0000-0000-0000-000000000000\"},\n\t}\n\n\tw.Header().Set(\"Content-Type\", \"application/xml\")\n\tenc := xml.NewEncoder(w)\n\tenc.Indent(\" \", \" \")\n\tif err := enc.Encode(response); err != nil {\n\t\tlog.Errorf(\"error: %s\", err)\n\t}\n}", "func getServerQueue() (err error) {\n\tserverData := make([]byte, 256)\n\tserverConnection, err := net.Dial(\"udp\", ServerAddress)\n\tif err != nil {\n\t\treturn err\n\t} else {\n\t\tdefer serverConnection.Close()\n\t}\n\n\t// UDP voodoo to get server info -- https://github.com/LiquidObsidian/fivereborn-query/blob/master/index.js#L54\n\tfmt.Fprintf(serverConnection, \"\\xFF\\xFF\\xFF\\xFFgetinfo f\")\n\t_, err = bufio.NewReader(serverConnection).Read(serverData)\n\n\tif err == nil {\n\t\tserverData := bytes.Split(serverData, []byte(\"\\n\"))\n\t\tserverDetails := bytes.Split(serverData[1], []byte(\"\\\\\"))\n\t\tserverQueue := bytes.FieldsFunc(serverDetails[12], func(c rune) bool { return c == '[' || c == ']' })\n\n\t\tcurrentPlayerValues, _ := strconv.ParseInt(string(serverDetails[4]), 0, 64)\n\t\tcurrentserverQueueValues, _ := strconv.ParseInt(string(serverQueue[0]), 0, 64)\n\t\tServerDetails.ServerQueue.CurrentPlayers = currentPlayerValues\n\n\t\tif currentserverQueueValues >= 1 {\n\t\t\tServerDetails.ServerQueue.CurrentQueue = currentserverQueueValues\n\t\t}\n\t} else {\n\t\treturn err\n\t}\n\n\treturn\n}", "func (m *AudioRoutingGroup) GetReceivers()([]string) {\n val, err := 
m.GetBackingStore().Get(\"receivers\")\n if err != nil {\n panic(err)\n }\n if val != nil {\n return val.([]string)\n }\n return nil\n}", "func (svc *AdminBuildService) GetQueue(opt *GetQueueOptions) (*[]library.BuildQueue, *Response, error) {\n\t// set the API endpoint path we send the request to\n\tu := \"/api/v1/admin/builds/queue\"\n\n\t// add optional arguments if supplied\n\tu, err := addOptions(u, opt)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\t// BuildQueue type we want to return\n\tv := new([]library.BuildQueue)\n\n\tresp, err := svc.client.Call(\"GET\", u, nil, v)\n\n\treturn v, resp, err\n}", "func (base Base) ListRemote() (result []string, err error) {\n\treturn\n}", "func RemoteBucketList(remoteURL string) ([]couchbase.BucketInfo, error) {\n\tbucketInfosObj, err := simple_utils.ExecWithTimeout2(remoteBucketList, remoteURL, base.DefaultHttpTimeout, logger_utils)\n\tif bucketInfosObj != nil {\n\t\treturn bucketInfosObj.([]couchbase.BucketInfo), err\n\t} else {\n\t\treturn nil, err\n\t}\n}", "func (p *Process) CmdGetQueue(pac teoapi.Packet) (err error) {\n\tdata := pac.RemoveTrailingZero(pac.Data())\n\trequest := cdb.KeyValue{Cmd: pac.Cmd()}\n\tif err = request.UnmarshalText(data); err != nil {\n\t\treturn\n\t}\n\t// Return only Value for text requests and all fields for json\n\tresponce := request\n\tif responce.Value, err = p.tcdb.GetQueue(request.Key); err != nil {\n\t\treturn\n\t} else if !request.RequestInJSON {\n\t\t_, err = p.tcdb.con.SendAnswer(pac, pac.Cmd(), responce.Value)\n\t} else if retdata, err := responce.MarshalText(); err == nil {\n\t\t_, err = p.tcdb.con.SendAnswer(pac, pac.Cmd(), retdata)\n\t}\n\treturn\n}", "func remoteBucketList(remoteURLObj interface{}) (interface{}, error) {\n\tremoteURL := remoteURLObj.(string)\n\treturn couchbase.GetBucketList(remoteURL)\n}", "func (m SQSMonitor) receiveQueueMessages(qURL string) ([]*sqs.Message, error) {\n\tresult, err := 
m.SQS.ReceiveMessage(&sqs.ReceiveMessageInput{\n\t\tAttributeNames: []*string{\n\t\t\taws.String(sqs.MessageSystemAttributeNameSentTimestamp),\n\t\t},\n\t\tMessageAttributeNames: []*string{\n\t\t\taws.String(sqs.QueueAttributeNameAll),\n\t\t},\n\t\tQueueUrl: &qURL,\n\t\tMaxNumberOfMessages: aws.Int64(10),\n\t\tVisibilityTimeout: aws.Int64(20), // 20 seconds\n\t\tWaitTimeSeconds: aws.Int64(20), // Max long polling\n\t})\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn result.Messages, nil\n}", "func (connector *DbConnector) GetRemoteTriggersToCheck(count int) ([]string, error) {\n\treturn connector.getTriggersToCheck(remoteTriggersToCheckKey, count)\n}", "func (c *restClient) ListQueues(ctx context.Context, req *cloudtaskspb.ListQueuesRequest, opts ...gax.CallOption) *QueueIterator {\n\tit := &QueueIterator{}\n\treq = proto.Clone(req).(*cloudtaskspb.ListQueuesRequest)\n\tunm := protojson.UnmarshalOptions{AllowPartial: true, DiscardUnknown: true}\n\tit.InternalFetch = func(pageSize int, pageToken string) ([]*cloudtaskspb.Queue, string, error) {\n\t\tresp := &cloudtaskspb.ListQueuesResponse{}\n\t\tif pageToken != \"\" {\n\t\t\treq.PageToken = pageToken\n\t\t}\n\t\tif pageSize > math.MaxInt32 {\n\t\t\treq.PageSize = math.MaxInt32\n\t\t} else if pageSize != 0 {\n\t\t\treq.PageSize = int32(pageSize)\n\t\t}\n\t\tbaseUrl, err := url.Parse(c.endpoint)\n\t\tif err != nil {\n\t\t\treturn nil, \"\", err\n\t\t}\n\t\tbaseUrl.Path += fmt.Sprintf(\"/v2beta3/%v/queues\", req.GetParent())\n\n\t\tparams := url.Values{}\n\t\tparams.Add(\"$alt\", \"json;enum-encoding=int\")\n\t\tif req.GetFilter() != \"\" {\n\t\t\tparams.Add(\"filter\", fmt.Sprintf(\"%v\", req.GetFilter()))\n\t\t}\n\t\tif req.GetPageSize() != 0 {\n\t\t\tparams.Add(\"pageSize\", fmt.Sprintf(\"%v\", req.GetPageSize()))\n\t\t}\n\t\tif req.GetPageToken() != \"\" {\n\t\t\tparams.Add(\"pageToken\", fmt.Sprintf(\"%v\", req.GetPageToken()))\n\t\t}\n\t\tif req.GetReadMask() != nil {\n\t\t\treadMask, err := 
protojson.Marshal(req.GetReadMask())\n\t\t\tif err != nil {\n\t\t\t\treturn nil, \"\", err\n\t\t\t}\n\t\t\tparams.Add(\"readMask\", string(readMask[1:len(readMask)-1]))\n\t\t}\n\n\t\tbaseUrl.RawQuery = params.Encode()\n\n\t\t// Build HTTP headers from client and context metadata.\n\t\thds := append(c.xGoogHeaders, \"Content-Type\", \"application/json\")\n\t\theaders := gax.BuildHeaders(ctx, hds...)\n\t\te := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {\n\t\t\tif settings.Path != \"\" {\n\t\t\t\tbaseUrl.Path = settings.Path\n\t\t\t}\n\t\t\thttpReq, err := http.NewRequest(\"GET\", baseUrl.String(), nil)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\thttpReq.Header = headers\n\n\t\t\thttpRsp, err := c.httpClient.Do(httpReq)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tdefer httpRsp.Body.Close()\n\n\t\t\tif err = googleapi.CheckResponse(httpRsp); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\tbuf, err := io.ReadAll(httpRsp.Body)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\tif err := unm.Unmarshal(buf, resp); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\treturn nil\n\t\t}, opts...)\n\t\tif e != nil {\n\t\t\treturn nil, \"\", e\n\t\t}\n\t\tit.Response = resp\n\t\treturn resp.GetQueues(), resp.GetNextPageToken(), nil\n\t}\n\n\tfetch := func(pageSize int, pageToken string) (string, error) {\n\t\titems, nextPageToken, err := it.InternalFetch(pageSize, pageToken)\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t\tit.items = append(it.items, items...)\n\t\treturn nextPageToken, nil\n\t}\n\n\tit.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf)\n\tit.pageInfo.MaxSize = int(req.GetPageSize())\n\tit.pageInfo.Token = req.GetPageToken()\n\n\treturn it\n}", "func (client *Client) GetQueueURL(name string) (string, error) {\n\tvar parsedResponse GetQueueURLResult\n\turl := NewGetQueueURLRequest(client.EndPointURL, name).URL()\n\n\tresp, err := client.Get(url)\n\tif err != 
nil {\n\t\treturn \"\", err\n\t}\n\tdefer resp.Body.Close()\n\n\tbody, err := ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tif resp.StatusCode != 200 {\n\t\treturn \"\", errors.New(string(body))\n\t}\n\n\terr = xml.Unmarshal(body, &parsedResponse)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\treturn parsedResponse.QueueURL, nil\n}", "func (b *backend) Queues(ctx context.Context, qq *entroq.QueuesQuery) (map[string]int, error) {\n\tdefer un(lock(b))\n\n\tqs := make(map[string]int)\n\tfor q, items := range b.heaps {\n\t\tif len(qq.MatchPrefix) != 0 || len(qq.MatchExact) != 0 {\n\t\t\tif !matchesPrefix(q, qq.MatchPrefix...) && !matchesExact(q, qq.MatchExact...) {\n\t\t\t\t// no match\n\t\t\t\tcontinue\n\t\t\t}\n\t\t}\n\t\tqs[q] = items.Len()\n\t\tif qq.Limit > 0 && len(qs) >= qq.Limit {\n\t\t\tbreak\n\t\t}\n\t}\n\treturn qs, nil\n}", "func (q *Queue) GetQueue() []types.Event {\n\treturn q.Queue\n}", "func (u *Unpackerr) getSonarrQueue() {\n\tfor _, server := range u.Sonarr {\n\t\tif server.APIKey == \"\" {\n\t\t\tu.Debugf(\"Sonarr (%s): skipped, no API key\", server.URL)\n\n\t\t\tcontinue\n\t\t}\n\n\t\tqueue, err := server.GetQueue(DefaultQueuePageSize, 1)\n\t\tif err != nil {\n\t\t\tu.Printf(\"[ERROR] Sonarr (%s): %v\", server.URL, err)\n\n\t\t\treturn\n\t\t}\n\n\t\t// Only update if there was not an error fetching.\n\t\tserver.Queue = queue\n\t\tu.Printf(\"[Sonarr] Updated (%s): %d Items Queued\", server.URL, len(queue.Records))\n\t}\n}", "func (b *Buckets) RemoteBuckets(ctx context.Context, id thread.ID) (list []Info, err error) {\n\tctx = b.Context(ctx)\n\tvar threads []cmd.Thread\n\tif id.Defined() {\n\t\tthreads = []cmd.Thread{{ID: id}}\n\t} else {\n\t\tthreads = b.clients.ListThreads(ctx, true)\n\t}\n\tfor _, t := range threads {\n\t\tctx = common.NewThreadIDContext(ctx, t.ID)\n\t\tres, err := b.clients.Buckets.List(ctx)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tfor _, root := range res.Roots {\n\t\t\tinfo, err 
:= pbRootToInfo(root)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tlist = append(list, info)\n\t\t}\n\t}\n\treturn list, nil\n}", "func (o *VnicEthAdapterPolicyInventory) GetRxQueueSettings() VnicEthRxQueueSettings {\n\tif o == nil || o.RxQueueSettings.Get() == nil {\n\t\tvar ret VnicEthRxQueueSettings\n\t\treturn ret\n\t}\n\treturn *o.RxQueueSettings.Get()\n}", "func (this *Queue) GetQueue() (val Mensaje, err error) {\n\t// Primero determina si la cola está vacía\n\tif this.rear == this.front {\n\t\treturn Mensaje{0, \"0\", \"0\"}, errors.New(\"Cola de Mensajes Vacia\")\n\t}\n\tthis.front++\n\tval = this.array[this.front]\n\treturn val, err\n}", "func (p *Pool) GetQueue() chan ThreeDPrinter {\n\treturn p.printers\n}", "func getMessages(svc *sqs.SQS, queue string) (*sqs.ReceiveMessageOutput, error) {\n\tparams := &sqs.ReceiveMessageInput{\n\t\tQueueUrl: &queue,\n\t\tMaxNumberOfMessages: aws.Int64(maxNumMessagesToFetch),\n\t\tVisibilityTimeout: aws.Int64(defaultVisibilityTimeout),\n\t\tWaitTimeSeconds: aws.Int64(longPollTimeSeconds),\n\t\tMessageAttributeNames: requiredAttributes,\n\t}\n\tlogging.Debug(\"Polling SQS queue for messages.\", nil)\n\tresp, err := svc.ReceiveMessage(params)\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resp, nil\n}", "func (a *Client) GetMsgVpnQueue(params *GetMsgVpnQueueParams, authInfo runtime.ClientAuthInfoWriter) (*GetMsgVpnQueueOK, error) {\n\t// TODO: Validate the params before sending\n\tif params == nil {\n\t\tparams = NewGetMsgVpnQueueParams()\n\t}\n\n\tresult, err := a.transport.Submit(&runtime.ClientOperation{\n\t\tID: \"getMsgVpnQueue\",\n\t\tMethod: \"GET\",\n\t\tPathPattern: \"/msgVpns/{msgVpnName}/queues/{queueName}\",\n\t\tProducesMediaTypes: []string{\"application/json\"},\n\t\tConsumesMediaTypes: []string{\"application/json\"},\n\t\tSchemes: []string{\"http\", \"https\"},\n\t\tParams: params,\n\t\tReader: &GetMsgVpnQueueReader{formats: a.formats},\n\t\tAuthInfo: authInfo,\n\t\tContext: 
params.Context,\n\t\tClient: params.HTTPClient,\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn result.(*GetMsgVpnQueueOK), nil\n\n}", "func GetDefaultRemoteHosts() []string {\r\n\tret := make([]string, 0)\r\n\r\n\tmutex.RLock()\r\n\tdefer mutex.RUnlock()\r\n\r\n\tnodeKey := hex.EncodeToString(GetNodePubKey())\r\n\tfor pubKey, item := range nodes {\r\n\t\tif pubKey != nodeKey && !item.Stopped {\r\n\t\t\tret = append(ret, item.TCPAddress)\r\n\t\t}\r\n\t}\r\n\tif len(ret) == 0 && len(conf.Config.NodesAddr) > 0 {\r\n\t\tret = append(ret, conf.Config.NodesAddr[0])\r\n\t}\r\n\treturn ret\r\n}", "func (o *NSQProducer) GetRemoteAddress() string {\n\tif o == nil || o.RemoteAddress == nil {\n\t\tvar ret string\n\t\treturn ret\n\t}\n\treturn *o.RemoteAddress\n}", "func GetCachedRemoteRepos(artDetails *jfauth.ServiceDetails) (*[]string, error) {\n\tremoteRepos := []string{}\n\tstorageInfoGB := []RepoStorageUsedSpaceInfo{}\n\tresp, err := getHttpResp(artDetails, \"api/storageinfo\")\n\tif err != nil {\n\t\tjflog.Error(\"Failed to get http resp for api/storageinfo\")\n\t}\n\tStorageInfo := &StorageInfo{}\n\tif err := json.Unmarshal(resp, &StorageInfo); err != nil {\n\t\treturn &remoteRepos, err\n\t}\n\n\t// Gather repoType CACHE that has storage space > 1 GB\n\tfor _, r := range *&StorageInfo.RepoStorage {\n\t\tif r.RepoType == \"CACHE\" && strings.Contains(r.UsedSpace, \"GB\") {\n\t\t\tre := regexp.MustCompile(`[-]?\\d[\\d,]*[\\.]?[\\d{2}]*`)\n\t\t\tusedSpaceGB, err := strconv.ParseFloat(re.FindString(r.UsedSpace), 64)\n\t\t\tif err != nil {\n\t\t\t\tjflog.Error(\"Failed used space to float for repo %s\", r.Key)\n\t\t\t}\n\t\t\tstorageInfoGB = append(storageInfoGB, RepoStorageUsedSpaceInfo{r.Key, r.RepoType, r.FoldersCount, r.FilesCount, usedSpaceGB, r.PackageType})\n\n\t\t}\n\t}\n\n\tsort.Slice(storageInfoGB, func(i, j int) bool { return storageInfoGB[i].UsedSpaceGB > storageInfoGB[j].UsedSpaceGB })\n\n\t//for _, r := range storageInfoGB {\n\t//\tremoteRepos = 
append(remoteRepos, strings.ReplaceAll(r.Key, \"-cache\", \"\"))\n\t//}\n\tremoteRepos = append([]string{\"atlassian\"}, remoteRepos...)\n\tremoteRepos = append([]string{\"docker-bintray-io\"}, remoteRepos...)\n\treturn &remoteRepos, nil\n}", "func (s *SQSServer) pollQueues(pollctx, taskctx context.Context, queues []QueueConf) error {\n\tfor _, qconf := range queues {\n\t\tq, err := s.getQueue(pollctx, qconf)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treq := &sqs.GetQueueAttributesInput{\n\t\t\tAttributeNames: []types.QueueAttributeName{(\"VisibilityTimeout\")},\n\t\t\tQueueUrl: &q.url,\n\t\t}\n\t\tresp, err := s.sqsSrv(q.QueueConf).GetQueueAttributes(pollctx, req)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"Failed to get queue attributes for '%s' - %s\", q.Name, err.Error())\n\t\t}\n\t\tto := resp.Attributes[\"VisibilityTimeout\"]\n\t\tif to == \"\" {\n\t\t\treturn fmt.Errorf(\"No visibility timeout returned by SQS for queue '%s'\", q.Name)\n\t\t}\n\t\tvisTimeout, err := strconv.Atoi(to)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"Failed to convert visibility timeout from '%s' to int - '%s'\", to, err.Error())\n\t\t}\n\t\t// Each queue runs in a dedicated go routine.\n\t\tgo func(vt int32) {\n\t\t\ts.queuePollers.Add(1)\n\t\t\tdefer s.queuePollers.Done()\n\t\t\ts.run(pollctx, taskctx, q, vt)\n\t\t}(int32(visTimeout))\n\t}\n\n\treturn nil\n}", "func (o *QueueManager) GetAliasQueues() []AliasQueue {\n\tif o == nil {\n\t\tvar ret []AliasQueue\n\t\treturn ret\n\t}\n\n\treturn o.AliasQueues\n}", "func (m *MatchInfo) GetQueue(client *static.Client) (static.Queue, error) {\n\treturn client.GetQueue(m.QueueID)\n}", "func (svc *SQS) XGetQueueURL(ctx context.Context, queueName string) (queueURL string, err error) {\n\tresp, err := svc.GetQueueURL(ctx, GetQueueURLRequest{\n\t\tQueueName: queueName,\n\t})\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn resp.QueueURL, nil\n}", "func (cfg Config) GetRemoteHost() (remoteHost string) {\n\treturn 
cfg.RemoteHost\n}", "func (cfg *Config) MQServers() string {\n\treturn os.Getenv(\"MQ_SERVERS\")\n}", "func (qc *queueClient) rejectQueue(msgs []rpccapnp.Message) []rpccapnp.Message {\n\tqc.mu.Lock()\n\tfor {\n\t\tc := qc.pop()\n\t\tif w := c.which(); w == qcallRemoteCall {\n\t\t\tmsgs = c.a.reject(msgs, errQueueCallCancel)\n\t\t} else if w == qcallLocalCall {\n\t\t\tc.f.Reject(errQueueCallCancel)\n\t\t} else if w == qcallDisembargo {\n\t\t\tm := newDisembargoMessage(nil, rpccapnp.Disembargo_context_Which_receiverLoopback, c.embargoID)\n\t\t\td, _ := m.Disembargo()\n\t\t\td.SetTarget(c.embargoTarget)\n\t\t\tmsgs = append(msgs, m)\n\t\t} else {\n\t\t\tbreak\n\t\t}\n\t}\n\tqc.mu.Unlock()\n\treturn msgs\n}", "func (s *Filters) SetQueues(v []*string) *Filters {\n\ts.Queues = v\n\treturn s\n}", "func (s QueueSetSpy) Queues() map[DeploymentID]*R11nQueue {\n\tres := s.Called()\n\treturn res.Get(0).(map[DeploymentID]*R11nQueue)\n}", "func PopulateQueues(c *gin.Context) {\n\tif queue == nil {\n\t\tc.JSON(http.StatusNotFound, gin.H{\n\t\t\t\"msg\": \"queue doesn't exist, please create it!!!\",\n\t\t})\n\t\treturn\n\t}\n\tqueue = enqueue(queue, qMessage{\n\t\tUSER: \"roberto\",\n\t\tEMAIL: \"roberto@rr.com\",\n\t\tUUID: \"1\",\n\t\tMSG: \"lindo\",\n\t})\n\tqueue = enqueue(queue, qMessage{\n\t\tUSER: \"alex\",\n\t\tEMAIL: \"alex@rr.com\",\n\t\tUUID: \"2\",\n\t\tMSG: \"lindox\",\n\t})\n\tqueue = enqueue(queue, qMessage{\n\t\tUSER: \"ale\",\n\t\tEMAIL: \"ale@rr.com\",\n\t\tUUID: \"3\",\n\t\tMSG: \"linduxo\",\n\t})\n\tc.JSON(http.StatusOK, gin.H{\n\t\t\"msg\": queue,\n\t})\n}", "func GetQueued(sender types.Service) types.QueuedSender {\n\tqs := &queuedSender{\n\t\tsender: sender,\n\t}\n\treturn qs\n}", "func (s *rabbitMQScaler) GetMetrics(ctx context.Context, metricName string, metricSelector labels.Selector) ([]external_metrics.ExternalMetricValue, error) {\n\tmessages, publishRate, err := s.getQueueStatus()\n\tif err != nil {\n\t\treturn 
[]external_metrics.ExternalMetricValue{}, fmt.Errorf(\"error inspecting rabbitMQ: %s\", err)\n\t}\n\n\tvar metricValue resource.Quantity\n\tif s.metadata.mode == rabbitModeQueueLength {\n\t\tmetricValue = *resource.NewQuantity(int64(messages), resource.DecimalSI)\n\t} else {\n\t\tmetricValue = *resource.NewMilliQuantity(int64(publishRate*1000), resource.DecimalSI)\n\t}\n\n\tmetric := external_metrics.ExternalMetricValue{\n\t\tMetricName: metricName,\n\t\tValue: metricValue,\n\t\tTimestamp: metav1.Now(),\n\t}\n\n\treturn append([]external_metrics.ExternalMetricValue{}, metric), nil\n}", "func (a *Client) GetMsgVpnJndiQueue(params *GetMsgVpnJndiQueueParams, authInfo runtime.ClientAuthInfoWriter) (*GetMsgVpnJndiQueueOK, error) {\n\t// TODO: Validate the params before sending\n\tif params == nil {\n\t\tparams = NewGetMsgVpnJndiQueueParams()\n\t}\n\n\tresult, err := a.transport.Submit(&runtime.ClientOperation{\n\t\tID: \"getMsgVpnJndiQueue\",\n\t\tMethod: \"GET\",\n\t\tPathPattern: \"/msgVpns/{msgVpnName}/jndiQueues/{queueName}\",\n\t\tProducesMediaTypes: []string{\"application/json\"},\n\t\tConsumesMediaTypes: []string{\"application/json\"},\n\t\tSchemes: []string{\"http\", \"https\"},\n\t\tParams: params,\n\t\tReader: &GetMsgVpnJndiQueueReader{formats: a.formats},\n\t\tAuthInfo: authInfo,\n\t\tContext: params.Context,\n\t\tClient: params.HTTPClient,\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn result.(*GetMsgVpnJndiQueueOK), nil\n\n}", "func (s *UserDataFilters) SetQueues(v []*string) *UserDataFilters {\n\ts.Queues = v\n\treturn s\n}", "func (a *Client) GetMsgVpnQueueSubscriptions(params *GetMsgVpnQueueSubscriptionsParams, authInfo runtime.ClientAuthInfoWriter) (*GetMsgVpnQueueSubscriptionsOK, error) {\n\t// TODO: Validate the params before sending\n\tif params == nil {\n\t\tparams = NewGetMsgVpnQueueSubscriptionsParams()\n\t}\n\n\tresult, err := a.transport.Submit(&runtime.ClientOperation{\n\t\tID: \"getMsgVpnQueueSubscriptions\",\n\t\tMethod: 
\"GET\",\n\t\tPathPattern: \"/msgVpns/{msgVpnName}/queues/{queueName}/subscriptions\",\n\t\tProducesMediaTypes: []string{\"application/json\"},\n\t\tConsumesMediaTypes: []string{\"application/json\"},\n\t\tSchemes: []string{\"http\", \"https\"},\n\t\tParams: params,\n\t\tReader: &GetMsgVpnQueueSubscriptionsReader{formats: a.formats},\n\t\tAuthInfo: authInfo,\n\t\tContext: params.Context,\n\t\tClient: params.HTTPClient,\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn result.(*GetMsgVpnQueueSubscriptionsOK), nil\n\n}", "func (m *VpnConfiguration) GetServers()([]VpnServerable) {\n val, err := m.GetBackingStore().Get(\"servers\")\n if err != nil {\n panic(err)\n }\n if val != nil {\n return val.([]VpnServerable)\n }\n return nil\n}", "func (player *musicPlayer) getQueueInfo() ([]string, error) {\n\tplayer.Lock()\n\tdefer player.Unlock()\n\tif len(player.state.queue) == 0 {\n\t\treturn nil, errors.New(cannot_get_queue_info_msg)\n\t}\n\t//make a copy to the queue\n\tcopy := make([]string, 0, len(player.state.queue))\n\tfor _, el := range player.state.queue {\n\t\tcopy = append(copy, el)\n\t}\n\treturn copy, nil\n}", "func GetRemoteHost(remoteURL string) *string {\n\tvar remoteHostReference []string\n\tremoteHostReference = []string{\"github\", \"gitlab\", \"bitbucket\", \"azure\", \"codecommit\"}\n\n\tfor _, host := range remoteHostReference {\n\t\tif strings.Contains(remoteURL, host) {\n\t\t\treturn &host\n\t\t}\n\t}\n\treturn nil\n}", "func QueueRemoteWrite(req *gomemcached.MCRequest) {\n\n\tkey := req.Key\n\tnodeList := getVbucketNode(int(findShard(string(key))))\n\tnodes := strings.Split(nodeList, \";\")\n\n\tif len(nodes) < 1 {\n\t\tlog.Fatal(\"Nodelist is empty. 
Cannot proceed\")\n\t}\n\n\tif len(nodes) < 2 {\n\t\t//no replica\n\t\treturn\n\t}\n\n\tvar remoteNode string\n\t// figure out which is the remote host and queue to the write to that node\n\tfor _, node := range nodes {\n\t\tfound := false\n\t\thostname := strings.Split(node, \":\")\n\t\tfor _, ip := range ipList {\n\t\t\tif ip == hostname[0] {\n\t\t\t\tfound = true\n\t\t\t\tcontinue\n\t\t\t}\n\t\t}\n\t\tif found == false {\n\t\t\tremoteNode = node\n\t\t}\n\t}\n\n\tri := &repItem{host: remoteNode, req: req, opcode: OP_REP}\n\trepChan <- ri\n\treturn\n}", "func (cfg Config) GetRmqQueueConfig(queue string) RmqQueue {\n\treturn cfg.rmqQueueMap[queue]\n}", "func getQueueUrl(id string) (queueUrl string, retErr error) {\n\n\t//Creazione client DynamoDB\n\tsvc := dynamodb.New(common.Sess)\n\n\tresult, err := svc.GetItem(&dynamodb.GetItemInput{\n\t\tTableName: aws.String(subTableName),\n\t\tKey: map[string]*dynamodb.AttributeValue{\n\t\t\t\"SubID\": {\n\t\t\t\tS: aws.String(id),\n\t\t\t},\n\t\t},\n\t})\n\tif err != nil {\n\t\tcommon.Warning(\"[BROKER] Errore nel retreive del subscriber con ID: \" + id + \".\\n\" + err.Error())\n\t\treturn \"\", err\n\t}\n\n\titem := common.SubscriberEntry{}\n\n\terr = dynamodbattribute.UnmarshalMap(result.Item, &item)\n\tif err != nil {\n\t\tcommon.Warning(\"[BROKER] Errore nell'unmarshaling del risultato\")\n\t\treturn \"\", err\n\t}\n\tif item.SubID == \"\" {\n\t\tcommon.Warning(\"[BROKER] Nessun subscriber trovato con id \" + id)\n\t\treturn \"\", errors.New(\"no item found\")\n\t}\n\n\tcommon.Info(\"[BROKER] Subscriber trovato: \" + item.SubID + \"\\n\\t\" + item.QueueURL)\n\n\treturn item.QueueURL, nil\n}", "func (n *NetworkInterface) Get() (string, error) {\n\tn.mu.Lock()\n\tdefer n.mu.Unlock()\n\t//fmt.Println(\"qu len: \", len(n.Queue))\n\tif len(n.Queue) > 0 {\n\t\ttoReturn := n.Queue[0]\n\t\tn.Queue = n.Queue[1:]\n\t\treturn toReturn, nil\n\t}\n\treturn \"\", errors.New(\"Empty\")\n}", "func (h *Hospital) ConsumeQueues(ctx 
context.Context, t *testing.T) (events int, messages []string) {\n\tt.Helper()\n\treturn h.ConsumeQueuesWithLimit(ctx, t, -1, true)\n}", "func (multi_queue *MultiQueue) Pop(timeout int) (string, error) {\n\tq, err := multi_queue.SelectHealthyQueue()\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tconn := q.pooledConnection.Get()\n\tdefer conn.Close()\n\n\tr, err := redis.Strings(conn.Do(\"BRPOP\", multi_queue.key, timeout))\n\tif err == nil {\n\t\treturn r[1], nil\n\t} else {\n\t\tif err != redis.ErrNil {\n\t\t\tq.QueueError()\n\t\t}\n\t\treturn \"\", err\n\t}\n}", "func (o LookupQueueResultOutput) AppEngineHttpQueue() AppEngineHttpQueueResponseOutput {\n\treturn o.ApplyT(func(v LookupQueueResult) AppEngineHttpQueueResponse { return v.AppEngineHttpQueue }).(AppEngineHttpQueueResponseOutput)\n}", "func (o *Replication) GetRemoteBucketID() string {\n\tif o == nil {\n\t\tvar ret string\n\t\treturn ret\n\t}\n\n\treturn o.RemoteBucketID\n}", "func ListMatchmakingQueues(settings *playfab.Settings, postData *ListMatchmakingQueuesRequestModel, entityToken string) (*ListMatchmakingQueuesResultModel, error) {\n if entityToken == \"\" {\n return nil, playfab.NewCustomError(\"entityToken should not be an empty string\", playfab.ErrorGeneric)\n }\n b, errMarshal := json.Marshal(postData)\n if errMarshal != nil {\n return nil, playfab.NewCustomError(errMarshal.Error(), playfab.ErrorMarshal)\n }\n\n sourceMap, err := playfab.Request(settings, b, \"/Match/ListMatchmakingQueues\", \"X-EntityToken\", entityToken)\n if err != nil {\n return nil, err\n }\n \n result := &ListMatchmakingQueuesResultModel{}\n\n config := mapstructure.DecoderConfig{\n DecodeHook: playfab.StringToDateTimeHook,\n Result: result,\n }\n \n decoder, errDecoding := mapstructure.NewDecoder(&config)\n if errDecoding != nil {\n return nil, playfab.NewCustomError(errDecoding.Error(), playfab.ErrorDecoding)\n }\n \n errDecoding = decoder.Decode(sourceMap)\n if errDecoding != nil {\n return nil, 
playfab.NewCustomError(errDecoding.Error(), playfab.ErrorDecoding)\n }\n\n return result, nil\n}", "func (ClearTrans) GetQueue() string {\n\treturn \"cy_rubik_clearTrans\"\n}", "func (c *restClient) GetQueue(ctx context.Context, req *cloudtaskspb.GetQueueRequest, opts ...gax.CallOption) (*cloudtaskspb.Queue, error) {\n\tbaseUrl, err := url.Parse(c.endpoint)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tbaseUrl.Path += fmt.Sprintf(\"/v2beta3/%v\", req.GetName())\n\n\tparams := url.Values{}\n\tparams.Add(\"$alt\", \"json;enum-encoding=int\")\n\tif req.GetReadMask() != nil {\n\t\treadMask, err := protojson.Marshal(req.GetReadMask())\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tparams.Add(\"readMask\", string(readMask[1:len(readMask)-1]))\n\t}\n\n\tbaseUrl.RawQuery = params.Encode()\n\n\t// Build HTTP headers from client and context metadata.\n\thds := []string{\"x-goog-request-params\", fmt.Sprintf(\"%s=%v\", \"name\", url.QueryEscape(req.GetName()))}\n\n\thds = append(c.xGoogHeaders, hds...)\n\thds = append(hds, \"Content-Type\", \"application/json\")\n\theaders := gax.BuildHeaders(ctx, hds...)\n\topts = append((*c.CallOptions).GetQueue[0:len((*c.CallOptions).GetQueue):len((*c.CallOptions).GetQueue)], opts...)\n\tunm := protojson.UnmarshalOptions{AllowPartial: true, DiscardUnknown: true}\n\tresp := &cloudtaskspb.Queue{}\n\te := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {\n\t\tif settings.Path != \"\" {\n\t\t\tbaseUrl.Path = settings.Path\n\t\t}\n\t\thttpReq, err := http.NewRequest(\"GET\", baseUrl.String(), nil)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\thttpReq = httpReq.WithContext(ctx)\n\t\thttpReq.Header = headers\n\n\t\thttpRsp, err := c.httpClient.Do(httpReq)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tdefer httpRsp.Body.Close()\n\n\t\tif err = googleapi.CheckResponse(httpRsp); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tbuf, err := io.ReadAll(httpRsp.Body)\n\t\tif err != nil {\n\t\t\treturn 
err\n\t\t}\n\n\t\tif err := unm.Unmarshal(buf, resp); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\treturn nil\n\t}, opts...)\n\tif e != nil {\n\t\treturn nil, e\n\t}\n\treturn resp, nil\n}", "func (s *API) GetQueueURL(w http.ResponseWriter, req *http.Request) {\n\tlog.Debug(\"GetQueueURL\")\n\n\tqueueName := req.FormValue(\"QueueName\")\n\tqueue, ok := s.sqs.queues[queueName]\n\tif !ok {\n\t\tw.Header().Set(\"Content-Type\", \"application/xml\")\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\terror := ErrorResponse{\n\t\t\tError: ErrorResult{\n\t\t\t\tType: \"Not Found\",\n\t\t\t\tCode: \"AWS.SimpleQueueService.NonExistentQueue\",\n\t\t\t\tMessage: \"The specified queue does not exist for this wsdl version.\",\n\t\t\t},\n\t\t\tRequestId: \"00000000-0000-0000-0000-000000000000\",\n\t\t}\n\t\tenc := xml.NewEncoder(w)\n\t\tenc.Indent(\" \", \" \")\n\t\tif err := enc.Encode(error); err != nil {\n\t\t\tlog.Errorf(\"error: %s\", err)\n\t\t}\n\t\treturn\n\t}\n\n\tresponse := GetQueueURLResponse{\n\t\tResult: GetQueueURLResult{queue.url},\n\t\tMetaData: ResponseMetaData{\"00000000-0000-0000-0000-000000000000\"},\n\t}\n\n\tw.Header().Set(\"Content-Type\", \"application/xml\")\n\tenc := xml.NewEncoder(w)\n\tenc.Indent(\" \", \" \")\n\tif err := enc.Encode(response); err != nil {\n\t\tlog.Errorf(\"error: %s\", err)\n\t}\n\n}", "func groomQueues(queues *Queues) (err kv.Error) {\n\tfor qName, qDetails := range *queues {\n\t\t// If we have enough runners drop the queue as it needs nothing done to it\n\t\tif len(qDetails.NodeGroup) == 0 || qDetails.Running >= qDetails.Ready+qDetails.NotVisible {\n\t\t\tif logger.IsTrace() {\n\t\t\t\tlogger.Trace(\"queue already handled\", \"queue\", qName, \"stack\", stack.Trace().TrimRuntime())\n\t\t\t}\n\t\t\tdelete(*queues, qName)\n\t\t}\n\t}\n\treturn nil\n}", "func GetQueueURL(c context.Context, api SQSReceiveMessageAPI, input *sqs.GetQueueUrlInput) (*sqs.GetQueueUrlOutput, error) {\n\treturn api.GetQueueUrl(c, input)\n}", "func (r *RPC) 
GetQueueClient() queue.Client {\r\n\treturn r.c\r\n}", "func (s *ItemQueue) GetMessages() []int {\n\tvar messages []int\n\ts.lock.Lock()\n\n\tfor i := 0; i < len(s.items); i++ {\n\t\t\tmessages[i] = s.items[i].ID\n\t}\n\n\ts.lock.Unlock()\n\treturn messages\n}", "func (c *Config) GetRemoteHost() string {\n\tif c.sandboxMode {\n\t\treturn fmt.Sprintf(\"sandbox.payfast.co.za\")\n\t}\n\n\treturn fmt.Sprintf(\"www.payfast.co.za\")\n}", "func receiveQueueMessage(receiveQueue string) (messages []*sqs.Message, retErr error) {\n\n\tsvc := sqs.New(common.Sess)\n\tvar messagesList []*sqs.Message\n\n\tresult, err := svc.ReceiveMessage(&sqs.ReceiveMessageInput{\n\t\tAttributeNames: []*string{\n\t\t\taws.String(sqs.MessageSystemAttributeNameSentTimestamp),\n\t\t},\n\t\tMessageAttributeNames: []*string{\n\t\t\taws.String(sqs.QueueAttributeNameAll),\n\t\t},\n\t\tWaitTimeSeconds: aws.Int64(common.Config.PollingTime), \t//Long polling\n\t\tMaxNumberOfMessages: aws.Int64(common.Config.MaxRcvMessage),\n\t\tQueueUrl: &receiveQueue,\n\t})\n\n\n\tif err != nil {\n\t\tcommon.Warning(\"[BROKER] Errore nell'ottenimento del messaggio. \" + err.Error())\n\t\treturn nil, err\n\t}\n\tif len(result.Messages) == 0 {\n\t\tcommon.Info(\"[BROKER] Nessun messaggio ricevuto\")\n\t\treturn\n\t} else {\n\t\tsendLogMessage(\"Messaggi ricevuti: \" + strconv.Itoa(len(result.Messages)))\n\n\t\tfor _, mess := range result.Messages {\n\n\t\t\terr = sendMessage(*mess)\n\n\t\t\tif err != nil {\n\t\t\t\tcommon.Warning(\"[BROKER] Errore nell'invio del messaggio dal broker. \" + err.Error())\n\t\t\t}\n\n\t\t\t//Messaggio eliminato solo dopoche viene mandato\n\t\t\t_, err := svc.DeleteMessage(&sqs.DeleteMessageInput{\n\t\t\t\tQueueUrl: &receiveQueue,\n\t\t\t\tReceiptHandle: mess.ReceiptHandle,\n\t\t\t})\n\t\t\tif err != nil {\n\t\t\t\tcommon.Info(\"[BROKER] Errore nell'eliminazione del messaggio. 
\" + err.Error())\n\t\t\t} else {\n\t\t\t\tmessagesList = append(messagesList, mess)\n\t\t\t\tcommon.Info(\"[BROKER] Messaggio eliminato con successo\")\n\t\t\t}\n\n\t\t}\n\n\t}\n\n\treturn messagesList, nil\n\n}", "func GetFromQueue(queue string) ([]byte, error) {\n\treturn cache.Get(queue)\n}", "func (d *Device) GetQueue(qf *QueueFamily) *Queue {\n\n\tvar vkq vk.Queue\n\n\tvk.GetDeviceQueue(d.VKDevice, uint32(qf.Index), 0, &vkq)\n\n\tvar queue Queue\n\tqueue.QueueFamily = qf\n\tqueue.Device = d\n\tqueue.VKQueue = vkq\n\n\treturn &queue\n}", "func (c *QueueClient) Get(ctx context.Context, id int) (*Queue, error) {\n\treturn c.Query().Where(queue.ID(id)).Only(ctx)\n}", "func (c *apiConsumers) TeamsQueue() <-chan *TeamDTO {\n\treturn c.queue\n}", "func (c *connection) getQueueLength(inputs input) (int, error) {\n\n\tif inputs.limit > 0 {\n\t\treturn inputs.limit, nil\n\t}\n\n\tqLength, err := redis.Int(conn.redis.Do(\"LLEN\", inputs.source))\n\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\n\tif qLength < 1 {\n\t\treturn 0, fmt.Errorf(\"Source queue is empty\")\n\t}\n\n\treturn qLength, nil\n}", "func (client DeploymentsClient) GetRemoteDebuggingConfigSender(req *http.Request) (*http.Response, error) {\n\treturn client.Send(req, azure.DoRetryWithRegistration(client.Client))\n}", "func (wr *WorkerRef) GetRemote(ctx context.Context, createIfNeeded bool, compressionType compression.Type, g session.Group) (*solver.Remote, error) {\n\tif w, ok := wr.Worker.(interface {\n\t\tGetRemote(context.Context, cache.ImmutableRef, bool, compression.Type, session.Group) (*solver.Remote, error)\n\t}); ok {\n\t\treturn w.GetRemote(ctx, wr.ImmutableRef, createIfNeeded, compressionType, g)\n\t}\n\treturn wr.ImmutableRef.GetRemote(ctx, createIfNeeded, compressionType, g)\n}", "func printQueue(q *Queue) {\n\tfmt.Println(q.values)\n}", "func (s *server) GetBroadcasts(overhead, limit int) [][]byte {\n\treturn s.queue.GetBroadcasts(overhead, limit)\n}", "func (b *Backend) 
GetLeagueByQueue(league string, queue string) (*riotclient.LeagueListDTO, error) {\n\treturn nil, fmt.Errorf(\"Not implemented\")\n}", "func GetRemoteAddressSet(ctx *gin.Context) (remoteIp, remotePort string) {\n\tremoteIp, remotePort = \"0.0.0.0\", \"0\"\n\n\tif ctx == nil || ctx.Request == nil {\n\t\treturn\n\t}\n\n\tvar err error\n\tif remoteIp, remotePort, err = net.SplitHostPort(ctx.Request.RemoteAddr); err != nil {\n\t\treturn\n\t}\n\n\tforwardedRemoteIp := ctx.GetHeader(\"x-forwarded-for\")\n\n\t// Deal with forwarded remote ip\n\tif len(forwardedRemoteIp) > 0 {\n\t\tif forwardedRemoteIp == \"::1\" {\n\t\t\tforwardedRemoteIp = \"localhost\"\n\t\t}\n\n\t\tremoteIp = forwardedRemoteIp\n\t}\n\n\tif remoteIp == \"::1\" {\n\t\tremoteIp = \"localhost\"\n\t}\n\n\treturn remoteIp, remotePort\n}", "func (h *HTTPClient) Dequeue(ctx context.Context, token, projID, qName string, num int, timeout Timeout, wait Wait, delete bool) ([]DequeuedMessage, error) {\n\tif !timeoutInRange(timeout) {\n\t\treturn nil, ErrTimeoutOutOfRange\n\t}\n\tif !waitInRange(wait) {\n\t\treturn nil, ErrWaitOutOfRange\n\t}\n\n\tbody := &bytes.Buffer{}\n\tif err := json.NewEncoder(body).Encode(dequeueReq{Num: num, Timeout: int(timeout), Wait: int(wait), Delete: delete}); err != nil {\n\t\treturn nil, err\n\t}\n\treq, err := h.newReq(\"POST\", token, projID, fmt.Sprintf(\"queues/%s/reservations\", qName), body)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tret := new(dequeueResp)\n\tdoFunc := func(resp *http.Response, err error) error {\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tdefer resp.Body.Close()\n\t\tif err := json.NewDecoder(resp.Body).Decode(ret); err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t}\n\tif err := gorion.HTTPDo(ctx, h.client, h.transport, req, doFunc); err != nil {\n\t\treturn nil, err\n\t}\n\treturn ret.Messages, nil\n}", "func radioGetQueueHandler(c echo.Context) error {\n\tlinks := radio.queue\n\tuserID := getUserIDFromContext(c)\n\tvotes := 
service.GetVotesForUser(links, userID)\n\n\tfor i, l := range links {\n\t\tif vote, ok := votes[l.LinkID]; ok {\n\t\t\tlinks[i].MyVote = vote\n\t\t} else {\n\t\t\tlinks[i].MyVote = 0\n\t\t}\n\t}\n\n\treturn c.JSON(http.StatusOK, echo.Map{\n\t\t\"links\": links,\n\t\t\"votes\": votes,\n\t})\n}", "func GetQueue(id string) Queue {\n\tservice := broker.GetService(ServiceName).(*QueueService)\n\treturn service.getQueue(id)\n}", "func (o *KvmPolicyInventory) GetRemotePort() int64 {\n\tif o == nil || o.RemotePort == nil {\n\t\tvar ret int64\n\t\treturn ret\n\t}\n\treturn *o.RemotePort\n}", "func (r *RPC) GetQueueClient() queue.Client {\n\treturn r.c\n}", "func mainAdminBucketRemoteList(ctx *cli.Context) error {\n\tcheckAdminBucketRemoteListSyntax(ctx)\n\n\t// Additional command specific theme customization.\n\tconsole.SetColor(\"RemoteListMessage\", color.New(color.Bold, color.FgHiGreen))\n\tconsole.SetColor(\"RemoteListEmpty\", color.New(color.FgYellow))\n\tconsole.SetColor(\"SourceBucket\", color.New(color.FgYellow))\n\tconsole.SetColor(\"TargetBucket\", color.New(color.FgYellow))\n\tconsole.SetColor(\"TargetURL\", color.New(color.FgHiWhite))\n\tconsole.SetColor(\"ARN\", color.New(color.FgCyan))\n\tconsole.SetColor(\"Arrow\", color.New(color.FgHiWhite))\n\n\t// Get the alias parameter from cli\n\targs := ctx.Args()\n\taliasedURL := args.Get(0)\n\t_, sourceBucket := url2Alias(aliasedURL)\n\n\t// Create a new MinIO Admin Client\n\tclient, err := newAdminClient(aliasedURL)\n\tfatalIf(err, \"Unable to initialize admin connection.\")\n\ttargets, e := client.ListRemoteTargets(globalContext, sourceBucket, ctx.String(\"service\"))\n\tfatalIf(probe.NewError(e).Trace(args...), \"Unable to list remote target\")\n\tprintRemotes(ctx, aliasedURL, targets)\n\treturn nil\n}", "func (o *SmscSession) GetRemoteAddr() string {\n\tif o == nil || o.RemoteAddr == nil {\n\t\tvar ret string\n\t\treturn ret\n\t}\n\treturn *o.RemoteAddr\n}", "func (q *Queue) Get(number int) ([]interface{}, error) 
{\n\tif number < 1 {\n\t\treturn []interface{}{}, nil\n\t}\n\n\tq.lock.Lock()\n\n\tif q.disposed {\n\t\tq.lock.Unlock()\n\t\treturn nil, errors.New(\"Queue has been disposed\")\n\t}\n\n\tvar items []interface{}\n\tif len(q.items) == 0 {\n\t\tsema := newSema()\n\t\tq.waiters.put(sema)\n\t\tsema.wg.Add(1)\n\t\tq.lock.Unlock()\n\n\t\tsema.wg.Wait()\n\t\t// We are now inside put's lock.\n\t\tif q.disposed {\n\t\t\treturn nil, errors.New(\"Queue has been disposed\")\n\t\t}\n\n\t\titems = q.items.get(number)\n\t\tsema.response.Done()\n\t\treturn items, nil\n\t}\n\n\titems = q.items.get(number)\n\tq.lock.Unlock()\n\treturn items, nil\n}", "func GetQueue(ctx *pulumi.Context,\n\tname string, id pulumi.IDInput, state *QueueState, opts ...pulumi.ResourceOption) (*Queue, error) {\n\tvar resource Queue\n\terr := ctx.ReadResource(\"aws-native:connect:Queue\", name, id, state, &resource, opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &resource, nil\n}", "func (bp *Processer) GetRemoteAddr() string {\n\taddr := bp.g.Sock.RemoteAddr()\n\treturn addr.String()\n}", "func (bq *InMemoryBuildQueue) ListPlatformQueues(ctx context.Context, request *emptypb.Empty) (*buildqueuestate.ListPlatformQueuesResponse, error) {\n\tbq.enter(bq.clock.Now())\n\tdefer bq.leave()\n\n\t// Obtain platform queue IDs in sorted order.\n\tplatformQueueList := append(platformQueueList(nil), bq.platformQueues...)\n\tsort.Sort(platformQueueList)\n\n\t// Extract status.\n\tplatformQueues := make([]*buildqueuestate.PlatformQueueState, 0, len(bq.platformQueues))\n\tfor _, pq := range platformQueueList {\n\t\tsizeClassQueues := make([]*buildqueuestate.SizeClassQueueState, 0, len(pq.sizeClassQueues))\n\t\tfor i, scq := range pq.sizeClassQueues {\n\t\t\texecutingWorkersCount := uint32(0)\n\t\t\tfor _, w := range scq.workers {\n\t\t\t\tif w.currentTask != nil {\n\t\t\t\t\texecutingWorkersCount++\n\t\t\t\t}\n\t\t\t}\n\t\t\tactiveInvocationsCount := uint32(0)\n\t\t\tfor _, i := range scq.invocations 
{\n\t\t\t\tif i.isActive() {\n\t\t\t\t\tactiveInvocationsCount++\n\t\t\t\t}\n\t\t\t}\n\t\t\tsizeClassQueues = append(sizeClassQueues, &buildqueuestate.SizeClassQueueState{\n\t\t\t\tSizeClass: pq.sizeClasses[i],\n\t\t\t\tTimeout: bq.cleanupQueue.getTimestamp(scq.cleanupKey),\n\t\t\t\tInvocationsCount: uint32(len(scq.invocations)),\n\t\t\t\tQueuedInvocationsCount: uint32(scq.queuedInvocations.Len()),\n\t\t\t\tActiveInvocationsCount: uint32(activeInvocationsCount),\n\t\t\t\tWorkersCount: uint32(len(scq.workers)),\n\t\t\t\tExecutingWorkersCount: executingWorkersCount,\n\t\t\t\tDrainsCount: uint32(len(scq.drains)),\n\t\t\t})\n\t\t}\n\t\tplatformQueues = append(platformQueues, &buildqueuestate.PlatformQueueState{\n\t\t\tName: pq.platformKey.GetPlatformQueueName(),\n\t\t\tSizeClassQueues: sizeClassQueues,\n\t\t})\n\t}\n\treturn &buildqueuestate.ListPlatformQueuesResponse{\n\t\tPlatformQueues: platformQueues,\n\t}, nil\n}", "func (svc *SQS) XListAllQueueURLs(ctx context.Context, queuePrefix string) (queueURLs []string, err error) {\n\tresp, err := svc.ListQueues(ctx, ListQueuesRequest{\n\t\tQueueNamePrefix: queuePrefix,\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tresults := resp.QueueURLs\n\tnextToken := resp.NextToken\n\tfor nextToken != \"\" {\n\t\tresp, err := svc.ListQueues(ctx, ListQueuesRequest{\n\t\t\tQueueNamePrefix: queuePrefix,\n\t\t\tNextToken: nextToken,\n\t\t})\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tresults = append(results, resp.QueueURLs...)\n\t\tnextToken = resp.NextToken\n\t}\n\n\treturn results, nil\n}", "func (s *Store) GetQueueNames() ([]string, error) {\n\tvar names []string\n\treturn names, s.db.View(func(tx *bolt.Tx) error {\n\t\treturn s.queues(tx).ForEach(func(key, value []byte) error {\n\t\t\tnames = append(names, string(key))\n\t\t\treturn nil\n\t\t})\n\t})\n}" ]
[ "0.71619385", "0.61695373", "0.6127869", "0.6031343", "0.5995589", "0.5916818", "0.58918214", "0.5793508", "0.57635283", "0.57260025", "0.57182837", "0.56593096", "0.5565563", "0.5556938", "0.5451114", "0.54059637", "0.5343811", "0.53087986", "0.53000456", "0.52932256", "0.5202478", "0.5202026", "0.51352566", "0.5096544", "0.5083316", "0.50753343", "0.5045381", "0.5038165", "0.50206137", "0.50194395", "0.5001005", "0.49750003", "0.49579534", "0.4944364", "0.49298406", "0.49111122", "0.4910221", "0.4906531", "0.49031442", "0.48946214", "0.4891568", "0.4883973", "0.48648852", "0.48286587", "0.4789293", "0.4785026", "0.47594467", "0.47512728", "0.47358534", "0.47358033", "0.47339717", "0.4733591", "0.4727068", "0.47222948", "0.47170427", "0.47135445", "0.47113577", "0.4709004", "0.46976623", "0.46943414", "0.469314", "0.46926573", "0.46891418", "0.46861315", "0.46844608", "0.46694505", "0.4664064", "0.4657276", "0.4646711", "0.46339905", "0.46229827", "0.4618968", "0.46186715", "0.4612663", "0.46069223", "0.45983294", "0.45978802", "0.45940563", "0.4591997", "0.45916057", "0.4585771", "0.4584739", "0.45590138", "0.45555526", "0.45398208", "0.45337144", "0.45322356", "0.4525946", "0.45216835", "0.4516763", "0.45081013", "0.45048097", "0.45039165", "0.45037025", "0.44986156", "0.44960508", "0.44853693", "0.44829133", "0.44707265", "0.44690043" ]
0.76292956
0
GetRemoteQueuesOk returns a tuple with the RemoteQueues field value and a boolean to check if the value has been set.
GetRemoteQueuesOk возвращает кортеж с значением поля RemoteQueues и булевым значением для проверки, было ли значение задано.
func (o *QueueManager) GetRemoteQueuesOk() (*[]RemoteQueue, bool) { if o == nil { return nil, false } return &o.RemoteQueues, true }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (o *QueueManager) GetRemoteQueues() []RemoteQueue {\n\tif o == nil {\n\t\tvar ret []RemoteQueue\n\t\treturn ret\n\t}\n\n\treturn o.RemoteQueues\n}", "func (o *QueueManager) GetAliasQueuesOk() (*[]AliasQueue, bool) {\n\tif o == nil {\n\t\treturn nil, false\n\t}\n\treturn &o.AliasQueues, true\n}", "func (o *QueueManager) GetClusterQueuesOk() (*[]ClusterQueue, bool) {\n\tif o == nil {\n\t\treturn nil, false\n\t}\n\treturn &o.ClusterQueues, true\n}", "func (o *QueueManager) SetRemoteQueues(v []RemoteQueue) {\n\to.RemoteQueues = v\n}", "func (o *VnicEthAdapterPolicyInventory) GetRxQueueSettingsOk() (*VnicEthRxQueueSettings, bool) {\n\tif o == nil {\n\t\treturn nil, false\n\t}\n\treturn o.RxQueueSettings.Get(), o.RxQueueSettings.IsSet()\n}", "func (o *NSQProducer) GetRemoteAddressOk() (*string, bool) {\n\tif o == nil || o.RemoteAddress == nil {\n\t\treturn nil, false\n\t}\n\treturn o.RemoteAddress, true\n}", "func (*OpenconfigQos_Qos_Interfaces_Interface_Input_VirtualOutputQueues_VoqInterface_Queues) IsYANGGoStruct() {}", "func (o *VnicEthAdapterPolicyInventory) GetCompletionQueueSettingsOk() (*VnicCompletionQueueSettings, bool) {\n\tif o == nil {\n\t\treturn nil, false\n\t}\n\treturn o.CompletionQueueSettings.Get(), o.CompletionQueueSettings.IsSet()\n}", "func (o *NotificationConfig) GetReceiversOk() (*[]string, bool) {\n\tif o == nil {\n\t\treturn nil, false\n\t}\n\treturn &o.Receivers, true\n}", "func (o *VulnUpdateNotification) GetQueueIdOk() (*string, bool) {\n\tif o == nil || o.QueueId == nil {\n\t\treturn nil, false\n\t}\n\treturn o.QueueId, true\n}", "func (o *V0037JobProperties) GetRequeueOk() (*bool, bool) {\n\tif o == nil || o.Requeue == nil {\n\t\treturn nil, false\n\t}\n\treturn o.Requeue, true\n}", "func GetAvailableQueues() ([]string, error) {\n\tclient := &http.Client{}\n\n\tres, err := client.Get(Host + \"/queues\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\trespBody := res.Body\n\tdefer respBody.Close()\n\n\tavailableQueues := struct 
{\n\t\tQueues []string `json:\"queues\"`\n\t}{}\n\tif err := json.NewDecoder(respBody).Decode(&availableQueues); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn availableQueues.Queues, nil\n}", "func (i *Inspector) Queues() ([]string, error) {\n\treturn i.rdb.AllQueues()\n}", "func (o *KvmPolicyInventory) GetRemotePortOk() (*int64, bool) {\n\tif o == nil || o.RemotePort == nil {\n\t\treturn nil, false\n\t}\n\treturn o.RemotePort, true\n}", "func (*OpenconfigQos_Qos_Interfaces_Interface_Input_VirtualOutputQueues_VoqInterface_Queues_Queue) IsYANGGoStruct() {}", "func (o *VnicEthAdapterPolicyInventory) GetTxQueueSettingsOk() (*VnicEthTxQueueSettings, bool) {\n\tif o == nil {\n\t\treturn nil, false\n\t}\n\treturn o.TxQueueSettings.Get(), o.TxQueueSettings.IsSet()\n}", "func (o *VirtualizationVmwareVirtualMachineAllOf) GetRemoteDisplayInfoOk() (*VirtualizationVmwareRemoteDisplayInfo, bool) {\n\tif o == nil {\n\t\treturn nil, false\n\t}\n\treturn o.RemoteDisplayInfo.Get(), o.RemoteDisplayInfo.IsSet()\n}", "func (a *Client) GetMsgVpnQueues(params *GetMsgVpnQueuesParams, authInfo runtime.ClientAuthInfoWriter) (*GetMsgVpnQueuesOK, error) {\n\t// TODO: Validate the params before sending\n\tif params == nil {\n\t\tparams = NewGetMsgVpnQueuesParams()\n\t}\n\n\tresult, err := a.transport.Submit(&runtime.ClientOperation{\n\t\tID: \"getMsgVpnQueues\",\n\t\tMethod: \"GET\",\n\t\tPathPattern: \"/msgVpns/{msgVpnName}/queues\",\n\t\tProducesMediaTypes: []string{\"application/json\"},\n\t\tConsumesMediaTypes: []string{\"application/json\"},\n\t\tSchemes: []string{\"http\", \"https\"},\n\t\tParams: params,\n\t\tReader: &GetMsgVpnQueuesReader{formats: a.formats},\n\t\tAuthInfo: authInfo,\n\t\tContext: params.Context,\n\t\tClient: params.HTTPClient,\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn result.(*GetMsgVpnQueuesOK), nil\n\n}", "func (*OpenconfigQos_Qos_Interfaces_Interface_Output_Queues) IsYANGGoStruct() {}", "func GetQueues(c *gin.Context) {\n\t//TODO: create a 
while both back and front until value is != nil\n\tsize := len(queue)\n\tlog.Printf(\"squeue: %v\", queue)\n\tif size == 0 {\n\t\tc.JSON(http.StatusNotFound, gin.H{\n\t\t\t\"msg\": \"queue don't have any item!\",\n\t\t})\n\t\treturn\n\t}\n\tc.JSON(http.StatusOK, gin.H{\n\t\t\"queues\": queue,\n\t})\n\tlog.Printf(\"equeue: %v\", queue)\n}", "func (o *Replication) GetRemoteBucketIDOk() (*string, bool) {\n\tif o == nil {\n\t\treturn nil, false\n\t}\n\treturn &o.RemoteBucketID, true\n}", "func (o *LocalDatabaseProvider) GetDnsServersOk() ([]string, bool) {\n\tif o == nil || o.DnsServers == nil {\n\t\treturn nil, false\n\t}\n\treturn o.DnsServers, true\n}", "func (o *RemoteEnvironmentConfigListDto) GetValuesOk() (*[]RemoteEnvironmentConfigStub, bool) {\n\tif o == nil || o.Values == nil {\n\t\treturn nil, false\n\t}\n\treturn o.Values, true\n}", "func IsRemotePlan(planContents []byte) bool {\n\t// We add a header to plans generated by the remote backend so we can\n\t// detect that they're remote in the apply phase.\n\tremoteOpsHeaderBytes := []byte(remoteOpsHeader)\n\treturn bytes.Equal(planContents[:len(remoteOpsHeaderBytes)], remoteOpsHeaderBytes)\n}", "func (gores *Gores) Queues() []string {\n\tqueues := make([]string, 0)\n\n\tconn := gores.pool.Get()\n\tdefer conn.Close()\n\n\tdata, _ := conn.Do(\"SMEMBERS\", watchedQueues)\n\tfor _, q := range data.([]interface{}) {\n\t\tqueues = append(queues, string(q.([]byte)))\n\t}\n\n\treturn queues\n}", "func (o *SmscSession) GetRemoteAddrOk() (*string, bool) {\n\tif o == nil || o.RemoteAddr == nil {\n\t\treturn nil, false\n\t}\n\treturn o.RemoteAddr, true\n}", "func (o *Replication) GetMaxQueueSizeBytesOk() (*int64, bool) {\n\tif o == nil {\n\t\treturn nil, false\n\t}\n\treturn &o.MaxQueueSizeBytes, true\n}", "func (o *FiltersVmGroup) GetSubnetIdsOk() (*[]string, bool) {\n\tif o == nil || o.SubnetIds == nil {\n\t\treturn nil, false\n\t}\n\treturn o.SubnetIds, true\n}", "func (b *backend) Queues(ctx context.Context, qq 
*entroq.QueuesQuery) (map[string]int, error) {\n\tresp, err := pb.NewEntroQClient(b.conn).Queues(ctx, &pb.QueuesRequest{\n\t\tMatchPrefix: qq.MatchPrefix,\n\t\tMatchExact: qq.MatchExact,\n\t\tLimit: int32(qq.Limit),\n\t})\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"grpc queues: %w\", unpackGRPCError(err))\n\t}\n\tqs := make(map[string]int)\n\tfor _, q := range resp.Queues {\n\t\tqs[q.Name] = int(q.NumTasks)\n\t}\n\treturn qs, nil\n}", "func (*OpenconfigQos_Qos_Queues) IsYANGGoStruct() {}", "func (*OpenconfigQos_Qos_Interfaces_Interface_Input_VirtualOutputQueues_VoqInterface_Queues_Queue_State) IsYANGGoStruct() {}", "func (s *API) ListQueues(w http.ResponseWriter, req *http.Request) {\n\tlog.Debug(\"ListQueues\")\n\n\tqueueNamePrefix := req.FormValue(\"QueueNamePrefix\")\n\tvar queues []string\n\tfor k, v := range s.sqs.queues {\n\t\tif strings.HasPrefix(k, queueNamePrefix) {\n\t\t\tqueues = append(queues, v.url)\n\t\t}\n\t}\n\n\tresponse := ListQueuesResponse{\n\t\tResult: ListQueuesResult{queues},\n\t\tMetaData: ResponseMetaData{\"00000000-0000-0000-0000-000000000000\"},\n\t}\n\n\tw.Header().Set(\"Content-Type\", \"application/xml\")\n\tenc := xml.NewEncoder(w)\n\tenc.Indent(\" \", \" \")\n\tif err := enc.Encode(response); err != nil {\n\t\tlog.Errorf(\"error: %s\", err)\n\t}\n}", "func (o *V0037Node) GetBoardsOk() (*int32, bool) {\n\tif o == nil || o.Boards == nil {\n\t\treturn nil, false\n\t}\n\treturn o.Boards, true\n}", "func (o *NodeUpdate) GetDnsServersOk() (*[]string, bool) {\n\tif o == nil {\n\t\treturn nil, false\n\t}\n\treturn &o.DnsServers, true\n}", "func (o *NotificationConfig) GetBccReceiversOk() (*[]string, bool) {\n\tif o == nil {\n\t\treturn nil, false\n\t}\n\treturn &o.BccReceivers, true\n}", "func (t *OpenconfigQos_Qos_Interfaces_Interface_Input_VirtualOutputQueues_VoqInterface_Queues) Validate(opts ...ygot.ValidationOption) error {\n\tif err := 
ytypes.Validate(SchemaTree[\"OpenconfigQos_Qos_Interfaces_Interface_Input_VirtualOutputQueues_VoqInterface_Queues\"], t, opts...); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}", "func (a *Client) GetMsgVpnJndiQueues(params *GetMsgVpnJndiQueuesParams, authInfo runtime.ClientAuthInfoWriter) (*GetMsgVpnJndiQueuesOK, error) {\n\t// TODO: Validate the params before sending\n\tif params == nil {\n\t\tparams = NewGetMsgVpnJndiQueuesParams()\n\t}\n\n\tresult, err := a.transport.Submit(&runtime.ClientOperation{\n\t\tID: \"getMsgVpnJndiQueues\",\n\t\tMethod: \"GET\",\n\t\tPathPattern: \"/msgVpns/{msgVpnName}/jndiQueues\",\n\t\tProducesMediaTypes: []string{\"application/json\"},\n\t\tConsumesMediaTypes: []string{\"application/json\"},\n\t\tSchemes: []string{\"http\", \"https\"},\n\t\tParams: params,\n\t\tReader: &GetMsgVpnJndiQueuesReader{formats: a.formats},\n\t\tAuthInfo: authInfo,\n\t\tContext: params.Context,\n\t\tClient: params.HTTPClient,\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn result.(*GetMsgVpnJndiQueuesOK), nil\n\n}", "func (o *SMSConnectorSettings) GetDecodersOk() (*[]string, bool) {\n\tif o == nil {\n\t\treturn nil, false\n\t}\n\treturn &o.Decoders, true\n}", "func (s *SQSServer) pollQueues(pollctx, taskctx context.Context, queues []QueueConf) error {\n\tfor _, qconf := range queues {\n\t\tq, err := s.getQueue(pollctx, qconf)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treq := &sqs.GetQueueAttributesInput{\n\t\t\tAttributeNames: []types.QueueAttributeName{(\"VisibilityTimeout\")},\n\t\t\tQueueUrl: &q.url,\n\t\t}\n\t\tresp, err := s.sqsSrv(q.QueueConf).GetQueueAttributes(pollctx, req)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"Failed to get queue attributes for '%s' - %s\", q.Name, err.Error())\n\t\t}\n\t\tto := resp.Attributes[\"VisibilityTimeout\"]\n\t\tif to == \"\" {\n\t\t\treturn fmt.Errorf(\"No visibility timeout returned by SQS for queue '%s'\", q.Name)\n\t\t}\n\t\tvisTimeout, err := strconv.Atoi(to)\n\t\tif err != nil 
{\n\t\t\treturn fmt.Errorf(\"Failed to convert visibility timeout from '%s' to int - '%s'\", to, err.Error())\n\t\t}\n\t\t// Each queue runs in a dedicated go routine.\n\t\tgo func(vt int32) {\n\t\t\ts.queuePollers.Add(1)\n\t\t\tdefer s.queuePollers.Done()\n\t\t\ts.run(pollctx, taskctx, q, vt)\n\t\t}(int32(visTimeout))\n\t}\n\n\treturn nil\n}", "func (s *UserDataFilters) SetQueues(v []*string) *UserDataFilters {\n\ts.Queues = v\n\treturn s\n}", "func (*OpenconfigQos_Qos_Interfaces_Interface_Input_VirtualOutputQueues_VoqInterface_Queues_Queue_Config) IsYANGGoStruct() {}", "func (s *Filters) SetQueues(v []*string) *Filters {\n\ts.Queues = v\n\treturn s\n}", "func RmqQueueStat(jBody []byte) (messageCount int, result int) {\n\ttype QueueFailedCount struct {\n\t\tCount int `json:\"messages\"` // figure out which of these we need\n\t}\n\tresult = 0 // explicitly zeroing\n\tvar queueFailedCount QueueFailedCount\n\tmarshalerr := json.Unmarshal(jBody, &queueFailedCount)\n\tif marshalerr != nil {\n\t\tfmt.Println(marshalerr)\n\t}\n\tmessageCount = queueFailedCount.Count\n\tif queueFailedCount.Count > 0 && queueFailedCount.Count < 20 {\n\t\tresult = 1\n\t} else if queueFailedCount.Count > 19 {\n\t\tresult = 2\n\t}\n\treturn messageCount, result\n}", "func (*OpenconfigQos_Qos_Interfaces_Interface_Input_Queues) IsYANGGoStruct() {}", "func (o *SecurityProblem) GetManagementZonesOk() (*[]ManagementZone, bool) {\n\tif o == nil || o.ManagementZones == nil {\n\t\treturn nil, false\n\t}\n\treturn o.ManagementZones, true\n}", "func (connector *DbConnector) GetRemoteTriggersToCheck(count int) ([]string, error) {\n\treturn connector.getTriggersToCheck(remoteTriggersToCheckKey, count)\n}", "func (*OpenconfigQos_Qos_Interfaces_Interface_Output_Queues_Queue) IsYANGGoStruct() {}", "func (m *ExtractorClientMock) MinimockGetChatHistoryRemoteDone() bool {\n\tfor _, e := range m.GetChatHistoryRemoteMock.expectations {\n\t\tif mm_atomic.LoadUint64(&e.Counter) < 1 {\n\t\t\treturn 
false\n\t\t}\n\t}\n\n\t// if default expectation was set then invocations count should be greater than zero\n\tif m.GetChatHistoryRemoteMock.defaultExpectation != nil && mm_atomic.LoadUint64(&m.afterGetChatHistoryRemoteCounter) < 1 {\n\t\treturn false\n\t}\n\t// if func was set then invocations count should be greater than zero\n\tif m.funcGetChatHistoryRemote != nil && mm_atomic.LoadUint64(&m.afterGetChatHistoryRemoteCounter) < 1 {\n\t\treturn false\n\t}\n\treturn true\n}", "func (b *backend) Queues(ctx context.Context, qq *entroq.QueuesQuery) (map[string]int, error) {\n\tdefer un(lock(b))\n\n\tqs := make(map[string]int)\n\tfor q, items := range b.heaps {\n\t\tif len(qq.MatchPrefix) != 0 || len(qq.MatchExact) != 0 {\n\t\t\tif !matchesPrefix(q, qq.MatchPrefix...) && !matchesExact(q, qq.MatchExact...) {\n\t\t\t\t// no match\n\t\t\t\tcontinue\n\t\t\t}\n\t\t}\n\t\tqs[q] = items.Len()\n\t\tif qq.Limit > 0 && len(qs) >= qq.Limit {\n\t\t\tbreak\n\t\t}\n\t}\n\treturn qs, nil\n}", "func (o *NotificationProjectBudgetNotification) GetTeamIdsOk() (*[]int32, bool) {\n\tif o == nil || o.TeamIds == nil {\n\t\treturn nil, false\n\t}\n\treturn o.TeamIds, true\n}", "func groomQueues(queues *Queues) (err kv.Error) {\n\tfor qName, qDetails := range *queues {\n\t\t// If we have enough runners drop the queue as it needs nothing done to it\n\t\tif len(qDetails.NodeGroup) == 0 || qDetails.Running >= qDetails.Ready+qDetails.NotVisible {\n\t\t\tif logger.IsTrace() {\n\t\t\t\tlogger.Trace(\"queue already handled\", \"queue\", qName, \"stack\", stack.Trace().TrimRuntime())\n\t\t\t}\n\t\t\tdelete(*queues, qName)\n\t\t}\n\t}\n\treturn nil\n}", "func (o *ApplianceAllOfNetworkingIpv4Dhcp) GetRoutersOk() (*bool, bool) {\n\tif o == nil || o.Routers == nil {\n\t\treturn nil, false\n\t}\n\treturn o.Routers, true\n}", "func (bq *InMemoryBuildQueue) ListPlatformQueues(ctx context.Context, request *emptypb.Empty) (*buildqueuestate.ListPlatformQueuesResponse, error) 
{\n\tbq.enter(bq.clock.Now())\n\tdefer bq.leave()\n\n\t// Obtain platform queue IDs in sorted order.\n\tplatformQueueList := append(platformQueueList(nil), bq.platformQueues...)\n\tsort.Sort(platformQueueList)\n\n\t// Extract status.\n\tplatformQueues := make([]*buildqueuestate.PlatformQueueState, 0, len(bq.platformQueues))\n\tfor _, pq := range platformQueueList {\n\t\tsizeClassQueues := make([]*buildqueuestate.SizeClassQueueState, 0, len(pq.sizeClassQueues))\n\t\tfor i, scq := range pq.sizeClassQueues {\n\t\t\texecutingWorkersCount := uint32(0)\n\t\t\tfor _, w := range scq.workers {\n\t\t\t\tif w.currentTask != nil {\n\t\t\t\t\texecutingWorkersCount++\n\t\t\t\t}\n\t\t\t}\n\t\t\tactiveInvocationsCount := uint32(0)\n\t\t\tfor _, i := range scq.invocations {\n\t\t\t\tif i.isActive() {\n\t\t\t\t\tactiveInvocationsCount++\n\t\t\t\t}\n\t\t\t}\n\t\t\tsizeClassQueues = append(sizeClassQueues, &buildqueuestate.SizeClassQueueState{\n\t\t\t\tSizeClass: pq.sizeClasses[i],\n\t\t\t\tTimeout: bq.cleanupQueue.getTimestamp(scq.cleanupKey),\n\t\t\t\tInvocationsCount: uint32(len(scq.invocations)),\n\t\t\t\tQueuedInvocationsCount: uint32(scq.queuedInvocations.Len()),\n\t\t\t\tActiveInvocationsCount: uint32(activeInvocationsCount),\n\t\t\t\tWorkersCount: uint32(len(scq.workers)),\n\t\t\t\tExecutingWorkersCount: executingWorkersCount,\n\t\t\t\tDrainsCount: uint32(len(scq.drains)),\n\t\t\t})\n\t\t}\n\t\tplatformQueues = append(platformQueues, &buildqueuestate.PlatformQueueState{\n\t\t\tName: pq.platformKey.GetPlatformQueueName(),\n\t\t\tSizeClassQueues: sizeClassQueues,\n\t\t})\n\t}\n\treturn &buildqueuestate.ListPlatformQueuesResponse{\n\t\tPlatformQueues: platformQueues,\n\t}, nil\n}", "func (o *Replication) GetRemoteIDOk() (*string, bool) {\n\tif o == nil {\n\t\treturn nil, false\n\t}\n\treturn &o.RemoteID, true\n}", "func (_PlasmaFramework *PlasmaFrameworkCaller) HasExitQueue(opts *bind.CallOpts, vaultId *big.Int, token common.Address) (bool, error) {\n\tvar (\n\t\tret0 = 
new(bool)\n\t)\n\tout := ret0\n\terr := _PlasmaFramework.contract.Call(opts, out, \"hasExitQueue\", vaultId, token)\n\treturn *ret0, err\n}", "func (o *HyperflexHxapDvUplink) GetVlansOk() (*string, bool) {\n\tif o == nil || o.Vlans == nil {\n\t\treturn nil, false\n\t}\n\treturn o.Vlans, true\n}", "func (o *SMSConnectorSettings) GetLimitsOk() (*Thresholds, bool) {\n\tif o == nil || o.Limits == nil {\n\t\treturn nil, false\n\t}\n\treturn o.Limits, true\n}", "func (bs *BeanstalkdConnectionPool) ListQueues() (queueNames []string, err error) {\n\tqueueNames, err = bs.getGlobalConnect().ListTubes()\n\treturn\n}", "func (o *Replication) GetCurrentQueueSizeBytesOk() (*int64, bool) {\n\tif o == nil {\n\t\treturn nil, false\n\t}\n\treturn &o.CurrentQueueSizeBytes, true\n}", "func (o *VnicEthAdapterPolicyInventory) HasRxQueueSettings() bool {\n\tif o != nil && o.RxQueueSettings.IsSet() {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func (o *CouponLimitConfigs) GetLimitsOk() ([]LimitConfig, bool) {\n\tif o == nil || o.Limits == nil {\n\t\tvar ret []LimitConfig\n\t\treturn ret, false\n\t}\n\treturn *o.Limits, true\n}", "func NewGetCallQueueitemsOK() *GetCallQueueitemsOK {\n\treturn &GetCallQueueitemsOK{}\n}", "func (o *NewCoupons) GetLimitsOk() ([]LimitConfig, bool) {\n\tif o == nil || o.Limits == nil {\n\t\tvar ret []LimitConfig\n\t\treturn ret, false\n\t}\n\treturn *o.Limits, true\n}", "func (o *NotificationConfig) GetCcReceiversOk() (*[]string, bool) {\n\tif o == nil {\n\t\treturn nil, false\n\t}\n\treturn &o.CcReceivers, true\n}", "func (o *VirtualizationVmwareVirtualMachineAllOf) HasRemoteDisplayInfo() bool {\n\tif o != nil && o.RemoteDisplayInfo.IsSet() {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func (o *Wireless) GetChannelsOk() (string, bool) {\n\tif o == nil || o.Channels == nil {\n\t\tvar ret string\n\t\treturn ret, false\n\t}\n\treturn *o.Channels, true\n}", "func (o *NetworkDns) GetNameServersOk() ([]string, bool) {\n\tif o == nil || o.NameServers == nil 
{\n\t\treturn nil, false\n\t}\n\treturn o.NameServers, true\n}", "func (o *StorageRemoteKeySetting) GetIsPasswordSetOk() (*bool, bool) {\n\tif o == nil || o.IsPasswordSet == nil {\n\t\treturn nil, false\n\t}\n\treturn o.IsPasswordSet, true\n}", "func (u *Unpackerr) haveSonarrQitem(name string) bool {\n\tfor _, server := range u.Sonarr {\n\t\tfor _, q := range server.Queue.Records {\n\t\t\tif q.Title == name {\n\t\t\t\treturn true\n\t\t\t}\n\t\t}\n\t}\n\n\treturn false\n}", "func (t *OpenconfigQos_Qos_Interfaces_Interface_Input_VirtualOutputQueues_VoqInterface_Queues_Queue) Validate(opts ...ygot.ValidationOption) error {\n\tif err := ytypes.Validate(SchemaTree[\"OpenconfigQos_Qos_Interfaces_Interface_Input_VirtualOutputQueues_VoqInterface_Queues_Queue\"], t, opts...); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}", "func (o *NSQProducer) HasRemoteAddress() bool {\n\tif o != nil && o.RemoteAddress != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func (o *StorageRemoteKeySetting) GetPortOk() (*int64, bool) {\n\tif o == nil || o.Port == nil {\n\t\treturn nil, false\n\t}\n\treturn o.Port, true\n}", "func (o *StatusAzureServiceBus) GetRecordsProcessedOk() (*int64, bool) {\n\tif o == nil || IsNil(o.RecordsProcessed) {\n\t\treturn nil, false\n\t}\n\treturn o.RecordsProcessed, true\n}", "func (o *FiltersApiLog) GetQueryApiNamesOk() ([]string, bool) {\n\tif o == nil || o.QueryApiNames == nil {\n\t\tvar ret []string\n\t\treturn ret, false\n\t}\n\treturn *o.QueryApiNames, true\n}", "func QueueStatus_Values() []string {\n\treturn []string{\n\t\tQueueStatusEnabled,\n\t\tQueueStatusDisabled,\n\t}\n}", "func (o *FiltersVirtualGateway) GetVirtualGatewayIdsOk() ([]string, bool) {\n\tif o == nil || o.VirtualGatewayIds == nil {\n\t\tvar ret []string\n\t\treturn ret, false\n\t}\n\treturn *o.VirtualGatewayIds, true\n}", "func (o *FeedSyncResult) GetGroupsOk() ([]GroupSyncResult, bool) {\n\tif o == nil || o.Groups == nil {\n\t\treturn nil, false\n\t}\n\treturn o.Groups, 
true\n}", "func (o *VirtualizationVmwareVirtualMachineAllOf) GetRemoteDisplayVncEnabledOk() (*bool, bool) {\n\tif o == nil || o.RemoteDisplayVncEnabled == nil {\n\t\treturn nil, false\n\t}\n\treturn o.RemoteDisplayVncEnabled, true\n}", "func (o *W2) GetNonqualifiedPlansOk() (*string, bool) {\n\tif o == nil {\n\t\treturn nil, false\n\t}\n\treturn o.NonqualifiedPlans.Get(), o.NonqualifiedPlans.IsSet()\n}", "func (*OpenconfigQos_Qos_Interfaces_Interface_Output_Queues_Queue_State) IsYANGGoStruct() {}", "func (o *V0037Node) GetSocketsOk() (*int32, bool) {\n\tif o == nil || o.Sockets == nil {\n\t\treturn nil, false\n\t}\n\treturn o.Sockets, true\n}", "func (o *User) GetMessagesOk() ([]MicrosoftGraphMessage, bool) {\n\tif o == nil || o.Messages == nil {\n\t\tvar ret []MicrosoftGraphMessage\n\t\treturn ret, false\n\t}\n\treturn *o.Messages, true\n}", "func (m *Makross) HasQueuesMap(key string) bool {\n\tif value, okay := m.QueuesMap.Load(key); okay {\n\t\tif pqueue, okay := value.(*prior.PriorityQueue); okay {\n\t\t\tif pqueue.Length() > 0 {\n\t\t\t\treturn true\n\t\t\t}\n\t\t}\n\t}\n\treturn false\n}", "func (*OpenconfigQos_Qos_Queues_Queue_Red) IsYANGGoStruct() {}", "func (o *FiltersSecurityGroup) GetOutboundRuleProtocolsOk() (*[]string, bool) {\n\tif o == nil || o.OutboundRuleProtocols == nil {\n\t\treturn nil, false\n\t}\n\treturn o.OutboundRuleProtocols, true\n}", "func (o *KvmPolicyInventory) HasRemotePort() bool {\n\tif o != nil && o.RemotePort != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func (o *User) GetMailboxSettingsOk() (AnyOfmicrosoftGraphMailboxSettings, bool) {\n\tif o == nil || o.MailboxSettings == nil {\n\t\tvar ret AnyOfmicrosoftGraphMailboxSettings\n\t\treturn ret, false\n\t}\n\treturn *o.MailboxSettings, true\n}", "func (_PlasmaFramework *PlasmaFrameworkCaller) ExitsQueues(opts *bind.CallOpts, arg0 [32]byte) (common.Address, error) {\n\tvar (\n\t\tret0 = new(common.Address)\n\t)\n\tout := ret0\n\terr := _PlasmaFramework.contract.Call(opts, 
out, \"exitsQueues\", arg0)\n\treturn *ret0, err\n}", "func (o *UsageSNMPHour) GetSnmpDevicesOk() (*int64, bool) {\n\tif o == nil || o.SnmpDevices == nil {\n\t\treturn nil, false\n\t}\n\treturn o.SnmpDevices, true\n}", "func (_DappboxManager *DappboxManagerCaller) IsRemoteFolder(opts *bind.CallOpts, dappboxAddress common.Address, remoteFolderAddress common.Address) (bool, common.Address, *big.Int, error) {\n\tvar (\n\t\tret0 = new(bool)\n\t\tret1 = new(common.Address)\n\t\tret2 = new(*big.Int)\n\t)\n\tout := &[]interface{}{\n\t\tret0,\n\t\tret1,\n\t\tret2,\n\t}\n\terr := _DappboxManager.contract.Call(opts, out, \"isRemoteFolder\", dappboxAddress, remoteFolderAddress)\n\treturn *ret0, *ret1, *ret2, err\n}", "func (o *VirtualizationVmwareVirtualMachineAllOf) GetNetworksOk() ([]VirtualizationBaseNetworkRelationship, bool) {\n\tif o == nil || o.Networks == nil {\n\t\treturn nil, false\n\t}\n\treturn o.Networks, true\n}", "func (o *FiltersNatService) GetSubnetIdsOk() ([]string, bool) {\n\tif o == nil || o.SubnetIds == nil {\n\t\tvar ret []string\n\t\treturn ret, false\n\t}\n\treturn *o.SubnetIds, true\n}", "func (o *SoftwarerepositoryLocalMachineAllOf) GetUploadUrlsOk() ([]string, bool) {\n\tif o == nil || o.UploadUrls == nil {\n\t\treturn nil, false\n\t}\n\treturn o.UploadUrls, true\n}", "func (o *VisuallyComplete2Settings) GetMutationBlacklistOk() (*string, bool) {\n\tif o == nil || o.MutationBlacklist == nil {\n\t\treturn nil, false\n\t}\n\treturn o.MutationBlacklist, true\n}", "func (o *FiltersSubnet) GetSubnetIdsOk() ([]string, bool) {\n\tif o == nil || o.SubnetIds == nil {\n\t\tvar ret []string\n\t\treturn ret, false\n\t}\n\treturn *o.SubnetIds, true\n}", "func (connector *DbConnector) GetRemoteTriggersToCheckCount() (int64, error) {\n\treturn connector.getTriggersToCheckCount(remoteTriggersToCheckKey)\n}", "func (_PlasmaFramework *PlasmaFrameworkSession) ExitsQueues(arg0 [32]byte) (common.Address, error) {\n\treturn 
_PlasmaFramework.Contract.ExitsQueues(&_PlasmaFramework.CallOpts, arg0)\n}", "func (*OpenconfigQos_Qos_Interfaces_Interface_Input_Queues_Queue) IsYANGGoStruct() {}", "func (p *Project) Queues() (*[]Queue, error) {\n qs := make([]Queue, 0)\n err := Mongo.Get(\"queue\", bson.M{\"project\": p.ID}, MaxQueuesPerProject, &qs)\n return &qs, err\n}", "func (*OpenconfigQos_Qos_Interfaces_Interface_Output_Queues_Queue_Config) IsYANGGoStruct() {}" ]
[ "0.63053876", "0.61726433", "0.5913533", "0.5784514", "0.57285035", "0.533683", "0.53098166", "0.52802896", "0.5174174", "0.5162988", "0.5104646", "0.50820893", "0.5044645", "0.5033634", "0.50302416", "0.5001202", "0.49617392", "0.49253264", "0.49231443", "0.48999754", "0.48885736", "0.4880531", "0.48601952", "0.4841383", "0.4839321", "0.4827003", "0.48217717", "0.47937664", "0.47796562", "0.4776574", "0.47542107", "0.47247502", "0.46949935", "0.46794948", "0.46789235", "0.46750748", "0.46690944", "0.4649491", "0.46421465", "0.46382114", "0.46373102", "0.46326855", "0.46309564", "0.46191058", "0.46060085", "0.46052495", "0.460306", "0.45956945", "0.4593882", "0.45751137", "0.45655227", "0.45647424", "0.45595235", "0.4548738", "0.45453075", "0.45442015", "0.4541691", "0.4540848", "0.45343843", "0.4526562", "0.45254922", "0.45216408", "0.45181453", "0.4512346", "0.4509706", "0.4504498", "0.45039773", "0.45025113", "0.44958487", "0.44787934", "0.44773033", "0.44641584", "0.44640988", "0.44581118", "0.44520712", "0.44426504", "0.44367278", "0.44320062", "0.44301763", "0.44275033", "0.44245714", "0.44134593", "0.4412306", "0.44094756", "0.4408505", "0.44066787", "0.43993542", "0.4390439", "0.4387919", "0.43774816", "0.43516698", "0.43472597", "0.43472567", "0.43453172", "0.43426707", "0.43275246", "0.4323224", "0.43189925", "0.43179637", "0.43171772" ]
0.8458737
0
GetClusterQueues returns the ClusterQueues field value
GetClusterQueues возвращает значение поля ClusterQueues
func (o *QueueManager) GetClusterQueues() []ClusterQueue { if o == nil { var ret []ClusterQueue return ret } return o.ClusterQueues }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (o *QueueManager) GetClusterQueuesOk() (*[]ClusterQueue, bool) {\n\tif o == nil {\n\t\treturn nil, false\n\t}\n\treturn &o.ClusterQueues, true\n}", "func (o *QueueManager) GetClusters() []string {\n\tif o == nil {\n\t\tvar ret []string\n\t\treturn ret\n\t}\n\n\treturn o.Clusters\n}", "func (client *Client) GetClusterQueueInfo(request *GetClusterQueueInfoRequest) (response *GetClusterQueueInfoResponse, err error) {\n\tresponse = CreateGetClusterQueueInfoResponse()\n\terr = client.DoAction(request, response)\n\treturn\n}", "func (b *backend) Queues(ctx context.Context, qq *entroq.QueuesQuery) (map[string]int, error) {\n\tresp, err := pb.NewEntroQClient(b.conn).Queues(ctx, &pb.QueuesRequest{\n\t\tMatchPrefix: qq.MatchPrefix,\n\t\tMatchExact: qq.MatchExact,\n\t\tLimit: int32(qq.Limit),\n\t})\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"grpc queues: %w\", unpackGRPCError(err))\n\t}\n\tqs := make(map[string]int)\n\tfor _, q := range resp.Queues {\n\t\tqs[q.Name] = int(q.NumTasks)\n\t}\n\treturn qs, nil\n}", "func (gores *Gores) Queues() []string {\n\tqueues := make([]string, 0)\n\n\tconn := gores.pool.Get()\n\tdefer conn.Close()\n\n\tdata, _ := conn.Do(\"SMEMBERS\", watchedQueues)\n\tfor _, q := range data.([]interface{}) {\n\t\tqueues = append(queues, string(q.([]byte)))\n\t}\n\n\treturn queues\n}", "func (t *TopicCache) GetQueue(projectName, serviceName string) []string {\n\tt.RLock()\n\tdefer t.RUnlock()\n\n\tif len(t.inQueue[projectName+serviceName]) >= 100 {\n\t\treturn t.inQueue[projectName+serviceName][:99]\n\t}\n\n\treturn t.inQueue[projectName+serviceName]\n}", "func GetQueues(c *gin.Context) {\n\t//TODO: create a while both back and front until value is != nil\n\tsize := len(queue)\n\tlog.Printf(\"squeue: %v\", queue)\n\tif size == 0 {\n\t\tc.JSON(http.StatusNotFound, gin.H{\n\t\t\t\"msg\": \"queue don't have any item!\",\n\t\t})\n\t\treturn\n\t}\n\tc.JSON(http.StatusOK, gin.H{\n\t\t\"queues\": queue,\n\t})\n\tlog.Printf(\"equeue: %v\", queue)\n}", 
"func GetAvailableQueues() ([]string, error) {\n\tclient := &http.Client{}\n\n\tres, err := client.Get(Host + \"/queues\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\trespBody := res.Body\n\tdefer respBody.Close()\n\n\tavailableQueues := struct {\n\t\tQueues []string `json:\"queues\"`\n\t}{}\n\tif err := json.NewDecoder(respBody).Decode(&availableQueues); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn availableQueues.Queues, nil\n}", "func (p *Project) Queues() (*[]Queue, error) {\n qs := make([]Queue, 0)\n err := Mongo.Get(\"queue\", bson.M{\"project\": p.ID}, MaxQueuesPerProject, &qs)\n return &qs, err\n}", "func (o *QueueManager) SetClusterQueues(v []ClusterQueue) {\n\to.ClusterQueues = v\n}", "func (b *backend) Queues(ctx context.Context, qq *entroq.QueuesQuery) (map[string]int, error) {\n\tdefer un(lock(b))\n\n\tqs := make(map[string]int)\n\tfor q, items := range b.heaps {\n\t\tif len(qq.MatchPrefix) != 0 || len(qq.MatchExact) != 0 {\n\t\t\tif !matchesPrefix(q, qq.MatchPrefix...) && !matchesExact(q, qq.MatchExact...) 
{\n\t\t\t\t// no match\n\t\t\t\tcontinue\n\t\t\t}\n\t\t}\n\t\tqs[q] = items.Len()\n\t\tif qq.Limit > 0 && len(qs) >= qq.Limit {\n\t\t\tbreak\n\t\t}\n\t}\n\treturn qs, nil\n}", "func (i *Inspector) Queues() ([]string, error) {\n\treturn i.rdb.AllQueues()\n}", "func (bs *BeanstalkdConnectionPool) ListQueues() (queueNames []string, err error) {\n\tqueueNames, err = bs.getGlobalConnect().ListTubes()\n\treturn\n}", "func listQueues(ENV string) []string {\n \t// Using the SDK's default configuration, loading additional config\n\t// and credentials values from the environment variables, shared\n\t// credentials, and shared configuration files\n\n\tsess, err := session.NewSession(&aws.Config{\n\t Region: aws.String(\"us-east-1\")},\n\t)\n\n // Create a SQS service client.\n svc := sqs.New(sess)\n\n\t//have to create a session object first\n\toutput, err := svc.ListQueues(&sqs.ListQueuesInput{\n\t QueueNamePrefix: aws.String(ENV),\n })\n\tif err != nil { panic(err) }\n\n\tqueues := output.QueueUrls\n\tfinal_queues := []string{}\n\n\tfor _, i := range queues {\n\t fmt.Println(string(*i))\n\t final_queues = append(final_queues, *i)\n }\n\treturn final_queues\n}", "func (c *restClient) ListQueues(ctx context.Context, req *cloudtaskspb.ListQueuesRequest, opts ...gax.CallOption) *QueueIterator {\n\tit := &QueueIterator{}\n\treq = proto.Clone(req).(*cloudtaskspb.ListQueuesRequest)\n\tunm := protojson.UnmarshalOptions{AllowPartial: true, DiscardUnknown: true}\n\tit.InternalFetch = func(pageSize int, pageToken string) ([]*cloudtaskspb.Queue, string, error) {\n\t\tresp := &cloudtaskspb.ListQueuesResponse{}\n\t\tif pageToken != \"\" {\n\t\t\treq.PageToken = pageToken\n\t\t}\n\t\tif pageSize > math.MaxInt32 {\n\t\t\treq.PageSize = math.MaxInt32\n\t\t} else if pageSize != 0 {\n\t\t\treq.PageSize = int32(pageSize)\n\t\t}\n\t\tbaseUrl, err := url.Parse(c.endpoint)\n\t\tif err != nil {\n\t\t\treturn nil, \"\", err\n\t\t}\n\t\tbaseUrl.Path += fmt.Sprintf(\"/v2beta3/%v/queues\", 
req.GetParent())\n\n\t\tparams := url.Values{}\n\t\tparams.Add(\"$alt\", \"json;enum-encoding=int\")\n\t\tif req.GetFilter() != \"\" {\n\t\t\tparams.Add(\"filter\", fmt.Sprintf(\"%v\", req.GetFilter()))\n\t\t}\n\t\tif req.GetPageSize() != 0 {\n\t\t\tparams.Add(\"pageSize\", fmt.Sprintf(\"%v\", req.GetPageSize()))\n\t\t}\n\t\tif req.GetPageToken() != \"\" {\n\t\t\tparams.Add(\"pageToken\", fmt.Sprintf(\"%v\", req.GetPageToken()))\n\t\t}\n\t\tif req.GetReadMask() != nil {\n\t\t\treadMask, err := protojson.Marshal(req.GetReadMask())\n\t\t\tif err != nil {\n\t\t\t\treturn nil, \"\", err\n\t\t\t}\n\t\t\tparams.Add(\"readMask\", string(readMask[1:len(readMask)-1]))\n\t\t}\n\n\t\tbaseUrl.RawQuery = params.Encode()\n\n\t\t// Build HTTP headers from client and context metadata.\n\t\thds := append(c.xGoogHeaders, \"Content-Type\", \"application/json\")\n\t\theaders := gax.BuildHeaders(ctx, hds...)\n\t\te := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {\n\t\t\tif settings.Path != \"\" {\n\t\t\t\tbaseUrl.Path = settings.Path\n\t\t\t}\n\t\t\thttpReq, err := http.NewRequest(\"GET\", baseUrl.String(), nil)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\thttpReq.Header = headers\n\n\t\t\thttpRsp, err := c.httpClient.Do(httpReq)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tdefer httpRsp.Body.Close()\n\n\t\t\tif err = googleapi.CheckResponse(httpRsp); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\tbuf, err := io.ReadAll(httpRsp.Body)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\tif err := unm.Unmarshal(buf, resp); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\treturn nil\n\t\t}, opts...)\n\t\tif e != nil {\n\t\t\treturn nil, \"\", e\n\t\t}\n\t\tit.Response = resp\n\t\treturn resp.GetQueues(), resp.GetNextPageToken(), nil\n\t}\n\n\tfetch := func(pageSize int, pageToken string) (string, error) {\n\t\titems, nextPageToken, err := it.InternalFetch(pageSize, pageToken)\n\t\tif err != nil {\n\t\t\treturn \"\", 
err\n\t\t}\n\t\tit.items = append(it.items, items...)\n\t\treturn nextPageToken, nil\n\t}\n\n\tit.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf)\n\tit.pageInfo.MaxSize = int(req.GetPageSize())\n\tit.pageInfo.Token = req.GetPageToken()\n\n\treturn it\n}", "func (client *Client) GetClusterQueueInfoWithCallback(request *GetClusterQueueInfoRequest, callback func(response *GetClusterQueueInfoResponse, err error)) <-chan int {\n\tresult := make(chan int, 1)\n\terr := client.AddAsyncTask(func() {\n\t\tvar response *GetClusterQueueInfoResponse\n\t\tvar err error\n\t\tdefer close(result)\n\t\tresponse, err = client.GetClusterQueueInfo(request)\n\t\tcallback(response, err)\n\t\tresult <- 1\n\t})\n\tif err != nil {\n\t\tdefer close(result)\n\t\tcallback(nil, err)\n\t\tresult <- 0\n\t}\n\treturn result\n}", "func (o *QueueManager) GetAliasQueues() []AliasQueue {\n\tif o == nil {\n\t\tvar ret []AliasQueue\n\t\treturn ret\n\t}\n\n\treturn o.AliasQueues\n}", "func (ClearTrans) GetQueue() string {\n\treturn \"cy_rubik_clearTrans\"\n}", "func (storage *SrvStorage) GetVhostQueues(vhost string) []*queue.Queue {\n\tvar queues []*queue.Queue\n\tstorage.db.Iterate(\n\t\tfunc(key []byte, value []byte) {\n\t\t\tif !bytes.HasPrefix(key, []byte(queuePrefix)) || getVhostFromKey(string(key)) != vhost {\n\t\t\t\treturn\n\t\t\t}\n\t\t\tq := &queue.Queue{}\n\t\t\tq.Unmarshal(value, storage.protoVersion)\n\t\t\tqueues = append(queues, q)\n\t\t},\n\t)\n\n\treturn queues\n}", "func (svc *AdminBuildService) GetQueue(opt *GetQueueOptions) (*[]library.BuildQueue, *Response, error) {\n\t// set the API endpoint path we send the request to\n\tu := \"/api/v1/admin/builds/queue\"\n\n\t// add optional arguments if supplied\n\tu, err := addOptions(u, opt)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\t// BuildQueue type we want to return\n\tv := new([]library.BuildQueue)\n\n\tresp, err := svc.client.Call(\"GET\", u, nil, v)\n\n\treturn v, resp, err\n}", "func (s *API) 
ListQueues(w http.ResponseWriter, req *http.Request) {\n\tlog.Debug(\"ListQueues\")\n\n\tqueueNamePrefix := req.FormValue(\"QueueNamePrefix\")\n\tvar queues []string\n\tfor k, v := range s.sqs.queues {\n\t\tif strings.HasPrefix(k, queueNamePrefix) {\n\t\t\tqueues = append(queues, v.url)\n\t\t}\n\t}\n\n\tresponse := ListQueuesResponse{\n\t\tResult: ListQueuesResult{queues},\n\t\tMetaData: ResponseMetaData{\"00000000-0000-0000-0000-000000000000\"},\n\t}\n\n\tw.Header().Set(\"Content-Type\", \"application/xml\")\n\tenc := xml.NewEncoder(w)\n\tenc.Indent(\" \", \" \")\n\tif err := enc.Encode(response); err != nil {\n\t\tlog.Errorf(\"error: %s\", err)\n\t}\n}", "func (r *RPC) GetQueueClient() queue.Client {\r\n\treturn r.c\r\n}", "func (r *RPC) GetQueueClient() queue.Client {\n\treturn r.c\n}", "func (h *Hospital) ConsumeQueues(ctx context.Context, t *testing.T) (events int, messages []string) {\n\tt.Helper()\n\treturn h.ConsumeQueuesWithLimit(ctx, t, -1, true)\n}", "func (this *Queue) GetQueue() (val Mensaje, err error) {\n\t// Primero determina si la cola está vacía\n\tif this.rear == this.front {\n\t\treturn Mensaje{0, \"0\", \"0\"}, errors.New(\"Cola de Mensajes Vacia\")\n\t}\n\tthis.front++\n\tval = this.array[this.front]\n\treturn val, err\n}", "func (p *Process) CmdGetQueue(pac teoapi.Packet) (err error) {\n\tdata := pac.RemoveTrailingZero(pac.Data())\n\trequest := cdb.KeyValue{Cmd: pac.Cmd()}\n\tif err = request.UnmarshalText(data); err != nil {\n\t\treturn\n\t}\n\t// Return only Value for text requests and all fields for json\n\tresponce := request\n\tif responce.Value, err = p.tcdb.GetQueue(request.Key); err != nil {\n\t\treturn\n\t} else if !request.RequestInJSON {\n\t\t_, err = p.tcdb.con.SendAnswer(pac, pac.Cmd(), responce.Value)\n\t} else if retdata, err := responce.MarshalText(); err == nil {\n\t\t_, err = p.tcdb.con.SendAnswer(pac, pac.Cmd(), retdata)\n\t}\n\treturn\n}", "func (s *UserDataFilters) SetQueues(v []*string) *UserDataFilters {\n\ts.Queues = 
v\n\treturn s\n}", "func (q *Queue) GetQueue() []types.Event {\n\treturn q.Queue\n}", "func (s QueueSetSpy) Queues() map[DeploymentID]*R11nQueue {\n\tres := s.Called()\n\treturn res.Get(0).(map[DeploymentID]*R11nQueue)\n}", "func (s *Filters) SetQueues(v []*string) *Filters {\n\ts.Queues = v\n\treturn s\n}", "func (m *RedisProxy) GetCluster() string {\n\tif m != nil {\n\t\treturn m.Cluster\n\t}\n\treturn \"\"\n}", "func (p *Pool) GetQueue() chan ThreeDPrinter {\n\treturn p.printers\n}", "func maximumClique(g graph.Undirected) (k int, maxClique []graph.Node, cliques [][]graph.Node) {\n\tcliques = topo.BronKerbosch(g)\n\tfor _, c := range topo.BronKerbosch(g) {\n\t\tif len(c) > len(maxClique) {\n\t\t\tmaxClique = c\n\t\t}\n\t}\n\treturn len(maxClique), maxClique, cliques\n}", "func (cfg *Config) MQServers() string {\n\treturn os.Getenv(\"MQ_SERVERS\")\n}", "func (a *Client) GetMsgVpnJndiQueues(params *GetMsgVpnJndiQueuesParams, authInfo runtime.ClientAuthInfoWriter) (*GetMsgVpnJndiQueuesOK, error) {\n\t// TODO: Validate the params before sending\n\tif params == nil {\n\t\tparams = NewGetMsgVpnJndiQueuesParams()\n\t}\n\n\tresult, err := a.transport.Submit(&runtime.ClientOperation{\n\t\tID: \"getMsgVpnJndiQueues\",\n\t\tMethod: \"GET\",\n\t\tPathPattern: \"/msgVpns/{msgVpnName}/jndiQueues\",\n\t\tProducesMediaTypes: []string{\"application/json\"},\n\t\tConsumesMediaTypes: []string{\"application/json\"},\n\t\tSchemes: []string{\"http\", \"https\"},\n\t\tParams: params,\n\t\tReader: &GetMsgVpnJndiQueuesReader{formats: a.formats},\n\t\tAuthInfo: authInfo,\n\t\tContext: params.Context,\n\t\tClient: params.HTTPClient,\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn result.(*GetMsgVpnJndiQueuesOK), nil\n\n}", "func (o *QueueManager) GetRemoteQueues() []RemoteQueue {\n\tif o == nil {\n\t\tvar ret []RemoteQueue\n\t\treturn ret\n\t}\n\n\treturn o.RemoteQueues\n}", "func (rqs *R11nQueueSet) Queues() map[DeploymentID]*R11nQueue {\n\trqs.Lock()\n\tdefer 
rqs.Unlock()\n\ts := make(map[DeploymentID]*R11nQueue, len(rqs.set))\n\tfor k, v := range rqs.set {\n\t\ts[k] = v\n\t}\n\treturn s\n}", "func (c *ClientIMPL) GetCluster(ctx context.Context) (resp Cluster, err error) {\n\tvar systemList []Cluster\n\tcluster := Cluster{}\n\tqp := c.APIClient().QueryParamsWithFields(&cluster)\n\n\tmajorMinorVersion, err := c.GetSoftwareMajorMinorVersion(ctx)\n\tif err != nil {\n\t\tlog.Errorf(\"Couldn't find the array version %s\", err.Error())\n\t} else {\n\t\tif majorMinorVersion >= 3.0 {\n\t\t\tqp.Select(\"nvm_subsystem_nqn\")\n\t\t}\n\t}\n\t_, err = c.APIClient().Query(\n\t\tctx,\n\t\tRequestConfig{\n\t\t\tMethod: \"GET\",\n\t\t\tEndpoint: clusterURL,\n\t\t\tQueryParams: qp,\n\t\t},\n\t\t&systemList)\n\terr = WrapErr(err)\n\tif err != nil {\n\t\treturn resp, err\n\t}\n\treturn systemList[0], err\n}", "func (o *QueueManager) GetClustersOk() (*[]string, bool) {\n\tif o == nil {\n\t\treturn nil, false\n\t}\n\treturn &o.Clusters, true\n}", "func (p Peel) AllQueuesConsumerGroups() (map[string][]string, error) {\n\tkk, err := p.c.KeyScan(core.Key{Base: \"*\", Subs: []string{\"*\"}})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tm := map[string]map[string]struct{}{}\n\tfor _, k := range kk {\n\t\tif k, err = queueKeyUnmarshal(k); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tif m[k.Base] == nil {\n\t\t\tm[k.Base] = map[string]struct{}{}\n\t\t}\n\t\tif k.Subs[0] == \"available\" {\n\t\t\tcontinue\n\t\t}\n\t\tm[k.Base][k.Subs[0]] = struct{}{}\n\t}\n\n\toutm := map[string][]string{}\n\tfor q, cgm := range m {\n\t\toutm[q] = make([]string, 0, len(cgm))\n\t\tfor cg := range cgm {\n\t\t\toutm[q] = append(outm[q], cg)\n\t\t}\n\t}\n\n\treturn outm, nil\n}", "func (taskBolt *TaskBolt) ReadQueue(n int) []*ga4gh_task_exec.Job {\n\tjobs := make([]*ga4gh_task_exec.Job, 0)\n\ttaskBolt.db.View(func(tx *bolt.Tx) error {\n\n\t\t// Iterate over the JobsQueued bucket, reading the first `n` jobs\n\t\tc := tx.Bucket(JobsQueued).Cursor()\n\t\tfor k, _ 
:= c.First(); k != nil && len(jobs) < n; k, _ = c.Next() {\n\t\t\tid := string(k)\n\t\t\tjob := getJob(tx, id)\n\t\t\tjobs = append(jobs, job)\n\t\t}\n\t\treturn nil\n\t})\n\treturn jobs\n}", "func (obj *miner) Queue() buckets.Buckets {\n\treturn obj.queue\n}", "func (c *jsiiProxy_CfnQueue) ToString() *string {\n\tvar returns *string\n\n\t_jsii_.Invoke(\n\t\tc,\n\t\t\"toString\",\n\t\tnil, // no parameters\n\t\t&returns,\n\t)\n\n\treturn returns\n}", "func (c *restClient) GetQueue(ctx context.Context, req *cloudtaskspb.GetQueueRequest, opts ...gax.CallOption) (*cloudtaskspb.Queue, error) {\n\tbaseUrl, err := url.Parse(c.endpoint)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tbaseUrl.Path += fmt.Sprintf(\"/v2beta3/%v\", req.GetName())\n\n\tparams := url.Values{}\n\tparams.Add(\"$alt\", \"json;enum-encoding=int\")\n\tif req.GetReadMask() != nil {\n\t\treadMask, err := protojson.Marshal(req.GetReadMask())\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tparams.Add(\"readMask\", string(readMask[1:len(readMask)-1]))\n\t}\n\n\tbaseUrl.RawQuery = params.Encode()\n\n\t// Build HTTP headers from client and context metadata.\n\thds := []string{\"x-goog-request-params\", fmt.Sprintf(\"%s=%v\", \"name\", url.QueryEscape(req.GetName()))}\n\n\thds = append(c.xGoogHeaders, hds...)\n\thds = append(hds, \"Content-Type\", \"application/json\")\n\theaders := gax.BuildHeaders(ctx, hds...)\n\topts = append((*c.CallOptions).GetQueue[0:len((*c.CallOptions).GetQueue):len((*c.CallOptions).GetQueue)], opts...)\n\tunm := protojson.UnmarshalOptions{AllowPartial: true, DiscardUnknown: true}\n\tresp := &cloudtaskspb.Queue{}\n\te := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {\n\t\tif settings.Path != \"\" {\n\t\t\tbaseUrl.Path = settings.Path\n\t\t}\n\t\thttpReq, err := http.NewRequest(\"GET\", baseUrl.String(), nil)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\thttpReq = httpReq.WithContext(ctx)\n\t\thttpReq.Header = headers\n\n\t\thttpRsp, err := 
c.httpClient.Do(httpReq)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tdefer httpRsp.Body.Close()\n\n\t\tif err = googleapi.CheckResponse(httpRsp); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tbuf, err := io.ReadAll(httpRsp.Body)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tif err := unm.Unmarshal(buf, resp); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\treturn nil\n\t}, opts...)\n\tif e != nil {\n\t\treturn nil, e\n\t}\n\treturn resp, nil\n}", "func (m *MatchInfo) GetQueue(client *static.Client) (static.Queue, error) {\n\treturn client.GetQueue(m.QueueID)\n}", "func (psc *PartitionSchedulingContext) GetQueue(queueName string) *SchedulingQueue {\n psc.lock.RLock()\n defer psc.lock.RUnlock()\n\n return psc.queues[queueName]\n}", "func (d *Device) GetQueue(qf *QueueFamily) *Queue {\n\n\tvar vkq vk.Queue\n\n\tvk.GetDeviceQueue(d.VKDevice, uint32(qf.Index), 0, &vkq)\n\n\tvar queue Queue\n\tqueue.QueueFamily = qf\n\tqueue.Device = d\n\tqueue.VKQueue = vkq\n\n\treturn &queue\n}", "func (c *Cluster) GetKeyspaces(ctx context.Context) ([]*vtadminpb.Keyspace, error) {\n\tspan, ctx := trace.NewSpan(ctx, \"Cluster.GetKeyspaces\")\n\tdefer span.Finish()\n\n\tAnnotateSpan(c, span)\n\n\tif err := c.Vtctld.Dial(ctx); err != nil {\n\t\treturn nil, fmt.Errorf(\"Vtctld.Dial(cluster=%s) failed: %w\", c.ID, err)\n\t}\n\n\tresp, err := c.Vtctld.GetKeyspaces(ctx, &vtctldatapb.GetKeyspacesRequest{})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar (\n\t\tm sync.Mutex\n\t\twg sync.WaitGroup\n\t\trec concurrency.AllErrorRecorder\n\t\tkeyspaces = make([]*vtadminpb.Keyspace, len(resp.Keyspaces))\n\t)\n\n\tfor i, ks := range resp.Keyspaces {\n\t\twg.Add(1)\n\t\tgo func(i int, ks *vtctldatapb.Keyspace) {\n\t\t\tdefer wg.Done()\n\n\t\t\tshards, err := c.FindAllShardsInKeyspace(ctx, ks.Name, FindAllShardsInKeyspaceOptions{SkipDial: true})\n\t\t\tif err != nil {\n\t\t\t\trec.RecordError(err)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tkeyspace := &vtadminpb.Keyspace{\n\t\t\t\tCluster: 
c.ToProto(),\n\t\t\t\tKeyspace: ks,\n\t\t\t\tShards: shards,\n\t\t\t}\n\n\t\t\tm.Lock()\n\t\t\tdefer m.Unlock()\n\t\t\tkeyspaces[i] = keyspace\n\t\t}(i, ks)\n\t}\n\n\twg.Wait()\n\tif rec.HasErrors() {\n\t\treturn nil, rec.Error()\n\t}\n\n\treturn keyspaces, nil\n}", "func (client *Client) GetClusterQueueInfoWithChan(request *GetClusterQueueInfoRequest) (<-chan *GetClusterQueueInfoResponse, <-chan error) {\n\tresponseChan := make(chan *GetClusterQueueInfoResponse, 1)\n\terrChan := make(chan error, 1)\n\terr := client.AddAsyncTask(func() {\n\t\tdefer close(responseChan)\n\t\tdefer close(errChan)\n\t\tresponse, err := client.GetClusterQueueInfo(request)\n\t\tif err != nil {\n\t\t\terrChan <- err\n\t\t} else {\n\t\t\tresponseChan <- response\n\t\t}\n\t})\n\tif err != nil {\n\t\terrChan <- err\n\t\tclose(responseChan)\n\t\tclose(errChan)\n\t}\n\treturn responseChan, errChan\n}", "func (a *Client) GetMsgVpnQueues(params *GetMsgVpnQueuesParams, authInfo runtime.ClientAuthInfoWriter) (*GetMsgVpnQueuesOK, error) {\n\t// TODO: Validate the params before sending\n\tif params == nil {\n\t\tparams = NewGetMsgVpnQueuesParams()\n\t}\n\n\tresult, err := a.transport.Submit(&runtime.ClientOperation{\n\t\tID: \"getMsgVpnQueues\",\n\t\tMethod: \"GET\",\n\t\tPathPattern: \"/msgVpns/{msgVpnName}/queues\",\n\t\tProducesMediaTypes: []string{\"application/json\"},\n\t\tConsumesMediaTypes: []string{\"application/json\"},\n\t\tSchemes: []string{\"http\", \"https\"},\n\t\tParams: params,\n\t\tReader: &GetMsgVpnQueuesReader{formats: a.formats},\n\t\tAuthInfo: authInfo,\n\t\tContext: params.Context,\n\t\tClient: params.HTTPClient,\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn result.(*GetMsgVpnQueuesOK), nil\n\n}", "func (c *NetClient) OutQueue() chan<- []byte {\n\treturn c.outQueue\n}", "func (o TopicRuleSqsOutput) QueueUrl() pulumi.StringOutput {\n\treturn o.ApplyT(func(v TopicRuleSqs) string { return v.QueueUrl }).(pulumi.StringOutput)\n}", "func (cb *clientBase) GetCluster() 
string {\n\treturn cb.cluster\n}", "func (mq MetricsQueue) Len() int { return len(mq) }", "func (s *SQSServer) pollQueues(pollctx, taskctx context.Context, queues []QueueConf) error {\n\tfor _, qconf := range queues {\n\t\tq, err := s.getQueue(pollctx, qconf)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treq := &sqs.GetQueueAttributesInput{\n\t\t\tAttributeNames: []types.QueueAttributeName{(\"VisibilityTimeout\")},\n\t\t\tQueueUrl: &q.url,\n\t\t}\n\t\tresp, err := s.sqsSrv(q.QueueConf).GetQueueAttributes(pollctx, req)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"Failed to get queue attributes for '%s' - %s\", q.Name, err.Error())\n\t\t}\n\t\tto := resp.Attributes[\"VisibilityTimeout\"]\n\t\tif to == \"\" {\n\t\t\treturn fmt.Errorf(\"No visibility timeout returned by SQS for queue '%s'\", q.Name)\n\t\t}\n\t\tvisTimeout, err := strconv.Atoi(to)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"Failed to convert visibility timeout from '%s' to int - '%s'\", to, err.Error())\n\t\t}\n\t\t// Each queue runs in a dedicated go routine.\n\t\tgo func(vt int32) {\n\t\t\ts.queuePollers.Add(1)\n\t\t\tdefer s.queuePollers.Done()\n\t\t\ts.run(pollctx, taskctx, q, vt)\n\t\t}(int32(visTimeout))\n\t}\n\n\treturn nil\n}", "func GetMessageQueue() *MessageQueue {\n\treturn messageQueue\n}", "func clusterList() []string {\n\tif c := envy.String(\"DQLITED_CLUSTER\"); c != \"\" {\n\t\treturn strings.Split(c, \",\")\n\t}\n\treturn defaultCluster\n}", "func (b *backend) QueueStats(ctx context.Context, qq *entroq.QueuesQuery) (map[string]*entroq.QueueStat, error) {\n\tresp, err := pb.NewEntroQClient(b.conn).QueueStats(ctx, &pb.QueuesRequest{\n\t\tMatchPrefix: qq.MatchPrefix,\n\t\tMatchExact: qq.MatchExact,\n\t\tLimit: int32(qq.Limit),\n\t})\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to get queue stats over gRPC: %w\", err)\n\t}\n\tqs := make(map[string]*entroq.QueueStat)\n\tfor _, q := range resp.Queues {\n\t\tqs[q.Name] = &entroq.QueueStat{\n\t\t\tName: 
q.Name,\n\t\t\tSize: int(q.NumTasks),\n\t\t\tClaimed: int(q.NumClaimed),\n\t\t\tAvailable: int(q.NumAvailable),\n\t\t\tMaxClaims: int(q.MaxClaims),\n\t\t}\n\t}\n\treturn qs, nil\n}", "func groomQueues(queues *Queues) (err kv.Error) {\n\tfor qName, qDetails := range *queues {\n\t\t// If we have enough runners drop the queue as it needs nothing done to it\n\t\tif len(qDetails.NodeGroup) == 0 || qDetails.Running >= qDetails.Ready+qDetails.NotVisible {\n\t\t\tif logger.IsTrace() {\n\t\t\t\tlogger.Trace(\"queue already handled\", \"queue\", qName, \"stack\", stack.Trace().TrimRuntime())\n\t\t\t}\n\t\t\tdelete(*queues, qName)\n\t\t}\n\t}\n\treturn nil\n}", "func (o NetworkInterfaceOutput) QueueCount() pulumi.IntPtrOutput {\n\treturn o.ApplyT(func(v NetworkInterface) *int { return v.QueueCount }).(pulumi.IntPtrOutput)\n}", "func (client *Client) GetQueueURL(name string) (string, error) {\n\tvar parsedResponse GetQueueURLResult\n\turl := NewGetQueueURLRequest(client.EndPointURL, name).URL()\n\n\tresp, err := client.Get(url)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tdefer resp.Body.Close()\n\n\tbody, err := ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tif resp.StatusCode != 200 {\n\t\treturn \"\", errors.New(string(body))\n\t}\n\n\terr = xml.Unmarshal(body, &parsedResponse)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\treturn parsedResponse.QueueURL, nil\n}", "func (c *MQCache) Len() (totalLen int64, queuesLen []int64) {\n\tc.lock.Lock()\n\tdefer c.lock.Unlock()\n\tfor i := 0; i < c.queues; i++ {\n\t\tc.queuesLen[i] = c.q[i].len()\n\t\ttotalLen += c.queuesLen[i]\n\t}\n\treturn totalLen, c.queuesLen\n}", "func (m *SetNodePoolSizeRequest) GetClusterId() string {\n\tif m != nil {\n\t\treturn m.ClusterId\n\t}\n\treturn \"\"\n}", "func GetQueue(id string) Queue {\n\tservice := broker.GetService(ServiceName).(*QueueService)\n\treturn service.getQueue(id)\n}", "func GetClusterConfig(req *restful.Request, resp *restful.Response) {\n\tconst 
(\n\t\thandler = \"GetClusterConfig\"\n\t)\n\tspan := v1http.SetHTTPSpanContextInfo(req, handler)\n\tdefer span.Finish()\n\n\tr, err := generateData(req, getCls)\n\tif err != nil {\n\t\tutils.SetSpanLogTagError(span, err)\n\t\tblog.Errorf(\"%s | err: %v\", common.BcsErrStorageGetResourceFailStr, err)\n\t\tlib.ReturnRest(&lib.RestResponse{\n\t\t\tResp: resp,\n\t\t\tErrCode: common.BcsErrStorageGetResourceFail,\n\t\t\tMessage: common.BcsErrStorageGetResourceFailStr})\n\t\treturn\n\t}\n\tlib.ReturnRest(&lib.RestResponse{Resp: resp, Data: r})\n}", "func (c *QueueClient) Get(ctx context.Context, id int) (*Queue, error) {\n\treturn c.Query().Where(queue.ID(id)).Only(ctx)\n}", "func (clusterInfo ClusterInfo) CreateQueues(queues []rh.QueueInfo) error {\n\trmqc, err := rh.NewClient(clusterInfo.AdminURL(), clusterInfo.UserName, clusterInfo.Password)\n\tif err != nil {\n\t\treturn err\n\t}\n\tfor _, queue := range queues {\n\t\tlog.Printf(\"Creating queue %v\", queue.Name)\n\t\t_, err = rmqc.DeclareQueue(clusterInfo.Vhost, queue.Name, rh.QueueSettings{Durable: queue.Durable, AutoDelete: queue.AutoDelete, Arguments: queue.Arguments})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}", "func (o *V0037Node) GetThreads() int32 {\n\tif o == nil || o.Threads == nil {\n\t\tvar ret int32\n\t\treturn ret\n\t}\n\treturn *o.Threads\n}", "func (m SQSMonitor) receiveQueueMessages(qURL string) ([]*sqs.Message, error) {\n\tresult, err := m.SQS.ReceiveMessage(&sqs.ReceiveMessageInput{\n\t\tAttributeNames: []*string{\n\t\t\taws.String(sqs.MessageSystemAttributeNameSentTimestamp),\n\t\t},\n\t\tMessageAttributeNames: []*string{\n\t\t\taws.String(sqs.QueueAttributeNameAll),\n\t\t},\n\t\tQueueUrl: &qURL,\n\t\tMaxNumberOfMessages: aws.Int64(10),\n\t\tVisibilityTimeout: aws.Int64(20), // 20 seconds\n\t\tWaitTimeSeconds: aws.Int64(20), // Max long polling\n\t})\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn result.Messages, nil\n}", "func GetQueue(config *Configuration) 
(*Queue, error) {\n\tvar wg sync.WaitGroup\n\tvar wk int\n\n\tq := Queue{&wg, false, nil, nil, nil, nil, &wk}\n\n\tq.Config = config\n\n\tconn, err := amqp.Dial(config.Host)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tq.connection = conn\n\tq.Connected = true\n\tch, err := q.connection.Channel()\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tq.channel = ch\n\tq.channel.Qos(config.PrefetchCount, config.PrefetchByteSize, true)\n\n\tiq, err := q.channel.QueueDeclare(config.RoutingKey, config.Durable, config.DeleteIfUnused, config.Exclusive, config.NoWait, config.arguments)\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif config.Exchange != \"\" {\n\t\terr = q.bind()\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\tq.internalQueue = &iq\n\n\treturn &q, nil\n}", "func (o LookupQueueResultOutput) AppEngineHttpQueue() AppEngineHttpQueueResponseOutput {\n\treturn o.ApplyT(func(v LookupQueueResult) AppEngineHttpQueueResponse { return v.AppEngineHttpQueue }).(AppEngineHttpQueueResponseOutput)\n}", "func (s consumerNamespaceLister) Get(name string) (*arbv1.Consumer, error) {\n\tobj, exists, err := s.indexer.GetByKey(s.namespace + \"/\" + name)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif !exists {\n\t\treturn nil, errors.NewNotFound(arbv1.Resource(\"queue\"), name)\n\t}\n\treturn obj.(*arbv1.Consumer), nil\n}", "func (a *adapter) queueLookup(queueName string) (*sqs.GetQueueUrlOutput, error) {\n\treturn a.sqsClient.GetQueueUrl(&sqs.GetQueueUrlInput{\n\t\tQueueName: &queueName,\n\t})\n}", "func SetQueueSettings(ctx *context.Context) {\n\tqid := ctx.ParamsInt64(\"qid\")\n\tmq := queue.GetManager().GetManagedQueue(qid)\n\tif mq == nil {\n\t\tctx.Status(http.StatusNotFound)\n\t\treturn\n\t}\n\tif _, ok := mq.Managed.(queue.ManagedPool); !ok {\n\t\tctx.Flash.Error(ctx.Tr(\"admin.monitor.queue.pool.none\"))\n\t\tctx.Redirect(setting.AppSubURL + \"/admin/monitor/queue/\" + strconv.FormatInt(qid, 10))\n\t\treturn\n\t}\n\n\tmaxNumberStr := 
ctx.FormString(\"max-number\")\n\tnumberStr := ctx.FormString(\"number\")\n\ttimeoutStr := ctx.FormString(\"timeout\")\n\n\tvar err error\n\tvar maxNumber, number int\n\tvar timeout time.Duration\n\tif len(maxNumberStr) > 0 {\n\t\tmaxNumber, err = strconv.Atoi(maxNumberStr)\n\t\tif err != nil {\n\t\t\tctx.Flash.Error(ctx.Tr(\"admin.monitor.queue.settings.maxnumberworkers.error\"))\n\t\t\tctx.Redirect(setting.AppSubURL + \"/admin/monitor/queue/\" + strconv.FormatInt(qid, 10))\n\t\t\treturn\n\t\t}\n\t\tif maxNumber < -1 {\n\t\t\tmaxNumber = -1\n\t\t}\n\t} else {\n\t\tmaxNumber = mq.MaxNumberOfWorkers()\n\t}\n\n\tif len(numberStr) > 0 {\n\t\tnumber, err = strconv.Atoi(numberStr)\n\t\tif err != nil || number < 0 {\n\t\t\tctx.Flash.Error(ctx.Tr(\"admin.monitor.queue.settings.numberworkers.error\"))\n\t\t\tctx.Redirect(setting.AppSubURL + \"/admin/monitor/queue/\" + strconv.FormatInt(qid, 10))\n\t\t\treturn\n\t\t}\n\t} else {\n\t\tnumber = mq.BoostWorkers()\n\t}\n\n\tif len(timeoutStr) > 0 {\n\t\ttimeout, err = time.ParseDuration(timeoutStr)\n\t\tif err != nil {\n\t\t\tctx.Flash.Error(ctx.Tr(\"admin.monitor.queue.settings.timeout.error\"))\n\t\t\tctx.Redirect(setting.AppSubURL + \"/admin/monitor/queue/\" + strconv.FormatInt(qid, 10))\n\t\t\treturn\n\t\t}\n\t} else {\n\t\ttimeout = mq.BoostTimeout()\n\t}\n\n\tmq.SetPoolSettings(maxNumber, number, timeout)\n\tctx.Flash.Success(ctx.Tr(\"admin.monitor.queue.settings.changed\"))\n\tctx.Redirect(setting.AppSubURL + \"/admin/monitor/queue/\" + strconv.FormatInt(qid, 10))\n}", "func (o TopicRuleSqsPtrOutput) QueueUrl() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v *TopicRuleSqs) *string {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn &v.QueueUrl\n\t}).(pulumi.StringPtrOutput)\n}", "func (s databaseClusterNamespaceLister) Get(name string) (*v1alpha1.DatabaseCluster, error) {\n\tobj, exists, err := s.indexer.GetByKey(s.namespace + \"/\" + name)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif !exists {\n\t\treturn 
nil, errors.NewNotFound(v1alpha1.Resource(\"databasecluster\"), name)\n\t}\n\treturn obj.(*v1alpha1.DatabaseCluster), nil\n}", "func (o *QueueManager) GetAliasQueuesOk() (*[]AliasQueue, bool) {\n\tif o == nil {\n\t\treturn nil, false\n\t}\n\treturn &o.AliasQueues, true\n}", "func (m *ListNodePoolsRequest) GetClusterId() string {\n\tif m != nil {\n\t\treturn m.ClusterId\n\t}\n\treturn \"\"\n}", "func (m *RedisProxy_PrefixRoutes) GetCatchAllCluster() string {\n\tif m != nil {\n\t\treturn m.CatchAllCluster\n\t}\n\treturn \"\"\n}", "func GetSchedulableClusterZones(ctx context.Context, c clientset.Interface) (sets.Set[string], error) {\n\t// GetReadySchedulableNodes already filters our tainted and unschedulable nodes.\n\tnodes, err := GetReadySchedulableNodes(ctx, c)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"error getting nodes while attempting to list cluster zones: %w\", err)\n\t}\n\n\t// collect values of zone label from all nodes\n\tzones := sets.New[string]()\n\tfor _, node := range nodes.Items {\n\t\tif zone, found := node.Labels[v1.LabelFailureDomainBetaZone]; found {\n\t\t\tzones.Insert(zone)\n\t\t}\n\n\t\tif zone, found := node.Labels[v1.LabelTopologyZone]; found {\n\t\t\tzones.Insert(zone)\n\t\t}\n\t}\n\treturn zones, nil\n}", "func demo_queue() {\n fmt.Print(\"\\n---QUEUE Logic---\\n\\n\")\n q := queue.Queue{}\n\n for i := 0; i <= 5; i++ {\n q.Enqueue(i)\n }\n fmt.Print(\"---Queue Before Dequeue---\\n\")\n q.PrintAll()\n dequeued := q.Dequeue()\n fmt.Printf(\"Dequeued Value: %v\\n\", dequeued)\n fmt.Print(\"---Queue After Dequeue---\\n\")\n q.PrintAll()\n}", "func GetFromQueue(queue string) ([]byte, error) {\n\treturn cache.Get(queue)\n}", "func (c *Consumer) Messages() <-chan Message {\n\treturn c.queue\n}", "func (o *V0037JobProperties) GetClusterConstraints() string {\n\tif o == nil || o.ClusterConstraints == nil {\n\t\tvar ret string\n\t\treturn ret\n\t}\n\treturn *o.ClusterConstraints\n}", "func (s *Store) GetQueueNames() ([]string, error) 
{\n\tvar names []string\n\treturn names, s.db.View(func(tx *bolt.Tx) error {\n\t\treturn s.queues(tx).ForEach(func(key, value []byte) error {\n\t\t\tnames = append(names, string(key))\n\t\t\treturn nil\n\t\t})\n\t})\n}", "func (mb *client) ReadFIFOQueue(address uint16) (results []byte, err error) {\n\trequest := ProtocolDataUnit{\n\t\tFunctionCode: FuncCodeReadFIFOQueue,\n\t\tData: dataBlock(address),\n\t}\n\tresponse, err := mb.send(&request)\n\tif err != nil {\n\t\treturn\n\t}\n\tif len(response.Data) < 4 {\n\t\terr = fmt.Errorf(\"modbus: response data size '%v' is less than expected '%v'\", len(response.Data), 4)\n\t\treturn\n\t}\n\tcount := int(binary.BigEndian.Uint16(response.Data))\n\tif count != (len(response.Data) - 1) {\n\t\terr = fmt.Errorf(\"modbus: response data size '%v' does not match count '%v'\", len(response.Data)-1, count)\n\t\treturn\n\t}\n\tcount = int(binary.BigEndian.Uint16(response.Data[2:]))\n\tif count > 31 {\n\t\terr = fmt.Errorf(\"modbus: fifo count '%v' is greater than expected '%v'\", count, 31)\n\t\treturn\n\t}\n\tresults = response.Data[4:]\n\treturn\n}", "func (q *SimpleQueue) Get(ctx context.Context, user cn.CapUser, idStart int64, cntLimit int) (messages []*MessageWithMeta, err *mft.Error) {\n\tmessages, _, err = q.GetSegment(ctx, user, idStart, cntLimit, nil)\n\n\treturn messages, err\n}", "func (service *ContrailService) GetQosQueue(ctx context.Context, request *models.GetQosQueueRequest) (response *models.GetQosQueueResponse, err error) {\n\tspec := &models.ListSpec{\n\t\tLimit: 1,\n\t\tFilters: []*models.Filter{\n\t\t\t&models.Filter{\n\t\t\t\tKey: \"uuid\",\n\t\t\t\tValues: []string{request.ID},\n\t\t\t},\n\t\t},\n\t}\n\tlistRequest := &models.ListQosQueueRequest{\n\t\tSpec: spec,\n\t}\n\tvar result *models.ListQosQueueResponse\n\tif err := common.DoInTransaction(\n\t\tservice.DB,\n\t\tfunc(tx *sql.Tx) error {\n\t\t\tresult, err = db.ListQosQueue(ctx, tx, listRequest)\n\t\t\treturn err\n\t\t}); err != nil {\n\t\treturn nil, 
common.ErrorInternal\n\t}\n\tif len(result.QosQueues) == 0 {\n\t\treturn nil, common.ErrorNotFound\n\t}\n\tresponse = &models.GetQosQueueResponse{\n\t\tQosQueue: result.QosQueues[0],\n\t}\n\treturn response, nil\n}", "func (o *ClusterRequest) GetMaxRunningNodes() int32 {\n\tif o == nil || o.MaxRunningNodes == nil {\n\t\tvar ret int32\n\t\treturn ret\n\t}\n\treturn *o.MaxRunningNodes\n}", "func deleteQueues(ctx *TestContext) {\n\tfor _, q := range ctx.Queues {\n\t\tDeleteQueue(ctx, q)\n\t}\n}", "func (service *ContrailService) ListQosQueue(\n\tctx context.Context,\n\trequest *models.ListQosQueueRequest) (response *models.ListQosQueueResponse, err error) {\n\tif err := common.DoInTransaction(\n\t\tservice.DB,\n\t\tfunc(tx *sql.Tx) error {\n\t\t\tresponse, err = db.ListQosQueue(ctx, tx, request)\n\t\t\treturn err\n\t\t}); err != nil {\n\t\treturn nil, common.ErrorInternal\n\t}\n\treturn response, nil\n}", "func GetClusterZones(ctx context.Context, c clientset.Interface) (sets.String, error) {\n\tnodes, err := c.CoreV1().Nodes().List(ctx, metav1.ListOptions{})\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"Error getting nodes while attempting to list cluster zones: %w\", err)\n\t}\n\n\t// collect values of zone label from all nodes\n\tzones := sets.NewString()\n\tfor _, node := range nodes.Items {\n\t\tif zone, found := node.Labels[v1.LabelFailureDomainBetaZone]; found {\n\t\t\tzones.Insert(zone)\n\t\t}\n\n\t\tif zone, found := node.Labels[v1.LabelTopologyZone]; found {\n\t\t\tzones.Insert(zone)\n\t\t}\n\t}\n\treturn zones, nil\n}", "func (s *ItemQueue) GetMessages() []int {\n\tvar messages []int\n\ts.lock.Lock()\n\n\tfor i := 0; i < len(s.items); i++ {\n\t\t\tmessages[i] = s.items[i].ID\n\t}\n\n\ts.lock.Unlock()\n\treturn messages\n}", "func (d Dispatcher) JobQueueCount() int {\n\treturn d.GetJobPQ().Len()\n}", "func (m *ServerContext) OperationQueue() cutter.OperationQueue {\n\treturn m.OpQueue\n}", "func (o *PendingDeleteCluster) GetCluster() (value *Cluster, ok 
bool) {\n\tok = o != nil && o.bitmap_&16 != 0\n\tif ok {\n\t\tvalue = o.cluster\n\t}\n\treturn\n}", "func (c *checkQueueAttributeImpl) CheckQueueAttributeQuery(options CheckQueueAttributeOptions) icinga.Result {\n\tname := \"Queue.Attributes\"\n\n\tstatusCheck, err := icinga.NewStatusCheck(options.ThresholdWarning, options.ThresholdCritical)\n\tif err != nil {\n\t\treturn icinga.NewResult(name, icinga.ServiceStatusUnknown, fmt.Sprintf(\"can't check status: %v\", err))\n\t}\n\n\tif len(options.OkIfQueueIsMissing) > 0 {\n\t\tproperty := \"broker=\\\"0.0.0.0\\\"\"\n\t\tattribute := \"QueueNames\"\n\t\tqueueSearchResult, err := c.JolokiaClient.GetAttr(options.Domain, []string{property}, attribute)\n\t\tif err != nil {\n\t\t\treturn icinga.NewResult(name, icinga.ServiceStatusUnknown, fmt.Sprintf(\"can't query QueueNames in Jolokia: %v\", err))\n\t\t}\n\t\tif queueSearchResult == nil {\n\t\t\tif (options.Verbose > 0) {\n\t\t\t\tlog.Printf(\"No queues found: [%v]\", queueSearchResult)\n\t\t\t}\n\t\t\treturn icinga.NewResult(name, icinga.ServiceStatusUnknown, fmt.Sprintf(\"can't find QueueNames for [%v]\", property))\n\t\t}\n\n\t\tif !queueExists(queueSearchResult.([] interface{}), options.OkIfQueueIsMissing) {\n\t\t\tif (options.Verbose > 0) {\n\t\t\t\tlog.Printf(\"Queue [%v] not in queue list [%v]\", options.OkIfQueueIsMissing, queueSearchResult.([] interface{}))\n\t\t\t}\n\t\t\treturn icinga.NewResult(name, icinga.ServiceStatusOk, fmt.Sprintf(\"queue [%v] does not exist\", options.OkIfQueueIsMissing))\n\t\t}\n\t}\n\n\tsearchResult, err := c.JolokiaClient.GetAttr(options.Domain, []string{options.Queue}, options.Attribute)\n\tif err != nil {\n\t\treturn icinga.NewResult(name, icinga.ServiceStatusUnknown, fmt.Sprintf(\"can't query Jolokia: %v\", err))\n\t}\n\n\tresult, err := utils.ToFloat(searchResult)\n\tif err != nil {\n\t\tif (options.Verbose > 0) {\n\t\t\tlog.Printf(\"An error occured with result [%v]\", searchResult)\n\t\t}\n\t\treturn icinga.NewResult(name, 
icinga.ServiceStatusUnknown, fmt.Sprintf(\"query result is invalid: %v\", err))\n\t}\n\n\tmessage := fmt.Sprintf(\"Search produced: %v\", searchResult)\n\tstatus := statusCheck.Check(result)\n\n\treturn icinga.NewResult(name, status, message)\n}", "func (o NetworkInterfaceResponseOutput) QueueCount() pulumi.IntOutput {\n\treturn o.ApplyT(func(v NetworkInterfaceResponse) int { return v.QueueCount }).(pulumi.IntOutput)\n}", "func printQueue(q *Queue) {\n\tfmt.Println(q.values)\n}", "func (player *musicPlayer) getQueueInfo() ([]string, error) {\n\tplayer.Lock()\n\tdefer player.Unlock()\n\tif len(player.state.queue) == 0 {\n\t\treturn nil, errors.New(cannot_get_queue_info_msg)\n\t}\n\t//make a copy to the queue\n\tcopy := make([]string, 0, len(player.state.queue))\n\tfor _, el := range player.state.queue {\n\t\tcopy = append(copy, el)\n\t}\n\treturn copy, nil\n}" ]
[ "0.6691833", "0.6425696", "0.6164038", "0.6133957", "0.6117693", "0.6031181", "0.60217136", "0.5825728", "0.57695156", "0.57689", "0.5742274", "0.56948394", "0.55601054", "0.55268496", "0.5474696", "0.5450112", "0.5429199", "0.539163", "0.5356741", "0.52030075", "0.51988304", "0.5182143", "0.51490265", "0.51087147", "0.5103361", "0.5099392", "0.50851905", "0.5066335", "0.50540495", "0.5025029", "0.49590418", "0.4906924", "0.49022672", "0.48896283", "0.48868766", "0.48661202", "0.48596022", "0.48557484", "0.48547757", "0.48427603", "0.48256513", "0.48014542", "0.479396", "0.4790269", "0.47799766", "0.47531062", "0.4700358", "0.46963513", "0.46939135", "0.46890926", "0.46743894", "0.46738762", "0.46722856", "0.466206", "0.46505934", "0.46260458", "0.46253824", "0.46127254", "0.46066052", "0.46048033", "0.45979723", "0.45952168", "0.45886198", "0.4588499", "0.4586456", "0.45849684", "0.45836443", "0.45542175", "0.45519477", "0.45509022", "0.45242378", "0.45202643", "0.4508231", "0.45065653", "0.44599864", "0.44561458", "0.4452559", "0.44497415", "0.4448812", "0.44433922", "0.44373363", "0.44353268", "0.44342008", "0.4428824", "0.44241643", "0.4419383", "0.44171697", "0.4397003", "0.4396048", "0.4394981", "0.43945265", "0.43902647", "0.43878347", "0.43853906", "0.43821993", "0.4378504", "0.43714702", "0.4365387", "0.43605918", "0.435876" ]
0.741813
0
GetClusterQueuesOk returns a tuple with the ClusterQueues field value and a boolean to check if the value has been set.
GetClusterQueuesOk возвращает кортеж с значением поля ClusterQueues и булевым значением для проверки, было ли значение задано.
func (o *QueueManager) GetClusterQueuesOk() (*[]ClusterQueue, bool) { if o == nil { return nil, false } return &o.ClusterQueues, true }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (o *QueueManager) GetAliasQueuesOk() (*[]AliasQueue, bool) {\n\tif o == nil {\n\t\treturn nil, false\n\t}\n\treturn &o.AliasQueues, true\n}", "func (o *QueueManager) GetClustersOk() (*[]string, bool) {\n\tif o == nil {\n\t\treturn nil, false\n\t}\n\treturn &o.Clusters, true\n}", "func (o *QueueManager) GetRemoteQueuesOk() (*[]RemoteQueue, bool) {\n\tif o == nil {\n\t\treturn nil, false\n\t}\n\treturn &o.RemoteQueues, true\n}", "func (o *QueueManager) GetClusterQueues() []ClusterQueue {\n\tif o == nil {\n\t\tvar ret []ClusterQueue\n\t\treturn ret\n\t}\n\n\treturn o.ClusterQueues\n}", "func (b *backend) Queues(ctx context.Context, qq *entroq.QueuesQuery) (map[string]int, error) {\n\tresp, err := pb.NewEntroQClient(b.conn).Queues(ctx, &pb.QueuesRequest{\n\t\tMatchPrefix: qq.MatchPrefix,\n\t\tMatchExact: qq.MatchExact,\n\t\tLimit: int32(qq.Limit),\n\t})\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"grpc queues: %w\", unpackGRPCError(err))\n\t}\n\tqs := make(map[string]int)\n\tfor _, q := range resp.Queues {\n\t\tqs[q.Name] = int(q.NumTasks)\n\t}\n\treturn qs, nil\n}", "func (o *QueueManager) SetClusterQueues(v []ClusterQueue) {\n\to.ClusterQueues = v\n}", "func (c *Context) HasQueuesMap(key string) bool {\n\treturn c.makross.HasQueuesMap(key)\n}", "func (b *backend) Queues(ctx context.Context, qq *entroq.QueuesQuery) (map[string]int, error) {\n\tdefer un(lock(b))\n\n\tqs := make(map[string]int)\n\tfor q, items := range b.heaps {\n\t\tif len(qq.MatchPrefix) != 0 || len(qq.MatchExact) != 0 {\n\t\t\tif !matchesPrefix(q, qq.MatchPrefix...) && !matchesExact(q, qq.MatchExact...) 
{\n\t\t\t\t// no match\n\t\t\t\tcontinue\n\t\t\t}\n\t\t}\n\t\tqs[q] = items.Len()\n\t\tif qq.Limit > 0 && len(qs) >= qq.Limit {\n\t\t\tbreak\n\t\t}\n\t}\n\treturn qs, nil\n}", "func (client *Client) GetClusterQueueInfo(request *GetClusterQueueInfoRequest) (response *GetClusterQueueInfoResponse, err error) {\n\tresponse = CreateGetClusterQueueInfoResponse()\n\terr = client.DoAction(request, response)\n\treturn\n}", "func GetAvailableQueues() ([]string, error) {\n\tclient := &http.Client{}\n\n\tres, err := client.Get(Host + \"/queues\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\trespBody := res.Body\n\tdefer respBody.Close()\n\n\tavailableQueues := struct {\n\t\tQueues []string `json:\"queues\"`\n\t}{}\n\tif err := json.NewDecoder(respBody).Decode(&availableQueues); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn availableQueues.Queues, nil\n}", "func (o *V0037Node) GetThreadsOk() (*int32, bool) {\n\tif o == nil || o.Threads == nil {\n\t\treturn nil, false\n\t}\n\treturn o.Threads, true\n}", "func (m *Makross) HasQueuesMap(key string) bool {\n\tif value, okay := m.QueuesMap.Load(key); okay {\n\t\tif pqueue, okay := value.(*prior.PriorityQueue); okay {\n\t\t\tif pqueue.Length() > 0 {\n\t\t\t\treturn true\n\t\t\t}\n\t\t}\n\t}\n\treturn false\n}", "func (gores *Gores) Queues() []string {\n\tqueues := make([]string, 0)\n\n\tconn := gores.pool.Get()\n\tdefer conn.Close()\n\n\tdata, _ := conn.Do(\"SMEMBERS\", watchedQueues)\n\tfor _, q := range data.([]interface{}) {\n\t\tqueues = append(queues, string(q.([]byte)))\n\t}\n\n\treturn queues\n}", "func (s *UserDataFilters) SetQueues(v []*string) *UserDataFilters {\n\ts.Queues = v\n\treturn s\n}", "func (o *VnicEthAdapterPolicyInventory) GetTxQueueSettingsOk() (*VnicEthTxQueueSettings, bool) {\n\tif o == nil {\n\t\treturn nil, false\n\t}\n\treturn o.TxQueueSettings.Get(), o.TxQueueSettings.IsSet()\n}", "func (t *OpenconfigQos_Qos_Interfaces_Interface_Output_Queues) Validate(opts ...ygot.ValidationOption) error 
{\n\tif err := ytypes.Validate(SchemaTree[\"OpenconfigQos_Qos_Interfaces_Interface_Output_Queues\"], t, opts...); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}", "func GetQueues(c *gin.Context) {\n\t//TODO: create a while both back and front until value is != nil\n\tsize := len(queue)\n\tlog.Printf(\"squeue: %v\", queue)\n\tif size == 0 {\n\t\tc.JSON(http.StatusNotFound, gin.H{\n\t\t\t\"msg\": \"queue don't have any item!\",\n\t\t})\n\t\treturn\n\t}\n\tc.JSON(http.StatusOK, gin.H{\n\t\t\"queues\": queue,\n\t})\n\tlog.Printf(\"equeue: %v\", queue)\n}", "func (s *Filters) SetQueues(v []*string) *Filters {\n\ts.Queues = v\n\treturn s\n}", "func (o *VnicEthAdapterPolicyInventory) GetCompletionQueueSettingsOk() (*VnicCompletionQueueSettings, bool) {\n\tif o == nil {\n\t\treturn nil, false\n\t}\n\treturn o.CompletionQueueSettings.Get(), o.CompletionQueueSettings.IsSet()\n}", "func (i *Inspector) Queues() ([]string, error) {\n\treturn i.rdb.AllQueues()\n}", "func (o *VulnUpdateNotification) GetQueueIdOk() (*string, bool) {\n\tif o == nil || o.QueueId == nil {\n\t\treturn nil, false\n\t}\n\treturn o.QueueId, true\n}", "func (h *Hospital) ConsumeQueues(ctx context.Context, t *testing.T) (events int, messages []string) {\n\tt.Helper()\n\treturn h.ConsumeQueuesWithLimit(ctx, t, -1, true)\n}", "func (t *OpenconfigQos_Qos_Interfaces_Interface_Output_Queues_Queue) Validate(opts ...ygot.ValidationOption) error {\n\tif err := ytypes.Validate(SchemaTree[\"OpenconfigQos_Qos_Interfaces_Interface_Output_Queues_Queue\"], t, opts...); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}", "func (o *QueueManager) GetClusters() []string {\n\tif o == nil {\n\t\tvar ret []string\n\t\treturn ret\n\t}\n\n\treturn o.Clusters\n}", "func (s *API) ListQueues(w http.ResponseWriter, req *http.Request) {\n\tlog.Debug(\"ListQueues\")\n\n\tqueueNamePrefix := req.FormValue(\"QueueNamePrefix\")\n\tvar queues []string\n\tfor k, v := range s.sqs.queues {\n\t\tif strings.HasPrefix(k, queueNamePrefix) 
{\n\t\t\tqueues = append(queues, v.url)\n\t\t}\n\t}\n\n\tresponse := ListQueuesResponse{\n\t\tResult: ListQueuesResult{queues},\n\t\tMetaData: ResponseMetaData{\"00000000-0000-0000-0000-000000000000\"},\n\t}\n\n\tw.Header().Set(\"Content-Type\", \"application/xml\")\n\tenc := xml.NewEncoder(w)\n\tenc.Indent(\" \", \" \")\n\tif err := enc.Encode(response); err != nil {\n\t\tlog.Errorf(\"error: %s\", err)\n\t}\n}", "func (o *Replication) GetMaxQueueSizeBytesOk() (*int64, bool) {\n\tif o == nil {\n\t\treturn nil, false\n\t}\n\treturn &o.MaxQueueSizeBytes, true\n}", "func (ss *SqsService) IsQueueEmpty(ctx context.Context) (isEmpty bool) {\n\tisEmpty = false\n\tinput := &sqs.GetQueueAttributesInput{\n\t\tQueueUrl: &ss.queueURL,\n\t\tAttributeNames: []types.QueueAttributeName{\n\t\t\t\"ApproximateNumberOfMessages\",\n\t\t\t\"ApproximateNumberOfMessagesNotVisible\",\n\t\t},\n\t}\n\toutput, err := ss.client.GetQueueAttributes(ctx, input)\n\n\tif err != nil {\n\t\tlog.Printf(\"Faided to get queue attributes from Queue %s, please try again later - %s\", ss.queueName, err.Error())\n\t\treturn\n\t}\n\n\tvisible, _ := strconv.Atoi(output.Attributes[\"ApproximateNumberOfMessages\"])\n\tnotVisible, _ := strconv.Atoi(output.Attributes[\"ApproximateNumberOfMessagesNotVisible\"])\n\n\tlog.Printf(\"Queue %s has %d not visible message(s) and %d visable message(s)\\n\", ss.queueName, notVisible, visible)\n\n\tif visible+notVisible <= 1 {\n\t\tisEmpty = true\n\t}\n\treturn\n}", "func (o *V0037JobProperties) GetClusterConstraintsOk() (*string, bool) {\n\tif o == nil || o.ClusterConstraints == nil {\n\t\treturn nil, false\n\t}\n\treturn o.ClusterConstraints, true\n}", "func (mq MetricsQueue) Len() int { return len(mq) }", "func (c *checkQueueAttributeImpl) CheckQueueAttributeQuery(options CheckQueueAttributeOptions) icinga.Result {\n\tname := \"Queue.Attributes\"\n\n\tstatusCheck, err := icinga.NewStatusCheck(options.ThresholdWarning, options.ThresholdCritical)\n\tif err != nil 
{\n\t\treturn icinga.NewResult(name, icinga.ServiceStatusUnknown, fmt.Sprintf(\"can't check status: %v\", err))\n\t}\n\n\tif len(options.OkIfQueueIsMissing) > 0 {\n\t\tproperty := \"broker=\\\"0.0.0.0\\\"\"\n\t\tattribute := \"QueueNames\"\n\t\tqueueSearchResult, err := c.JolokiaClient.GetAttr(options.Domain, []string{property}, attribute)\n\t\tif err != nil {\n\t\t\treturn icinga.NewResult(name, icinga.ServiceStatusUnknown, fmt.Sprintf(\"can't query QueueNames in Jolokia: %v\", err))\n\t\t}\n\t\tif queueSearchResult == nil {\n\t\t\tif (options.Verbose > 0) {\n\t\t\t\tlog.Printf(\"No queues found: [%v]\", queueSearchResult)\n\t\t\t}\n\t\t\treturn icinga.NewResult(name, icinga.ServiceStatusUnknown, fmt.Sprintf(\"can't find QueueNames for [%v]\", property))\n\t\t}\n\n\t\tif !queueExists(queueSearchResult.([] interface{}), options.OkIfQueueIsMissing) {\n\t\t\tif (options.Verbose > 0) {\n\t\t\t\tlog.Printf(\"Queue [%v] not in queue list [%v]\", options.OkIfQueueIsMissing, queueSearchResult.([] interface{}))\n\t\t\t}\n\t\t\treturn icinga.NewResult(name, icinga.ServiceStatusOk, fmt.Sprintf(\"queue [%v] does not exist\", options.OkIfQueueIsMissing))\n\t\t}\n\t}\n\n\tsearchResult, err := c.JolokiaClient.GetAttr(options.Domain, []string{options.Queue}, options.Attribute)\n\tif err != nil {\n\t\treturn icinga.NewResult(name, icinga.ServiceStatusUnknown, fmt.Sprintf(\"can't query Jolokia: %v\", err))\n\t}\n\n\tresult, err := utils.ToFloat(searchResult)\n\tif err != nil {\n\t\tif (options.Verbose > 0) {\n\t\t\tlog.Printf(\"An error occured with result [%v]\", searchResult)\n\t\t}\n\t\treturn icinga.NewResult(name, icinga.ServiceStatusUnknown, fmt.Sprintf(\"query result is invalid: %v\", err))\n\t}\n\n\tmessage := fmt.Sprintf(\"Search produced: %v\", searchResult)\n\tstatus := statusCheck.Check(result)\n\n\treturn icinga.NewResult(name, status, message)\n}", "func (t *OpenconfigQos_Qos_Queues) Validate(opts ...ygot.ValidationOption) error {\n\tif err := 
ytypes.Validate(SchemaTree[\"OpenconfigQos_Qos_Queues\"], t, opts...); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}", "func (clusterInfo ClusterInfo) CreateQueues(queues []rh.QueueInfo) error {\n\trmqc, err := rh.NewClient(clusterInfo.AdminURL(), clusterInfo.UserName, clusterInfo.Password)\n\tif err != nil {\n\t\treturn err\n\t}\n\tfor _, queue := range queues {\n\t\tlog.Printf(\"Creating queue %v\", queue.Name)\n\t\t_, err = rmqc.DeclareQueue(clusterInfo.Vhost, queue.Name, rh.QueueSettings{Durable: queue.Durable, AutoDelete: queue.AutoDelete, Arguments: queue.Arguments})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}", "func groomQueues(queues *Queues) (err kv.Error) {\n\tfor qName, qDetails := range *queues {\n\t\t// If we have enough runners drop the queue as it needs nothing done to it\n\t\tif len(qDetails.NodeGroup) == 0 || qDetails.Running >= qDetails.Ready+qDetails.NotVisible {\n\t\t\tif logger.IsTrace() {\n\t\t\t\tlogger.Trace(\"queue already handled\", \"queue\", qName, \"stack\", stack.Trace().TrimRuntime())\n\t\t\t}\n\t\t\tdelete(*queues, qName)\n\t\t}\n\t}\n\treturn nil\n}", "func (client *Client) GetClusterQueueInfoWithCallback(request *GetClusterQueueInfoRequest, callback func(response *GetClusterQueueInfoResponse, err error)) <-chan int {\n\tresult := make(chan int, 1)\n\terr := client.AddAsyncTask(func() {\n\t\tvar response *GetClusterQueueInfoResponse\n\t\tvar err error\n\t\tdefer close(result)\n\t\tresponse, err = client.GetClusterQueueInfo(request)\n\t\tcallback(response, err)\n\t\tresult <- 1\n\t})\n\tif err != nil {\n\t\tdefer close(result)\n\t\tcallback(nil, err)\n\t\tresult <- 0\n\t}\n\treturn result\n}", "func (_Rootchain *RootchainCaller) ExitsQueues(opts *bind.CallOpts, arg0 common.Address) (common.Address, error) {\n\tvar (\n\t\tret0 = new(common.Address)\n\t)\n\tout := ret0\n\terr := _Rootchain.contract.Call(opts, out, \"exitsQueues\", arg0)\n\treturn *ret0, err\n}", "func (t 
*OpenconfigQos_Qos_Interfaces_Interface_Output_Queues_Queue_Config) Validate(opts ...ygot.ValidationOption) error {\n\tif err := ytypes.Validate(SchemaTree[\"OpenconfigQos_Qos_Interfaces_Interface_Output_Queues_Queue_Config\"], t, opts...); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}", "func (c *restClient) ListQueues(ctx context.Context, req *cloudtaskspb.ListQueuesRequest, opts ...gax.CallOption) *QueueIterator {\n\tit := &QueueIterator{}\n\treq = proto.Clone(req).(*cloudtaskspb.ListQueuesRequest)\n\tunm := protojson.UnmarshalOptions{AllowPartial: true, DiscardUnknown: true}\n\tit.InternalFetch = func(pageSize int, pageToken string) ([]*cloudtaskspb.Queue, string, error) {\n\t\tresp := &cloudtaskspb.ListQueuesResponse{}\n\t\tif pageToken != \"\" {\n\t\t\treq.PageToken = pageToken\n\t\t}\n\t\tif pageSize > math.MaxInt32 {\n\t\t\treq.PageSize = math.MaxInt32\n\t\t} else if pageSize != 0 {\n\t\t\treq.PageSize = int32(pageSize)\n\t\t}\n\t\tbaseUrl, err := url.Parse(c.endpoint)\n\t\tif err != nil {\n\t\t\treturn nil, \"\", err\n\t\t}\n\t\tbaseUrl.Path += fmt.Sprintf(\"/v2beta3/%v/queues\", req.GetParent())\n\n\t\tparams := url.Values{}\n\t\tparams.Add(\"$alt\", \"json;enum-encoding=int\")\n\t\tif req.GetFilter() != \"\" {\n\t\t\tparams.Add(\"filter\", fmt.Sprintf(\"%v\", req.GetFilter()))\n\t\t}\n\t\tif req.GetPageSize() != 0 {\n\t\t\tparams.Add(\"pageSize\", fmt.Sprintf(\"%v\", req.GetPageSize()))\n\t\t}\n\t\tif req.GetPageToken() != \"\" {\n\t\t\tparams.Add(\"pageToken\", fmt.Sprintf(\"%v\", req.GetPageToken()))\n\t\t}\n\t\tif req.GetReadMask() != nil {\n\t\t\treadMask, err := protojson.Marshal(req.GetReadMask())\n\t\t\tif err != nil {\n\t\t\t\treturn nil, \"\", err\n\t\t\t}\n\t\t\tparams.Add(\"readMask\", string(readMask[1:len(readMask)-1]))\n\t\t}\n\n\t\tbaseUrl.RawQuery = params.Encode()\n\n\t\t// Build HTTP headers from client and context metadata.\n\t\thds := append(c.xGoogHeaders, \"Content-Type\", \"application/json\")\n\t\theaders := 
gax.BuildHeaders(ctx, hds...)\n\t\te := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {\n\t\t\tif settings.Path != \"\" {\n\t\t\t\tbaseUrl.Path = settings.Path\n\t\t\t}\n\t\t\thttpReq, err := http.NewRequest(\"GET\", baseUrl.String(), nil)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\thttpReq.Header = headers\n\n\t\t\thttpRsp, err := c.httpClient.Do(httpReq)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tdefer httpRsp.Body.Close()\n\n\t\t\tif err = googleapi.CheckResponse(httpRsp); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\tbuf, err := io.ReadAll(httpRsp.Body)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\tif err := unm.Unmarshal(buf, resp); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\treturn nil\n\t\t}, opts...)\n\t\tif e != nil {\n\t\t\treturn nil, \"\", e\n\t\t}\n\t\tit.Response = resp\n\t\treturn resp.GetQueues(), resp.GetNextPageToken(), nil\n\t}\n\n\tfetch := func(pageSize int, pageToken string) (string, error) {\n\t\titems, nextPageToken, err := it.InternalFetch(pageSize, pageToken)\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t\tit.items = append(it.items, items...)\n\t\treturn nextPageToken, nil\n\t}\n\n\tit.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf)\n\tit.pageInfo.MaxSize = int(req.GetPageSize())\n\tit.pageInfo.Token = req.GetPageToken()\n\n\treturn it\n}", "func (o *Replication) GetCurrentQueueSizeBytesOk() (*int64, bool) {\n\tif o == nil {\n\t\treturn nil, false\n\t}\n\treturn &o.CurrentQueueSizeBytes, true\n}", "func (t *OpenconfigQos_Qos_Interfaces_Interface_Input_VirtualOutputQueues_VoqInterface_Queues) Validate(opts ...ygot.ValidationOption) error {\n\tif err := ytypes.Validate(SchemaTree[\"OpenconfigQos_Qos_Interfaces_Interface_Input_VirtualOutputQueues_VoqInterface_Queues\"], t, opts...); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}", "func (_PlasmaFramework *PlasmaFrameworkSession) ExitsQueues(arg0 [32]byte) 
(common.Address, error) {\n\treturn _PlasmaFramework.Contract.ExitsQueues(&_PlasmaFramework.CallOpts, arg0)\n}", "func (o *UcsdBackupInfoAllOf) GetConnectorsOk() ([]UcsdConnectorPack, bool) {\n\tif o == nil || o.Connectors == nil {\n\t\treturn nil, false\n\t}\n\treturn o.Connectors, true\n}", "func (_PlasmaFramework *PlasmaFrameworkCaller) ExitsQueues(opts *bind.CallOpts, arg0 [32]byte) (common.Address, error) {\n\tvar (\n\t\tret0 = new(common.Address)\n\t)\n\tout := ret0\n\terr := _PlasmaFramework.contract.Call(opts, out, \"exitsQueues\", arg0)\n\treturn *ret0, err\n}", "func (_PlasmaFramework *PlasmaFrameworkCallerSession) ExitsQueues(arg0 [32]byte) (common.Address, error) {\n\treturn _PlasmaFramework.Contract.ExitsQueues(&_PlasmaFramework.CallOpts, arg0)\n}", "func CfnQueue_IsConstruct(x interface{}) *bool {\n\t_init_.Initialize()\n\n\tvar returns *bool\n\n\t_jsii_.StaticInvoke(\n\t\t\"monocdk.aws_mediaconvert.CfnQueue\",\n\t\t\"isConstruct\",\n\t\t[]interface{}{x},\n\t\t&returns,\n\t)\n\n\treturn returns\n}", "func (o *Environment) GetQuotasOk() (*EnvironmentQuotas, bool) {\n\tif o == nil || o.Quotas == nil {\n\t\treturn nil, false\n\t}\n\treturn o.Quotas, true\n}", "func (*OpenconfigQos_Qos_Interfaces_Interface_Output_Queues) IsYANGGoStruct() {}", "func NewGetCallQueueitemsOK() *GetCallQueueitemsOK {\n\treturn &GetCallQueueitemsOK{}\n}", "func (_Rootchain *RootchainCallerSession) ExitsQueues(arg0 common.Address) (common.Address, error) {\n\treturn _Rootchain.Contract.ExitsQueues(&_Rootchain.CallOpts, arg0)\n}", "func (s QueueSetSpy) Queues() map[DeploymentID]*R11nQueue {\n\tres := s.Called()\n\treturn res.Get(0).(map[DeploymentID]*R11nQueue)\n}", "func (s *SQSServer) pollQueues(pollctx, taskctx context.Context, queues []QueueConf) error {\n\tfor _, qconf := range queues {\n\t\tq, err := s.getQueue(pollctx, qconf)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treq := &sqs.GetQueueAttributesInput{\n\t\t\tAttributeNames: 
[]types.QueueAttributeName{(\"VisibilityTimeout\")},\n\t\t\tQueueUrl: &q.url,\n\t\t}\n\t\tresp, err := s.sqsSrv(q.QueueConf).GetQueueAttributes(pollctx, req)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"Failed to get queue attributes for '%s' - %s\", q.Name, err.Error())\n\t\t}\n\t\tto := resp.Attributes[\"VisibilityTimeout\"]\n\t\tif to == \"\" {\n\t\t\treturn fmt.Errorf(\"No visibility timeout returned by SQS for queue '%s'\", q.Name)\n\t\t}\n\t\tvisTimeout, err := strconv.Atoi(to)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"Failed to convert visibility timeout from '%s' to int - '%s'\", to, err.Error())\n\t\t}\n\t\t// Each queue runs in a dedicated go routine.\n\t\tgo func(vt int32) {\n\t\t\ts.queuePollers.Add(1)\n\t\t\tdefer s.queuePollers.Done()\n\t\t\ts.run(pollctx, taskctx, q, vt)\n\t\t}(int32(visTimeout))\n\t}\n\n\treturn nil\n}", "func (_Rootchain *RootchainSession) ExitsQueues(arg0 common.Address) (common.Address, error) {\n\treturn _Rootchain.Contract.ExitsQueues(&_Rootchain.CallOpts, arg0)\n}", "func (bs *BeanstalkdConnectionPool) ListQueues() (queueNames []string, err error) {\n\tqueueNames, err = bs.getGlobalConnect().ListTubes()\n\treturn\n}", "func (c *MQCache) Len() (totalLen int64, queuesLen []int64) {\n\tc.lock.Lock()\n\tdefer c.lock.Unlock()\n\tfor i := 0; i < c.queues; i++ {\n\t\tc.queuesLen[i] = c.q[i].len()\n\t\ttotalLen += c.queuesLen[i]\n\t}\n\treturn totalLen, c.queuesLen\n}", "func IsQueueExist(name string, ch *amqp.Channel) bool {\n\tvar exist bool\n\t_, err := ch.QueueInspect(name)\n\tif err == nil {\n\t\texist = true\n\t}\n\n\treturn exist\n}", "func (*OpenconfigQos_Qos_Interfaces_Interface_Input_VirtualOutputQueues_VoqInterface_Queues) IsYANGGoStruct() {}", "func (bq *InMemoryBuildQueue) ListPlatformQueues(ctx context.Context, request *emptypb.Empty) (*buildqueuestate.ListPlatformQueuesResponse, error) {\n\tbq.enter(bq.clock.Now())\n\tdefer bq.leave()\n\n\t// Obtain platform queue IDs in sorted order.\n\tplatformQueueList := 
append(platformQueueList(nil), bq.platformQueues...)\n\tsort.Sort(platformQueueList)\n\n\t// Extract status.\n\tplatformQueues := make([]*buildqueuestate.PlatformQueueState, 0, len(bq.platformQueues))\n\tfor _, pq := range platformQueueList {\n\t\tsizeClassQueues := make([]*buildqueuestate.SizeClassQueueState, 0, len(pq.sizeClassQueues))\n\t\tfor i, scq := range pq.sizeClassQueues {\n\t\t\texecutingWorkersCount := uint32(0)\n\t\t\tfor _, w := range scq.workers {\n\t\t\t\tif w.currentTask != nil {\n\t\t\t\t\texecutingWorkersCount++\n\t\t\t\t}\n\t\t\t}\n\t\t\tactiveInvocationsCount := uint32(0)\n\t\t\tfor _, i := range scq.invocations {\n\t\t\t\tif i.isActive() {\n\t\t\t\t\tactiveInvocationsCount++\n\t\t\t\t}\n\t\t\t}\n\t\t\tsizeClassQueues = append(sizeClassQueues, &buildqueuestate.SizeClassQueueState{\n\t\t\t\tSizeClass: pq.sizeClasses[i],\n\t\t\t\tTimeout: bq.cleanupQueue.getTimestamp(scq.cleanupKey),\n\t\t\t\tInvocationsCount: uint32(len(scq.invocations)),\n\t\t\t\tQueuedInvocationsCount: uint32(scq.queuedInvocations.Len()),\n\t\t\t\tActiveInvocationsCount: uint32(activeInvocationsCount),\n\t\t\t\tWorkersCount: uint32(len(scq.workers)),\n\t\t\t\tExecutingWorkersCount: executingWorkersCount,\n\t\t\t\tDrainsCount: uint32(len(scq.drains)),\n\t\t\t})\n\t\t}\n\t\tplatformQueues = append(platformQueues, &buildqueuestate.PlatformQueueState{\n\t\t\tName: pq.platformKey.GetPlatformQueueName(),\n\t\t\tSizeClassQueues: sizeClassQueues,\n\t\t})\n\t}\n\treturn &buildqueuestate.ListPlatformQueuesResponse{\n\t\tPlatformQueues: platformQueues,\n\t}, nil\n}", "func (t *OpenconfigQos_Qos_Interfaces_Interface_Input_VirtualOutputQueues_VoqInterface_Queues_Queue) Validate(opts ...ygot.ValidationOption) error {\n\tif err := ytypes.Validate(SchemaTree[\"OpenconfigQos_Qos_Interfaces_Interface_Input_VirtualOutputQueues_VoqInterface_Queues_Queue\"], t, opts...); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}", "func (o *ClusterRequest) GetMaxRunningNodesOk() (*int32, bool) {\n\tif o 
== nil || o.MaxRunningNodes == nil {\n\t\treturn nil, false\n\t}\n\treturn o.MaxRunningNodes, true\n}", "func (o *Ga4ghSearchCallSetsResponse) GetCallSetsOk() ([]Ga4ghCallSet, bool) {\n\tif o == nil || o.CallSets == nil {\n\t\tvar ret []Ga4ghCallSet\n\t\treturn ret, false\n\t}\n\treturn *o.CallSets, true\n}", "func (o *MoveClustersAccepted) IsSuccess() bool {\n\treturn true\n}", "func (mr *MockSQSAPIMockRecorder) ListQueues(arg0 interface{}) *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"ListQueues\", reflect.TypeOf((*MockSQSAPI)(nil).ListQueues), arg0)\n}", "func ValidateQueues(db *storm.DB, config Settings.FullClientSettings, tclient *torrent.Client) {\n\ttorrentQueues := Storage.FetchQueues(db)\n\tfor len(torrentQueues.ActiveTorrents) > config.MaxActiveTorrents {\n\t\tremoveTorrent := torrentQueues.ActiveTorrents[:1]\n\t\tfor _, singleTorrent := range tclient.Torrents() {\n\t\t\tif singleTorrent.InfoHash().String() == removeTorrent[0] {\n\t\t\t\tsingleTorrentFromStorage := Storage.FetchTorrentFromStorage(db, removeTorrent[0])\n\t\t\t\tRemoveTorrentFromActive(&singleTorrentFromStorage, singleTorrent, db)\n\t\t\t}\n\t\t}\n\t}\n\ttorrentQueues = Storage.FetchQueues(db)\n\tfor _, singleTorrent := range tclient.Torrents() {\n\t\tsingleTorrentFromStorage := Storage.FetchTorrentFromStorage(db, singleTorrent.InfoHash().String())\n\t\tif singleTorrentFromStorage.TorrentStatus == \"Stopped\" {\n\t\t\tcontinue\n\t\t}\n\t\tfor _, queuedTorrent := range torrentQueues.QueuedTorrents { //If we have a queued torrent that is missing data, and an active torrent that is seeding, then prioritize the missing data one\n\t\t\tif singleTorrent.InfoHash().String() == queuedTorrent {\n\t\t\t\tif singleTorrent.BytesMissing() > 0 {\n\t\t\t\t\tfor _, activeTorrent := range torrentQueues.ActiveTorrents {\n\t\t\t\t\t\tfor _, singleActiveTorrent := range tclient.Torrents() {\n\t\t\t\t\t\t\tif activeTorrent == 
singleActiveTorrent.InfoHash().String() {\n\t\t\t\t\t\t\t\tif singleActiveTorrent.Seeding() == true {\n\t\t\t\t\t\t\t\t\tsingleActiveTFS := Storage.FetchTorrentFromStorage(db, activeTorrent)\n\t\t\t\t\t\t\t\t\tLogger.WithFields(logrus.Fields{\"TorrentName\": singleActiveTFS.TorrentName}).Info(\"Seeding, Removing from active to add queued\")\n\t\t\t\t\t\t\t\t\tRemoveTorrentFromActive(&singleActiveTFS, singleActiveTorrent, db)\n\t\t\t\t\t\t\t\t\tsingleQueuedTFS := Storage.FetchTorrentFromStorage(db, queuedTorrent)\n\t\t\t\t\t\t\t\t\tLogger.WithFields(logrus.Fields{\"TorrentName\": singleQueuedTFS.TorrentName}).Info(\"Adding torrent to the queue, not active\")\n\t\t\t\t\t\t\t\t\tAddTorrentToActive(&singleQueuedTFS, singleTorrent, db)\n\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n}", "func (o *StorageHyperFlexStorageContainer) GetClusterOk() (*HyperflexClusterRelationship, bool) {\n\tif o == nil || o.Cluster == nil {\n\t\treturn nil, false\n\t}\n\treturn o.Cluster, true\n}", "func (*OpenconfigQos_Qos_Queues) IsYANGGoStruct() {}", "func (o *NiatelemetryNexusDashboardsAllOf) GetClusterUuidOk() (*string, bool) {\n\tif o == nil || o.ClusterUuid == nil {\n\t\treturn nil, false\n\t}\n\treturn o.ClusterUuid, true\n}", "func (q *execQueue) canQueue() bool {\n\tq.mu.Lock()\n\tok := !q.isClosed() && len(q.funcs) < cap(q.funcs)\n\tq.mu.Unlock()\n\treturn ok\n}", "func (t *OpenconfigQos_Qos_Interfaces_Interface_Input_VirtualOutputQueues_VoqInterface_Queues_Queue_Config) Validate(opts ...ygot.ValidationOption) error {\n\tif err := ytypes.Validate(SchemaTree[\"OpenconfigQos_Qos_Interfaces_Interface_Input_VirtualOutputQueues_VoqInterface_Queues_Queue_Config\"], t, opts...); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}", "func (o *V0037JobProperties) GetQosOk() (*string, bool) {\n\tif o == nil || o.Qos == nil {\n\t\treturn nil, false\n\t}\n\treturn o.Qos, true\n}", "func (p *Project) Queues() (*[]Queue, error) {\n qs := 
make([]Queue, 0)\n err := Mongo.Get(\"queue\", bson.M{\"project\": p.ID}, MaxQueuesPerProject, &qs)\n return &qs, err\n}", "func (o *DnsZoneDataData) GetZoneMastersOk() (*string, bool) {\n\tif o == nil || o.ZoneMasters == nil {\n\t\treturn nil, false\n\t}\n\treturn o.ZoneMasters, true\n}", "func (o *V0037Node) GetCoresOk() (*int32, bool) {\n\tif o == nil || o.Cores == nil {\n\t\treturn nil, false\n\t}\n\treturn o.Cores, true\n}", "func (o *VnicEthAdapterPolicyInventory) GetRxQueueSettingsOk() (*VnicEthRxQueueSettings, bool) {\n\tif o == nil {\n\t\treturn nil, false\n\t}\n\treturn o.RxQueueSettings.Get(), o.RxQueueSettings.IsSet()\n}", "func (t *TopicCache) IsQueueEmpty(projectName, serviceName string) bool {\n\tt.RLock()\n\tdefer t.RUnlock()\n\n\t_, ok := t.inQueue[projectName+serviceName]\n\n\treturn !ok\n}", "func (*OpenconfigQos_Qos_Interfaces_Interface_Input_VirtualOutputQueues_VoqInterface_Queues_Queue) IsYANGGoStruct() {}", "func (o *SMSConnectorSettings) GetLimitsOk() (*Thresholds, bool) {\n\tif o == nil || o.Limits == nil {\n\t\treturn nil, false\n\t}\n\treturn o.Limits, true\n}", "func hasFreeQueueSlots() bool {\n\tif activeClients > activeClientsMax {\n\t\tatomic.AddInt64(&activeClientsReachedTimes, 1)\n\n\t\treturn false\n\t}\n\n\tif activeClients >= activeClientsReached {\n\t\tactiveClientsReached = activeClients\n\t}\n\n\treturn true\n}", "func (o *V0037Node) GetBoardsOk() (*int32, bool) {\n\tif o == nil || o.Boards == nil {\n\t\treturn nil, false\n\t}\n\treturn o.Boards, true\n}", "func (c *captiveCoreTomlValues) QuorumSetIsConfigured() bool {\n\treturn len(c.QuorumSetEntries) > 0 || len(c.Validators) > 0\n}", "func (gcq *gcQueue) shouldQueue(\n\tctx context.Context, now hlc.ClockTimestamp, repl *Replica, _ *config.SystemConfig,\n) (bool, float64) {\n\n\t// Consult the protected timestamp state to determine whether we can GC and\n\t// the timestamp which can be used to calculate the score.\n\t_, zone := repl.DescAndZone()\n\tcanGC, _, gcTimestamp, 
oldThreshold, newThreshold := repl.checkProtectedTimestampsForGC(ctx, *zone.GC)\n\tif !canGC {\n\t\treturn false, 0\n\t}\n\t// If performing a GC will not advance the GC threshold, there's no reason\n\t// to GC again.\n\tif newThreshold.Equal(oldThreshold) {\n\t\treturn false, 0\n\t}\n\tr := makeGCQueueScore(ctx, repl, gcTimestamp, *zone.GC)\n\treturn r.ShouldQueue, r.FinalScore\n}", "func (o *V1WorkloadSpec) GetContainersOk() (*map[string]V1ContainerSpec, bool) {\n\tif o == nil || o.Containers == nil {\n\t\treturn nil, false\n\t}\n\treturn o.Containers, true\n}", "func (o *V0037JobProperties) GetRequeueOk() (*bool, bool) {\n\tif o == nil || o.Requeue == nil {\n\t\treturn nil, false\n\t}\n\treturn o.Requeue, true\n}", "func (o *ListClustersOnEndpointUsingGETOK) IsSuccess() bool {\n\treturn true\n}", "func (o *ClientConfiguration) GetAvailableBankGroupsOk() (*[]string, bool) {\n\tif o == nil {\n\t\treturn nil, false\n\t}\n\treturn &o.AvailableBankGroups, true\n}", "func (c *Client) ensureConsumerQueues(topic string) error {\n\taChan, err := c.getChannel()\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer aChan.Close()\n\n\tfor i := 0; i != c.numConsumerQueues; i++ {\n\t\tqueue := c.getRk(topic, i)\n\n\t\tif _, err := aChan.QueueDeclare(queue, true, false, false, false, amqp.Table{}); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tif err := aChan.QueueBind(queue, queue, topic, false, amqp.Table{}); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}", "func (*OpenconfigQos_Qos_Interfaces_Interface_Input_Queues) IsYANGGoStruct() {}", "func (m *MockSQSAPI) ListQueues(arg0 *sqs.ListQueuesInput) (*sqs.ListQueuesOutput, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"ListQueues\", arg0)\n\tret0, _ := ret[0].(*sqs.ListQueuesOutput)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func (cfg *Config) MQServers() string {\n\treturn os.Getenv(\"MQ_SERVERS\")\n}", "func (o *NSQProducer) GetTcpPortOk() (*int32, bool) {\n\tif o == nil || o.TcpPort == nil 
{\n\t\treturn nil, false\n\t}\n\treturn o.TcpPort, true\n}", "func TestSliceQueueSuccess(t *testing.T) {\n\tqueueSuccess(t, &queue.SliceQueue{})\n}", "func (cc *Cluster) IsQuorum(v uint64) bool {\n\n\tfor _, q := range cc.Quorums {\n\t\tif v&q == q {\n\t\t\treturn true\n\t\t}\n\t}\n\n\treturn false\n}", "func (o *NetgroupsSettingsCollectionGetOK) IsSuccess() bool {\n\treturn true\n}", "func WithQueues(queues []string) Option {\n\treturn func(opts *Options) {\n\t\topts.Queues = queues\n\t}\n}", "func (o *DnsZoneDataData) GetZoneClassParametersOk() (*[]ApiClassParameterOutputEntry, bool) {\n\tif o == nil || o.ZoneClassParameters == nil {\n\t\treturn nil, false\n\t}\n\treturn o.ZoneClassParameters, true\n}", "func (o *Cause) GetCausesOk() (*[]interface{}, bool) {\n\tif o == nil || o.Causes == nil {\n\t\treturn nil, false\n\t}\n\treturn o.Causes, true\n}", "func (o *StatusAzureServiceBus) GetRecordsProcessedOk() (*int64, bool) {\n\tif o == nil || IsNil(o.RecordsProcessed) {\n\t\treturn nil, false\n\t}\n\treturn o.RecordsProcessed, true\n}", "func (o *KubernetesPodStatus) GetQosClassOk() (*string, bool) {\n\tif o == nil || o.QosClass == nil {\n\t\treturn nil, false\n\t}\n\treturn o.QosClass, true\n}", "func (o *SecurityProblem) GetManagementZonesOk() (*[]ManagementZone, bool) {\n\tif o == nil || o.ManagementZones == nil {\n\t\treturn nil, false\n\t}\n\treturn o.ManagementZones, true\n}", "func (qc *QueueConfig) Exists() bool {\n\treturn qc._exists\n}", "func (c *client) ClusterExists() (bool, error) {\n\tclusterJSON, err := c.runCmd(\"cluster\", \"list\", \"-o\", \"json\")\n\tif err != nil {\n\t\treturn false, err\n\t}\n\n\tclusterList := &ClusterList{}\n\tif err := clusterList.Unmarshal([]byte(clusterJSON)); err != nil {\n\t\treturn false, err\n\t}\n\n\tfor _, cluster := range clusterList.Clusters {\n\t\tif cluster.Name == c.clusterName {\n\t\t\tif c.verbose {\n\t\t\t\tfmt.Printf(\"k3d cluster '%s' exists\", c.clusterName)\n\t\t\t}\n\t\t\treturn true, 
nil\n\t\t}\n\t}\n\n\tif c.verbose {\n\t\tfmt.Printf(\"k3d cluster '%s' does not exist\", c.clusterName)\n\t}\n\treturn false, nil\n}", "func (o *ResourceLimits) GetK8sClustersProvisionedOk() (*int32, bool) {\n\tif o == nil {\n\t\treturn nil, false\n\t}\n\treturn o.K8sClustersProvisioned, true\n}" ]
[ "0.64735645", "0.64523965", "0.6035547", "0.58038056", "0.53999776", "0.53208125", "0.5234418", "0.5219659", "0.5205979", "0.51352835", "0.5088557", "0.5061583", "0.5047421", "0.50225717", "0.50075495", "0.49929222", "0.49726918", "0.49626175", "0.49571374", "0.49505204", "0.4910291", "0.49086472", "0.4870585", "0.48654485", "0.48648486", "0.48150924", "0.4807354", "0.47827765", "0.47716317", "0.4757595", "0.47479898", "0.47287238", "0.47200388", "0.4709999", "0.4708853", "0.4703158", "0.47027186", "0.46969888", "0.4696914", "0.469152", "0.46906188", "0.46903023", "0.46878037", "0.46815172", "0.4672829", "0.46617898", "0.4661367", "0.465014", "0.46401793", "0.46380433", "0.46244723", "0.46241587", "0.46236807", "0.46231955", "0.46199661", "0.46175566", "0.4615826", "0.46140933", "0.4592387", "0.45902264", "0.45870736", "0.45860043", "0.457281", "0.45558897", "0.45551425", "0.45495507", "0.45418593", "0.45395905", "0.45348665", "0.45200172", "0.45021793", "0.4497237", "0.4493511", "0.44841293", "0.44784686", "0.44768226", "0.44730937", "0.44699517", "0.44677082", "0.44638708", "0.44293743", "0.44248316", "0.4424208", "0.4420703", "0.44205612", "0.44089937", "0.44081664", "0.44064337", "0.4405084", "0.44050375", "0.43964806", "0.43905383", "0.43900228", "0.43877444", "0.43779445", "0.4375703", "0.437483", "0.43745577", "0.43738717", "0.4367556" ]
0.832443
0
Process calling points so that we generate the appropriate via and include their tiplocs
Обработка точек вызова процесса, чтобы сгенерировать соответствующие переходы и включить их tiplocs
func (bf *boardFilter) processCallingPoints(s ldb.Service) { if len(s.CallingPoints) > 0 { viaRequest := bf.addVia(s.RID, s.CallingPoints[len(s.CallingPoints)-1].Tiploc) for _, cp := range s.CallingPoints { bf.addTiploc(cp.Tiploc) viaRequest.AppendTiploc(cp.Tiploc) } } }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (bf *boardFilter) callsAt(callingPoints []darwind3.CallingPoint, tpls []string) bool {\n\tfor _, cp := range callingPoints {\n\t\tfor _, tpl := range tpls {\n\t\t\tif tpl == cp.Tiploc {\n\t\t\t\treturn true\n\t\t\t}\n\t\t}\n\t}\n\n\treturn false\n}", "func CheckpointCaller(handler interface{}, params ...interface{}) {\n\thandler.(func(checkpointIndex int, tipIndex int, tipsTotal int, messageID hornet.MessageID))(params[0].(int), params[1].(int), params[2].(int), params[3].(hornet.MessageID))\n}", "func CheckpointCaller(handler interface{}, params ...interface{}) {\n\thandler.(func(checkpointIndex int, tipIndex int, tipsTotal int, txHash aingle.Hash))(params[0].(int), params[1].(int), params[2].(int), params[3].(aingle.Hash))\n}", "func CheckpointCaller(handler interface{}, params ...interface{}) {\n\thandler.(func(checkpointIndex int, tipIndex int, tipsTotal int, txHash hornet.Hash))(params[0].(int), params[1].(int), params[2].(int), params[3].(hornet.Hash))\n}", "func parsePointInfo(p Point, chargerType []string) PointInfoJS {\r\n\tpJS := PointInfoJS{}\r\n\r\n\tpJS.Provider = p.Provider\r\n\tpJS.Address = p.Address\r\n\tpJS.Operator = p.Operator\r\n\tpJS.Requirement = p.Requirement\r\n\tpJS.Charger = p.Charger\r\n\tpJS.Parking = p.Parking\r\n\tpJS.Hour = p.Hour\r\n\tpJS.Facility = p.Facility\r\n\tpJS.Website = p.Website\r\n\tpJS.Location = append(pJS.Location, p.Location.Coordinates[1])\r\n\tpJS.Location = append(pJS.Location, p.Location.Coordinates[0])\r\n\r\n\tfor _, v := range chargerType {\r\n\t\tfor k, n := range pJS.Charger {\r\n\t\t\tif v == n.Type {\r\n\t\t\t\tpJS.Charger[k].Match = true\r\n\t\t\t}\r\n\t\t}\r\n\t}\r\n\treturn pJS\r\n}", "func (cb *CanBusClient) Points(nodeID string, points []data.Point) {\n\tcb.newPoints <- NewPoints{nodeID, \"\", points}\n}", "func (g *F) Call(p ...float64) []float64 {\n\tcoords := make([]float64, len(p))\n\tfor i := 0; i < len(p); i++ {\n\t\tcoords[i] = g.f[i](p...)\n\t}\n\treturn coords\n}", "func (src 
*prometheusMetricsSource) buildPoint(name string, m *dto.Metric, now int64, tags map[string]string) []*MetricPoint {\n\tvar result []*MetricPoint\n\tif m.Gauge != nil {\n\t\tif !math.IsNaN(m.GetGauge().GetValue()) {\n\t\t\tpoint := src.metricPoint(name+\".gauge\", float64(m.GetGauge().GetValue()), now, src.source, tags)\n\t\t\tresult = src.filterAppend(result, point)\n\t\t}\n\t} else if m.Counter != nil {\n\t\tif !math.IsNaN(m.GetCounter().GetValue()) {\n\t\t\tpoint := src.metricPoint(name+\".counter\", float64(m.GetCounter().GetValue()), now, src.source, tags)\n\t\t\tresult = src.filterAppend(result, point)\n\t\t}\n\t} else if m.Untyped != nil {\n\t\tif !math.IsNaN(m.GetUntyped().GetValue()) {\n\t\t\tpoint := src.metricPoint(name+\".value\", float64(m.GetUntyped().GetValue()), now, src.source, tags)\n\t\t\tresult = src.filterAppend(result, point)\n\t\t}\n\t}\n\treturn result\n}", "func linePointsGen(p1, p2 Point, speed float64) (gen func() (x, y float64, e error)) {\n\t// Set up math\n\tslopeT, slope, _ := getLineParams(p1, p2)\n\n\tx := p1.X\n\txPrev := x\n\ty := p1.Y\n\tyPrev := y\n\te := fmt.Errorf(\"End of path reached\")\n\ttheta := math.Atan(slope)\n\n\t// Every slope type has a different iterator, since they change the\n\t// x and y values in different combinations, as well as do different\n\t// comparisons on the values.\n\tswitch slopeT {\n\tcase ZERORIGHT:\n\t\treturn func() (float64, float64, error) {\n\t\t\tif x > p2.X {\n\t\t\t\treturn 0, 0, e\n\t\t\t}\n\n\t\t\txPrev = x\n\t\t\tx += speed\n\n\t\t\treturn xPrev, y, nil\n\t\t}\n\tcase ZEROLEFT:\n\t\treturn func() (float64, float64, error) {\n\t\t\tif x < p2.X {\n\t\t\t\treturn 0, 0, e\n\t\t\t}\n\n\t\t\txPrev = x\n\t\t\tx -= speed\n\n\t\t\treturn xPrev, y, nil\n\t\t}\n\tcase POSRIGHT:\n\t\treturn func() (float64, float64, error) {\n\t\t\tif y > p2.Y || x > p2.X {\n\t\t\t\treturn 0, 0, e\n\t\t\t}\n\n\t\t\tyPrev = y\n\t\t\txPrev = x\n\n\t\t\ty += speed * math.Sin(theta)\n\t\t\tx += speed * 
math.Cos(theta)\n\n\t\t\treturn xPrev, yPrev, nil\n\t\t}\n\tcase NEGRIGHT:\n\t\treturn func() (float64, float64, error) {\n\t\t\tif y < p2.Y || x > p2.X {\n\t\t\t\treturn 0, 0, e\n\t\t\t}\n\n\t\t\tyPrev = y\n\t\t\txPrev = x\n\n\t\t\ty += speed * math.Sin(theta)\n\t\t\tx += speed * math.Cos(theta)\n\n\t\t\treturn xPrev, yPrev, nil\n\t\t}\n\tcase POSLEFT:\n\t\treturn func() (float64, float64, error) {\n\t\t\tif y < p2.Y || x < p2.X {\n\t\t\t\treturn 0, 0, e\n\t\t\t}\n\n\t\t\tyPrev = y\n\t\t\txPrev = x\n\n\t\t\ty -= speed * math.Sin(theta)\n\t\t\tx -= speed * math.Cos(theta)\n\n\t\t\treturn xPrev, yPrev, nil\n\t\t}\n\tcase NEGLEFT:\n\t\treturn func() (float64, float64, error) {\n\t\t\tif y > p2.Y || x < p2.X {\n\t\t\t\treturn 0, 0, e\n\t\t\t}\n\n\t\t\tyPrev = y\n\t\t\txPrev = x\n\n\t\t\ty -= speed * math.Sin(theta)\n\t\t\tx -= speed * math.Cos(theta)\n\n\t\t\treturn xPrev, yPrev, nil\n\t\t}\n\tcase INFUP:\n\t\treturn func() (float64, float64, error) {\n\t\t\tif y > p2.Y {\n\t\t\t\treturn 0, 0, e\n\t\t\t}\n\n\t\t\tyPrev := y\n\t\t\ty += speed\n\n\t\t\treturn x, yPrev, nil\n\t\t}\n\tcase INFDOWN:\n\t\treturn func() (float64, float64, error) {\n\t\t\tif y < p2.Y {\n\t\t\t\treturn 0, 0, e\n\t\t\t}\n\n\t\t\tyPrev := y\n\t\t\ty -= speed\n\n\t\t\treturn x, yPrev, nil\n\t\t}\n\t}\n\n\treturn nil\n}", "func (s *BaseAspidaListener) EnterPoints(ctx *PointsContext) {}", "func (b *block) Plan(pointIds ...string) ([]spi.PointSPI, error) {\n\tpoints := []spi.PointSPI{}\n\n\tif len(pointIds) == 0 {\n\t\t// if there are no specified points, include all points\n\n\t\tfor _, p := range b.points {\n\t\t\tpoints = append(points, p)\n\t\t}\n\t} else {\n\t\tincluded := map[string]bool{}\n\t\tincluded_sf := map[string]bool{}\n\n\t\t// include all specified points\n\t\tfor _, id := range pointIds {\n\t\t\tif p, ok := b.points[id]; !ok {\n\t\t\t\treturn nil, sunspec.ErrNoSuchPoint\n\t\t\t} else {\n\t\t\t\tif !included[id] {\n\t\t\t\t\tpoints = append(points, p)\n\t\t\t\t\tincluded[id] = 
true\n\t\t\t\t}\n\t\t\t\tif p.Type() == typelabel.ScaleFactor {\n\t\t\t\t\tincluded_sf[id] = true\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\t// include their scale factors too...\n\t\t//\n\t\t// we do this for several reasons:\n\t\t// - to interpret a point that uses a scale factor, we need the scale factor too\n\t\t// - if we don't there we may read a value point after its scale factor point has changed\n\t\t// By forcing contemporaneous reads of a scale factor and its related points we help to ensure\n\t\t// that the two values are consistent.\n\t\t// - we want to avoid app programmers having to encode knowedlege in their programs\n\t\t// about these depednencies - the knowledge is in the SMDX documents, so lets use it\n\t\tfor _, p := range points {\n\t\t\tsfp := p.(*point).scaleFactor\n\t\t\tif sfp != nil {\n\t\t\t\tif !included[sfp.Id()] {\n\t\t\t\t\tpoints = append(points, sfp.(spi.PointSPI))\n\t\t\t\t\tincluded[sfp.Id()] = true\n\t\t\t\t\tincluded_sf[sfp.Id()] = true\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\t// We also include all the currently valid points that reference any scale\n\t\t// factor points we are going to read since we don't want such points to\n\t\t// unexpectedly enter an error state when they are invalidated by the\n\t\t// read of the scale factor point. 
This allows twp separate reads each\n\t\t// of which have a point that reference a shared scale factor point to\n\t\t// be equivalent to a single read of all points or to two reads in which\n\t\t// all points related to a single scale factor are read in the same read\n\t\t// as the scale factor itself.\n\t\t//\n\t\t// One consequence of this behaviour is that any local changes (via a\n\t\t// setter) to a point dependent on a scale factor point may be lost by a\n\t\t// read of any point that is dependent on the same scale factor which\n\t\t// itself means that local changes to points should be written to the\n\t\t// physical device with Block.Write before the next Block.Read or else\n\t\t// they may be lost under some circumstances even if the point concerned\n\t\t// is not directly referened by the Read call.\n\t\t//\n\t\t// Part of the reason we do this is to maximise the consistency of data\n\t\t// exposed by the API while minimising both the effort for the programmer\n\t\t// to maintain the consistency and also surprising behaviour.\n\t\tfor _, p := range b.points {\n\t\t\tif sfp := p.scaleFactor; sfp == nil || p.Error() != nil || !included_sf[sfp.Id()] {\n\t\t\t\tcontinue\n\t\t\t} else {\n\t\t\t\tif !included[p.Id()] {\n\t\t\t\t\tpoints = append(points, p)\n\t\t\t\t\tincluded[p.Id()] = true\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\t// sort so scale factors come first, then other points in offset order\n\tsort.Sort(scaleFactorFirstOrder(points))\n\treturn points, nil\n}", "func PointSlope() {\n\tfmt.Print(c.CL)\n\tstrx1, err := i.Prompt(c.G + \"Enter x1 point\\n\" + c.B + \">\" + c.M)\n\tu.QuitAtError(err)\n\tstry1, err := i.Prompt(c.G + \"Enter y1 point\\n\" + c.B + \">\" + c.M)\n\tu.QuitAtError(err)\n\tstrm, err := i.Prompt(c.G + \"Enter slope\\n\" + c.B + \">\" + c.M)\n\tu.QuitAtError(err)\n\t//all inputs now provided, to be converted.\n\tx1, err := strconv.ParseFloat(strx1, 64)\n\tu.QuitAtError(err)\n\ty1, err := strconv.ParseFloat(stry1, 
64)\n\tu.QuitAtError(err)\n\tm, err := strconv.ParseFloat(strm, 64)\n\tu.QuitAtError(err)\n\t//all data points provided and converted, now to math-a-tise.\n\tfmt.Println(c.CL, c.G+\"The formula is\")\n\tfmt.Println(\"y = m(x) + b\")\n\tu.Spacer(3)\n\t//to find \"b\"\n\tfmt.Println(y1, \" = \", m, \"(\", x1, \") + b\")\n\tfiller := x1 * m\n\t//multiplies x1 and m to filler.\n\tu.Spacer(1)\n\tfmt.Println(y1, \" = \", filler, \" + b\")\n\tfmt.Println(\"-\", filler, \"---------------|\")\n\t//Shows subtraction\n\tu.Spacer(1)\n\tb := y1 - filler\n\tfmt.Println(c.B2+\"b = \", b)\n\tu.Spacer(3)\n\tfmt.Println(c.B3+\"y = \", m, \"(x) + \", b)\n\tu.Go(1)\n\t//prints out completed statment, ends function\n}", "func applyToPoints(points []Point, fn func(*Point)) {\n\tfor j := range points {\n\t\tfn(&points[j])\n\t}\n}", "func (orderbook *Orderbook) backfillPoints(topbook []*Point, pointDistance uint64, leftMultiple uint64, rightMultiple uint64) []*Point {\n\tfor currentMultiple := leftMultiple; currentMultiple < rightMultiple; currentMultiple++ {\n\t\tpoint := CreatePoint(orderbook, (currentMultiple+1)*pointDistance)\n\t\ttopbook = append(topbook, &point)\n\t}\n\treturn topbook\n}", "func (a axes) drawPoint(p *vg.Painter, xy xyer, cs vg.CoordinateSystem, l Line, pointNumber int) {\n\tx, y, isEnvelope := xy.XY(l)\n\n\t// add number of NaNs leading pointNumber to pointNumber.\n\ttargetNumber := pointNumber\n\tfor i, v := range x {\n\t\tif i > targetNumber {\n\t\t\tbreak\n\t\t}\n\t\tif math.IsNaN(v) {\n\t\t\tpointNumber++\n\t\t}\n\t}\n\n\tif len(x) <= pointNumber || len(y) <= pointNumber || pointNumber < 0 {\n\t\treturn\n\t}\n\tp.SetFont(font1)\n\tlabels := make([]vg.FloatText, 2)\n\tif isEnvelope {\n\t\tif n := len(x); n != len(y) || pointNumber+2 > n {\n\t\t\treturn\n\t\t} else {\n\t\t\txp, yp := x[pointNumber], y[pointNumber]\n\t\t\txp2, yp2 := x[n-pointNumber-2], y[n-pointNumber-2]\n\t\t\tx = []float64{xp, xp2}\n\t\t\ty = []float64{yp, yp2}\n\t\t\tlabels[0] = vg.FloatText{X: 
xp, Y: yp, S: fmt.Sprintf(\"(%.4g, %.4g)\", xp, yp), Align: 5}\n\t\t\tlabels[1] = vg.FloatText{X: xp2, Y: yp2, S: fmt.Sprintf(\"(%.4g, %.4g)\", xp2, yp2), Align: 1}\n\t\t}\n\t} else {\n\t\txp, yp := x[pointNumber], y[pointNumber]\n\t\tx = []float64{xp}\n\t\ty = []float64{yp}\n\t\tvar s string\n\t\tif xyp, ok := xy.(xyPolar); ok {\n\t\t\txstr := \"\"\n\t\t\tif xyp.rmin == 0 && xyp.rmax == 0 { // polar\n\t\t\t\tif len(l.X) > pointNumber && pointNumber >= 0 {\n\t\t\t\t\txstr = fmt.Sprintf(\"%.4g, \", l.X[pointNumber])\n\t\t\t\t}\n\t\t\t\ts = xstr + xmath.Absang(complex(yp, xp), \"%.4g@%.0f\")\n\t\t\t} else { // ring\n\t\t\t\ts = fmt.Sprintf(\"%.4g@%.1f\", l.X[pointNumber], 180.0*l.Y[pointNumber]/math.Pi)\n\t\t\t}\n\t\t} else {\n\t\t\ts = fmt.Sprintf(\"(%.4g, %.4g)\", xp, yp)\n\t\t}\n\t\tlabels[0] = vg.FloatText{X: xp, Y: yp, S: s, Align: 1}\n\t\tlabels = labels[:1]\n\t}\n\n\tsize := l.Style.Marker.Size\n\tif size == 0 {\n\t\tsize = l.Style.Line.Width\n\t}\n\tif size == 0 {\n\t\tsize = 9\n\t} else {\n\t\tsize *= 3\n\t}\n\tc := a.plot.Style.Order.Get(l.Style.Marker.Color, l.Id+1).Color()\n\tp.SetColor(c)\n\tp.Add(vg.FloatCircles{X: x, Y: y, CoordinateSystem: cs, Radius: size, Fill: true})\n\trect := a.inside.Bounds()\n\tfor _, l := range labels {\n\t\tl.CoordinateSystem = cs\n\t\tl.Rect = rect\n\n\t\t// Change the alignment, if the label would be placed at a picture boundary.\n\t\tx0, y0 := cs.Pixel(l.X, l.Y, rect)\n\t\tif l.Align == 1 && y0 < 30 {\n\t\t\tl.Align = 5\n\t\t} else if l.Align == 5 && y0 > rect.Max.Y-30 {\n\t\t\tl.Align = 1\n\t\t}\n\t\tif x0 < 50 {\n\t\t\tif l.Align == 1 {\n\t\t\t\tl.Align = 0\n\t\t\t} else if l.Align == 5 {\n\t\t\t\tl.Align = 6\n\t\t\t}\n\t\t} else if x0 > rect.Max.X-50 {\n\t\t\tif l.Align == 1 {\n\t\t\t\tl.Align = 2\n\t\t\t} else if l.Align == 5 {\n\t\t\t\tl.Align = 4\n\t\t\t}\n\t\t}\n\n\t\t// Place the label above or below with the offset of the marker's radius.\n\t\tif l.Align <= 2 { // Label is above point.\n\t\t\tl.Yoff = -size\n\t\t} 
else if l.Align >= 4 { // Label is below point\n\t\t\tl.Yoff = size\n\t\t}\n\n\t\t// Fill background rectangle of the label.\n\t\tx, y, w, h := l.Extent(p)\n\t\tsaveColor := p.GetColor()\n\t\tp.SetColor(a.bg)\n\t\tp.Add(vg.Rectangle{X: x, Y: y, W: w, H: h, Fill: true})\n\t\tp.SetColor(saveColor)\n\t\tp.Add(l)\n\t}\n}", "func (b *BccLatticePointGenerator) forEachPoint(\n\tboundingBox *BoundingBox3D,\n\tspacing float64,\n\tpoints *[]*Vector3D.Vector3D,\n\tcallback func(*([]*Vector3D.Vector3D), *Vector3D.Vector3D) bool,\n) {\n\n\thalfSpacing := spacing / 2\n\tboxWidth := boundingBox.width()\n\tboxHeight := boundingBox.height()\n\tboxDepth := boundingBox.depth()\n\n\tposition := Vector3D.NewVector(0, 0, 0)\n\thasOffset := false\n\tshouldQuit := false\n\n\tfor k := float64(0); k*halfSpacing <= boxDepth && !shouldQuit; k++ {\n\n\t\tposition.Z = k*halfSpacing + boundingBox.lowerCorner.Z\n\t\tvar offset float64\n\t\tif hasOffset {\n\n\t\t\toffset = halfSpacing\n\t\t} else {\n\t\t\toffset = 0\n\t\t}\n\n\t\tfor j := float64(0); j*spacing+offset <= boxHeight && !shouldQuit; j++ {\n\t\t\tposition.Y = j*spacing + offset + boundingBox.lowerCorner.Y\n\n\t\t\tfor i := float64(0); i*spacing+offset <= boxWidth; i++ {\n\t\t\t\tposition.X = i*spacing + offset + boundingBox.lowerCorner.X\n\n\t\t\t\tif !callback(points, position) {\n\t\t\t\t\tshouldQuit = true\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\thasOffset = !hasOffset\n\t}\n}", "func (d *droid) backtraceToPoint(currLoc, rootLoc *point) []*point {\n\tmovement := d.findPointInPath(currLoc)\n\tbackPath := []*point{movement.location}\n\n\tisEqual := pointsEquals(movement.location, rootLoc)\n\n\tfor !isEqual {\n\t\tbackPath = append(backPath, movement.parent.location)\n\t\tmovement = movement.parent\n\t\tisEqual = pointsEquals(movement.location, rootLoc)\n\t}\n\n\treturn backPath\n}", "func OfGeomPoints(points ...geom.Point) Winding { return Order{}.OfGeomPoints(points...) 
}", "func CallInfo(lv int) string {\n\tpc, file, line, ok := runtime.Caller(lv)\n\tif !ok {\n\t\treturn \"\"\n\t}\n\tfile = callerShortfile(file)\n\tfuncName := runtime.FuncForPC(pc).Name()\n\tfuncName = callerShortfile(funcName)\n\tfn := callerShortfile(funcName, ')')\n\tif len(fn) < len(funcName) {\n\t\tif len(fn) > 1 && fn[0] == '.' {\n\t\t\tfn = fn[1:]\n\t\t}\n\t\tfuncName = fn\n\t} else {\n\t\tfuncName = callerShortfile(funcName, '.')\n\t}\n\ts := fmt.Sprintf(\"%s:%d(%s)\", file, line, funcName)\n\treturn s\n}", "func (me *messageEvents) checkNewCalls(ctx context.Context, from, to *types.TipSet) map[triggerID][]eventData {\n\tme.lk.RLock()\n\tdefer me.lk.RUnlock()\n\n\t// For each message in the tipset\n\tres := make(map[triggerID][]eventData)\n\tme.messagesForTs(from, func(msg *types.Message) {\n\t\t// TODO: provide receipts\n\n\t\t// Run each trigger's matcher against the message\n\t\tfor tid, matchFn := range me.matchers {\n\t\t\tmatched, err := matchFn(msg)\n\t\t\tif err != nil {\n\t\t\t\tlog.Errorf(\"event matcher failed: %s\", err)\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\t// If there was a match, include the message in the results for the\n\t\t\t// trigger\n\t\t\tif matched {\n\t\t\t\tres[tid] = append(res[tid], msg)\n\t\t\t}\n\t\t}\n\t})\n\n\treturn res\n}", "func callerInfo(skip int) string {\n\t_, file, line, _ := runtime.Caller(skip)\n\treturn fmt.Sprintf(\"%v:%v\", file, line)\n}", "func (s *BaseAspidaListener) EnterTPoints(ctx *TPointsContext) {}", "func getCallerInfo(skip int) *callerInfo {\n\tvar (\n\t\tpkg string\n\t\tfile string\n\t\tline int\n\t\tfunctions []string\n\t)\n\n\t// maximum depth of 20\n\tpcs := make([]uintptr, 20)\n\tn := runtime.Callers(skip+2, pcs)\n\tpcs = pcs[:n-1]\n\n\tframes := runtime.CallersFrames(pcs)\n\tfirstCaller := true\n\tfor {\n\t\tframe, more := frames.Next()\n\t\tif !more {\n\t\t\tbreak\n\t\t}\n\n\t\tfn := frame.Function\n\t\tfnStart := strings.LastIndexByte(fn, '/')\n\t\tif fnStart == -1 {\n\t\t\tfnStart = 0\n\t\t} 
else {\n\t\t\tfnStart++\n\t\t}\n\n\t\tfn = fn[fnStart:]\n\t\tpkgEnd := strings.IndexByte(fn, '.')\n\t\tif pkgEnd == -1 {\n\t\t\tfnStart = 0\n\t\t} else {\n\t\t\tfnStart = pkgEnd + 1\n\t\t}\n\t\tfunctions = append(functions, fn[fnStart:])\n\n\t\tif firstCaller {\n\t\t\tline = frame.Line\n\t\t\tfile = frame.File\n\t\t\t// set file as relative path\n\t\t\tpat := \"tracee/\"\n\t\t\ttraceeIndex := strings.Index(file, pat)\n\t\t\tif traceeIndex != -1 {\n\t\t\t\tfile = file[traceeIndex+len(pat):]\n\t\t\t}\n\t\t\tpkg = fn[:pkgEnd]\n\n\t\t\tfirstCaller = false\n\t\t}\n\t}\n\n\treturn &callerInfo{\n\t\tpkg: pkg,\n\t\tfile: file,\n\t\tline: line,\n\t\tfunctions: functions,\n\t}\n}", "func transformCall(n *ir.CallExpr) {\n\t// Set base.Pos, since transformArgs below may need it, but transformCall\n\t// is called in some passes that don't set base.Pos.\n\tir.SetPos(n)\n\t// n.Type() can be nil for calls with no return value\n\tassert(n.Typecheck() == 1)\n\ttransformArgs(n)\n\tl := n.X\n\tt := l.Type()\n\n\tswitch l.Op() {\n\tcase ir.ODOTINTER:\n\t\tn.SetOp(ir.OCALLINTER)\n\n\tcase ir.ODOTMETH:\n\t\tl := l.(*ir.SelectorExpr)\n\t\tn.SetOp(ir.OCALLMETH)\n\n\t\ttp := t.Recv().Type\n\n\t\tif l.X == nil || !types.Identical(l.X.Type(), tp) {\n\t\t\tbase.Fatalf(\"method receiver\")\n\t\t}\n\n\tdefault:\n\t\tn.SetOp(ir.OCALLFUNC)\n\t}\n\n\ttypecheckaste(ir.OCALL, n.X, n.IsDDD, t.Params(), n.Args)\n\tif l.Op() == ir.ODOTMETH && len(deref(n.X.Type().Recv().Type).RParams()) == 0 {\n\t\ttypecheck.FixMethodCall(n)\n\t}\n\tif t.NumResults() == 1 {\n\t\tif n.Op() == ir.OCALLFUNC && n.X.Op() == ir.ONAME {\n\t\t\tif sym := n.X.(*ir.Name).Sym(); types.IsRuntimePkg(sym.Pkg) && sym.Name == \"getg\" {\n\t\t\t\t// Emit code for runtime.getg() directly instead of calling function.\n\t\t\t\t// Most such rewrites (for example the similar one for math.Sqrt) should be done in walk,\n\t\t\t\t// so that the ordering pass can make sure to preserve the semantics of the original code\n\t\t\t\t// (in 
particular, the exact time of the function call) by introducing temporaries.\n\t\t\t\t// In this case, we know getg() always returns the same result within a given function\n\t\t\t\t// and we want to avoid the temporaries, so we do the rewrite earlier than is typical.\n\t\t\t\tn.SetOp(ir.OGETG)\n\t\t\t}\n\t\t}\n\t\treturn\n\t}\n}", "func checkPoints(fabric *elfFabric, takenFabric map[point]int) int {\n\tfor addX := 1; addX <= fabric.size[0]; addX++ {\n\t\tfor addY := 1; addY <= fabric.size[1]; addY++ {\n\t\t\tp := point{x: fabric.position[0] + addX, y: fabric.position[1] + addY}\n\t\t\tnumOfOverlaps := takenFabric[p]\n\t\t\tif numOfOverlaps != 1 {\n\t\t\t\treturn -1\n\t\t\t}\n\t\t}\n\t}\n\treturn fabric.ID\n}", "func (w *wire) interceptPoints(o wire) []point {\n\tvar interceptPoints []point\n\tfor i := 1; i < len(w.points); i++ {\n\t\tv1 := segment{\n\t\t\tfrom: w.points[i-1],\n\t\t\tto: w.points[i],\n\t\t}\n\t\tfor u := 1; u < len(o.points); u++ {\n\t\t\tv2 := segment{\n\t\t\t\tfrom: o.points[u-1],\n\t\t\t\tto: o.points[u],\n\t\t\t}\n\t\t\tintercept := v1.intercepts(v2)\n\t\t\tif intercept.x != 0 && intercept.y != 0 {\n\t\t\t\t// Calculate total wire length (both wires combined)\n\t\t\t\tintercept.wireLen = v1.from.wireLen + intercept.distanceToPoint(v1.from) +\n\t\t\t\t\tv2.from.wireLen + intercept.distanceToPoint(v2.from)\n\t\t\t\tinterceptPoints = append(interceptPoints, intercept)\n\t\t\t}\n\t\t}\n\t}\n\treturn interceptPoints\n}", "func (a axes) click(x, y int, xy xyer, snapToPoint bool) (PointInfo, bool) {\n\t// x, y := a.toFloats(xClick, yClick)\n\tlim := a.limits\n\tcs := vg.CoordinateSystem{lim.Xmin, lim.Ymax, lim.Xmax, lim.Ymin}\n\tbounds := image.Rect(a.x, a.y, a.x+a.width, a.y+a.height)\n\n\tif snapToPoint == false {\n\t\tpx, py := cs.Point(x, y, bounds)\n\t\treturn PointInfo{\n\t\t\tLineID: -1,\n\t\t\tPointNumber: -1,\n\t\t\tNumPoints: 0,\n\t\t\tX: px,\n\t\t\tY: py,\n\t\t}, true\n\t}\n\n\tdist := math.Inf(1)\n\tpIdx := -1\n\tlIdx := -1\n\tnumPoints := 
0\n\tisEnvelope := false\n\tmaxSegment := 0\n\tisSegment := false\n\tfor i, l := range a.plot.Lines {\n\t\tX, Y, isEnv := xy.XY(l)\n\t\tnNotNaN := -1\n\t\tsegmentIdx := 0\n\t\tfor n := range X {\n\t\t\txi, yi := cs.Pixel(X[n], Y[n], bounds)\n\t\t\t// We only increase the index, if the data point is valid.\n\t\t\tnNotNaN++\n\t\t\tif math.IsNaN(X[n]) || math.IsNaN(Y[n]) {\n\t\t\t\tsegmentIdx++\n\t\t\t\tif segmentIdx > maxSegment {\n\t\t\t\t\tmaxSegment = segmentIdx\n\t\t\t\t}\n\t\t\t\tnNotNaN--\n\t\t\t}\n\t\t\tif d := float64((xi-x)*(xi-x) + (yi-y)*(yi-y)); d < dist {\n\t\t\t\tlIdx = i\n\t\t\t\tpIdx = nNotNaN\n\t\t\t\tisEnvelope = isEnv\n\t\t\t\tif isEnvelope {\n\t\t\t\t\tif n > len(X)/2 {\n\t\t\t\t\t\tpIdx = len(X) - n - 2\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tdist = d\n\n\t\t\t\tnumPoints = len(X)\n\t\t\t\tif l.Segments {\n\t\t\t\t\tpIdx = segmentIdx\n\t\t\t\t\tisSegment = true\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\tif lIdx < 0 || pIdx < 0 {\n\t\treturn PointInfo{}, false\n\t}\n\tvar px, py float64\n\tvar pc complex128\n\tl := a.plot.Lines[lIdx]\n\tif len(l.X) > pIdx {\n\t\tpx = l.X[pIdx]\n\t}\n\tif len(l.Y) > pIdx {\n\t\tpy = l.Y[pIdx]\n\t}\n\tif len(l.C) > pIdx {\n\t\tpc = l.C[pIdx]\n\t}\n\tif isSegment {\n\t\tpx = 0\n\t\tpy = 0\n\t\tpc = complex(0, 0)\n\t\tnumPoints = maxSegment + 1\n\t}\n\treturn PointInfo{\n\t\tLineID: l.Id,\n\t\tPointNumber: pIdx,\n\t\tNumPoints: numPoints,\n\t\tIsEnvelope: isEnvelope,\n\t\tX: px,\n\t\tY: py,\n\t\tC: pc,\n\t}, true\n}", "func (mock *IKeptnNatsMessageHandlerMock) ProcessCalls() []struct {\n\tEvent apimodels.KeptnContextExtendedCE\n\tSync bool\n} {\n\tvar calls []struct {\n\t\tEvent apimodels.KeptnContextExtendedCE\n\t\tSync bool\n\t}\n\tmock.lockProcess.RLock()\n\tcalls = mock.calls.Process\n\tmock.lockProcess.RUnlock()\n\treturn calls\n}", "func prepTrackpoints(trackpointPrepper *TrackpointPrepper, streamer *Streamer, db *sql.DB, conf base.Configuration) {\n\tfmt.Println(\"TrackpointPrepper:\", trackpointPrepper.WindowStart, \"-\", 
trackpointPrepper.WindowEnd)\n\twindowSize := conf.TrackpointPrepWindowSize\n\ttimeWarp := conf.TimeWarp\n\ttargetSpeed := conf.TargetSpeedPerSecond\n\n\t// Get all currently active routes.\n\tids := make([]int64, 0)\n\tfor _, r := range trackpointPrepper.Routes {\n\t\tids = append(ids, r.Id)\n\t}\n\troutes := getRoutes(trackpointPrepper.WindowStart, trackpointPrepper.WindowEnd, ids, db)\n\n\t// Get new set of active routes.\n\ttrackpointPrepper.Routes = append(trackpointPrepper.Routes, routes...)\n\tnewRoutes := make([]Route, 0)\n\tfor _, r := range trackpointPrepper.Routes {\n\t\tif !r.DoTime.Before(trackpointPrepper.WindowStart) {\n\t\t\tnewRoutes = append(newRoutes, r)\n\t\t}\n\t}\n\n\t// Update everything to contain the final set of routes and make ready for next iteration.\n\ttrackpointPrepper.Routes = newRoutes\n\tfmt.Println(\"TrackpointPrepper.Routes.len:\", len(trackpointPrepper.Routes))\n\n\tif len(trackpointPrepper.Routes) > int(conf.NumTaxis / 10) {\n\t\t// Create updates for all taxis. First, compute how many updates we need to reach the target speed.\n\t\tnumUpdates := windowSize * targetSpeed\n\t\tnumTimeSlices := numUpdates / float64(len(trackpointPrepper.Routes))\n\t\ttimeInc := time.Duration(1000000000.0*windowSize*timeWarp/numTimeSlices) * time.Nanosecond\n\n\t\ttimeSlice := trackpointPrepper.WindowStart\n\t\tupdates := make([][]byte, 0)\n\t\tfor timeSlice.Before(trackpointPrepper.WindowEnd) {\n\t\t\tsliceEnd := timeSlice.Add(timeInc)\n\n\t\t\tfor _, r := range trackpointPrepper.Routes {\n\t\t\t\t// Check if this route just started now. 
If so, we have to create an occupancy message.\n\t\t\t\t// If it's a route with passengers, a destination message has to be added too.\n\t\t\t\tif r.PuTime.After(timeSlice) && r.PuTime.Before(sliceEnd) {\n\t\t\t\t\t// This is a new route, we have to generate an occupancy message.\n\t\t\t\t\t// Since we include all messages in both streams, here we kinda redundantly send both messages.\n\t\t\t\t\to, _ := json.Marshal(TaxiOccupancyUpdate{r.TaxiId, r.PassengerCount,\n\t\t\t\t\t\tr.EndLon, r.EndLat})\n\t\t\t\t\tupdates = append(updates, o)\n\n\t\t\t\t\tb, _ := json.Marshal(TaxiDestinationUpdate{r.TaxiId, r.PassengerCount,\n\t\t\t\t\t\tr.EndLon, r.EndLat})\n\t\t\t\t\tupdates = append(updates, b)\n\t\t\t\t}\n\n\t\t\t\t// Check if this route is just stopping now. If so, we have to send the journey (esp. price) information.\n\t\t\t\tif r.DoTime.After(timeSlice) && r.DoTime.Before(sliceEnd) {\n\t\t\t\t\tb, _ := json.Marshal(TaxiRouteCompletedUpdate{r.TaxiId, r.PassengerCount,\n\t\t\t\t\t\tr.Distance, r.Duration, r.FareAmount, r.Extra,\n\t\t\t\t\t\tr.MTATax, r.TipAmount, r.TollsAmount, r.EHailFee,\n\t\t\t\t\t\tr.ImprovementSurcharge, r.TotalAmount, r.PaymentType,\n\t\t\t\t\t\tr.TripType})\n\t\t\t\t\tupdates = append(updates, b)\n\t\t\t\t\tdelete(trackpointPrepper.ReservedTaxis, r.TaxiId)\n\t\t\t\t}\n\n\t\t\t\t// In some rare cases, the taxi gets ordered to the pickup location (let's say by a reservation call).\n\t\t\t\t// Optimally, the simulator would already generate these events...\n\t\t\t\t// For now, we do this approx. 
for one taxi every 10 seconds.\n\t\t\t\tif r.PassengerCount == 0 && rand.Float64() < 1.0 /\n\t\t\t\t\t(10000000000.0/float64(timeInc.Nanoseconds())*float64(len(trackpointPrepper.Routes))) {\n\t\t\t\t\ttrackpointPrepper.ReservedTaxis[r.TaxiId] = true\n\t\t\t\t\tb, _ := json.Marshal(TaxiReservationUpdate{r.TaxiId, r.EndLon, r.EndLat})\n\t\t\t\t\tupdates = append(updates, b)\n\t\t\t\t}\n\n\t\t\t\t// In any case, we want to generate some location updates.\n\t\t\t\t// TODO Auf UNIX / Mac scheint es anders kodiert zu sein, d.h. das strings Replace ist nicht nötig.\n\t\t\t\t// TODO Auf Ubuntu geht es so (gleich wie Windows).\n\t\t\t\tcoords, _, err := polyline.DecodeCoords([]byte(r.Geometry))\n\t\t\t\t// coords, _, err := polyline.DecodeCoords([]byte(strings.Replace(r.Geometry, \"\\\\\\\\\", \"\\\\\", -1)))\n\t\t\t\tif err != nil {\n\t\t\t\t\tpanic(err)\n\t\t\t\t}\n\t\t\t\tperc := timeSlice.Sub(r.PuTime).Seconds() / r.DoTime.Sub(r.PuTime).Seconds()\n\t\t\t\tif perc > 0 && perc < 1 {\n\t\t\t\t\tlon, lat := taxisim.AlongPolyline(taxisim.PolylineLength(coords)*perc, coords)\n\t\t\t\t\tif streamer.TaxiupdateChannel != nil {\n\t\t\t\t\t\tvar resLon *float64\n\t\t\t\t\t\tvar resLat *float64\n\t\t\t\t\t\tif trackpointPrepper.ReservedTaxis[r.TaxiId] {\n\t\t\t\t\t\t\tresLon = &r.EndLon\n\t\t\t\t\t\t\tresLat = &r.EndLat\n\t\t\t\t\t\t}\n\t\t\t\t\t\tif r.PassengerCount > 0 {\n\t\t\t\t\t\t\tb, _ := json.Marshal(TaxiUpdate{r.TaxiId, lon, lat,\n\t\t\t\t\t\t\t\tr.PassengerCount, &r.EndLon, &r.EndLat, resLon, resLat})\n\t\t\t\t\t\t\tupdates = append(updates, b)\n\t\t\t\t\t\t} else {\n\t\t\t\t\t\t\tb, _ := json.Marshal(TaxiUpdate{r.TaxiId, lon, lat,\n\t\t\t\t\t\t\t\tr.PassengerCount, nil, nil, resLon, resLat})\n\t\t\t\t\t\t\tupdates = append(updates, b)\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t\ttimeSlice = timeSlice.Add(timeInc)\n\t\t}\n\t\t// Because some routes are not within the time slices, there are not enough updates. 
We fill in the missing ones\n\t\t// by repeating some.\n\t\tmissingUpdates := int(numUpdates) - len(updates)\n\t\tupdateCount := float64(len(updates)) / float64(missingUpdates)\n\t\tcnt := 0.0\n\t\ttotCnt := 0\n\t\tfor _, r := range updates {\n\t\t\t*streamer.TaxiupdateChannel <- r\n\t\t\ttotCnt += 1\n\t\t\tif updateCount > 0 && cnt > updateCount {\n\t\t\t\t*streamer.TaxiupdateChannel <- r\n\t\t\t\ttotCnt += 1\n\t\t\t\tcnt -= updateCount\n\t\t\t}\n\n\t\t\tcnt += 1\n\t\t}\n\t\tfmt.Println(\"Added messages\", totCnt)\n\n\t\ttrackpointPrepper.WindowStart = trackpointPrepper.WindowStart.Add(time.Second * time.Duration(windowSize*timeWarp))\n\t\ttrackpointPrepper.WindowEnd = trackpointPrepper.WindowEnd.Add(time.Second * time.Duration(windowSize*timeWarp))\n\t} else {\n\t\ttrackpointPrepper.WindowStart = time.Date(2016, time.January, 1, 0, 29, 20, 0, time.UTC)\n\t\ttrackpointPrepper.WindowEnd = time.Date(2016, time.January, 1, 0, 29, int(20+windowSize*conf.TimeWarp), 0, time.UTC)\n\t}\n}", "func (pc *ParticleClient) Points(nodeID string, points []data.Point) {\n\tpc.newPoints <- NewPoints{nodeID, \"\", points}\n}", "func (_Posminer *PosminerRaw) Call(opts *bind.CallOpts, result interface{}, method string, params ...interface{}) error {\n\treturn _Posminer.Contract.PosminerCaller.contract.Call(opts, result, method, params...)\n}", "func CallGraph(skip int) CallGraphInfo {\n\tpc, f, line, _ := runtime.Caller(skip)\n\n\tsegs := strings.Split(runtime.FuncForPC(pc).Name(), \"/\")\n\tlastSegs := strings.Split(segs[len(segs)-1], \".\")\n\n\tpackageName := strings.Join(append(segs[:len(segs)-1], lastSegs[0]), \"/\")\n\n\treturn CallGraphInfo{\n\t\tPackageName: packageName,\n\t\tFileName: f,\n\t\tLine: line,\n\t}\n}", "func parsePoint(p Point, chargerType []string) PointJS {\r\n\tpJS := PointJS{}\r\n\r\n\tpJS.Provider = p.Provider\r\n\tpJS.Address = p.Address\r\n\tpJS.Postal = p.Postal\r\n\tpJS.Location = append(pJS.Location, p.Location.Coordinates[1])\r\n\tpJS.Location = 
append(pJS.Location, p.Location.Coordinates[0])\r\n\tpJS.Charger = append(p.Charger)\r\n\r\n\tfor _, v := range chargerType {\r\n\t\tfor k, n := range pJS.Charger {\r\n\t\t\tif v == n.Type {\r\n\t\t\t\tpJS.Charger[k].Match = true\r\n\t\t\t}\r\n\t\t}\r\n\t}\r\n\r\n\treturn pJS\r\n}", "func OfPoints(pts ...[2]float64) Winding { return Order{}.OfPoints(pts...) }", "func here(skip ...int) loc {\n\tsk := 1\n\tif len(skip) > 0 && skip[0] > 1 {\n\t\tsk = skip[0]\n\t}\n\tpc, fileName, fileLine, ok := runtime.Caller(sk)\n\tfn := runtime.FuncForPC(pc)\n\tvar res loc\n\tdefer func() {\n\t\tif res.long != \"\" {\n\t\t\treturn\n\t\t}\n\t\tres.long = res.FuncName\n\t}()\n\tif !ok {\n\t\tres.FuncName = \"N/A\"\n\t\treturn res\n\t}\n\tres.FileName = fileName\n\tres.FileLine = fileLine\n\tres.FuncName = fn.Name()\n\tfileName = filepath.Join(filepath.Base(filepath.Dir(fileName)), filepath.Base(fileName))\n\tres.long = fmt.Sprintf(\"%s@%d:%s()\", fileName, res.FileLine, res.FuncName)\n\tres.short = fmt.Sprintf(\"%s@%d:%s()\", fileName, res.FileLine, strings.TrimLeft(filepath.Ext(res.FuncName), \".\"))\n\treturn res\n}", "func Points(r io.Reader, cfg Config) (client.BatchPoints, error) {\n\tif err := cfg.validate(); err != nil {\n\t\treturn nil, err\n\t}\n\n\tbenchset, err := parse.ParseMultipleBenchmarks(r)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tbp, err := client.NewBatchPoints(client.BatchPointsConfig{\n\t\tPrecision: \"s\",\n\t\tDatabase: cfg.Database,\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tfor pkg, bs := range benchset {\n\t\tfor _, b := range bs {\n\t\t\ttags := map[string]string{\n\t\t\t\t\"goversion\": cfg.GoVersion,\n\t\t\t\t\"hwid\": cfg.HardwareID,\n\t\t\t\t\"pkg\": pkg,\n\t\t\t\t\"procs\": strconv.Itoa(b.Procs),\n\t\t\t\t\"name\": b.Name,\n\t\t\t}\n\t\t\tif cfg.Branch != \"\" {\n\t\t\t\ttags[\"branch\"] = cfg.Branch\n\t\t\t}\n\t\t\tp, err := client.NewPoint(\n\t\t\t\tcfg.Measurement,\n\t\t\t\ttags,\n\t\t\t\tmakeFields(b, 
cfg.Revision),\n\t\t\t\tcfg.Timestamp,\n\t\t\t)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\n\t\t\tbp.AddPoint(p)\n\t\t}\n\t}\n\n\treturn bp, nil\n}", "func (cb *CanBusClient) EdgePoints(nodeID, parentID string, points []data.Point) {\n\tcb.newEdgePoints <- NewPoints{nodeID, parentID, points}\n}", "func (s *QuasiSampler) collectPoints(filterBounds bool) []f64.Vec2 {\n\tvar pointlist []f64.Vec2\n\n\tit := NewTileLeafIterator(s.root)\n\tfor {\n\t\tpt := it.GetShape().GetP1()\n\t\t// Only \"pentagonal\" tiles generate sampling points.\n\t\tif it.GetShape().IsSamplingType() {\n\t\t\timportance := s.GetImportanceAt_bounded(pt)\n\n\t\t\t// Threshold the function against the F-Code value.\n\t\t\tif importance >= calcFCodeValue(it.GetShape().GetFCode(), it.GetShape().GetLevel()) {\n\t\t\t\t// Get the displaced point using the lookup table.\n\t\t\t\tpt_displaced := it.GetShape().GetDisplacedSamplingPoint(importance)\n\n\t\t\t\tif !filterBounds ||\n\t\t\t\t\t(pt_displaced.X >= 0 && pt_displaced.X < s.width &&\n\t\t\t\t\t\tpt_displaced.Y >= 0 && pt_displaced.Y < s.height) {\n\t\t\t\t\tpointlist = append(pointlist, pt_displaced)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\tif !it.Next() {\n\t\t\tbreak\n\t\t}\n\t}\n\treturn pointlist\n}", "func (eng *Engine) fanIn(fn func(shipper Shipper) chan error) chan error {\n\tvar wg sync.WaitGroup\n\taggregator := make(chan error)\n\n\tfor target, shipper := range eng.Shippers {\n\t\twg.Add(1)\n\t\tgo func(target string, shipper Shipper) {\n\t\t\tdefer wg.Done()\n\n\t\t\tfmt.Printf(\"%v: Running target\\n\", target)\n\t\t\tfor err := range fn(shipper) {\n\t\t\t\taggregator <- err\n\t\t\t}\n\t\t\tfmt.Printf(\"%v: Completed target\\n\", target)\n\t\t}(target, shipper)\n\t}\n\n\t// Wait for all sub processes to finish and send a signal to the parent\n\t// when they do.\n\tgo func() {\n\t\twg.Wait()\n\t\tclose(aggregator)\n\t}()\n\n\treturn aggregator\n}", "func (f *timeShiftByMetric) extractCallParams(ctx context.Context, e parser.Expr, 
from, until int64, values map[parser.MetricRequest][]*types.MetricData) (*callParams, error) {\n\tmetrics, err := helper.GetSeriesArg(ctx, e.Arg(0), from, until, values)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tmarks, err := helper.GetSeriesArg(ctx, e.Arg(1), from, until, values)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tversionRank, err := e.GetIntArg(2)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// validating data sets: both metrics and marks must have at least 2 series each\n\t// also, all IsAbsent and Values lengths must be equal to each other\n\tpointsQty := -1\n\tstepTime := int64(-1)\n\tvar dataSets map[string][]*types.MetricData = map[string][]*types.MetricData{\n\t\t\"marks\": marks,\n\t\t\"metrics\": metrics,\n\t}\n\tfor name, dataSet := range dataSets {\n\t\tif len(dataSet) < 2 {\n\t\t\treturn nil, merry.WithMessagef(errTooFewDatasets, \"bad data: need at least 2 %s data sets to process, got %d\", name, len(dataSet))\n\t\t}\n\n\t\tfor _, series := range dataSet {\n\t\t\tif pointsQty == -1 {\n\t\t\t\tpointsQty = len(series.Values)\n\t\t\t\tif pointsQty == 0 {\n\t\t\t\t\treturn nil, merry.WithMessagef(errEmptySeries, \"bad data: empty series %s\", series.Name)\n\t\t\t\t}\n\t\t\t} else if pointsQty != len(series.Values) {\n\t\t\t\treturn nil, merry.WithMessagef(errSeriesLengthMismatch, \"bad data: length of Values for series %s differs from others\", series.Name)\n\t\t\t}\n\n\t\t\tif stepTime == -1 {\n\t\t\t\tstepTime = series.StepTime\n\t\t\t}\n\t\t}\n\t}\n\n\tresult := &callParams{\n\t\tmetrics: metrics,\n\t\tmarks: marks,\n\t\tversionRank: versionRank,\n\t\tpointsQty: pointsQty,\n\t\tstepTime: stepTime,\n\t}\n\treturn result, nil\n}", "func (_Bep20 *Bep20CallerSession) Checkpoints(arg0 common.Address, arg1 uint32) (struct {\n\tFromBlock uint32\n\tVotes *big.Int\n}, error) {\n\treturn _Bep20.Contract.Checkpoints(&_Bep20.CallOpts, arg0, arg1)\n}", "func bezier(t float64, p0, p1, p2, p3 Point) (res Point) {\n\ts := 1 - t\n\tres.X = 
s*s*s*p0.X + 3*(s*s*t)*p1.X + 3*(t*t*s)*p2.X + t*t*t*p3.X\n\tres.Y = s*s*s*p0.Y + 3*(s*s*t)*p1.Y + 3*(t*t*s)*p2.Y + t*t*t*p3.Y\n\treturn\n}", "func (breaker *ServiceBreaker) Call(exec func() (interface{}, error)) (interface{}, error) {\n\tlog.Printf(\"start call, %v state is %v\\n\", breaker.name, breaker.state)\n\t//before call\n\terr := breaker.beforeCall()\n\tif err != nil {\n\t\tlog.Printf(\"end call,%v batch:%v,metrics:(%v,%v,%v,%v,%v),window time start:%v\\n\\n\",\n\t\t\tbreaker.name,\n\t\t\tbreaker.metrics.WindowBatch,\n\t\t\tbreaker.metrics.CountAll,\n\t\t\tbreaker.metrics.CountSuccess,\n\t\t\tbreaker.metrics.CountFail,\n\t\t\tbreaker.metrics.ConsecutiveSuccess,\n\t\t\tbreaker.metrics.ConsecutiveFail,\n\t\t\tbreaker.metrics.WindowTimeStart.Format(\"2006/01/02 15:04:05\"))\n\t\treturn nil, err\n\t}\n\n\t//if panic occur\n\tdefer func() {\n\t\terr := recover()\n\t\tif err != nil {\n\t\t\tbreaker.afterCall(false)\n\t\t\tpanic(err) //todo?\n\t\t}\n\t}()\n\n\t//call\n\tbreaker.metrics.OnCall()\n\tresult, err := exec()\n\n\t//after call\n\tbreaker.afterCall(err == nil)\n\tlog.Printf(\"end call,%v batch:%v,metrics:(%v,%v,%v,%v,%v),window time start:%v\\n\\n\",\n\t\tbreaker.name,\n\t\tbreaker.metrics.WindowBatch,\n\t\tbreaker.metrics.CountAll,\n\t\tbreaker.metrics.CountSuccess,\n\t\tbreaker.metrics.CountFail,\n\t\tbreaker.metrics.ConsecutiveSuccess,\n\t\tbreaker.metrics.ConsecutiveFail,\n\t\tbreaker.metrics.WindowTimeStart.Format(\"2006/1/2 15:04:05\"))\n\n\treturn result, err\n}", "func (pw *PointsParser) ParsePoints(ctx context.Context, orgID, bucketID influxdb.ID, rc io.ReadCloser) (*ParsedPoints, error) {\n\tspan, ctx := opentracing.StartSpanFromContext(ctx, \"write points\")\n\tdefer span.Finish()\n\treturn pw.parsePoints(ctx, orgID, bucketID, rc)\n}", "func testPoints(nodes ...*runtime.Node) (points []*client.Point) {\n\t// Create dummy client\n\tinfluxClient, err := client.NewHTTPClient(client.HTTPConfig{Addr: \"http://127.0.0.1\"})\n\tif err != nil 
{\n\t\tpanic(err)\n\t}\n\n\tnodesList := runtime.NewNodes(&runtime.Config{})\n\n\t// Create dummy connection\n\tconn := &Connection{\n\t\tpoints: make(chan *client.Point),\n\t\tclient: influxClient,\n\t}\n\n\tfor _, node := range nodes {\n\t\tnodesList.AddNode(node)\n\t}\n\n\t// Process data\n\tgo func() {\n\t\tfor _, node := range nodes {\n\t\t\tconn.InsertNode(node)\n\t\t\tif node.Neighbours != nil {\n\t\t\t\tfor _, link := range nodesList.NodeLinks(node) {\n\t\t\t\t\tconn.InsertLink(&link, node.Lastseen.GetTime())\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\tconn.Close()\n\t}()\n\n\t// Read points\n\tfor point := range conn.points {\n\t\tpoints = append(points, point)\n\t}\n\n\treturn\n}", "func (v *coveredPathsVisitor) VisitCall(i *Instruction) interface{} {\n\tif i.Protobuf != nil {\n\t\tif err := v.addPaths(i.Protobuf); err != nil {\n\t\t\treturn fmt.Errorf(\"instruction ID: %d: %v\", i.ID, err)\n\t\t}\n\t}\n\tif i.Response != nil {\n\t\tif err := v.addPaths(i.Response); err != nil {\n\t\t\treturn fmt.Errorf(\"instruction ID: %d: %v\", i.ID, err)\n\t\t}\n\t}\n\treturn i.VisitChildren(v)\n}", "func (_Bep20 *Bep20Caller) Checkpoints(opts *bind.CallOpts, arg0 common.Address, arg1 uint32) (struct {\n\tFromBlock uint32\n\tVotes *big.Int\n}, error) {\n\tvar out []interface{}\n\terr := _Bep20.contract.Call(opts, &out, \"checkpoints\", arg0, arg1)\n\n\toutstruct := new(struct {\n\t\tFromBlock uint32\n\t\tVotes *big.Int\n\t})\n\tif err != nil {\n\t\treturn *outstruct, err\n\t}\n\n\toutstruct.FromBlock = *abi.ConvertType(out[0], new(uint32)).(*uint32)\n\toutstruct.Votes = *abi.ConvertType(out[1], new(*big.Int)).(**big.Int)\n\n\treturn *outstruct, err\n\n}", "func GeneratePoint(conf CurveConfig) error {\n\n\tbavardOpts := []func(*bavard.Bavard) error{\n\t\tbavard.Apache2(\"ConsenSys AG\", 2020),\n\t\tbavard.Package(conf.CurveName),\n\t\tbavard.GeneratedBy(\"gurvy\"),\n\t}\n\n\t// point code\n\tsrc := []string{\n\t\tpoint.Point,\n\t}\n\n\tpathSrc := filepath.Join(conf.OutputDir, 
conf.PointName+\".go\")\n\tif err := bavard.Generate(pathSrc, src, conf, bavardOpts...); err != nil {\n\t\treturn err\n\t}\n\tif err := bavard.Generate(pathSrc, src, conf, bavardOpts...); err != nil {\n\t\treturn err\n\t}\n\n\t// point test\n\tsrc = []string{\n\t\tpoint.PointTests,\n\t}\n\n\tpathSrc = filepath.Join(conf.OutputDir, conf.PointName+\"_test.go\")\n\tif err := bavard.Generate(pathSrc, src, conf, bavardOpts...); err != nil {\n\t\treturn err\n\t}\n\tif err := bavard.Generate(pathSrc, src, conf, bavardOpts...); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}", "func (m *ItemItemsItemWorkbookWorksheetsItemChartsItemSeriesWorkbookChartSeriesItemRequestBuilder) PointsById(id string)(*ItemItemsItemWorkbookWorksheetsItemChartsItemSeriesItemPointsWorkbookChartPointItemRequestBuilder) {\n urlTplParams := make(map[string]string)\n for idx, item := range m.pathParameters {\n urlTplParams[idx] = item\n }\n if id != \"\" {\n urlTplParams[\"workbookChartPoint%2Did\"] = id\n }\n return NewItemItemsItemWorkbookWorksheetsItemChartsItemSeriesItemPointsWorkbookChartPointItemRequestBuilderInternal(urlTplParams, m.requestAdapter)\n}", "func (mp *MetricTranslator) TranslateDataPoints(logger *zap.Logger, sfxDataPoints []*sfxpb.DataPoint) []*sfxpb.DataPoint {\n\tprocessedDataPoints := sfxDataPoints\n\n\tfor _, tr := range mp.rules {\n\t\tswitch tr.Action {\n\t\tcase ActionRenameDimensionKeys:\n\t\t\tfor _, dp := range processedDataPoints {\n\t\t\t\tfor _, d := range dp.Dimensions {\n\t\t\t\t\tif newKey, ok := tr.Mapping[d.Key]; ok {\n\t\t\t\t\t\td.Key = newKey\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\tcase ActionRenameMetrics:\n\t\t\tfor _, dp := range processedDataPoints {\n\t\t\t\tif newKey, ok := tr.Mapping[dp.Metric]; ok {\n\t\t\t\t\tdp.Metric = newKey\n\t\t\t\t}\n\t\t\t}\n\t\tcase ActionMultiplyInt:\n\t\t\tfor _, dp := range processedDataPoints {\n\t\t\t\tif multiplier, ok := tr.ScaleFactorsInt[dp.Metric]; ok {\n\t\t\t\t\tv := dp.GetValue().IntValue\n\t\t\t\t\tif v != nil 
{\n\t\t\t\t\t\t*v = *v * multiplier\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\tcase ActionDivideInt:\n\t\t\tfor _, dp := range processedDataPoints {\n\t\t\t\tif divisor, ok := tr.ScaleFactorsInt[dp.Metric]; ok {\n\t\t\t\t\tv := dp.GetValue().IntValue\n\t\t\t\t\tif v != nil {\n\t\t\t\t\t\t*v = *v / divisor\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\tcase ActionMultiplyFloat:\n\t\t\tfor _, dp := range processedDataPoints {\n\t\t\t\tif multiplier, ok := tr.ScaleFactorsFloat[dp.Metric]; ok {\n\t\t\t\t\tv := dp.GetValue().DoubleValue\n\t\t\t\t\tif v != nil {\n\t\t\t\t\t\t*v = *v * multiplier\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\tcase ActionCopyMetrics:\n\t\t\tfor _, dp := range processedDataPoints {\n\t\t\t\tif newMetric, ok := tr.Mapping[dp.Metric]; ok {\n\t\t\t\t\tnewDataPoint := copyMetric(tr, dp, newMetric)\n\t\t\t\t\tif newDataPoint != nil {\n\t\t\t\t\t\tprocessedDataPoints = append(processedDataPoints, newDataPoint)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\tcase ActionSplitMetric:\n\t\t\tfor _, dp := range processedDataPoints {\n\t\t\t\tif tr.MetricName == dp.Metric {\n\t\t\t\t\tsplitMetric(dp, tr.DimensionKey, tr.Mapping)\n\t\t\t\t}\n\t\t\t}\n\t\tcase ActionConvertValues:\n\t\t\tfor _, dp := range processedDataPoints {\n\t\t\t\tif newType, ok := tr.TypesMapping[dp.Metric]; ok {\n\t\t\t\t\tconvertMetricValue(logger, dp, newType)\n\t\t\t\t}\n\t\t\t}\n\t\tcase ActionCalculateNewMetric:\n\t\t\tvar operand1, operand2 *sfxpb.DataPoint\n\t\t\tfor _, dp := range processedDataPoints {\n\t\t\t\tif dp.Metric == tr.Operand1Metric {\n\t\t\t\t\toperand1 = dp\n\t\t\t\t} else if dp.Metric == tr.Operand2Metric {\n\t\t\t\t\toperand2 = dp\n\t\t\t\t}\n\t\t\t}\n\t\t\tnewPt := calculateNewMetric(logger, operand1, operand2, tr)\n\t\t\tif newPt == nil {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tprocessedDataPoints = append(processedDataPoints, newPt)\n\n\t\tcase ActionAggregateMetric:\n\t\t\t// NOTE: Based on the usage of TranslateDataPoints we can assume that the datapoints batch []*sfxpb.DataPoint\n\t\t\t// 
represents only one metric and all the datapoints can be aggregated together.\n\t\t\tvar dpsToAggregate []*sfxpb.DataPoint\n\t\t\tvar otherDps []*sfxpb.DataPoint\n\t\t\tfor i, dp := range processedDataPoints {\n\t\t\t\tif dp.Metric == tr.MetricName {\n\t\t\t\t\tif dpsToAggregate == nil {\n\t\t\t\t\t\tdpsToAggregate = make([]*sfxpb.DataPoint, 0, len(processedDataPoints)-i)\n\t\t\t\t\t}\n\t\t\t\t\tdpsToAggregate = append(dpsToAggregate, dp)\n\t\t\t\t} else {\n\t\t\t\t\tif otherDps == nil {\n\t\t\t\t\t\totherDps = make([]*sfxpb.DataPoint, 0, len(processedDataPoints)-i)\n\t\t\t\t\t}\n\t\t\t\t\t// This slice can contain additional datapoints from a different metric\n\t\t\t\t\t// for example copied in a translation step before\n\t\t\t\t\totherDps = append(otherDps, dp)\n\t\t\t\t}\n\t\t\t}\n\t\t\taggregatedDps := aggregateDatapoints(logger, dpsToAggregate, tr.Dimensions, tr.AggregationMethod)\n\t\t\tprocessedDataPoints = append(otherDps, aggregatedDps...)\n\t\t}\n\t}\n\n\treturn processedDataPoints\n}", "func SplitPoints(points PointArray, numStrips int) (splitValues []uint64, splitPoses []int) {\n\tif numStrips <= 1 {\n\t\treturn\n\t}\n\tsplitPos := points.Len() / 2\n\tnth.Element(points, splitPos)\n\tsplitValue := points.GetValue(splitPos)\n\n\tnumStrips1 := (numStrips + 1) / 2\n\tnumStrips2 := numStrips - numStrips1\n\tsplitValues1, splitPoses1 := SplitPoints(points.SubArray(0, splitPos), numStrips1)\n\tsplitValues = append(splitValues, splitValues1...)\n\tsplitPoses = append(splitPoses, splitPoses1...)\n\tsplitValues = append(splitValues, splitValue)\n\tsplitPoses = append(splitPoses, splitPos)\n\tsplitValues2, splitPoses2 := SplitPoints(points.SubArray(splitPos, points.Len()), numStrips2)\n\tsplitValues = append(splitValues, splitValues2...)\n\tfor i := 0; i < len(splitPoses2); i++ {\n\t\tsplitPoses = append(splitPoses, splitPos+splitPoses2[i])\n\t}\n\treturn\n}", "func (info *Info) BuildCallGraph(algo string, tests bool) (*CallGraph, error) {\n\tvar cg 
*callgraph.Graph\n\tswitch algo {\n\tcase \"static\":\n\t\tcg = static.CallGraph(info.Prog)\n\n\tcase \"cha\":\n\t\tcg = cha.CallGraph(info.Prog)\n\n\tcase \"pta\":\n\t\tptrCfg, err := info.PtrAnlysCfg(tests)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tptrCfg.BuildCallGraph = true\n\t\tptares, err := info.RunPtrAnlys(ptrCfg)\n\t\tif err != nil {\n\t\t\treturn nil, err // internal error in pointer analysis\n\t\t}\n\t\tcg = ptares.CallGraph\n\n\tcase \"rta\":\n\t\tmains, err := MainPkgs(info.Prog, tests)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tvar roots []*ssa.Function\n\t\tfor _, main := range mains {\n\t\t\troots = append(roots, main.Func(\"init\"), main.Func(\"main\"))\n\t\t}\n\t\trtares := rta.Analyze(roots, true)\n\t\tcg = rtares.CallGraph\n\t}\n\n\tcg.DeleteSyntheticNodes()\n\n\treturn &CallGraph{cg: cg, prog: info.Prog}, nil\n}", "func (p *ProcessCalls) pendingTraces(ctx context.Context, msgChan <-chan *parser.LogEntry, notifyNewTrace chan<- string ) {\n\tfor {\n\t\tselect {\n\t\tcase <-ctx.Done():\n\t\t\treturn\n\t\tcase msg := <-msgChan:\n\t\t\tp.m.Lock()\n {\n if _, ok := p.logs[msg.Trace]; ! 
ok {\n notifyNewTrace <- msg.Trace\n }\n p.logs[msg.Trace] = append(p.logs[msg.Trace], msg)\n\n\n // if tr, ok := p.logs[msg.Trace]; ok {\n // tr = append(tr, msg)\n // // p.logs[msg.Trace] = append(p.logs[msg.Trace], msg)\n // }else {\n // // p.logs[msg.Trace] = []parser.Logs{msg}\n // p.logs[msg.Trace] = append(p.logs[msg.Trace], msg)\n // notifyNewTrace <- msg.Trace\n // }\n // fmt.Fprintln(os.Stderr,\"New Line\", msg)\n\t\t\t}\n\t\t\tp.m.Unlock()\n\t\t}\n\t}\n}", "func pPoints(q []plotValues) map[plotKeys]plotter.XYs {\n\tpts := make(map[plotKeys]plotter.XYs)\n\tfor i := range q {\n\t\tfmt.Println(i)\n\t}\n\treturn pts\n}", "func SendPoints(nc *nats.Conn, subject string, points data.Points, ack bool) error {\n\tfor i := range points {\n\t\tif points[i].Time.IsZero() {\n\t\t\tpoints[i].Time = time.Now()\n\t\t}\n\t}\n\tdata, err := points.ToPb()\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif ack {\n\t\tmsg, err := nc.Request(subject, data, time.Second)\n\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tif len(msg.Data) > 0 {\n\t\t\treturn errors.New(string(msg.Data))\n\t\t}\n\n\t} else {\n\t\tif err := nc.Publish(subject, data); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn err\n}", "func generateCall(generator *Generator, node parser.Node) string {\n\tvar identifier string\n\n\t// Check if it is a built-in function or not\n\tif strings.Contains(node.Value, \"|\") {\n\t\t// Get the function identifier by spliting the value by the pipe\n\t\tidentifier = strings.Split(node.Value, \"|\")[1]\n\n\t\tcheckCall(generator, node)\n\n\t\t// Add import to the generator\n\t\taddCallImport(\n\t\t\tgenerator,\n\t\t\tnode.Value,\n\t\t)\n\t} else {\n\t\tidentifier = node.Value\n\t}\n\n\t// Translate the params\n\tparams := generateParams(generator, node.Params)\n\n\t// Link all the translations together\n\treturn fmt.Sprintf(\n\t\tcCall,\n\t\tidentifier,\n\t\tstrings.Join(params, \",\"),\n\t)\n}", "func (fi *funcInfo) emitCall(line, a, nArgs, nRet int) 
{\r\n\tfi.emitABC(line, OP_CALL, a, nArgs+1, nRet+1)\r\n}", "func (node *CallProc) formatFast(buf *TrackedBuffer) {\n\tbuf.WriteString(\"call \")\n\tnode.Name.formatFast(buf)\n\tbuf.WriteByte('(')\n\tnode.Params.formatFast(buf)\n\tbuf.WriteByte(')')\n}", "func (_Pairing *PairingRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error {\n\treturn _Pairing.Contract.PairingCaller.contract.Call(opts, result, method, params...)\n}", "func (g *Game) CalculatePoints(from, to int) []models.PointsEntrySimple {\n\tg.mu.RLock()\n\tdefer g.mu.RUnlock()\n\ttotalPoints := make(map[string]int)\n\tfor i := from; i <= to; i++ {\n\t\tqp := getPointsForQuestion(g.players, i)\n\t\tfor _, entry := range qp {\n\t\t\ttotalPoints[entry.Player] += entry.Points\n\t\t}\n\t}\n\tvar pes []models.PointsEntrySimple\n\tfor player, points := range totalPoints {\n\t\tpes = append(pes, models.PointsEntrySimple{\n\t\t\tPlayer: player,\n\t\t\tPoints: points,\n\t\t})\n\t}\n\treturn pes\n}", "func callers(s selection, args []string) {\n\tfmt.Println(runWithStdin(s.archive(), \"guru\", \"-scope\", scope(args), \"-modified\", \"callers\", s.pos()))\n}", "func walk(steps, face int, start point, xwards bool) {\n\tfor i := 0; i < steps; i++ {\n\t\tif xwards {\n\t\t\tstart.x += face * 1\n\t\t} else {\n\t\t\tstart.y += face * 1\n\t\t}\n\t\tp := point{x: start.x, y: start.y}\n\t\tvisited[p]++\n\t\tif visited[p] > 1 && (twice == origo) { // only set twice if we haven't before\n\t\t\ttwice = p\n\t\t}\n\t}\n}", "func (se *StateEngine) trajectory(points []string, partial bool) error {\n\n\tsubs, nosubs := se.diffSubs()\n\n\t//are we out of points to walk through? 
if so then fire acive and tell others to be inactive\n\tif len(points) < 1 {\n\t\t//deactivate all the non-subroot states\n\t\tfor _, ko := range nosubs {\n\t\t\tko.Deactivate()\n\t\t}\n\n\t\t//if the engine has a root state activate it since in doing so,it will activate its own children else manually activate the children\n\t\tif se.owner != nil {\n\t\t\tse.owner.Activate()\n\t\t} else {\n\t\t\t//activate all the subroot states first so they can\n\t\t\t//do be ready for the root. We call this here incase the StateEngine has no root state\n\t\t\tfor _, ko := range subs {\n\t\t\t\tko.Activate()\n\t\t\t}\n\t\t}\n\n\t\treturn nil\n\t}\n\n\t//cache the first point so we dont loose it\n\tpoint := points[0]\n\n\tvar state = se.get(point)\n\n\tif state == nil {\n\t\t// for _, ko := range nosubs {\n\t\t// \tif sko.acceptable(se.getAddr(ko), point, so) {\n\t\t// \t\tstate = ko\n\t\t// \t\tbreak\n\t\t// \t}\n\t\t// }\n\t\t//\n\t\t// if state == nil {\n\t\treturn ErrStateNotFound\n\t\t// }\n\t}\n\n\t//set this state as the current active state\n\tse.curr = state\n\n\t//shift the list one more bit for the points\n\tpoints = points[1:]\n\n\t//we pass down the points since that will handle the loadup downwards\n\terr := state.Engine().trajectory(points, partial)\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif !partial {\n\t\t// //activate all the subroot states first so they can\n\t\t// //do any population they want\n\t\t// for _, ko := range subs {\n\t\t// \tko.Activate(so)\n\t\t// }\n\n\t\tif se.owner != nil {\n\t\t\tse.owner.Activate()\n\t\t}\n\t}\n\n\treturn nil\n}", "func (t *Tile) GetPoints(output chan<- Point) {\n\tif t.Level == level_depth {\n\t\tfor _, point := range t.Points {\n\t\t\toutput <- point\n\t\t}\n\t} else {\n\t\tfor _, tile := range t.SubTiles {\n\t\t\tif tile != nil {\n\t\t\t\ttile.GetPoints(output)\n\t\t\t}\n\t\t}\n\t}\n\n\treturn\n}", "func (t *Twemproxy) processStat(\n\tacc telegraf.Accumulator,\n\ttags map[string]string,\n\tdata 
map[string]interface{},\n) {\n\tif source, ok := data[\"source\"]; ok {\n\t\tif val, ok := source.(string); ok {\n\t\t\ttags[\"source\"] = val\n\t\t}\n\t}\n\n\tfields := make(map[string]interface{})\n\tmetrics := []string{\"total_connections\", \"curr_connections\", \"timestamp\"}\n\tfor _, m := range metrics {\n\t\tif value, ok := data[m]; ok {\n\t\t\tif val, ok := value.(float64); ok {\n\t\t\t\tfields[m] = val\n\t\t\t}\n\t\t}\n\t}\n\tacc.AddFields(\"twemproxy\", fields, tags)\n\n\tfor _, pool := range t.Pools {\n\t\tif poolStat, ok := data[pool]; ok {\n\t\t\tif data, ok := poolStat.(map[string]interface{}); ok {\n\t\t\t\tpoolTags := copyTags(tags)\n\t\t\t\tpoolTags[\"pool\"] = pool\n\t\t\t\tt.processPool(acc, poolTags, data)\n\t\t\t}\n\t\t}\n\t}\n}", "func main() {\n\tdefer testTime(\"main\", time.Now())\n\n\tif len(os.Args) != 2 {\n\t\tpanicIfError(fmt.Errorf(\"required URL does not exists\"))\n\t}\n\n\tshops := makeSliceFromShops(os.Args[1])\n\tfmt.Println(len(shops))\n\ttestLibGeoIndex(shops)\n\ttestLibRTree(shops)\n\n\t//for _, v := range points {\n\t//\tfmt.Println(v.Id())\n\t//}\n\n\tvar p1, p2 *xPoint\n\tp1 = &xPoint{50.425365, 30.459593}\n\tp2 = &xPoint{50.4214319750507, 30.458242893219}\n\tfmt.Println(GreatCircleDistance(p1, p2))\n\tp1 = &xPoint{50.425365, 30.459593}\n\tp2 = &xPoint{50.422747, 30.464512}\n\tfmt.Println(GreatCircleDistance(p1, p2))\n}", "func GetUserWithTempPoints(c *gin.Context) {\n\tvar input models.EditBookingInput\n\tif err := c.ShouldBindQuery(&input); err != nil {\n\t\tc.JSON(http.StatusBadRequest, gin.H{\"error\": err.Error(), \"message\": \"Check input Booking ID on URL parameter\"})\n\t\tfmt.Println(\"Error in getting Booking ID. 
\" + err.Error() + \"\\n\")\n\t\treturn\n\t}\n\n\toldBooking, exists, err := RetrieveBooking(DB, models.URLBooking{BookingID: input.OldBookingID})\n\tif !exists {\n\t\terrorMessage := fmt.Sprintf(\"Booking with ID %s does not exist.\", input.OldBookingID)\n\t\tc.JSON(http.StatusBadRequest, gin.H{\"success\": false, \"message\": errorMessage})\n\t\tfmt.Println(errorMessage)\n\t\treturn\n\t}\n\tif err != nil {\n\t\terrorMessage := fmt.Sprintf(\"Error in retrieving Booking with ID %s.\"+err.Error(), input.OldBookingID)\n\t\tc.JSON(http.StatusBadRequest, gin.H{\"error\": err.Error(), \"message\": errorMessage})\n\t\tfmt.Println(errorMessage)\n\t\treturn\n\t}\n\n\tstatusCode, err := GetBookingStatusCode(DB, \"In the midst of booking\")\n\tif err != nil {\n\t\tc.JSON(http.StatusBadRequest, gin.H{\"error\": err.Error(), \"message\": \"Error in querying for status code.\"})\n\t\tfmt.Println(\"Check statusQuery. \" + err.Error() + \"\\n\")\n\t}\n\n\tpendingBookings, err := GetPendingBookings(DB, models.User{Nusnetid: input.NUSNET_ID}, statusCode)\n\tif err != nil {\n\t\terrorMessage := fmt.Sprintf(\"Error in retrieving pending bookings with for user with NUSNET ID %s.\"+err.Error(), input.NUSNET_ID)\n\t\tc.JSON(http.StatusBadRequest, gin.H{\"error\": err.Error(), \"message\": errorMessage})\n\t\tfmt.Println(errorMessage)\n\t\treturn\n\t}\n\n\teditCart, err := EditCartDetails(oldBooking, pendingBookings, models.User{Nusnetid: input.NUSNET_ID})\n\tif err != nil {\n\t\terrorMessage := fmt.Sprint(\"Error in making bookng cart.\" + err.Error())\n\t\tc.JSON(http.StatusBadRequest, gin.H{\"error\": err.Error(), \"message\": errorMessage})\n\t\tfmt.Println(errorMessage)\n\t\treturn\n\t}\n\n\tc.JSON(http.StatusOK, gin.H{\"data\": editCart})\n\tfmt.Println(\"Return successful!\")\n}", "func prepareData(ctx context.Context, targets int, fetcher func() *point.Points) *data {\n\tdata := &data{\n\t\tData: &Data{Points: point.NewPoints()},\n\t\tb: make(chan io.ReadCloser, 1),\n\t\te: 
make(chan error, targets),\n\t\tmut: sync.RWMutex{},\n\t\twg: sync.WaitGroup{},\n\t}\n\tdata.wg.Add(1)\n\n\textraPoints := make(chan *point.Points, 1)\n\n\tgo func() {\n\t\t// add extraPoints. With NameToID\n\t\tdefer func() {\n\t\t\tdata.wg.Done()\n\t\t\tclose(extraPoints)\n\t\t}()\n\n\t\t// First check is context is already done\n\t\tif err := contextIsValid(ctx); err != nil {\n\t\t\tdata.e <- fmt.Errorf(\"prepareData failed: %w\", err)\n\t\t\treturn\n\t\t}\n\n\t\tselect {\n\t\tcase extraPoints <- fetcher():\n\t\t\tp := <-extraPoints\n\t\t\tif p != nil {\n\t\t\t\tdata.mut.Lock()\n\t\t\t\tdefer data.mut.Unlock()\n\n\t\t\t\textraList := p.List()\n\t\t\t\tfor i := 0; i < len(extraList); i++ {\n\t\t\t\t\tdata.Points.AppendPoint(\n\t\t\t\t\t\tdata.Points.MetricID(p.MetricName(extraList[i].MetricID)),\n\t\t\t\t\t\textraList[i].Value,\n\t\t\t\t\t\textraList[i].Time,\n\t\t\t\t\t\textraList[i].Timestamp,\n\t\t\t\t\t)\n\t\t\t\t}\n\t\t\t}\n\t\t\treturn\n\t\tcase <-ctx.Done():\n\t\t\tdata.e <- fmt.Errorf(\"prepareData failed: %w\", ctx.Err())\n\t\t\treturn\n\t\t}\n\t}()\n\treturn data\n}", "func readCoverPoints(file *elf.File, tracePC uint64, traceCmp map[uint64]bool) ([2][]uint64, error) {\n\tvar pcs [2][]uint64\n\ttext := file.Section(\".text\")\n\tif text == nil {\n\t\treturn pcs, fmt.Errorf(\"no .text section in the object file\")\n\t}\n\tdata, err := text.Data()\n\tif err != nil {\n\t\treturn pcs, fmt.Errorf(\"failed to read .text: %v\", err)\n\t}\n\tconst callLen = 5\n\tend := len(data) - callLen + 1\n\tfor i := 0; i < end; i++ {\n\t\tpos := bytes.IndexByte(data[i:end], 0xe8)\n\t\tif pos == -1 {\n\t\t\tbreak\n\t\t}\n\t\tpos += i\n\t\ti = pos\n\t\toff := uint64(int64(int32(binary.LittleEndian.Uint32(data[pos+1:]))))\n\t\tpc := text.Addr + uint64(pos)\n\t\ttarget := pc + off + callLen\n\t\tif target == tracePC {\n\t\t\tpcs[0] = append(pcs[0], pc)\n\t\t} else if traceCmp[target] {\n\t\t\tpcs[1] = append(pcs[1], pc)\n\t\t}\n\t}\n\treturn pcs, nil\n}", "func (conn 
*ConnWithParameters) bucketPoints(rawPt Point) {\n\n\t// Truncate each item in batch\n\t// Split float by decimal\n\tlatSlice := strings.SplitAfter(rawPt.Lat, \".\")\n\tlngSlice := strings.SplitAfter(rawPt.Lng, \".\")\n\n\t// Truncate second half of slices\n\tlatSlice[1] = conn.truncate(latSlice[1])\n\tlngSlice[1] = conn.truncate(lngSlice[1])\n\n\t//check for truncating edge case\n\tif strings.Contains(latSlice[0], \"-0.\") {\n\t\tlatSlice = conn.checkZero(latSlice)\n\t}\n\tif strings.Contains(lngSlice[0], \"-0.\") {\n\t\tlngSlice = conn.checkZero(lngSlice)\n\t}\n\n\t// Combine the split strings together\n\tlat := strings.Join(latSlice, \"\")\n\tlng := strings.Join(lngSlice, \"\")\n\n\t//create bucket hash\n\tbucket := lat + \":\" + lng\n\n\t//create point\n\tpt := Latlng{\n\t\tCoords: Point{\n\t\t\tLat: lat,\n\t\t\tLng: lng,\n\t\t},\n\t\tCount: 1,\n\t}\n\n\t// Bucketing\n\t// check if bucket exists\n\t// if it does exists, increase the count\n\t_, contains := conn.batchMap[bucket]\n\tif contains {\n\t\tvalue := conn.batchMap[bucket] //get the value of the bucket\n\n\t\tvalue.Count++ //increase the count\n\n\t\tconn.batchMap[bucket] = value //add the new count to the point\n\n\t} else { //otherwise, add the point with the count\n\t\tconn.batchMap[bucket] = pt\n\t}\n}", "func buildTrace(messages []*parser.Message, callTrace *call, spanTo string) {\n\tvar matches []*parser.Message\n\tfor _, msg := range messages {\n\t\tif msg.SpanFrom == spanTo {\n\t\t\tmatches = append(matches, msg)\n\t\t}\n\t}\n\tsort.Sort(parser.Messages(matches))\n\n\tfor _, m := range matches {\n\t\tc := call{\n\t\t\tStart: m.Start,\n\t\t\tEnd: m.End,\n\t\t\tService: m.Service,\n\t\t\tSpan: m.SpanTo,\n\t\t\tCalls: make([]*call, 0),\n\t\t}\n\t\tbuildTrace(messages, &c, m.SpanTo)\n\t\tcallTrace.Calls = append(callTrace.Calls, &c)\n\t}\n}", "func NewPreemptableCallerInfo(\n\tcallerName string,\n) CallerInfo {\n\treturn CallerInfo{\n\t\tCallerName: callerName,\n\t\tCallerType: 
CallerTypePreemptable,\n\t}\n}", "func ShipperCalled(orderID uint) error {\n\tctx := context.Background()\n\t_, err := zbClient.NewPublishMessageCommand().MessageName(\"ShipperCalled\").CorrelationKey(fmt.Sprint(orderID)).TimeToLive(1 * time.Minute).Send(ctx)\n\tif err != nil {\n\t\treturn nil\n\t}\n\treturn nil\n}", "func pointsToLines(points []Point) (lines []Line) {\n\tfor i := 0; i < len(points); i++ {\n\t\tfor j := i + 1; j < len(points); j++ {\n\t\t\tif points[i].nextTo(points[j]) {\n\t\t\t\tlines = append(lines, Line{P1: points[i], P2: points[j]})\n\t\t\t}\n\t\t}\n\t}\n\treturn\n}", "func (mmGetUserLocation *mStorageMockGetUserLocation) Calls() []*StorageMockGetUserLocationParams {\n\tmmGetUserLocation.mutex.RLock()\n\n\targCopy := make([]*StorageMockGetUserLocationParams, len(mmGetUserLocation.callArgs))\n\tcopy(argCopy, mmGetUserLocation.callArgs)\n\n\tmmGetUserLocation.mutex.RUnlock()\n\n\treturn argCopy\n}", "func (t *Tables) Callers(ctx context.Context, req *epb.CallersRequest) (*epb.CallersReply, error) {\n\ttickets := req.Tickets\n\tif len(tickets) == 0 {\n\t\treturn nil, fmt.Errorf(\"missing input tickets: %v\", req)\n\t}\n\n\t// succMap maps nodes onto sets of successor nodes\n\tsuccMap := make(map[string]stringset.Set)\n\n\t// At the moment, this is our policy for missing data: if an input ticket has\n\t// no record in the table, we don't include data for that ticket in the response.\n\t// Other table access errors result in returning an error.\n\tfor _, ticket := range tickets {\n\t\tvar callgraph srvpb.Callgraph\n\t\tif err := t.FunctionToCallers.Lookup(ctx, []byte(ticket), &callgraph); err == table.ErrNoSuchKey {\n\t\t\tcontinue // skip tickets with no mappings\n\t\t} else if err != nil {\n\t\t\treturn nil, fmt.Errorf(\"error looking up callers with ticket %q: %v\", ticket, err)\n\t\t}\n\n\t\t// This can only happen in the context of a postprocessor bug.\n\t\tif callgraph.Type != srvpb.Callgraph_CALLER {\n\t\t\treturn nil, fmt.Errorf(\"type of 
callgraph is not 'CALLER': %v\", callgraph)\n\t\t}\n\n\t\t// TODO(jrtom): consider logging a warning if len(callgraph.Tickets) == 0\n\t\t// (postprocessing should disallow this)\n\t\tfor _, predTicket := range callgraph.Tickets {\n\t\t\tif _, ok := succMap[predTicket]; !ok {\n\t\t\t\tsuccMap[predTicket] = stringset.New()\n\t\t\t}\n\t\t\tset := succMap[predTicket]\n\t\t\tset.Add(ticket)\n\t\t}\n\t}\n\n\treturn &epb.CallersReply{Graph: convertSuccMapToGraph(succMap)}, nil\n}", "func (gen *Db) nodePoints(id string, points data.Points) error {\n\tfor _, p := range points {\n\t\tif p.Time.IsZero() {\n\t\t\tp.Time = time.Now()\n\t\t}\n\t}\n\n\treturn gen.store.Update(func(tx *genji.Tx) error {\n\t\tnec := newNodeEdgeCache(tx)\n\n\t\tne, err := nec.getNodeAndEdges(id)\n\n\t\tif err != nil {\n\t\t\tif err == genjierrors.ErrDocumentNotFound {\n\t\t\t\tif gen.meta.RootID == \"\" {\n\t\t\t\t\tgen.lock.Lock()\n\t\t\t\t\tdefer gen.lock.Unlock()\n\t\t\t\t\tgen.meta.RootID = id\n\t\t\t\t\terr := tx.Exec(`update meta set rootid = ?`, id)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\treturn fmt.Errorf(\"Error setting rootid in meta: %w\", err)\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\tne = &nodeAndEdges{\n\t\t\t\t\tnode: &data.Node{\n\t\t\t\t\t\tID: id,\n\t\t\t\t\t\tType: data.NodeTypeDevice,\n\t\t\t\t\t},\n\t\t\t\t}\n\n\t\t\t} else {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t\tfor _, point := range points {\n\t\t\tif point.Type == data.PointTypeNodeType {\n\t\t\t\tne.node.Type = point.Text\n\t\t\t\t// we don't encode type in points as this has its own field\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tne.node.Points.ProcessPoint(point)\n\t\t}\n\n\t\t/*\n\t\t\t * FIXME: need to clean up offline processing\n\t\t\tstate := node.State()\n\t\t\tif state != data.PointValueSysStateOnline {\n\t\t\t\tnode.Points.ProcessPoint(\n\t\t\t\t\tdata.Point{\n\t\t\t\t\t\tTime: time.Now(),\n\t\t\t\t\t\tType: data.PointTypeSysState,\n\t\t\t\t\t\tText: 
data.PointValueSysStateOnline,\n\t\t\t\t\t},\n\t\t\t\t)\n\t\t\t}\n\t\t*/\n\n\t\tsort.Sort(ne.node.Points)\n\n\t\terr = nec.processNode(ne, false)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"processNode error: %w\", err)\n\t\t}\n\n\t\terr = nec.writeEdges()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\terr = tx.Exec(`insert into nodes values ? on conflict do replace`, ne.node)\n\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"Error inserting/updating node: %w\", err)\n\t\t}\n\n\t\treturn nil\n\t})\n}", "func NewSendPoints(portalURL, deviceID, authToken string, timeout time.Duration, debug bool) func(data.Points) error {\n\tvar netClient = &http.Client{\n\t\tTimeout: timeout,\n\t}\n\n\treturn func(points data.Points) error {\n\t\tpointURL := portalURL + \"/v1/devices/\" + deviceID + \"/points\"\n\n\t\ttempJSON, err := json.Marshal(NewPoints(points))\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tif debug {\n\t\t\tlog.Println(\"Sending points: \", string(tempJSON))\n\t\t}\n\n\t\treq, err := http.NewRequest(\"POST\", pointURL, bytes.NewBuffer(tempJSON))\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\treq.Header.Set(\"Content-Type\", \"application/json\")\n\t\treq.Header.Set(\"Authorization\", authToken)\n\t\tresp, err := netClient.Do(req)\n\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tdefer resp.Body.Close()\n\n\t\tif resp.StatusCode != http.StatusOK {\n\t\t\terrstring := \"Server error: \" + resp.Status + \" \" + pointURL\n\t\t\tbody, _ := ioutil.ReadAll(resp.Body)\n\t\t\terrstring += \" \" + string(body)\n\t\t\treturn errors.New(errstring)\n\t\t}\n\n\t\treturn nil\n\t}\n}", "func pointsto(s selection, args []string) {\n\tfmt.Println(runWithStdin(s.archive(), \"guru\", \"-modified\", \"-scope\", scope(args), \"pointsto\", s.sel()))\n}", "func (d *DXF) Points(s v2.VecSet, r float64) {\n\td.drawing.ChangeLayer(\"Points\")\n\tfor _, p := range s {\n\t\td.drawing.Circle(p.X, p.Y, 0, r)\n\t}\n}", "func (ff *fftag) resetLocations() {\n\tfor _, 
chaser := range ff.chasers {\n\t\tspot := ff.spots[rand.Intn(len(ff.spots))] // get open location.\n\t\tx, y := ff.at(spot) // get map location.\n\t\tchaser.SetLocation(float64(x), float64(y), 0)\n\t}\n\tspot := ff.spots[rand.Intn(len(ff.spots))]\n\tgoalx, goaly := ff.at(spot)\n\tff.goal.SetLocation(float64(goalx), float64(goaly), 0)\n\n\t// create the flow field based on the given goal.\n\tff.flow.Create(goalx, goaly)\n}", "func (_Posminer *PosminerCallerRaw) Call(opts *bind.CallOpts, result interface{}, method string, params ...interface{}) error {\n\treturn _Posminer.Contract.contract.Call(opts, result, method, params...)\n}", "func grabLocation(){\n\t//sends out an immediate request of closest people\n\n}", "func getCallerSourceLocation() string {\n\t_, file, line, ok := runtime.Caller(2)\n\tresult := \"unknown:unknown\"\n\tif ok {\n\t\tresult = fmt.Sprintf(\"%s:%d\", file, line)\n\t}\n\treturn result\n}", "func (p *ProcessCalls) processAfterContextExpiration(ctx context.Context, traceId string) {\n p.pendingTracesWG.Add(1)\n defer p.pendingTracesWG.Done()\n <-ctx.Done()\n\n\n if tr, ok := p.logs[traceId]; ok {\n if !tr.HasRootCallerSpan() {\n // fmt.Fprintln(os.Stderr,\"HasRootCallerSpan is false\",tr)\n return\n }\n }\n\n // ll := p.logs[traceId]\n // fmt.Fprintln(os.Stderr,traceId, \"ll.String\",ll.String())\n // if ll.HasRootCallerSpan() == false {\n // fmt.Fprintln(os.Stderr,ll.String())\n // fmt.Fprintln(os.Stderr,\"HasRootCallerSpan is false\")\n // return\n // }\n // } else {\n // fmt.Fprintln(os.Stderr,ll.String())\n //\n // }\n // fmt.Fprintln(os.Stderr,\"==> buildTraceTree \",p.logs[traceId], traceId, p.info)\n var result traceTree\n tr :=append(p.logs[traceId][:0:0], p.logs[traceId]...)\n // if tr, ok := p.logs[traceId]; ok {\n result = buildTraceTree(&tr, traceId, p.info)\n // fmt.Fprintln(os.Stderr,\"==> result.Root \",result.Root)\n if result.Root == nil {\n return\n }\n\n // }else {\n // return\n // }\n // result := 
buildTraceTree(p.logs[traceId], traceId, p.info)\n // fmt.Fprintln(os.Stderr,\"result %v\",result)\n res, err := json.Marshal(result)\n if err != nil {\n // TODO: Log an error and update the statistics.\n fmt.Fprintln(os.Stderr, err)\n }\n // fmt.Fprintln(os.Stderr,ll.String())\n p.m.Lock()\n defer p.m.Unlock()\n // delete(p.logs, traceId)\n\n // TODO: output this result to a proper source destination.\n fmt.Fprintln(os.Stdout, string(res))\n}", "func Caller(skip string) (string, int, string) {\n\tpc := make([]uintptr, 7)\n\tn := runtime.Callers(2, pc)\n\tframes := runtime.CallersFrames(pc[:n])\n\tfor {\n\t\tframe, more := frames.Next()\n\t\t// frame.File = /tmp/sandbox469341579/prog.go\n\t\t// frame.Line = 28\n\t\t// frame.Function = main.Announcer.Info\n\n\t\t// file = prog.go\n\t\tfile := path.Base(frame.File)\n\t\t// function = Info\n\t\tfunction := path.Base(strings.Replace(frame.Function, \".\", \"/\", -1))\n\n\t\tif file != skip {\n\t\t\treturn file, frame.Line, function\n\t\t}\n\n\t\tif !more {\n\t\t\tbreak\n\t\t}\n\t}\n\treturn \"\", 0, \"\"\n}", "func (mock *MailgunMock) ParseAddressesCalls() []struct {\n\tAddresses []string\n} {\n\tvar calls []struct {\n\t\tAddresses []string\n\t}\n\tlockMailgunMockParseAddresses.RLock()\n\tcalls = mock.calls.ParseAddresses\n\tlockMailgunMockParseAddresses.RUnlock()\n\treturn calls\n}", "func (t *Translator) translateCallStatement(stmt *vm_ast.CallStatement) []string {\n\tkeyString := generateUuidForIdent()\n\treturnAddressLabel := \"return_to_\" + keyString\n\n\t// push return address\n\tresult := []string{\n\t\t\"@\" + returnAddressLabel,\n\t\t\"D=A;\",\n\t}\n\tresult = append(result, t.pushDregStatements()...)\n\n\t// push current LCL\n\tresult = append(result, []string{\n\t\t\"@LCL\",\n\t\t\"D=M;\",\n\t}...)\n\tresult = append(result, t.pushDregStatements()...)\n\n\t// push current ARG\n\tresult = append(result, []string{\n\t\t\"@ARG\",\n\t\t\"D=M;\",\n\t}...)\n\tresult = append(result, 
t.pushDregStatements()...)\n\n\t// push current THIS\n\tresult = append(result, []string{\n\t\t\"@THIS\",\n\t\t\"D=M;\",\n\t}...)\n\tresult = append(result, t.pushDregStatements()...)\n\n\t// push current THAT\n\tresult = append(result, []string{\n\t\t\"@THAT\",\n\t\t\"D=M;\",\n\t}...)\n\tresult = append(result, t.pushDregStatements()...)\n\n\t// ARG = SP - stmt.LocalNum.Literal - 5\n\tresult = append(result, []string{\n\t\t// save current M[SP] in M[R5]\n\t\t\"@SP\",\n\t\t\"D=M;\",\n\t\t\"@R5\",\n\t\t\"M=D;\",\n\t}...)\n\tcalcM := []string{}\n\tswitch stmt.ArgNum.Type {\n\tcase vm_tokenizer.IDENT:\n\t\tcalcM = []string{\n\t\t\t\"@\" + stmt.ArgNum.Literal,\n\t\t\t\"D=M;\",\n\t\t\t\"@R5\",\n\t\t\t\"M=M-D;\",\n\t\t}\n\tcase vm_tokenizer.INT:\n\t\tcalcM = []string{\n\t\t\t\"@\" + stmt.ArgNum.Literal,\n\t\t\t\"D=A;\",\n\t\t\t\"@R5\",\n\t\t\t\"M=M-D;\",\n\t\t}\n\tdefault:\n\t\tlog.Fatalf(\"unexpected token type\")\n\t}\n\tresult = append(result, calcM...)\n\tresult = append(result, []string{\n\t\t\"@5\",\n\t\t\"D=A;\",\n\t\t\"@R5\",\n\t\t\"M=M-D;\",\n\t\t\"D=M;\",\n\t\t\"@ARG\",\n\t\t\"M=D;\",\n\t}...)\n\n\t// LCL = SP\n\tresult = append(result, []string{\n\t\t\"@SP\",\n\t\t\"D=M;\",\n\t\t\"@LCL\",\n\t\t\"M=D;\",\n\t}...)\n\n\tresult = append(result, []string{\n\t\t\"@\" + getFunctionLabel(stmt.Name.Literal),\n\t\t\"0;JMP\",\n\t\t\"(return_to_\" + keyString + \")\",\n\t}...)\n\n\treturn result\n}", "func checkCoord(outputArray []string, stringarray []string, shape string, crtPosition int)string{\n\tvar i int = 0\n\t\n\t//Validates if the Shape is a square\n\t//if it is we should have two values so increment by two to proceed with the Right most derivation\n\tif shape == \"SQR\"{\n\tcrtPosition=crtPosition+2\n\n\tfor ; i != 3; i++{\n\t\tvar frtCoord string=stringarray[crtPosition]\n\t\tx:=frtCoord[0:1]\n\t\ty:=frtCoord[1:2]\n\t// Validates the Y to the give range\n\tif y==\"0\" ||y==\"1\" || y==\"2\" || y==\"3\" || y==\"4\" || y==\"5\" || y==\"6\"|| y==\"7\"|| y==\"8\"|| 
y==\"9\"{\n\t\t\tif i == 1 {\t//If the loop was incremented if so proceed with this new instructions\t\t\t\t\t\n\t\t\t\t\tfmt.Println(strings.Trim(fmt.Sprint(outputArray), \"[] \")+\" <x>\"+y+endString)\n\t\t\t\t\tendString=(\"\"+y+endString)\t\t\t\n\t\t\t\t\t}else{ if endString != \"\" { //If the endString is not empty if so proceed with this new instructions\n\t\t\t\t\t\t\tfmt.Println(strings.Trim(fmt.Sprint(outputArray), \"[] \")+\" <coord>,<coord>\"+endString)\n\t\t\t\t\t\t\tfmt.Println(strings.Trim(fmt.Sprint(outputArray), \"[] \")+\" <coord>,<x><y>\"+ endString)\t\t\n\t\t\t\t\t\t\tfmt.Println(strings.Trim(fmt.Sprint(outputArray), \"[] \")+\" <coord>,<x>\"+y+endString)\t\t\t\t\t\t\n\t\t\t\t\t\t\tendString=(\"\"+y+endString)\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\n\t\t\t\t\t}else{ //The First time that we are entering the loop so access the default format\n\t\t\t\t\tfmt.Println(strings.Trim(fmt.Sprint(outputArray), \"[] \")+\" <coord>,<x><y>\"+ \"finish\")\t\t\n\t\t\t\t\tfmt.Println(strings.Trim(fmt.Sprint(outputArray), \"[] \")+\" <coord>,<x>\"+y+\" finish\")}\n\t\t\t\t\t}\n\n\t\t\t\t\tif x==\"A\" || x==\"B\" || x==\"C\" || x==\"D\" || x==\"E\" || x==\"F\" || x==\"G\"|| x==\"H\"|| x==\"I\"|| x==\"J\"{\n\t\t\t\t\t\tif i == 1 {\t//If the loop was incremented if so proceed with this new instructions\t\n\t\t\t\t\t\t\tfmt.Println(strings.Trim(fmt.Sprint(outputArray), \"[] \")+\" \"+x+endString)\n\t\t\t\t\t\t\tendString=(\" SQR \"+x+endString)\n\t\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t}else{\tif endString !=\"\"{//If the endString is empty if so proceed with this new instructions\n\t\t\t\t\t\t\t\tfmt.Println(strings.Trim(fmt.Sprint(outputArray), \"[] \")+\" <coord>,\"+x+endString)\n\t\t\t\t\t\t\t\tfmt.Println(strings.Trim(fmt.Sprint(outputArray), \"[] \")+\" <x><y>, \"+x+endString)\n\t\t\t\t\t\t\t\tendString=(\",\"+x+endString)\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t}else{\t\t//The First time that we are entering the loop for the x so access the default 
format\n\t\t\t\t\t\t\t\t\tfmt.Println(strings.Trim(fmt.Sprint(outputArray), \"[] \")+\" <coord>,\"+x+y+\" finish\")\n\t\t\t\t\t\t\t\t\tfmt.Println(strings.Trim(fmt.Sprint(outputArray), \"[] \")+\" <x><y>,\"+x+y+\" finish\")\n\t\t\t\t\t\t\t\t\tendString=(\",\"+x+\"\"+y+\" finish\")\n\t\t\t\t\t\t\t\t\t}\n\n\t\t\t\t\t\t}\t\t\t\t\t\t\t\n\t\t\t\t\t}else{\n\t\t\t\t\t\tfmt.Println(\"Error: \"+x+ \" is invalid\")\n\t\t\t\t\t}\n\tcrtPosition--\n\tif stringarray[crtPosition] == shape{\n\tbreak\n\t}\n\t\n\t\n\t}else{\n\t\tfmt.Println(\"Error: \"+y+ \" is invalid\")\n\t\tbreak\n\t}\n}\n\n}\n\n/*************************************************************************************/\n\t//Validates if the Shape is a Triangle\n\t//if it is we should have three values so increment by two to proceed with the Right most derivation\n\t\n\tif shape == \"TRI\"{\n\tcrtPosition=crtPosition+3\n\n\tfor ; i != 3; i++{\n\t\tvar frtCoord string=stringarray[crtPosition]\n\t\tx:=frtCoord[0:1]\n\t\ty:=frtCoord[1:2]\n\t\t\t// Validates the Y to the give range\n\t\tif y==\"0\" ||y==\"1\" || y==\"2\" || y==\"3\" || y==\"4\" || y==\"5\" || y==\"6\"{\n\t\t\tif i == 1 {\t//If the loop was incremented if so proceed with this new instructions\t\t\n\t\t\t\tfmt.Println(strings.Trim(fmt.Sprint(outputArray), \"[] \")+\"<coord>,<x>\"+y+endString)\n\t\t\t\tendString=(\"\"+y+endString)\t\t\t\n\t\t\t\t\t}else {\n\t\t\t\t\t\tif i == 2 {//If the loop was incremented the third time if so proceed with this new instructions\n\t\t\t\t\t\t\tfmt.Println(strings.Trim(fmt.Sprint(outputArray), \"[] \")+\"<x><y>,\"+endString)\n\t\t\t\t\t\t\tfmt.Println(strings.Trim(fmt.Sprint(outputArray), \"[] \")+\"<x>\"+y+\",\"+endString)\t\n\t\t\t\t\t\t\tendString = (y+\",\"+endString)\t\t\n\t\t\t\t\t\t\t}else{ \n\t\t\t\t\t\t\t\tif endString != \"\" {//If the endString is not empty if so proceed with this new instructions \n\t\t\t\t\t\t\t\t\t\tfmt.Println(strings.Trim(fmt.Sprint(outputArray), \"[] 
\")+\"<coord>,<coord>,<coord>\"+endString)\n\t\t\t\t\t\t\t\t\t\tfmt.Println(strings.Trim(fmt.Sprint(outputArray), \"[] \")+\"<coord>,<coord>,<x><y>\"+ endString)\t\t\n\t\t\t\t\t\t\t\t\t\tfmt.Println(strings.Trim(fmt.Sprint(outputArray), \"[] \")+\"<coord>,<coord>,<x>\"+y+endString)\t\t\t\t\t\t\n\t\t\t\t\t\t\t\t\t\tendString=(\"\"+y+endString)\t\n\t\t\t\t\t\t\t\t}else{ //The First time that we are entering the loop so access the default format\n\t\t\t\t\t\t\t\t\t\tfmt.Println(strings.Trim(fmt.Sprint(outputArray), \"[] \")+\" <coord>,<coord>,<x><y>\"+ \"finish1\")\t\t\n\t\t\t\t\t\t\t\t\t\tfmt.Println(strings.Trim(fmt.Sprint(outputArray), \"[] \")+\" <coord>,<coord>,<x>\"+y+\" finish1\")\n\t\t\t\t\t\t\t\t\t}\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\t}\t\t\t\t\t\n\t\t\t\t\t\n\t\t\t\t\t}\n\n\t\tif x==\"A\" || x==\"B\" || x==\"C\" || x==\"D\" || x==\"E\" || x==\"G\" || x==\"F\"{\n\t\t\t\tif i == 1 { //If the loop was incremented if so proceed with this new instructions\t\n\t\t\t\t\t\t\tfmt.Println(strings.Trim(fmt.Sprint(outputArray), \"[] \")+\"<coord>,\"+x+endString)\n\t\t\t\t\t\t\tendString=(x+endString)\t\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t}else{ \t\t\t\t\t\t\n\t\t\t\t\t\t\tif endString !=\"\"{\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\tif i == 2{//If the loop was incremented the third time if so proceed with this new instructions\n\t\t\t\t\t\t\t\t\tfmt.Println(strings.Trim(fmt.Sprint(outputArray), \"[] \")+\" \"+x+endString)\n\t\t\t\t\t\t\t\t\tendString = (\" TRI \"+x+endString)\t\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\t\t}else{//If the endString is not empty if so proceed with this new instructions \n\t\t\t\t\t\t\t\tfmt.Println(strings.Trim(fmt.Sprint(outputArray), \"[] \")+\"<coord>,<coord>,\"+x+endString)\n\t\t\t\t\t\t\t\tfmt.Println(strings.Trim(fmt.Sprint(outputArray), \"[] \")+\"<coord>,<x><y>, \"+x+endString)\n\t\t\t\t\t\t\t\tendString=(\",\"+x+endString)\n\t\t\t\t\t\t\t\t}\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t}else{\t\t//The First time that we are entering the loop 
so access the default format\n\t\t\t\t\t\t\t\t\tfmt.Println(strings.Trim(fmt.Sprint(outputArray), \"[] \")+\"<coord>,<coord>,\"+x+y+\" finish2\")\n\t\t\t\t\t\t\t\t\tfmt.Println(strings.Trim(fmt.Sprint(outputArray), \"[] \")+\"<coord>,<x><y>,\"+x+y+\" finish2\")\n\t\t\t\t\t\t\t\t\tendString=(\",\"+x+\"\"+y+\" finish\")\n\n\t\t\t\t\t\t\t\t\t}\n\n\t\t\t\t\t\t\t}\t\t\t\t\t\t\t\n\t\t}\n\tcrtPosition--\n\t\n\t}\n\n}\n}\n\n/**************************************************************************************************/\n//Validates if the Shape is a Triangle\n//if it is we should have one value so increment by one to proceed with the Right most derivation\n//Either of these values can we FILL GRID or CIR\n\nif shape == \"FILL\" || shape == \"GRID\" || shape == \"CIR\"{\n\tcrtPosition=crtPosition+1\n\tfor ; i != 1; i++{\n\t\tvar frtCoord string=stringarray[crtPosition]\n\t\tx:=frtCoord[0:1]\n\t\ty:=frtCoord[1:2]\n\t\t// Validates the Y to the give range\n\t\tif y==\"0\" ||y==\"1\" || y==\"2\" || y==\"3\" || y==\"4\" || y==\"5\" || y==\"6\"{\n\t\t\t\tif shape == \"CIR\"{ // Circle Carry a different format so the printing is as follows\n\t\t\t\t\tfmt.Println(strings.Trim(fmt.Sprint(outputArray), \"[] \")+\" <coord>\"+ \"finish1\")\n\t\t\t\t\tfmt.Println(strings.Trim(fmt.Sprint(outputArray), \"[] \")+\" <x><y>,<x>\"+ \"finish1\")\n\t\t\t\t\tfmt.Println(strings.Trim(fmt.Sprint(outputArray), \"[] \")+\" <x>\"+y+\" finish1\")\n\t\t\t\t\t}else{ // Its just the regular format\n\t\t\t\t\t\n\t\t\t\t\tfmt.Println(strings.Trim(fmt.Sprint(outputArray), \"[] \")+\" <x><y>\"+ \"finish1\")\t\t\n\t\t\t\t\tfmt.Println(strings.Trim(fmt.Sprint(outputArray), \"[] \")+\" <x>\"+y+\" finish1\")\n\t\t\t\t\t}\n\t\t\tif x==\"A\" || x==\"B\" || x==\"C\" || x==\"D\" || x==\"E\" || x==\"G\" || x==\"F\"{\n\t\t\t\t\tfmt.Println(strings.Trim(fmt.Sprint(outputArray), \"[] \")+\" \"+x+y+\" finish2\")\n\t\t\t\t\tendString=(\",\"+x+\"\"+y+\" 
finish\")\t\t\t\t\t\t\t\t\t\n\t\t\t\t\t}\t\t\t\t\t\t\t\t\t\t\t\t\n\t\tcrtPosition--\t\n\t}\n}\n}\n\nreturn endString\n}", "func getPoint(x, y []byte) plotter.XYZs {\n\tpts := make(plotter.XYZs, len(x))\n\tfor i := range x {\n\t\tpts[i].X = float64(x[i])\n\t\tpts[i].Y = float64(y[i])\n\t\tpts[i].Z = 0.1\n\n\t}\n\treturn pts\n}", "func CallEvalArgAt(vm *VM, target, locals Interface, msg *Message) Interface {\n\tc := target.(*Call)\n\tv, stop := msg.NumberArgAt(vm, locals, 0)\n\tif stop != nil {\n\t\treturn stop\n\t}\n\treturn c.Msg.EvalArgAt(vm, c.Sender, int(v.Value))\n}", "func (l *Logger) PrintCaller(skip int) {\n\tl.Log(Info, SPrintCaller(skip+2))\n}", "func (_Address *AddressRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error {\n\treturn _Address.Contract.AddressCaller.contract.Call(opts, result, method, params...)\n}", "func (_Address *AddressRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error {\n\treturn _Address.Contract.AddressCaller.contract.Call(opts, result, method, params...)\n}", "func (_Address *AddressRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error {\n\treturn _Address.Contract.AddressCaller.contract.Call(opts, result, method, params...)\n}", "func (_Address *AddressRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error {\n\treturn _Address.Contract.AddressCaller.contract.Call(opts, result, method, params...)\n}", "func (pc *programCode) createCall(name string) {\n\tcode := \"\"\n\tcode += \"\\n\\tcall \" + name + \"\\t; call label \" + name + \"\\n\"\n\tpc.appendCode(code)\n}", "func (pc *ParticleClient) EdgePoints(nodeID, parentID string, points []data.Point) {\n\tpc.newEdgePoints <- NewPoints{nodeID, parentID, points}\n}", "func (s *BaseAspidaListener) ExitPoints(ctx *PointsContext) {}", "func (s *Service) processBatches(batcher *tsdb.PointBatcher) {\n\tfor 
{\n\t\tselect {\n\t\tcase <-s.done:\n\t\t\treturn\n\t\tcase batch := <-batcher.Out():\n\t\t\t// Will attempt to create database if not yet created.\n\t\t\tif err := s.createInternalStorage(); err != nil {\n\t\t\t\ts.Logger.Info(\"Required database does not yet exist\", logger.Database(s.Database), zap.Error(err))\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tif err := s.PointsWriter.WritePointsPrivileged(s.Database, s.RetentionPolicy, coordinator.ConsistencyLevelAny, batch); err == nil {\n\t\t\t\tatomic.AddInt64(&s.stats.BatchesTransmitted, 1)\n\t\t\t\tatomic.AddInt64(&s.stats.PointsTransmitted, int64(len(batch)))\n\t\t\t} else {\n\t\t\t\ts.Logger.Info(\"Failed to write point batch to database\",\n\t\t\t\t\tlogger.Database(s.Database), zap.Error(err))\n\t\t\t\tatomic.AddInt64(&s.stats.BatchesTransmitFail, 1)\n\t\t\t}\n\t\t}\n\t}\n}" ]
[ "0.5804814", "0.55561084", "0.54828817", "0.54530644", "0.5207425", "0.51024735", "0.5098401", "0.50678176", "0.5043976", "0.50209445", "0.49973452", "0.49861223", "0.49611437", "0.4915316", "0.4851949", "0.48359668", "0.48310632", "0.48305863", "0.4799254", "0.4785857", "0.47404245", "0.47260666", "0.471229", "0.4692623", "0.4668686", "0.46684283", "0.4668256", "0.46433884", "0.4631132", "0.46265152", "0.46122575", "0.46096632", "0.45997483", "0.45756313", "0.45704678", "0.4565594", "0.45535722", "0.45529148", "0.45525354", "0.4542994", "0.45405114", "0.45199203", "0.45143414", "0.45112288", "0.45055437", "0.4503116", "0.45026866", "0.44925693", "0.44793507", "0.44611636", "0.44490153", "0.4445905", "0.44390187", "0.4438711", "0.44382766", "0.4436614", "0.44322366", "0.44248334", "0.44220203", "0.44158792", "0.4413424", "0.44094837", "0.44091365", "0.4407834", "0.44056374", "0.43943876", "0.43943852", "0.43928373", "0.43847877", "0.4381054", "0.43742898", "0.43694118", "0.43687925", "0.43525147", "0.43520886", "0.43508846", "0.43455824", "0.4343373", "0.43370873", "0.43367916", "0.4334879", "0.43278193", "0.43245715", "0.430518", "0.43004552", "0.42907017", "0.42865756", "0.4285323", "0.427855", "0.42713702", "0.42708027", "0.4267776", "0.42665762", "0.42665762", "0.42665762", "0.42665762", "0.4266447", "0.42616004", "0.42595842", "0.4259145" ]
0.72903216
0
acceptService returns true if the service is to be accepted, false if it's to be ignored
acceptService возвращает true, если сервис должен быть принят, и false, если его следует игнорировать
func (bf *boardFilter) acceptService(service ldb.Service) bool { // Original requirement, must have an RID if service.RID == "" { return false } // remove terminating services if bf.terminated && bf.atStation(service.Destination) { return false } if bf.callAt && !bf.callsAt(service.CallingPoints, bf.callAtTiplocs) { return false } return true }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (f *aclFilter) allowService(service string) bool {\n\tif service == \"\" {\n\t\treturn true\n\t}\n\n\tif !f.enforceVersion8 && service == structs.ConsulServiceID {\n\t\treturn true\n\t}\n\treturn f.authorizer.ServiceRead(service)\n}", "func (m *MockMessageSvc) Accept(msgType string, purpose []string) bool {\n\tif m.AcceptFunc != nil {\n\t\treturn m.AcceptFunc(msgType, purpose)\n\t}\n\n\treturn true\n}", "func (s *Service) Accept(conn net.Conn, ipport string) error {\n\tswitch s.Role {\n\tcase ROLE_MANAGE:\n\t\treturn TcpAcceptor(conn, s, ipport)\n\tcase ROLE_PROXY, ROLE_WEBSERVER:\n\t\treturn HttpAcceptor(conn, s, ipport)\n\tdefault:\n\t\tlog.Fatal(\"unknown role in accept\")\n\t}\n\treturn errors.New(\"Accept fell through!\")\n}", "func (s *ss) accept(ok string) bool {\n\treturn s.consume(ok, true)\n}", "func (s *Suite) Accept(t string) bool {\n\treturn t == signatureType\n}", "func (f *MSPFilter) Accept(peer fab.Peer) bool {\n\treturn peer.MSPID() == f.mspID\n}", "func IsValidService(s string) bool {\n\tswitch s {\n\tcase\n\t\t\"all\",\n\t\t\"proxy\",\n\t\t\"authorize\",\n\t\t\"authenticate\":\n\t\treturn true\n\t}\n\treturn false\n}", "func (v *VDRI) Accept(method string) bool {\n\treturn v.accept(method)\n}", "func (s ServiceSpecs) SupportService(serviceUrl string, serviceOrg string) bool {\n\tif serviceUrl == \"\" {\n\t\treturn true\n\t} else {\n\t\tif len(s) == 0 {\n\t\t\treturn true\n\t\t} else {\n\t\t\tfor _, sp := range s {\n\t\t\t\tif sp.Url == serviceUrl && (sp.Org == \"\" || sp.Org == serviceOrg) {\n\t\t\t\t\treturn true\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\treturn false\n}", "func isRelevantExposedService(service *v1alpha1.ServiceExpositionPolicy_ExposedService, toClusterID string) bool {\n\t// If there is no clusters list, we treat this policy as exposed to all trusted clusters\n\tif len(service.Clusters) == 0 {\n\t\treturn true\n\t}\n\n\t// Go through the list of allowed clusters and see if it is listed\n\tfor _, cluster := range 
service.Clusters {\n\t\tif cluster == toClusterID {\n\t\t\treturn true\n\t\t}\n\t}\n\n\t// Service is not exposed to the specified cluster\n\treturn false\n}", "func containsService(name string, services []servicescm.Service) bool {\n\tfor _, svc := range services {\n\t\tif svc.Name == name {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func IsExposedService(svc *corev1.Service) bool {\n\tlabels := svc.Labels\n\tif labels == nil {\n\t\tlabels = map[string]string{}\n\t}\n\tfor _, l := range ExposeLabelKeys {\n\t\tif labels[l] == \"true\" {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func (p awsPeeringServiceOp) Accept(ctx context.Context, input *models.AcceptAwsPeeringInput) (*models.Result, *Response, error) {\n\tvar peeringResult models.Result\n\tgraphqlRequest := models.GraphqlRequest{\n\t\tName: \"acceptAwsPeering\",\n\t\tOperation: models.Mutation,\n\t\tInput: *input,\n\t\tArgs: nil,\n\t\tResponse: peeringResult,\n\t}\n\treq, err := p.client.NewRequest(&graphqlRequest)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tresp, err := p.client.Do(ctx, req, &peeringResult)\n\tif err != nil {\n\t\treturn nil, resp, err\n\t}\n\n\treturn &peeringResult, resp, err\n}", "func (px *Paxos) send_accept(seq int, p Proposal) bool {\n\tok_count := 0\n\n\tfor idx, peer := range px.peers {\n\t\targs := &AcceptArgs{}\n\t\treply := &AcceptReply{}\n\n\t\targs.Seq = seq\n\t\targs.Proposal = p\n\n\t\tok := false\n\n\t\tif idx == px.me {\n\t\t\tpx.Accept(args, reply)\n\t\t\tok = true\n\t\t} else {\n\t\t\tok = call(peer, \"Paxos.Accept\", args, reply)\n\t\t}\n\n\t\tif ok && reply.Err == OK {\n\t\t\tok_count++\n\t\t}\n\t}\n\n\tpx.clog(DBG_PREPARE, \"send_accept\", \"seq=%d p=%v ok_count=%d/%d\", seq, p, ok_count, px.majority)\n\n\treturn (ok_count >= px.majority)\n}", "func (q *Query) ServiceMatches(s ServiceInfo) bool {\n\tif q.Service != \"\" && s.Config.Name != q.Service {\n\t\treturn false\n\t}\n\n\tif q.Version != \"\" && s.Config.Version != q.Version 
{\n\t\treturn false\n\t}\n\n\tif q.Region != \"\" && s.Config.Region != q.Region {\n\t\treturn false\n\t}\n\n\tif q.Host != \"\" && s.Config.ServiceAddr.IPAddress != q.Host {\n\t\treturn false\n\t}\n\n\tif q.Port != \"\" && fmt.Sprintf(\"%d\", s.Config.ServiceAddr.Port) != q.Port {\n\t\treturn false\n\t}\n\n\tif q.Registered != nil && s.Registered != *q.Registered {\n\t\treturn false\n\t}\n\n\tif q.UUID != \"\" && s.Config.UUID != q.UUID {\n\t\treturn false\n\t}\n\n\treturn true\n}", "func (c *comp) Accepting() bool {\n\treturn !c.r.Accepting()\n}", "func (o *ReservationStayOfferServiceModel) GetServiceOk() (*EmbeddedServiceModel, bool) {\n\tif o == nil {\n\t\treturn nil, false\n\t}\n\treturn &o.Service, true\n}", "func (p *Reader) Accept(services protocol.ServiceFlag) error {\n\tif err := p.Handshake(services); err != nil {\n\t\t_ = p.Conn.Close()\n\t\treturn err\n\t}\n\n\tif config.Get().API.Enabled {\n\t\tgo func() {\n\t\t\tstore := capi.GetStormDBInstance()\n\t\t\taddr := p.Addr()\n\t\t\tpeerJSON := capi.PeerJSON{\n\t\t\t\tAddress: addr,\n\t\t\t\tType: \"Reader\",\n\t\t\t\tMethod: \"Accept\",\n\t\t\t\tLastSeen: time.Now(),\n\t\t\t}\n\n\t\t\terr := store.Save(&peerJSON)\n\t\t\tif err != nil {\n\t\t\t\tlog.Error(\"failed to save peer into StormDB\")\n\t\t\t}\n\n\t\t\t// save count\n\t\t\tpeerCount := capi.PeerCount{\n\t\t\t\tID: addr,\n\t\t\t\tLastSeen: time.Now(),\n\t\t\t}\n\n\t\t\terr = store.Save(&peerCount)\n\t\t\tif err != nil {\n\t\t\t\tlog.Error(\"failed to save peerCount into StormDB\")\n\t\t\t}\n\t\t}()\n\t}\n\n\treturn nil\n}", "func (m *OverDIDComm) Accept(msgType string, purpose []string) bool {\n\tif msgType != OverDIDCommMsgRequestType {\n\t\treturn false\n\t}\n\n\t// if purpose not set, then match only message type.\n\tif len(m.purpose) == 0 {\n\t\treturn true\n\t}\n\n\t// match purpose if provided\n\tfor _, msgPurpose := range purpose {\n\t\tfor _, svcPurpose := range m.purpose {\n\t\t\tif msgPurpose == svcPurpose {\n\t\t\t\treturn 
true\n\t\t\t}\n\t\t}\n\t}\n\n\treturn false\n}", "func (c *Client) AcceptTOS(ctx context.Context, id tg.DataJSON) error {\n\t_, err := c.api.HelpAcceptTermsOfService(ctx, id)\n\treturn err\n}", "func (input *Input) AcceptsXML() bool {\n\treturn acceptsXMLRegex.MatchString(input.Header(\"Accept\"))\n}", "func (pf *File) HasService(name string) bool {\n\treturn pf.GetService(name) != nil\n}", "func MatchService(inputName string) ServiceConfig {\n\tinputName = strings.ToLower(inputName)\n\timgName, ok := keywords[inputName]\n\tif !ok {\n\t\timgName = keywords[\"default\"]\n\t}\n\treturn ServiceConfig{\n\t\tServiceName: sanitizeServiceName(inputName),\n\t\tServiceImage: imgName,\n\t}\n}", "func (o GetVpcEndpointServicesServiceOutput) AutoAcceptConnection() pulumi.BoolOutput {\n\treturn o.ApplyT(func(v GetVpcEndpointServicesService) bool { return v.AutoAcceptConnection }).(pulumi.BoolOutput)\n}", "func (f *urlTargetFilter) Accept(peer fab.Peer) bool {\n\treturn peer.URL() == f.url\n}", "func (*endpoint) Accept() (tcpip.Endpoint, *waiter.Queue, *tcpip.Error) {\n\treturn nil, nil, tcpip.ErrNotSupported\n}", "func (na *cnmNetworkAllocator) IsServiceAllocated(s *api.Service, flags ...func(*networkallocator.ServiceAllocationOpts)) bool {\n\tvar options networkallocator.ServiceAllocationOpts\n\tfor _, flag := range flags {\n\t\tflag(&options)\n\t}\n\n\tspecNetworks := serviceNetworks(s)\n\n\t// If endpoint mode is VIP and allocator does not have the\n\t// service in VIP allocated set then it needs to be allocated.\n\tif len(specNetworks) != 0 &&\n\t\t(s.Spec.Endpoint == nil ||\n\t\t\ts.Spec.Endpoint.Mode == api.ResolutionModeVirtualIP) {\n\n\t\tif _, ok := na.services[s.ID]; !ok {\n\t\t\treturn false\n\t\t}\n\n\t\tif s.Endpoint == nil || len(s.Endpoint.VirtualIPs) == 0 {\n\t\t\treturn false\n\t\t}\n\n\t\t// If the spec has networks which don't have a corresponding VIP,\n\t\t// the service needs to be allocated.\n\tnetworkLoop:\n\t\tfor _, net := range specNetworks 
{\n\t\t\tfor _, vip := range s.Endpoint.VirtualIPs {\n\t\t\t\tif vip.NetworkID == net.Target {\n\t\t\t\t\tcontinue networkLoop\n\t\t\t\t}\n\t\t\t}\n\t\t\treturn false\n\t\t}\n\t}\n\n\t// If the spec no longer has networks attached and has a vip allocated\n\t// from previous spec the service needs to allocated.\n\tif s.Endpoint != nil {\n\tvipLoop:\n\t\tfor _, vip := range s.Endpoint.VirtualIPs {\n\t\t\tif na.IsVIPOnIngressNetwork(vip) && networkallocator.IsIngressNetworkNeeded(s) {\n\t\t\t\t// This checks the condition when ingress network is needed\n\t\t\t\t// but allocation has not been done.\n\t\t\t\tif _, ok := na.services[s.ID]; !ok {\n\t\t\t\t\treturn false\n\t\t\t\t}\n\t\t\t\tcontinue vipLoop\n\t\t\t}\n\t\t\tfor _, net := range specNetworks {\n\t\t\t\tif vip.NetworkID == net.Target {\n\t\t\t\t\tcontinue vipLoop\n\t\t\t\t}\n\t\t\t}\n\t\t\treturn false\n\t\t}\n\t}\n\n\t// If the endpoint mode is DNSRR and allocator has the service\n\t// in VIP allocated set then we return to be allocated to make\n\t// sure the allocator triggers networkallocator to free up the\n\t// resources if any.\n\tif s.Spec.Endpoint != nil && s.Spec.Endpoint.Mode == api.ResolutionModeDNSRoundRobin {\n\t\tif _, ok := na.services[s.ID]; ok {\n\t\t\treturn false\n\t\t}\n\t}\n\n\tif (s.Spec.Endpoint != nil && len(s.Spec.Endpoint.Ports) != 0) ||\n\t\t(s.Endpoint != nil && len(s.Endpoint.Ports) != 0) {\n\t\treturn na.portAllocator.isPortsAllocatedOnInit(s, options.OnInit)\n\t}\n\treturn true\n}", "func acceptsOffer(spec, offer string) bool {\n\tif len(spec) >= 1 && spec[len(spec)-1] == '*' {\n\t\treturn true\n\t} else if strings.HasPrefix(spec, offer) {\n\t\treturn true\n\t}\n\treturn false\n}", "func (o *EventAttributes) GetServiceOk() (*string, bool) {\n\tif o == nil || o.Service == nil {\n\t\treturn nil, false\n\t}\n\treturn o.Service, true\n}", "func (s *Service) Matches(check string) bool {\n\tif serviceMatches(check, s.Name()) {\n\t\treturn true\n\t}\n\tfor _, p := range s.Provides() 
{\n\t\tif serviceMatches(check, p) {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func (ic IgnoredService) IsServiceIgnored(srv Service) bool {\n\tfor _, ignoredCheck := range ic.ignoredChecks {\n\t\tif ignoredCheck.Name == srv.Name {\n\t\t\tinstances := strings.Split(ignoredCheck.Instance, \" \")\n\t\t\tif len(instances) == 1 && instances[0] == \"\" {\n\t\t\t\treturn true\n\t\t\t}\n\n\t\t\tfor _, instance := range instances {\n\t\t\t\thasMatched := matchInstance(instance, srv.ContainerName)\n\t\t\t\tif hasMatched {\n\t\t\t\t\treturn true\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\treturn false\n}", "func (m *BACnetServiceAckAtomicWriteFile) ServiceChoice() uint8 {\n\treturn 0x07\n}", "func isValidServiceName(name string) bool {\n\tswitch name {\n\tcase\n\t\t\"register\",\n\t\t\"ping\",\n\t\t\"conv_creation\",\n\t\t\"conv_manag\",\n\t\t\"msg_sender\",\n\t\t\"conv-sub\",\n\t\t\"user-manag\",\n\t\t\"login\":\n\t\treturn true\n\t}\n\treturn false\n}", "func IsTypeAService(t string) bool {\n\tfor _, serviceType := range ServiceTypes() {\n\t\tif t == serviceType {\n\t\t\treturn true\n\t\t}\n\t}\n\n\treturn false\n}", "func (t *AuroraTask) IsService(isService bool) *AuroraTask {\n\tt.task.IsService = isService\n\treturn t\n}", "func (o *OfferServiceModel) GetServiceOk() (*EmbeddedServiceModel, bool) {\n\tif o == nil {\n\t\treturn nil, false\n\t}\n\treturn &o.Service, true\n}", "func (input *BeegoInput) AcceptsXML() bool {\n\treturn acceptsXMLRegex.MatchString(input.Header(\"Accept\"))\n}", "func (ctx *Context) AcceptXML() bool {\r\n\treturn acceptsXMLRegex.MatchString(ctx.HeaderParam(HeaderAccept))\r\n}", "func (r *Automaton) IsAccept(state int) bool {\n\treturn r.isAccept.Test(uint(state))\n}", "func ServiceAvailable(ctx *Context, url string, timeout time.Duration) bool {\n\n\tLog(INFO, ctx, \"ServiceAvailable\", \"url\", url)\n\n\tclient := &http.Client{Timeout: timeout}\n\tresp, err := client.Get(url)\n\tif err != nil {\n\t\tLog(INFO, ctx, \"ServiceAvailable\", 
\"url\", url, \"error\", err, \"available\", false)\n\t\tLog(ERROR, ctx, \"ServiceAvailable\", \"url\", url, \"error\", err, \"available\", false)\n\t\treturn false\n\t}\n\n\tif resp.StatusCode != 200 {\n\t\tLog(INFO, ctx, \"ServiceAvailable\", \"url\", url, \"code\", resp.StatusCode, \"available\", false)\n\t\treturn false\n\t}\n\n\tLog(INFO, ctx, \"ServiceAvailable\", \"url\", url, \"available\", true)\n\treturn true\n}", "func handleService(req typhon.Request) typhon.Response {\n\tparts := reService.FindStringSubmatch(req.URL.Path)\n\tif len(parts) != 3 {\n\t\treturn typhon.Response{Error: terrors.NotFound(\"bad_endpoint\", \"Unable to determine service endpoint.\", nil)}\n\t}\n\n\treturn handle(req, \"s-\"+parts[1], parts[2])\n}", "func (m *Message) IsService() bool {\n\tfact := false\n\n\tfact = fact || m.UserJoined != nil\n\tfact = fact || len(m.UsersJoined) > 0\n\tfact = fact || m.UserLeft != nil\n\tfact = fact || m.NewGroupTitle != \"\"\n\tfact = fact || m.NewGroupPhoto != nil\n\tfact = fact || m.GroupPhotoDeleted\n\tfact = fact || m.GroupCreated || m.SuperGroupCreated\n\tfact = fact || (m.MigrateTo != m.MigrateFrom)\n\n\treturn fact\n}", "func (r *ReconcileTFAnalytics) handleCollectorService() (bool, error) {\n\t// Define a new Collector service object\n\tcollectorService := newServicesForCollector(r.instance)\n\t// Set TFAnalytics instance as the owner and controller\n\tif err := controllerutil.SetControllerReference(r.instance, collectorService, r.scheme); err != nil {\n\t\treturn false, err\n\t}\n\t// Check if this Collector Service already exists\n\tfoundCollectorService := &corev1.Service{}\n\terr := r.client.Get(context.TODO(), types.NamespacedName{Name: collectorService.Name, Namespace: collectorService.Namespace}, foundCollectorService)\n\tif err != nil && errors.IsNotFound(err) {\n\t\tr.reqLogger.Info(\"Creating a new Collector Service\", \"Service.Name\", collectorService.Name)\n\t\terr = r.client.Create(context.TODO(), collectorService)\n\t\tif 
err != nil {\n\t\t\treturn false, err\n\t\t}\n\t\t// Service has been created successfully - don't requeue\n\t\treturn false, nil\n\t} else if err != nil {\n\t\treturn false, err\n\t}\n\t// Service already exists - don't requeue\n\tr.reqLogger.Info(\"Skip reconcile: Collector Service already exists\", \"Service.Name\", foundCollectorService.Name)\n\treturn false, nil\n}", "func (s *Server) Accept() chan rtmputils.ConnPair {\n\treturn s.accept\n}", "func (oc *Controller) handlePeerService(\n\tpolicy *knet.NetworkPolicy, gp *gressPolicy, np *networkPolicy) {\n\n\th := oc.watchFactory.AddFilteredServiceHandler(policy.Namespace,\n\t\tcache.ResourceEventHandlerFuncs{\n\t\t\tAddFunc: func(obj interface{}) {\n\t\t\t\t// Service is matched so add VIP to addressSet\n\t\t\t\toc.handlePeerServiceAdd(gp, obj)\n\t\t\t},\n\t\t\tDeleteFunc: func(obj interface{}) {\n\t\t\t\t// If Service that has matched pods are deleted remove VIP\n\t\t\t\toc.handlePeerServiceDelete(gp, obj)\n\t\t\t},\n\t\t\tUpdateFunc: func(oldObj, newObj interface{}) {\n\t\t\t\t// If Service Is updated make sure same pods are still matched\n\t\t\t\toldSvc := oldObj.(*kapi.Service)\n\t\t\t\tnewSvc := newObj.(*kapi.Service)\n\t\t\t\tif reflect.DeepEqual(newSvc.Spec.ExternalIPs, oldSvc.Spec.ExternalIPs) &&\n\t\t\t\t\treflect.DeepEqual(newSvc.Spec.ClusterIP, oldSvc.Spec.ClusterIP) &&\n\t\t\t\t\treflect.DeepEqual(newSvc.Spec.Type, oldSvc.Spec.Type) &&\n\t\t\t\t\treflect.DeepEqual(newSvc.Status.LoadBalancer.Ingress, oldSvc.Status.LoadBalancer.Ingress) {\n\n\t\t\t\t\tklog.V(5).Infof(\"Skipping service update for: %s as change does not apply to any of .Spec.Ports, \"+\n\t\t\t\t\t\t\".Spec.ExternalIP, .Spec.ClusterIP, .Spec.Type, .Status.LoadBalancer.Ingress\", newSvc.Name)\n\t\t\t\t\treturn\n\t\t\t\t}\n\n\t\t\t\toc.handlePeerServiceDelete(gp, oldObj)\n\t\t\t\toc.handlePeerServiceAdd(gp, newObj)\n\t\t\t},\n\t\t}, nil)\n\tnp.svcHandlerList = append(np.svcHandlerList, h)\n}", "func IsServiceCondition(t apis.ConditionType) 
bool {\n\tswitch t {\n\tcase\n\t\tServiceConditionReady,\n\t\tServiceConditionRoutesReady,\n\t\tServiceConditionConfigurationsReady:\n\t\treturn true\n\t}\n\treturn false\n}", "func is_accepted(w http.ResponseWriter, r *http.Request) {\r\n\tfmt.Println(\"\\n Api Hit====>isAccepted\")\r\n\tvar vars = mux.Vars(r)\r\n\tvar id = vars[\"id\"]\r\n\tproc := cache[id]\r\n\tflag := isAccepted(proc)\r\n\tif flag {\r\n\t\tjson.NewEncoder(w).Encode(\"Input tokens successfully Accepted\")\r\n\t} else {\r\n\t\tjson.NewEncoder(w).Encode(\"Input tokens Rejected by the PDA\")\r\n\t}\r\n}", "func (fsm *DeployFSMContext) checkServiceReady() (bool, error) {\n\truntime := fsm.Runtime\n\t// do not check if nil for compatibility\n\tif fsm.Deployment.Extra.ServicePhaseStartAt != nil {\n\t\tstartCheckPoint := fsm.Deployment.Extra.ServicePhaseStartAt.Add(30 * time.Second)\n\t\tif time.Now().Before(startCheckPoint) {\n\t\t\tfsm.pushLog(fmt.Sprintf(\"checking too early, delay to: %s\", startCheckPoint.String()))\n\t\t\t// too early to check\n\t\t\treturn false, nil\n\t\t}\n\t}\n\tisReplicasZero := false\n\tfor _, s := range fsm.Spec.Services {\n\t\tif s.Deployments.Replicas == 0 {\n\t\t\tisReplicasZero = true\n\t\t\tbreak\n\t\t}\n\t}\n\tif isReplicasZero {\n\t\tfsm.pushLog(\"checking status by inspect\")\n\t\t// we do double check to prevent `fake Healthy`\n\t\t// runtime.ScheduleName must have\n\t\tsg, err := fsm.getServiceGroup()\n\t\tif err != nil {\n\t\t\treturn false, err\n\t\t}\n\t\treturn sg.Status == \"Ready\" || sg.Status == \"Healthy\", nil\n\t}\n\n\t// 获取addon状态\n\tserviceGroup, err := fsm.getServiceGroup()\n\tif err != nil {\n\t\tfsm.pushLog(fmt.Sprintf(\"获取service状态失败,%s\", err.Error()))\n\t\treturn false, nil\n\t}\n\tfsm.pushLog(fmt.Sprintf(\"checking status: %s, servicegroup: %v\", serviceGroup.Status, runtime.ScheduleName))\n\t// 如果状态是failed,说明服务或者job运行失败\n\tif serviceGroup.Status == apistructs.StatusFailed {\n\t\treturn false, errors.New(serviceGroup.LastMessage)\n\t}\n\t// 
如果状态是ready或者healthy,说明服务已经发起来了\n\truntimeStatus := apistructs.RuntimeStatusUnHealthy\n\tif serviceGroup.Status == apistructs.StatusReady || serviceGroup.Status == apistructs.StatusHealthy {\n\t\truntimeStatus = apistructs.RuntimeStatusHealthy\n\t}\n\truntimeItem := fsm.Runtime\n\tif runtimeItem.Status != runtimeStatus {\n\t\truntimeItem.Status = runtimeStatus\n\t\tif err := fsm.db.UpdateRuntime(runtime); err != nil {\n\t\t\tlogrus.Errorf(\"failed to update runtime status changed, runtime: %v, err: %v\", runtime.ID, err.Error())\n\t\t\treturn false, nil\n\t\t}\n\t}\n\tif runtimeStatus == apistructs.RuntimeStatusHealthy {\n\t\treturn true, nil\n\t}\n\treturn false, nil\n}", "func WaitForService(address string, logger *log.Logger) bool {\n\n\tfor i := 0; i < 12; i++ {\n\t\tconn, err := net.Dial(\"tcp\", address)\n\t\tif err != nil {\n\t\t\tlogger.Println(\"Connection error:\", err)\n\t\t} else {\n\t\t\tconn.Close()\n\t\t\tlogger.Println(fmt.Sprintf(\"Connected to %s\", address))\n\t\t\treturn true\n\t\t}\n\t\ttime.Sleep(5 * time.Second)\n\t}\n\n\treturn false\n}", "func (h *Handler) Accept() {\n}", "func RunService(ser Server) {\n\tfor {\n\t\tcon := Accept(ser)\n\t\tgo ser.HandleRequest(con)\n\t}\n}", "func Service(b bool) Option {\n\treturn func(o *Options) {\n\t\to.Discovery = b\n\t}\n}", "func (p *PaxosNode) ReceiveAcceptOK(OKMsg *paxos.PaxosMessage, slot *paxos.Slot) bool {\n\tif slot.Nhighest != OKMsg.ProposalNumber {\n\t\treturn false\n\t}\n\n\tif _, ok := slot.AcceptOKIDs[OKMsg.ProposerID]; ok {\n\t\treturn false\n\t}\n\n\tslot.AcceptOKIDs[OKMsg.ProposerID] = true\n\tslot.NumberOfAcceptOK++\n\tif slot.NumberOfAcceptOK >= ((len(slot.ClusterMembers) + 1) / 2) { // yay!!! 
majority\n\t\treturn true\n\t}\n\treturn false\n}", "func Accepts(request *http.Request, mimetype string) (bool, error) {\n\tmediaRange := request.Header.Get(\"Accept\")\n\tfor _, v := range strings.Split(mediaRange, \",\") {\n\t\tt, _, err := mime.ParseMediaType(v)\n\t\tif err != nil {\n\t\t\treturn false, ErrInvalid\n\t\t}\n\n\t\tif IsAccepted(mimetype, t) {\n\t\t\treturn true, nil\n\t\t}\n\t}\n\treturn false, nil\n}", "func (cs *OutboundHTTPClient) AcceptRecipient([]string) bool {\n\treturn false\n}", "func (op *EnableServiceOperation) Done() bool {\n\treturn op.lro.Done()\n}", "func selectService(context context.T, tracer trace.Tracer, input *ConfigurePackagePluginInput, localrepo localpackages.Repository, appCfg *appconfig.SsmagentConfig, birdwatcherFacade facade.BirdwatcherFacade, isDocumentArchive *bool) (packageservice.PackageService, error) {\n\tregion, _ := context.Identity().Region()\n\tserviceEndpoint := input.Repository\n\tresponse := &ssm.GetManifestOutput{}\n\tvar err error\n\tvar s3Endpoint string\n\tif s3Endpoint, err = s3util.GetS3Endpoint(context, region); err != nil {\n\t\ttracer.CurrentTrace().AppendErrorf(\"Failed to generate s3 endpoint - %v.\", err.Error())\n\t\treturn nil, err\n\t}\n\n\tif (appCfg != nil && appCfg.Birdwatcher.ForceEnable) || !ssms3.UseSSMS3Service(context, tracer, s3Endpoint, serviceEndpoint, region) {\n\t\t// This indicates that it would be the birdwatcher service.\n\t\t// Before creating an object of type birdwatcher here, check if the name is of document arn. If it is, return with a Document type service\n\t\tif regexp.MustCompile(documentArnPattern).MatchString(input.Name) {\n\t\t\t*isDocumentArchive = true\n\t\t\t// return a new object of type document\n\t\t\treturn birdwatcherservice.NewDocumentArchive(context, birdwatcherFacade, localrepo), nil\n\t\t}\n\t\tif input.Version != \"\" {\n\t\t\t// Birdwatcher version pattern and document version name pattern is different. 
If the pattern doesn't match Birdwatcher,\n\t\t\t// we assume document and continue, since birdwatcher will error out with ValidationException.\n\t\t\t// This could also happen if there is a typo in the birdwatcher version, but we assume Document and continue.\n\t\t\tif !regexp.MustCompile(birdwatcherVersionPattern).MatchString(input.Version) {\n\t\t\t\t*isDocumentArchive = true\n\t\t\t\t// return a new object of type document\n\t\t\t\treturn birdwatcherservice.NewDocumentArchive(context, birdwatcherFacade, localrepo), nil\n\t\t\t}\n\t\t}\n\n\t\t// If not, make a call to GetManifest and try to figure out if it is birdwatcher or document archive.\n\t\tversion := input.Version\n\t\tif packageservice.IsLatest(version) {\n\t\t\tversion = packageservice.Latest\n\t\t}\n\t\tresponse, err = birdwatcherFacade.GetManifest(\n\t\t\t&ssm.GetManifestInput{\n\t\t\t\tPackageName: &input.Name,\n\t\t\t\tPackageVersion: &version,\n\t\t\t},\n\t\t)\n\n\t\t// If the error returned is the \"ResourceNotFoundException\", create a service with document archive\n\t\t// if any other response, create a service of birdwatcher type\n\t\tif err != nil {\n\t\t\tif strings.Contains(err.Error(), resourceNotFoundException) {\n\t\t\t\t*isDocumentArchive = true\n\t\t\t\t// return a new object of type document\n\t\t\t\treturn birdwatcherservice.NewDocumentArchive(context, birdwatcherFacade, localrepo), nil\n\t\t\t} else {\n\t\t\t\ttracer.CurrentTrace().AppendErrorf(\"Error returned for GetManifest - %v.\", err.Error())\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t}\n\n\t\t*isDocumentArchive = false\n\n\t\t// return a new object of type birdwatcher\n\t\tbirdWatcherArchiveContext := make(map[string]string)\n\t\tbirdWatcherArchiveContext[\"packageName\"] = input.Name\n\t\tbirdWatcherArchiveContext[\"packageVersion\"] = input.Version\n\t\tbirdWatcherArchiveContext[\"manifest\"] = *response.Manifest\n\t\treturn birdwatcherservice.NewBirdwatcherArchive(context, birdwatcherFacade, localrepo, 
birdWatcherArchiveContext), nil\n\t}\n\n\ttracer.CurrentTrace().AppendInfof(\"S3 repository is marked active\")\n\treturn ssms3.New(context, s3Endpoint, serviceEndpoint, region), nil\n}", "func (s *State) acceptable(addr string, point string) bool {\n\tif s.optionalValidator == nil {\n\t\tif addr == point {\n\t\t\treturn true\n\t\t}\n\t\treturn false\n\t}\n\n\ts.vo.Lock()\n\tstate := s.optionalValidator(addr, point)\n\ts.vo.Unlock()\n\treturn state\n}", "func (q *Quotas) TryAccept(issuerKey utils.IssuerKey) (bool, int) {\n\tq.lock.Lock()\n\tdefer q.lock.Unlock()\n\n\tif quotas, ok := q.issuerToQuotas[issuerKey]; ok {\n\t\treturn quotas.rateLimiter.TryAccept(), quotas.requestsPerDay\n\t}\n\treturn false, 0\n}", "func DoAcceptDfsServer(ss chan<- interface{}, conn *grpc.ClientConn, clientId string) {\n\tdiscoveryClient := discovery.NewDiscoveryServiceClient(conn)\n\tstream, err := discoveryClient.GetDfsServers(context.Background(),\n\t\t&discovery.GetDfsServersReq{\n\t\t\tClient: &discovery.DfsClient{\n\t\t\t\tId: clientId,\n\t\t\t},\n\t\t})\n\n\tfor err == nil {\n\t\tvar rep *discovery.GetDfsServersRep\n\t\trep, err = stream.Recv()\n\t\tif err != nil {\n\t\t\tglog.Warningf(\"Failed to recv from stream %v\", err)\n\t\t\tbreak // break the whole loop.\n\t\t}\n\n\t\tswitch union := rep.GetDfsServerUnion.(type) {\n\t\tdefault:\n\t\t\tglog.Warningf(\"Failed to receive DfsServer list: unexpected type %T\", union)\n\t\tcase *discovery.GetDfsServersRep_Sl: // server list\n\t\t\tss <- union.Sl.GetServer()\n\t\tcase *discovery.GetDfsServersRep_Hb: // heartbeat\n\t\t\tss <- union.Hb.Timestamp\n\t\t}\n\t}\n\n\tclose(ss)\n}", "func (s *Seller) acceptBid(offer int) bool {\r\n\tif !s.Object.getSold() && offer >= s.bidAccept {\r\n\t\ts.Object.setSold(true)\r\n\t\treturn true\r\n\t} else {\r\n\t\treturn false\r\n\t}\r\n}", "func (s *Seller) acceptBid(offer int) bool {\r\n\tif !s.Object.getSold() && offer >= s.bidAccept {\r\n\t\ts.Object.setSold(true)\r\n\t\treturn true\r\n\t} else 
{\r\n\t\treturn false\r\n\t}\r\n}", "func (s *JSONHTTPServer) StartService(\n\tctx context.Context,\n\tservices ...ServiceAPI,\n) <-chan struct{} {\n\tstarted := make(chan struct{})\n\n\t// This will block, so run it in a goroutine\n\tgo s.startInternal(\n\t\tctx,\n\t\tstarted,\n\t\tservices...)\n\n\treturn started\n}", "func (s *Server) Accept() chan *gortsplib.ServerConn {\n\treturn s.accept\n}", "func AlwaysService(proxyInfo *ProxyInfo) bool {\n\treturn true\n}", "func ProbablyInstallAsService(opts *ServiceInstallerOptions) {\r\n\targs := os.Args\r\n\tisServiceInstaller := false\r\n\tpos := 0\r\n\tfor i, v := range args {\r\n\t\tif strings.HasPrefix(v, \"__installservice\") {\r\n\t\t\tisServiceInstaller = true\r\n\t\t\tpos = i\r\n\t\t}\r\n\t}\r\n\tif !isServiceInstaller {\r\n\t\treturn\r\n\t}\r\n\targs = append(args[:pos], args[pos+1:]...)\r\n\r\n\tif runtime.GOOS == \"windows\" {\r\n\t\tlog.Fatalln(\"installation as service under windows is not yet supported :(\")\r\n\t}\r\n\r\n\tlog.SetFlags(0)\r\n\tlog.Println(\"...will modify /etc/init.d\")\r\n\tbeServiceInstaller(args, opts)\r\n\r\n\t// do not run the app!\r\n\tos.Exit(0)\r\n}", "func AcceptArithServiceClient(lis net.Listener, x ArithService) {\n\tsrv := rpc.NewServer()\n\tif err := srv.RegisterName(\"ArithService\", x); err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tfor {\n\t\tconn, err := lis.Accept()\n\t\tif err != nil {\n\t\t\tlog.Fatalf(\"lis.Accept(): %v\\n\", err)\n\t\t}\n\t\tgo srv.ServeCodec(protorpc.NewServerCodec(conn))\n\t}\n}", "func CheckVirtualService(virtualService IstioObject, namespace string, serviceName string, subsets []string) bool {\n\tif virtualService == nil || virtualService.GetSpec() == nil || subsets == nil {\n\t\treturn false\n\t}\n\tif len(subsets) > 0 && FilterByHost(virtualService.GetSpec(), serviceName) {\n\t\tif http, ok := virtualService.GetSpec()[\"http\"]; ok && checkSubsetRoute(http, serviceName, subsets) {\n\t\t\treturn true\n\t\t}\n\t\tif tcp, ok := 
virtualService.GetSpec()[\"tcp\"]; ok && checkSubsetRoute(tcp, serviceName, subsets) {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func (input *Input) AcceptsJSON() bool {\n\treturn acceptsJSONRegex.MatchString(input.Header(\"Accept\"))\n}", "func ServiceIsReady(resource common.ComponentResource) (bool, error) {\n\tvar service corev1.Service\n\tif err := getObject(resource, &service, true); err != nil {\n\t\treturn false, err\n\t}\n\n\t// if we have a name that is empty, we know we did not find the object\n\tif service.Name == \"\" {\n\t\treturn false, nil\n\t}\n\n\t// return if we have an external service type\n\tif service.Spec.Type == corev1.ServiceTypeExternalName {\n\t\treturn true, nil\n\t}\n\n\t// ensure a cluster ip address exists for cluster ip types\n\tif service.Spec.ClusterIP != corev1.ClusterIPNone && len(service.Spec.ClusterIP) == 0 {\n\t\treturn false, nil\n\t}\n\n\t// ensure a load balancer ip or hostname is present\n\tif service.Spec.Type == corev1.ServiceTypeLoadBalancer {\n\t\tif len(service.Status.LoadBalancer.Ingress) == 0 {\n\t\t\treturn false, nil\n\t\t}\n\t}\n\n\treturn true, nil\n}", "func ServiceIsReady(resource common.ComponentResource) (bool, error) {\n\tvar service corev1.Service\n\tif err := getObject(resource, &service, true); err != nil {\n\t\treturn false, err\n\t}\n\n\t// if we have a name that is empty, we know we did not find the object\n\tif service.Name == \"\" {\n\t\treturn false, nil\n\t}\n\n\t// return if we have an external service type\n\tif service.Spec.Type == corev1.ServiceTypeExternalName {\n\t\treturn true, nil\n\t}\n\n\t// ensure a cluster ip address exists for cluster ip types\n\tif service.Spec.ClusterIP != corev1.ClusterIPNone && len(service.Spec.ClusterIP) == 0 {\n\t\treturn false, nil\n\t}\n\n\t// ensure a load balancer ip or hostname is present\n\tif service.Spec.Type == corev1.ServiceTypeLoadBalancer {\n\t\tif len(service.Status.LoadBalancer.Ingress) == 0 {\n\t\t\treturn false, 
nil\n\t\t}\n\t}\n\n\treturn true, nil\n}", "func (o *CatalogEntry) GetServiceOk() (*string, bool) {\n\tif o == nil || o.Service == nil {\n\t\treturn nil, false\n\t}\n\treturn o.Service, true\n}", "func (c *Contract) Accept(ctx TransactionContextInterface, jeweler string, paperNumber string, acceptDate string) (*InventoryFinancingPaper, error) {\r\n\tpaper, err := ctx.GetPaperList().GetPaper(jeweler, paperNumber)\r\n\r\n\tif err != nil {\r\n\t\treturn nil, err\r\n\t}\r\n\r\n\tif paper.IsReadyREPO() {\r\n\t\tpaper.SetAccepted()\r\n\t}\r\n\r\n\tif !paper.IsAccepted() {\r\n\t\treturn nil, fmt.Errorf(\"inventory paper %s:%s is not accepted by bank. Current state = %s\", jeweler, paperNumber, paper.GetState())\r\n\t}\r\n\r\n\terr = ctx.GetPaperList().UpdatePaper(paper)\r\n\r\n\tif err != nil {\r\n\t\treturn nil, err\r\n\t}\r\n\tfmt.Printf(\"The bank %q has accepted the inventory financing paper %q:%q ,The accept date is %q.\\nCurrent state is %q\", paper.GetBank(), paper.GetEvaluator(), paperNumber, acceptDate, paper.GetState())\r\n\treturn paper, nil\r\n}", "func (fpp *FoPoPattern) Match(serviceName string) bool {\n\tvar pattern string\n\tvar matched bool\n\tfor _, pattern = range fpp.ServicePatterns {\n\t\tmatched, _ = filepath.Match(pattern, serviceName)\n\t\tif matched {\n\t\t\treturn matched\n\t\t}\n\t}\n\treturn false\n}", "func (l *SensorListener) Accept() (connectorAddress string, state bool, err error) {\n // Keep looping until we get a notification message\n for {\n resp, err := l.conn.ReadResponse()\n if err != nil {\n return \"\", false, err\n }\n\n if strings.HasPrefix(resp, \"sensornotify,\") {\n resp = strings.TrimPrefix(resp, \"sensornotify,\")\n split := strings.Split(resp, \",\")\n\n if len(split) >= 2 {\n state, err = strconv.ParseBool(split[1])\n if err == nil {\n return split[0], state, nil\n }\n }\n }\n }\n}", "func (ctx *Context) AcceptJSON() bool {\r\n\treturn acceptsJSONRegex.MatchString(ctx.HeaderParam(HeaderAccept))\r\n}", "func (e 
*EdgeRequestContext) Service() (service Service, ok bool) {\n\ttoken := e.AuthToken()\n\tif token == nil {\n\t\treturn\n\t}\n\treturn Service(*token), true\n}", "func (t *raftLayer) Accept() (c net.Conn, err error) {\n\treturn t.listener.Accept()\n}", "func (in *ApplicationMapping) IsServiceEnabled(id string) bool {\n\tif in.IsAllApplicationServicesEnabled() {\n\t\treturn true\n\t}\n\tfor _, svc := range in.Spec.Services {\n\t\tif svc.ID == id {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func (streamLayer *StreamLayer) Accept() (net.Conn, error) {\n\tconn, err := streamLayer.listener.Accept()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tb := make([]byte, 1)\n\t_, err = conn.Read(b)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif bytes.Compare([]byte{byte(RaftRPC)}, b) != 0 {\n\t\treturn nil, fmt.Errorf(\"not a raft rpc\")\n\t}\n\tif streamLayer.serverTLSConfig != nil {\n\t\treturn tls.Server(conn, streamLayer.serverTLSConfig), nil\n\t}\n\treturn conn, nil\n}", "func (r *ReconcileTFAnalytics) handleAlarmGenService() (bool, error) {\n\t// Define a new AlarmGen service object\n\tsvcmService := newServicesForAlarmGen(r.instance)\n\t// Set TFAnalytics instance as the owner and controller\n\tif err := controllerutil.SetControllerReference(r.instance, svcmService, r.scheme); err != nil {\n\t\treturn false, err\n\t}\n\t// Check if this AlarmGen Service already exists\n\tfoundSvcmService := &corev1.Service{}\n\terr := r.client.Get(context.TODO(), types.NamespacedName{Name: svcmService.Name, Namespace: svcmService.Namespace}, foundSvcmService)\n\tif err != nil && errors.IsNotFound(err) {\n\t\tr.reqLogger.Info(\"Creating a new AlarmGen Service\", \"Service.Name\", svcmService.Name)\n\t\terr = r.client.Create(context.TODO(), svcmService)\n\t\tif err != nil {\n\t\t\treturn false, err\n\t\t}\n\t\t// Service has been created successfully - don't requeue\n\t\treturn false, nil\n\t} else if err != nil {\n\t\treturn false, err\n\t}\n\t// Service already exists - 
don't requeue\n\tr.reqLogger.Info(\"Skip reconcile: AlarmGen Service already exists\", \"Service.Name\", foundSvcmService.Name)\n\treturn false, nil\n}", "func (_Dospayment *DospaymentCaller) HasServiceFee(opts *bind.CallOpts, payer common.Address, serviceType *big.Int) (bool, error) {\n\tvar (\n\t\tret0 = new(bool)\n\t)\n\tout := ret0\n\terr := _Dospayment.contract.Call(opts, out, \"hasServiceFee\", payer, serviceType)\n\treturn *ret0, err\n}", "func (r *RPCIngressGateway) Accept(lisID *uint16, resp *AcceptResp) (err error) {\n\tdefer rpcutil.LogCall(r.log, \"Accept\", lisID)(resp, &err)\n\n\tlog := r.log.WithField(\"func\", \"Accept\")\n\n\tlog.Debug(\"Getting listener...\")\n\tlis, err := r.getListener(*lisID)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tlog.Debug(\"Reserving next ID...\")\n\tconnID, free, err := r.cm.ReserveNextID()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tlog.Debug(\"Accepting conn...\")\n\tconn, err := lis.Accept()\n\tif err != nil {\n\t\tfree()\n\t\treturn err\n\t}\n\n\tlog.Debug(\"Wrapping conn...\")\n\twrappedConn, err := appnet.WrapConn(conn)\n\tif err != nil {\n\t\tfree()\n\t\treturn err\n\t}\n\n\tif err := r.cm.Set(*connID, wrappedConn); err != nil {\n\t\tif cErr := wrappedConn.Close(); cErr != nil {\n\t\t\tr.log.WithError(cErr).Error(\"Failed to close wrappedConn.\")\n\t\t}\n\t\tfree()\n\t\treturn err\n\t}\n\n\tremote := wrappedConn.RemoteAddr().(appnet.Addr)\n\n\tresp.Remote = remote\n\tresp.ConnID = *connID\n\n\treturn nil\n}", "func isServiceStubType(t reflect.Type) bool {\n\tif isStructPtr(t) == false {\n\t\treturn false\n\t} else if t.Implements(stubType) == false {\n\t\treturn false\n\t}\n\t// Return success\n\treturn true\n}", "func (m Model) acceptRequest() (Model, tea.Cmd) { // nolint: unparam\n\tm.lh.response <- true\n\treturn m, nil\n}", "func (customer *Customer) acceptPrice(check bool) (err error) {\n\n\tvar currentTrigger ssm.Trigger\n\n\tcurrentTrigger = TriggerCustomerCommandAcceptPrice\n\n\tswitch check {\n\n\tcase 
true:\n\t\t// Do a check if state machine is in correct state for triggering event\n\t\tif customer.CustomerStateMachine.CanFire(currentTrigger.Key) == true {\n\t\t\terr = nil\n\n\t\t} else {\n\n\t\t\terr = customer.CustomerStateMachine.Fire(currentTrigger.Key, nil)\n\t\t}\n\n\tcase false:\n\t\t// Execute Trigger\n\n\t\tresp, err := customerClient.AcceptPrice(context.Background(), useEnvironment)\n\t\tif err != nil {\n\t\t\tlogMessagesWithError(4, \"Could not send 'AcceptPrice' to address: \"+taxi_address_to_dial+\". Error Message:\", err)\n\t\t\tbreak\n\n\t\t} else {\n\n\t\t\t//Save last PriceAccept respons\n\t\t\tcustomer.lastRecievedPriceAccept = resp\n\n\t\t\tif resp.GetAcknack() == true {\n\t\t\t\tlogMessagesWithOutError(4, \"'AcceptPrice' on address \"+taxi_address_to_dial+\" successfully processed\")\n\t\t\t\tlogMessagesWithOutError(4, \"Response Message (Comments): \"+resp.Comments)\n\n\t\t\t\t// Moved to state machine ---go receiveTaxiInvoices(customerClient, useEnvironment)\n\n\t\t\t} else {\n\t\t\t\tlogMessagesWithOutError(4, \"'AcceptPrice' on address \"+taxi_address_to_dial+\" NOT successfully processed\")\n\t\t\t\tlogMessagesWithOutError(4, \"Response Message (Comments): \"+resp.Comments)\n\n\t\t\t\terr = errors.New(\"'AcceptPrice' on address \" + taxi_address_to_dial + \" NOT successfully processed\")\n\t\t\t}\n\t\t}\n\n\t\tif err == nil && resp.GetAcknack() == true {\n\t\t\terr = customer.CustomerStateMachine.Fire(currentTrigger.Key, nil)\n\t\t\tif err != nil {\n\t\t\t\tlogTriggerStateError(4, customer.CustomerStateMachine.State(), currentTrigger, err)\n\n\t\t\t}\n\t\t}\n\t}\n\n\treturn err\n\n}", "func (t *Task) Accept() (interface{}, error) {\n\tpar := map[string]interface{}{\n\t\t\"taskid\": t.taskId,\n\t\t\"result\": nil,\n\t}\n\treturn t.nc.Exec(\"task.result\", par)\n}", "func (s *Server) Accept() error {\n\tvar tempDelay time.Duration // how long to sleep on accept failure\n\tfor {\n\t\tc, e := s.Listener.Accept()\n\t\tif e != nil {\n\t\t\tif 
ne, ok := e.(net.Error); ok && ne.Temporary() {\n\t\t\t\tif tempDelay == 0 {\n\t\t\t\t\ttempDelay = 5 * time.Millisecond\n\t\t\t\t} else {\n\t\t\t\t\ttempDelay *= 2\n\t\t\t\t}\n\t\t\t\tif max := 1 * time.Second; tempDelay > max {\n\t\t\t\t\ttempDelay = max\n\t\t\t\t}\n\t\t\t\ttime.Sleep(tempDelay)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\treturn e\n\t\t}\n\t\tgo s.accept(c)\n\t}\n}", "func (_this *StoppableListener) Accept() (conn net.Conn, err error) {\n\tconnc := make(chan *net.TCPConn, 1)\n\terrc := make(chan error, 1)\n\n\tgo func() {\n\t\ttc, err := _this.AcceptTCP()\n\t\tif err != nil {\n\t\t\terrc <- err\n\t\t\treturn\n\t\t}\n\t\tconnc <- tc\n\t}()\n\n\tselect {\n\tcase <-_this.stopc:\n\t\treturn nil, errors.New(\"Server stopped.\")\n\tcase err := <-errc:\n\t\treturn nil, err\n\tcase tc := <-connc:\n\t\ttc.SetKeepAlive(true)\n\t\ttc.SetKeepAlivePeriod(3 * time.Minute)\n\t\treturn tc, nil\n\t}\n}", "func (input *BeegoInput) AcceptsJSON() bool {\n\treturn acceptsJSONRegex.MatchString(input.Header(\"Accept\"))\n}", "func (r *ReconcileTFAnalytics) handleTopologyService() (bool, error) {\n\t// Define a new Topology service object\n\ttopologyService := newServicesForTopology(r.instance)\n\t// Set TFAnalytics instance as the owner and controller\n\tif err := controllerutil.SetControllerReference(r.instance, topologyService, r.scheme); err != nil {\n\t\treturn false, err\n\t}\n\t// Check if this Topology Service already exists\n\tfoundTopologyService := &corev1.Service{}\n\terr := r.client.Get(context.TODO(), types.NamespacedName{Name: topologyService.Name, Namespace: topologyService.Namespace}, foundTopologyService)\n\tif err != nil && errors.IsNotFound(err) {\n\t\tr.reqLogger.Info(\"Creating a new Topology Service\", \"Service.Name\", topologyService.Name)\n\t\terr = r.client.Create(context.TODO(), topologyService)\n\t\tif err != nil {\n\t\t\treturn false, err\n\t\t}\n\t\t// Service has been created successfully - don't requeue\n\t\treturn false, nil\n\t} else if err != 
nil {\n\t\treturn false, err\n\t}\n\t// Service already exists - don't requeue\n\tr.reqLogger.Info(\"Skip reconcile: Topology Service already exists\", \"Service.Name\", foundTopologyService.Name)\n\treturn false, nil\n}", "func WaitForService(org string, waitService string, waitTimeout int, pattern string) {\n\n\tconst UpdateThreshold = 5 // How many service check iterations before updating the user with a msg on the console.\n\tconst ServiceUpThreshold = 5 // How many service check iterations before deciding that the service is up.\n\n\t// get message printer\n\tmsgPrinter := i18n.GetMessagePrinter()\n\n\t// Verify that the input makes sense.\n\tif waitTimeout < 0 {\n\t\tcliutils.Fatal(cliutils.CLI_INPUT_ERROR, msgPrinter.Sprintf(\"--timeout must be a positive integer.\"))\n\t}\n\n\t// 1. Wait for the /service API to return a service with url that matches the input\n\t// 2. While waiting, report when at least 1 agreement is formed\n\n\tmsgPrinter.Printf(\"Waiting for up to %v seconds for service %v/%v to start...\", waitTimeout, org, waitService)\n\tmsgPrinter.Println()\n\n\t// Save the most recent set of services here.\n\tservices := api.AllServices{}\n\n\t// Start monitoring the agent's /service API, looking for the presence of the input waitService.\n\tupdateCounter := UpdateThreshold\n\tserviceUp := 0\n\tserviceFailed := false\n\tnow := uint64(time.Now().Unix())\n\tfor (uint64(time.Now().Unix())-now < uint64(waitTimeout) || serviceUp > 0) && !serviceFailed {\n\t\ttime.Sleep(time.Duration(3) * time.Second)\n\t\tif _, err := cliutils.HorizonGet(\"service\", []int{200}, &services, true); err != nil {\n\t\t\tcliutils.Fatal(cliutils.CLI_GENERAL_ERROR, err.Error())\n\t\t}\n\n\t\t// Active services are services that have at least been started. When the execution time becomes non-zero\n\t\t// it means the service container is started. 
The container could still fail quickly after it is started.\n\t\tinstances := services.Instances[\"active\"]\n\t\tfor _, serviceInstance := range instances {\n\n\t\t\tif !(serviceInstance.SpecRef == waitService && serviceInstance.Org == org) {\n\t\t\t\t// Skip elements for other services\n\t\t\t\tcontinue\n\n\t\t\t} else if serviceInstance.ExecutionStartTime != 0 {\n\t\t\t\t// The target service is started. If stays up then declare victory and return.\n\t\t\t\tif serviceUp >= ServiceUpThreshold {\n\t\t\t\t\tmsgPrinter.Printf(\"Service %v/%v is started.\", org, waitService)\n\t\t\t\t\tmsgPrinter.Println()\n\t\t\t\t\treturn\n\t\t\t\t}\n\n\t\t\t\t// The service could fail quickly if we happened to catch it just as it was starting, so make sure\n\t\t\t\t// the service stays up.\n\t\t\t\tserviceUp += 1\n\n\t\t\t} else if serviceUp > 0 {\n\t\t\t\t// The service has been up for at least 1 iteration, so it's absence means that it failed.\n\t\t\t\tserviceUp = 0\n\t\t\t\tmsgPrinter.Printf(\"The service %v/%v has failed.\", org, waitService)\n\t\t\t\tmsgPrinter.Println()\n\t\t\t\tserviceFailed = true\n\t\t\t}\n\n\t\t\tbreak\n\t\t}\n\n\t\t// Service is not there yet. Update the user on progress, and wait for a bit.\n\t\tupdateCounter = updateCounter - 1\n\t\tif updateCounter <= 0 && !serviceFailed {\n\t\t\tupdateCounter = UpdateThreshold\n\t\t\tmsgPrinter.Printf(\"Waiting for service %v/%v to start executing.\", org, waitService)\n\t\t\tmsgPrinter.Println()\n\t\t}\n\t}\n\n\t// If we got to this point, then there is a problem.\n\tmsgPrinter.Printf(\"Timeout waiting for service %v/%v to successfully start. Analyzing possible reasons for the timeout...\", org, waitService)\n\tmsgPrinter.Println()\n\n\t// Let's see if we can provide the user with some help figuring out what's going on.\n\tfound := false\n\tfor _, serviceInstance := range services.Instances[\"active\"] {\n\n\t\t// 1. 
Maybe the service is there but just hasnt started yet.\n\t\tif serviceInstance.SpecRef == waitService && serviceInstance.Org == org {\n\t\t\tmsgPrinter.Printf(\"Service %v/%v is deployed to the node, but not executing yet.\", org, waitService)\n\t\t\tmsgPrinter.Println()\n\t\t\tfound = true\n\n\t\t\t// 2. Maybe the service has encountered an error.\n\t\t\tif serviceInstance.ExecutionStartTime == 0 && serviceInstance.ExecutionFailureCode != 0 {\n\t\t\t\tmsgPrinter.Printf(\"Service %v/%v execution failed: %v.\", org, waitService, serviceInstance.ExecutionFailureDesc)\n\t\t\t\tmsgPrinter.Println()\n\t\t\t\tserviceFailed = true\n\t\t\t} else {\n\t\t\t\tmsgPrinter.Printf(\"Service %v/%v might need more time to start executing, continuing analysis.\", org, waitService)\n\t\t\t\tmsgPrinter.Println()\n\t\t\t}\n\t\t\tbreak\n\n\t\t}\n\t}\n\n\t// 3. The service might not even be there at all.\n\tif !found {\n\t\tmsgPrinter.Printf(\"Service %v/%v is not deployed to the node, continuing analysis.\", org, waitService)\n\t\tmsgPrinter.Println()\n\t}\n\n\t// 4. Are there any agreements being made? Check for only non-archived agreements. Skip this if we know the service failed\n\t// because we know there are agreements.\n\tif !serviceFailed {\n\t\tmsgPrinter.Println()\n\t\tags := agreement.GetAgreements(false)\n\t\tif len(ags) != 0 {\n\t\t\tmsgPrinter.Printf(\"Currently, there are %v active agreements on this node. Use `hzn agreement list' to see the agreements that have been formed so far.\", len(ags))\n\t\t\tmsgPrinter.Println()\n\t\t} else {\n\t\t\tmsgPrinter.Printf(\"Currently, there are no active agreements on this node.\")\n\t\t\tmsgPrinter.Println()\n\t\t}\n\t}\n\n\t// 5. Scan the event log for errors related to this service. 
This should always be done if the service did not come up\n\t// successfully.\n\teLogs := make([]persistence.EventLogRaw, 0)\n\tcliutils.HorizonGet(\"eventlog?severity=error\", []int{200}, &eLogs, true)\n\tmsgPrinter.Println()\n\tif len(eLogs) == 0 {\n\t\tmsgPrinter.Printf(\"Currently, there are no errors recorded in the node's event log.\")\n\t\tmsgPrinter.Println()\n\t\tif pattern == \"\" {\n\t\t\tmsgPrinter.Printf(\"Use the 'hzn deploycheck all -b' or 'hzn deploycheck all -B' command to verify that node, service configuration and deployment policy is compatible.\")\n\t\t} else {\n\t\t\tmsgPrinter.Printf(\"Use the 'hzn deploycheck all -p' command to verify that node, service configuration and pattern is compatible.\")\n\t\t}\n\t\tmsgPrinter.Println()\n\t} else {\n\t\tmsgPrinter.Printf(\"The following errors were found in the node's event log and are related to %v/%v. Use 'hzn eventlog list -s severity=error -l' to see the full detail of the errors.\", org, waitService)\n\t\tmsgPrinter.Println()\n\n\t\t// Scan the log for events related to the service we're waiting for.\n\t\tsel := persistence.Selector{\n\t\t\tOp: \"=\",\n\t\t\tMatchValue: waitService,\n\t\t}\n\t\tmatch := make(map[string][]persistence.Selector)\n\t\tmatch[\"service_url\"] = []persistence.Selector{sel}\n\n\t\tfor _, el := range eLogs {\n\t\t\tt := time.Unix(int64(el.Timestamp), 0)\n\t\t\tprintLog := false\n\t\t\tif strings.Contains(el.Message, waitService) {\n\t\t\t\tprintLog = true\n\t\t\t} else if es, err := persistence.GetRealEventSource(el.SourceType, el.Source); err != nil {\n\t\t\t\tcliutils.Fatal(cliutils.CLI_GENERAL_ERROR, \"unable to convert eventlog source, error: %v\", err)\n\t\t\t} else if (*es).Matches(match) {\n\t\t\t\tprintLog = true\n\t\t\t}\n\n\t\t\t// Put relevant events on the console.\n\t\t\tif printLog {\n\t\t\t\tmsgPrinter.Printf(\"%v: %v\", t.Format(\"2006-01-02 15:04:05\"), el.Message)\n\t\t\t\tmsgPrinter.Println()\n\t\t\t}\n\t\t}\n\t}\n\n\t// Done 
analyzing\n\tmsgPrinter.Printf(\"Analysis complete.\")\n\tmsgPrinter.Println()\n\n\treturn\n}", "func (input *Input) AcceptsHTML() bool {\n\treturn acceptsHTMLRegex.MatchString(input.Header(\"Accept\"))\n}", "func (ssd StatelessServiceDescription) AsServiceDescription() (*ServiceDescription, bool) {\n\treturn nil, false\n}", "func (e *connectionedEndpoint) Accept(ctx context.Context, peerAddr *Address) (Endpoint, *syserr.Error) {\n\te.Lock()\n\n\tif !e.ListeningLocked() {\n\t\te.Unlock()\n\t\treturn nil, syserr.ErrInvalidEndpointState\n\t}\n\n\tne, err := e.getAcceptedEndpointLocked(ctx)\n\te.Unlock()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif peerAddr != nil {\n\t\tne.Lock()\n\t\tc := ne.connected\n\t\tne.Unlock()\n\t\tif c != nil {\n\t\t\taddr, err := c.GetLocalAddress()\n\t\t\tif err != nil {\n\t\t\t\treturn nil, syserr.TranslateNetstackError(err)\n\t\t\t}\n\t\t\t*peerAddr = addr\n\t\t}\n\t}\n\treturn ne, nil\n}", "func (o *EventAttributes) HasService() bool {\n\treturn o != nil && o.Service != nil\n}", "func WaitForService(ctx context.Context, conn *dbus.Conn, svc string) error {\n\t// If the name is already owned, we're done.\n\tif ServiceOwned(ctx, conn, svc) {\n\t\treturn nil\n\t}\n\n\tsw, err := NewSignalWatcher(ctx, conn, MatchSpec{\n\t\tType: \"signal\",\n\t\tPath: busPath,\n\t\tInterface: busInterface,\n\t\tSender: busName,\n\t\tMember: \"NameOwnerChanged\",\n\t\tArg0: svc,\n\t})\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer sw.Close(ctx)\n\n\t// Make sure the name wasn't taken while we were creating the watcher.\n\tif ServiceOwned(ctx, conn, svc) {\n\t\treturn nil\n\t}\n\n\tfor {\n\t\tselect {\n\t\tcase sig := <-sw.Signals:\n\t\t\tif len(sig.Body) < 3 {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\t// Skip signals about this service if the \"new owner\" arg is empty.\n\t\t\tif v, ok := sig.Body[2].(string); !ok || v == \"\" {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\t// Otherwise, we're done.\n\t\t\treturn nil\n\t\tcase <-ctx.Done():\n\t\t\treturn 
ctx.Err()\n\t\t}\n\t}\n}", "func ApplyServiceMonitor(ctx context.Context, client dynamic.Interface, recorder events.Recorder, required *unstructured.Unstructured) (*unstructured.Unstructured, bool, error) {\n\tnamespace := required.GetNamespace()\n\texisting, err := client.Resource(serviceMonitorGVR).Namespace(namespace).Get(ctx, required.GetName(), metav1.GetOptions{})\n\tif errors.IsNotFound(err) {\n\t\tnewObj, createErr := client.Resource(serviceMonitorGVR).Namespace(namespace).Create(ctx, required, metav1.CreateOptions{})\n\t\tif createErr != nil {\n\t\t\trecorder.Warningf(\"ServiceMonitorCreateFailed\", \"Failed to create ServiceMonitor.monitoring.coreos.com/v1: %v\", createErr)\n\t\t\treturn nil, true, createErr\n\t\t}\n\t\trecorder.Eventf(\"ServiceMonitorCreated\", \"Created ServiceMonitor.monitoring.coreos.com/v1 because it was missing\")\n\t\treturn newObj, true, nil\n\t}\n\tif err != nil {\n\t\treturn nil, false, err\n\t}\n\n\texistingCopy := existing.DeepCopy()\n\n\ttoUpdate, modified, err := ensureGenericSpec(required, existingCopy, noDefaulting, equality.Semantic)\n\tif err != nil {\n\t\treturn nil, false, err\n\t}\n\n\tif !modified {\n\t\treturn nil, false, nil\n\t}\n\n\tif klog.V(4).Enabled() {\n\t\tklog.Infof(\"ServiceMonitor %q changes: %v\", namespace+\"/\"+required.GetName(), JSONPatchNoError(existing, toUpdate))\n\t}\n\n\tnewObj, err := client.Resource(serviceMonitorGVR).Namespace(namespace).Update(ctx, toUpdate, metav1.UpdateOptions{})\n\tif err != nil {\n\t\trecorder.Warningf(\"ServiceMonitorUpdateFailed\", \"Failed to update ServiceMonitor.monitoring.coreos.com/v1: %v\", err)\n\t\treturn nil, true, err\n\t}\n\n\trecorder.Eventf(\"ServiceMonitorUpdated\", \"Updated ServiceMonitor.monitoring.coreos.com/v1 because it changed\")\n\treturn newObj, true, err\n}", "func (j *AuroraJob) IsService(isService bool) Job {\n\tj.jobConfig.TaskConfig.IsService = isService\n\treturn j\n}", "func (m *OutboundMock) CanAccept(p Inbound) (r bool) {\n\tcounter := 
atomic.AddUint64(&m.CanAcceptPreCounter, 1)\n\tdefer atomic.AddUint64(&m.CanAcceptCounter, 1)\n\n\tif len(m.CanAcceptMock.expectationSeries) > 0 {\n\t\tif counter > uint64(len(m.CanAcceptMock.expectationSeries)) {\n\t\t\tm.t.Fatalf(\"Unexpected call to OutboundMock.CanAccept. %v\", p)\n\t\t\treturn\n\t\t}\n\n\t\tinput := m.CanAcceptMock.expectationSeries[counter-1].input\n\t\ttestify_assert.Equal(m.t, *input, OutboundMockCanAcceptInput{p}, \"Outbound.CanAccept got unexpected parameters\")\n\n\t\tresult := m.CanAcceptMock.expectationSeries[counter-1].result\n\t\tif result == nil {\n\t\t\tm.t.Fatal(\"No results are set for the OutboundMock.CanAccept\")\n\t\t\treturn\n\t\t}\n\n\t\tr = result.r\n\n\t\treturn\n\t}\n\n\tif m.CanAcceptMock.mainExpectation != nil {\n\n\t\tinput := m.CanAcceptMock.mainExpectation.input\n\t\tif input != nil {\n\t\t\ttestify_assert.Equal(m.t, *input, OutboundMockCanAcceptInput{p}, \"Outbound.CanAccept got unexpected parameters\")\n\t\t}\n\n\t\tresult := m.CanAcceptMock.mainExpectation.result\n\t\tif result == nil {\n\t\t\tm.t.Fatal(\"No results are set for the OutboundMock.CanAccept\")\n\t\t}\n\n\t\tr = result.r\n\n\t\treturn\n\t}\n\n\tif m.CanAcceptFunc == nil {\n\t\tm.t.Fatalf(\"Unexpected call to OutboundMock.CanAccept. %v\", p)\n\t\treturn\n\t}\n\n\treturn m.CanAcceptFunc(p)\n}" ]
[ "0.63734", "0.6060541", "0.59677523", "0.58842057", "0.5720938", "0.5569348", "0.5523225", "0.54433316", "0.5340439", "0.53300637", "0.5299485", "0.5284296", "0.52749", "0.52703655", "0.52669793", "0.5251331", "0.52413315", "0.52043164", "0.51288015", "0.5126478", "0.511291", "0.5094832", "0.5026353", "0.50225866", "0.5018173", "0.5017211", "0.50089806", "0.4993258", "0.49862543", "0.49840206", "0.49737436", "0.49688202", "0.4953105", "0.49435169", "0.49395347", "0.49356285", "0.4920474", "0.49174678", "0.49092406", "0.4884988", "0.48676953", "0.4848693", "0.4845149", "0.48440522", "0.48013315", "0.47910658", "0.47899097", "0.4770117", "0.4741985", "0.47419038", "0.4728911", "0.4724682", "0.4718462", "0.4707024", "0.47063035", "0.47051919", "0.46987534", "0.46986803", "0.46951863", "0.46838206", "0.467274", "0.467274", "0.46685025", "0.46675754", "0.46612936", "0.46571434", "0.4656498", "0.46508643", "0.46449792", "0.4643559", "0.4641994", "0.463333", "0.46308696", "0.46287844", "0.46286452", "0.4622316", "0.4615369", "0.46113005", "0.46047142", "0.46007034", "0.45865437", "0.45782655", "0.45769903", "0.45709112", "0.45650518", "0.45605838", "0.45587662", "0.4556341", "0.45479268", "0.4545016", "0.45435578", "0.4525287", "0.45244485", "0.45235834", "0.45201638", "0.45161125", "0.4510356", "0.4508397", "0.45000607", "0.44962978" ]
0.75590676
0
rowToRecord converts from pgx.Row to a store.Record
rowToRecord преобразует pgx.Row в store.Record
func (s *sqlStore) rowToRecord(row pgx.Row) (*store.Record, error) { var expiry *time.Time record := &store.Record{} metadata := make(Metadata) if err := row.Scan(&record.Key, &record.Value, &metadata, &expiry); err != nil { if err == sql.ErrNoRows { return record, store.ErrNotFound } return nil, err } // set the metadata record.Metadata = toMetadata(&metadata) if expiry != nil { record.Expiry = time.Until(*expiry) } return record, nil }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (s *sqlStore) rowsToRecords(rows pgx.Rows) ([]*store.Record, error) {\n\tvar records []*store.Record\n\n\tfor rows.Next() {\n\t\tvar expiry *time.Time\n\t\trecord := &store.Record{}\n\t\tmetadata := make(Metadata)\n\n\t\tif err := rows.Scan(&record.Key, &record.Value, &metadata, &expiry); err != nil {\n\t\t\treturn records, err\n\t\t}\n\n\t\t// set the metadata\n\t\trecord.Metadata = toMetadata(&metadata)\n\t\tif expiry != nil {\n\t\t\trecord.Expiry = time.Until(*expiry)\n\t\t}\n\t\trecords = append(records, record)\n\t}\n\treturn records, nil\n}", "func recordToRecord(\n\ttopic string,\n\tpartition int32,\n\tbatch *kmsg.RecordBatch,\n\trecord *kmsg.Record,\n) *Record {\n\th := make([]RecordHeader, 0, len(record.Headers))\n\tfor _, kv := range record.Headers {\n\t\th = append(h, RecordHeader{\n\t\t\tKey: kv.Key,\n\t\t\tValue: kv.Value,\n\t\t})\n\t}\n\n\treturn &Record{\n\t\tKey: record.Key,\n\t\tValue: record.Value,\n\t\tHeaders: h,\n\t\tTimestamp: timeFromMillis(batch.FirstTimestamp + int64(record.TimestampDelta)),\n\t\tTopic: topic,\n\t\tPartition: partition,\n\t\tAttrs: RecordAttrs{uint8(batch.Attributes)},\n\t\tProducerID: batch.ProducerID,\n\t\tProducerEpoch: batch.ProducerEpoch,\n\t\tLeaderEpoch: batch.PartitionLeaderEpoch,\n\t\tOffset: batch.FirstOffset + int64(record.OffsetDelta),\n\t}\n}", "func toRow(pl any) []byte {\n\trt := reflect.TypeOf(pl)\n\n\tenc, err := coder.RowEncoderForStruct(rt)\n\tif err != nil {\n\t\tpanic(fmt.Errorf(\"unable to get row encoder\"))\n\t}\n\tvar buf bytes.Buffer\n\tif err := enc(pl, &buf); err != nil {\n\t\tpanic(fmt.Errorf(\"unable to do row encoding\"))\n\t}\n\treturn buf.Bytes()\n}", "func (r RecordV1) toRecord() Record {\n\treturn Record{\n\t\tType: r.Type,\n\t\tName: r.Name,\n\t\tAppliedAt: r.AppliedAt,\n\t}\n}", "func RowToRawData(rows *sql.Rows) (r RawData) {\n\trecord, _ := RowToArr(rows)\n\tr.Header = record[0]\n\tr.Rows = append(r.Rows, record[1:])\n\treturn\n}", "func convertRow(\n\trow *Row,\n\twantsNode 
bool,\n\twantsTimestamp bool,\n\tdesiredValues []string,\n) *stats.Row {\n\tvar (\n\t\tnode string\n\t\ttimestamp time.Time\n\t)\n\n\tvar resultValues map[string]interface{}\n\tif len(desiredValues) > 0 {\n\t\tresultValues = make(map[string]interface{})\n\t}\n\n\tfor _, v := range desiredValues {\n\t\tresultValues[v] = row.value(v)\n\t}\n\n\tif wantsNode {\n\t\tnode = row.Node\n\t}\n\tif wantsTimestamp {\n\t\ttimestamp = row.Timestamp.UTC()\n\t}\n\n\treturn &stats.Row{\n\t\tNode: node,\n\t\tTimestamp: timestamp,\n\t\tValues: resultValues,\n\t}\n}", "func toRecord(cache airtabledb.DB, src Feature, dst interface{}) {\n\tdV := reflect.ValueOf(dst).Elem().FieldByName(\"Fields\")\n\tsV := reflect.ValueOf(src)\n\tcopyFields(cache, sV, dV)\n}", "func (e *commonFormatEncoder) Row(tp int, row *[]interface{}, seqno uint64) ([]byte, error) {\n\tcf := convertRowToCommonFormat(tp, row, e.inSchema, seqno, e.filter)\n\treturn CommonFormatEncode(cf)\n}", "func MarshalRecord(record *rangedb.Record) ([]byte, error) {\n\tvar buf bytes.Buffer\n\n\tnewRecord := *record\n\tnewRecord.Data = nil\n\n\tencoder := msgpack.NewEncoder(&buf)\n\tencoder.UseJSONTag(true)\n\n\terr := encoder.Encode(newRecord)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed encoding record: %v\", err)\n\t}\n\n\terr = encoder.Encode(record.Data)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed encoding record data: %v\", err)\n\t}\n\n\treturn buf.Bytes(), nil\n}", "func RowTo[T any](row CollectableRow) (T, error) {\n\tvar value T\n\terr := row.Scan(&value)\n\treturn value, err\n}", "func rowToRSDocument(row string) (document *redisearch.Document) {\n\tif debug > 0 {\n\t\tfmt.Fprintln(os.Stderr, \"converting row to rediSearch Document \"+row)\n\t}\n\tfieldSizesStr := strings.Split(row, \",\")\n\t// we need at least the id and score\n\tif len(fieldSizesStr) >= 2 {\n\t\tdocumentId := index + \"-\" + fieldSizesStr[0]\n\t\tdocumentScore, _ := strconv.ParseFloat(fieldSizesStr[1], 64)\n\t\tdoc := 
redisearch.NewDocument(documentId, float32(documentScore))\n\n\t\tfor _, keyValuePair := range fieldSizesStr[2:] {\n\t\t\tpair := strings.Split(keyValuePair, \"=\")\n\t\t\tif len(pair) == 2 {\n\t\t\t\tif debug > 0 {\n\t\t\t\t\tfmt.Fprintln(os.Stderr, \"On doc \"+documentId+\" adding field with NAME \"+pair[0]+\" and VALUE \"+pair[1])\n\t\t\t\t}\n\t\t\t\tdoc.Set(pair[0], pair[1])\n\t\t\t} else {\n\t\t\t\tif debug > 0 {\n\t\t\t\t\tfmt.Fprintf(os.Stderr, \"On doc \"+documentId+\" len(pair)=%d\", len(pair))\n\t\t\t\t}\n\t\t\t\tlog.Fatalf(\"keyValuePair pair size != 2 . Got \" + keyValuePair)\n\t\t\t}\n\t\t}\n\t\tif debug > 0 {\n\t\t\tfmt.Fprintln(os.Stderr, \"Doc \"+documentId)\n\t\t}\n\t\treturn &doc\n\t}\n\treturn document\n}", "func (dao PathProfileDAOPsql) rowToPathProfile(row *sql.Row, o *models.PathProfile) error {\n\treturn row.Scan(&o.ID, &o.ProfileID, &o.Path.ID, &o.Path.Path, &o.Path.PathName, &o.Path.Description, &o.Post, &o.Put, &o.Del, &o.Get, &o.CreatedAt, &o.UpdatedAt)\n}", "func (mcs *MemoryCellStore) MakeRow(sheet *Sheet) *Row {\n\treturn makeMemoryRow(sheet).row\n}", "func (r *Rows) row(a ...interface{}) error {\n\tdefer r.Close()\n\n\tfor _, dp := range a {\n\t\tif _, ok := dp.(*sql.RawBytes); ok {\n\t\t\treturn VarTypeError(\"RawBytes isn't allowed on Row()\")\n\t\t}\n\t}\n\n\tif !r.Next() {\n\t\tif err := r.Err(); err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn sql.ErrNoRows\n\t}\n\tif err := r.Scan(a...); err != nil {\n\t\treturn err\n\t}\n\n\treturn r.Close()\n}", "func ConvertRecord(s string) (r record) {\n // Drop the last char pf the string (it's a ' ')\n s = s[:len(s) - 1]\n\n // Split the string in the various fields\n var fields []string = strings.Split(s, \" \")\n\n // Update the fields of the record based on the various fields\n for _, f := range fields {\n switch f[:3] {\n case \"byr\": r.byr = f[4:]\n case \"iyr\": r.iyr = f[4:]\n case \"eyr\": r.eyr = f[4:]\n case \"hgt\": r.hgt = f[4:]\n case \"hcl\": r.hcl = f[4:]\n case \"ecl\": 
r.ecl = f[4:]\n case \"pid\": r.pid = f[4:]\n }\n }\n\n return\n}", "func RowToArr(rows *sql.Rows) (records [][]string, err error) {\n\tfmt.Printf(\"RowToArr start at %s\", time.Now())\n\tcolumns, err := rows.Columns()\n\tif err != nil {\n\t\treturn\n\t}\n\tcount := len(columns)\n\treadCols := make([]interface{}, count)\n\trawCols := make([]interface{}, count)\n\t//records = make([]interface{}, 0)\n\trecords = append(records, columns) //append row header as 1st row\n\n\t// var resultCols []string\n\tfor rows.Next() {\n\t\t// resultCols = make([]string, count)\n\t\tfor i := range columns {\n\t\t\treadCols[i] = &rawCols[i]\n\t\t}\n\t\terr = rows.Scan(readCols...)\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\n\t\tresultCols := assertTypeArray(columns, rawCols)\n\t\trecords = append(records, resultCols)\n\t}\n\n\tfmt.Printf(\"RowToArr end at %s\", time.Now())\n\treturn records, nil\n}", "func doltRowToSqlRow(doltRow row.Row, sch schema.Schema) (sql.Row, error) {\n\tcolVals := make(sql.Row, sch.GetAllCols().Size())\n\n\ti := 0\n\terr := sch.GetAllCols().Iter(func(tag uint64, col schema.Column) (stop bool, err error) {\n\t\tvar innerErr error\n\t\tvalue, _ := doltRow.GetColVal(tag)\n\t\tcolVals[i], innerErr = col.TypeInfo.ConvertNomsValueToValue(value)\n\t\tif innerErr != nil {\n\t\t\treturn true, innerErr\n\t\t}\n\t\ti++\n\t\treturn false, nil\n\t})\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn sql.NewRow(colVals...), nil\n}", "func (mcs *MemoryCellStore) WriteRow(r *Row) error {\n\tif r != nil {\n\t\tkey := r.key()\n\t\tmcs.rows[key] = r\n\t}\n\treturn nil\n}", "func convertToUser(row *sql.Row) (*User, error) {\n\tuser := User{}\n\terr := row.Scan(&user.UserID, &user.Mail, &user.Password)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"model.convertToUser: %w\", err)\n\t}\n\treturn &user, nil\n}", "func (p *partitionImpl) getRow(row *rowImpl, rowNum int) sif.Row {\n\trow.rowNum = rowNum\n\trow.partition = p\n\treturn row\n}", "func SqlRowToDoltRow(nbf 
*types.NomsBinFormat, r sql.Row, doltSchema schema.Schema) (row.Row, error) {\n\ttaggedVals := make(row.TaggedValues)\n\tallCols := doltSchema.GetAllCols()\n\tfor i, val := range r {\n\t\ttag := allCols.Tags[i]\n\t\tschCol := allCols.TagToCol[tag]\n\t\tif val != nil {\n\t\t\tvar err error\n\t\t\ttaggedVals[tag], err = schCol.TypeInfo.ConvertValueToNomsValue(val)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t} else if !schCol.IsNullable() {\n\t\t\treturn nil, fmt.Errorf(\"column <%v> received nil but is non-nullable\", schCol.Name)\n\t\t}\n\t}\n\treturn row.New(nbf, doltSchema, taggedVals)\n}", "func NewRecord(f sql.SelectObjectFormat) *Record {\n\treturn &Record{\n\t\tKVS: jstream.KVS{},\n\t\tSelectFormat: f,\n\t}\n}", "func NewRecord(record map[string]interface{}, schema *schma.Schema, schemaText string) *Record {\n\treturn &Record{\n\t\tschema: schema,\n\t\tschemaText: schemaText,\n\t\tData: record,\n\t}\n}", "func createRecord(\n\ts *database.Store,\n\tdoc *document.Document,\n) (*database.Record, error) {\n\tnow := ptypes.TimestampNow()\n\tnew := &database.Record{\n\t\tRevision: 1,\n\t\tKeys: marshalKeys(doc.Keys),\n\t\tCreatedAt: now,\n\t\tUpdatedAt: now,\n\t}\n\n\tif err := s.PutRecord(doc.ID, new); err != nil {\n\t\treturn nil, err\n\t}\n\n\tif err := s.UpdateKeys(doc.ID, nil, new.Keys); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn new, nil\n}", "func updateRecord(\n\ts *database.Store,\n\tdoc *document.Document,\n\trec *database.Record,\n) (*database.Record, error) {\n\tnew := proto.Clone(rec).(*database.Record)\n\tnew.Revision++\n\tnew.Keys = marshalKeys(doc.Keys)\n\tnew.UpdatedAt = ptypes.TimestampNow()\n\n\tif err := s.PutRecord(doc.ID, new); err != nil {\n\t\treturn nil, err\n\t}\n\n\tif err := s.UpdateKeys(doc.ID, rec.Keys, new.Keys); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn new, nil\n}", "func (kvcodec *tableKVEncoder) AddRecord(\n\trow []types.Datum,\n\trowID int64,\n\tcolumnPermutation []int,\n) (Row, int, error) 
{\n\tcols := kvcodec.tbl.Cols()\n\n\tvar value types.Datum\n\tvar err error\n\n\trecord := kvcodec.recordCache\n\tif record == nil {\n\t\trecord = make([]types.Datum, 0, len(cols)+1)\n\t}\n\n\tisAutoRandom := false\n\tif kvcodec.tbl.Meta().PKIsHandle && kvcodec.tbl.Meta().ContainsAutoRandomBits() {\n\t\tisAutoRandom = true\n\t}\n\n\tfor i, col := range cols {\n\t\tj := columnPermutation[i]\n\t\tisAutoIncCol := mysql.HasAutoIncrementFlag(col.Flag)\n\t\tisPk := mysql.HasPriKeyFlag(col.Flag)\n\t\tswitch {\n\t\tcase j >= 0 && j < len(row):\n\t\t\tvalue, err = table.CastValue(kvcodec.se, row[j], col.ToInfo(), false, false)\n\t\t\tif err == nil {\n\t\t\t\terr = col.HandleBadNull(&value, kvcodec.se.vars.StmtCtx)\n\t\t\t}\n\t\tcase isAutoIncCol:\n\t\t\t// we still need a conversion, e.g. to catch overflow with a TINYINT column.\n\t\t\tvalue, err = table.CastValue(kvcodec.se, types.NewIntDatum(rowID), col.ToInfo(), false, false)\n\t\tdefault:\n\t\t\tvalue, err = table.GetColDefaultValue(kvcodec.se, col.ToInfo())\n\t\t}\n\t\tif err != nil {\n\t\t\treturn nil, 0, errors.Trace(err)\n\t\t}\n\n\t\trecord = append(record, value)\n\n\t\tif isAutoRandom && isPk {\n\t\t\ttypeBitsLength := uint64(mysql.DefaultLengthOfMysqlTypes[col.Tp] * 8)\n\t\t\tincrementalBits := typeBitsLength - kvcodec.tbl.Meta().AutoRandomBits\n\t\t\thasSignBit := !mysql.HasUnsignedFlag(col.Flag)\n\t\t\tif hasSignBit {\n\t\t\t\tincrementalBits--\n\t\t\t}\n\t\t\t_ = kvcodec.tbl.RebaseAutoID(kvcodec.se, value.GetInt64()&((1<<incrementalBits)-1), false, autoid.AutoRandomType)\n\t\t}\n\t\tif isAutoIncCol {\n\t\t\t// TODO use auto incremental type\n\t\t\t_ = kvcodec.tbl.RebaseAutoID(kvcodec.se, getAutoRecordID(value, &col.FieldType), false, autoid.RowIDAllocType)\n\t\t}\n\t}\n\n\tif TableHasAutoRowID(kvcodec.tbl.Meta()) {\n\t\tj := columnPermutation[len(cols)]\n\t\tif j >= 0 && j < len(row) {\n\t\t\tvalue, err = table.CastValue(kvcodec.se, row[j], extraHandleColumnInfo, false, false)\n\t\t} else {\n\t\t\tvalue, err 
= types.NewIntDatum(rowID), nil\n\t\t}\n\t\tif err != nil {\n\t\t\treturn nil, 0, errors.Trace(err)\n\t\t}\n\t\trecord = append(record, value)\n\t\t_ = kvcodec.tbl.RebaseAutoID(kvcodec.se, value.GetInt64(), false, autoid.RowIDAllocType)\n\t}\n\t_, err = kvcodec.tbl.AddRecord(kvcodec.se, record)\n\tif err != nil {\n\t\tlog.Error(\"kv add Record failed\",\n\t\t\tzapRow(\"originalRow\", row),\n\t\t\tzapRow(\"convertedRow\", record),\n\t\t\tzap.Error(err),\n\t\t)\n\t\treturn nil, 0, errors.Trace(err)\n\t}\n\n\tpairs, size := kvcodec.se.takeKvPairs()\n\tkvcodec.recordCache = record[:0]\n\treturn Pairs(pairs), size, nil\n}", "func RowToDrivers(row *sql.Rows) []Driver {\n result := []Driver{}\n for row.Next() {\n var driverName string\n var driverTelephoneNumber string\n row.Scan(&driverName, &driverTelephoneNumber)\n result = append(result, Driver{\n DriverName: driverName,\n DriverTelephoneNumber: driverTelephoneNumber,\n })\n }\n return result\n}", "func RowToTrips(row *sql.Rows) []Trip {\n trips := []Trip{}\n for row.Next() {\n var tripNumber int\n var startLocationName string\n var destinationName string\n row.Scan(&tripNumber, &startLocationName, &destinationName)\n trips = append(trips, Trip{\n TripNumber: tripNumber,\n StartLocationName: startLocationName,\n DestinationName: destinationName,\n })\n }\n return trips\n}", "func (f *fragment) row(rowID uint64) *Row {\n\tf.mu.Lock()\n\tdefer f.mu.Unlock()\n\treturn f.unprotectedRow(rowID)\n}", "func DoltRowToSqlRow(doltRow row.Row, sch schema.Schema) (sql.Row, error) {\n\tif doltRow == nil {\n\t\treturn nil, nil\n\t}\n\n\tcolVals := make(sql.Row, sch.GetAllCols().Size())\n\ti := 0\n\n\t_, err := doltRow.IterSchema(sch, func(tag uint64, val types.Value) (stop bool, err error) {\n\t\tcol, _ := sch.GetAllCols().GetByTag(tag)\n\t\tcolVals[i], err = col.TypeInfo.ConvertNomsValueToValue(val)\n\t\ti++\n\n\t\tstop = err != nil\n\t\treturn\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn sql.NewRow(colVals...), 
nil\n}", "func (r *Result) Recordx() *Record {\n\treturn &Record{r.Record()}\n}", "func (d *DB) readfloat_row(row []byte, pos uint32) float32 {\n\tvar retval float32\n\tdata := row[pos : pos+4]\n\tbits := binary.LittleEndian.Uint32(data)\n\tretval = math.Float32frombits(bits)\n\treturn retval\n}", "func (env *Environment) ConvertRowToTable(row bt.Row) *lua.LTable {\n\ttable := env.state.NewTable()\n\tfor cfName, cf := range row.ColumnFamilies {\n\t\tcfTable := env.state.NewTable()\n\t\tfor column, value := range cf {\n\t\t\tcfTable.RawSetString(column, lua.LString(value))\n\t\t}\n\n\t\ttable.RawSet(lua.LString(cfName), cfTable)\n\t}\n\n\t// set key last to ensure it doesn't get overwritten\n\ttable.RawSetString(\"key\", lua.LString(row.Key))\n\treturn table\n}", "func (t *Table) GetRow(ctx context.Context, pk types.Tuple, sch schema.Schema) (row.Row, bool, error) {\n\trowMap, err := t.GetRowData(ctx)\n\n\tif err != nil {\n\t\treturn nil, false, err\n\t}\n\n\tfieldsVal, _, err := rowMap.MaybeGet(ctx, pk)\n\n\tif err != nil {\n\t\treturn nil, false, err\n\t}\n\n\tif fieldsVal == nil {\n\t\treturn nil, false, nil\n\t}\n\n\tr, err := row.FromNoms(sch, pk, fieldsVal.(types.Tuple))\n\n\tif err != nil {\n\t\treturn nil, false, err\n\t}\n\n\treturn r, true, nil\n}", "func (f *fragment) rowFromStorage(rowID uint64) *Row {\n\t// Only use a subset of the containers.\n\t// NOTE: The start & end ranges must be divisible by container width.\n\t//\n\t// Note that OffsetRange now returns a new bitmap which uses frozen\n\t// containers which will use copy-on-write semantics. 
The actual bitmap\n\t// and Containers object are new and not shared, but the containers are\n\t// shared.\n\tdata := f.storage.OffsetRange(f.shard*ShardWidth, rowID*ShardWidth, (rowID+1)*ShardWidth)\n\n\trow := &Row{\n\t\tsegments: []rowSegment{{\n\t\t\tdata: data,\n\t\t\tshard: f.shard,\n\t\t\twritable: true,\n\t\t}},\n\t}\n\trow.invalidateCount()\n\n\treturn row\n}", "func getNewRecordFunc(rowMeta sqlz.RecordMeta) driver.NewRecordFunc {\n\treturn func(row []interface{}) (sqlz.Record, error) {\n\t\trec, skipped := driver.NewRecordFromScanRow(rowMeta, row, nil)\n\t\t// We iterate over each element of val, checking for certain\n\t\t// conditions. A more efficient approach might be to (in\n\t\t// the outside func) iterate over the column metadata, and\n\t\t// build a list of val elements to visit.\n\t\tfor _, i := range skipped {\n\t\t\tif nullTime, ok := rec[i].(*mysql.NullTime); ok {\n\t\t\t\tif nullTime.Valid {\n\t\t\t\t\t// Make a copy of the value\n\t\t\t\t\tt := nullTime.Time\n\t\t\t\t\trec[i] = &t\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\n\t\t\t\t// Else\n\t\t\t\trec[i] = nil\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tif rowMeta[i].DatabaseTypeName() == \"TIME\" && rec[i] != nil {\n\t\t\t\t// MySQL may return TIME as RawBytes... 
convert to a string.\n\t\t\t\t// https://github.com/go-sql-driver/mysql#timetime-support\n\t\t\t\tif rb, ok := rec[i].(*sql.RawBytes); ok {\n\t\t\t\t\tif len(*rb) == 0 {\n\t\t\t\t\t\t// shouldn't happen\n\t\t\t\t\t\tzero := \"00:00\"\n\t\t\t\t\t\trec[i] = &zero\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t}\n\n\t\t\t\t\t// Else\n\t\t\t\t\ttext := string(*rb)\n\t\t\t\t\trec[i] = &text\n\t\t\t\t}\n\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\t// else, we don't know what to do with this col\n\t\t\treturn nil, errz.Errorf(\"column %d %s: unknown type db(%T) with kind(%s), val(%v)\", i, rowMeta[i].Name(), rec[i], rowMeta[i].Kind(), rec[i])\n\t\t}\n\t\treturn rec, nil\n\t}\n}", "func RowToQueryResult(row *sql.Row, colDefines []database.Column) (QueryResult, error) {\n\tcols := database.Columns(colDefines).Names()\n\tcolumns := make([]interface{}, len(cols))\n\tcolumnPointers := make([]interface{}, len(cols))\n\tfor i := range columns {\n\t\tcolumnPointers[i] = &columns[i]\n\t}\n\t// Scan the result into the column pointers...\n\tif err := row.Scan(columnPointers...); err != nil {\n\t\treturn nil, err\n\t}\n\n\trowData := makeRowDataSet(colDefines)\n\tfor i, colName := range cols {\n\t\tval := columnPointers[i].(*interface{})\n\t\trowData[colName] = ColData{Data: val, DataType: rowData[colName].DataType}\n\t}\n\n\treturn QueryResult(rowData), nil\n}", "func CreateRecord(db *sql.DB, e Record) (Record, error) {\n\tvar record Record\n\terr := db.QueryRow(`\n\t\tINSERT INTO user_records(weight, reps, rpe, date_performed, exercise_id, user_id)\n\t\tVALUES\n\t\t($1, $2, $3, $4, $5, $6)\n\t\tRETURNING *`,\n\t\te.Weight, e.Reps, e.RPE, e.DatePerformed, e.ExerciseID, e.UserID,\n\t).Scan(\n\t\t&record.ID,\n\t\t&record.Weight,\n\t\t&record.Reps,\n\t\t&record.RPE,\n\t\t&record.DatePerformed,\n\t\t&record.ExerciseID,\n\t\t&record.UserID,\n\t)\n\n\tif err != nil {\n\t\treturn record, err\n\t}\n\n\treturn record, nil\n}", "func (r *distSQLReceiver) PushRow(row sqlbase.EncDatumRow) bool {\n\tif r.err != nil 
{\n\t\treturn false\n\t}\n\tif r.rows == nil {\n\t\tr.numRows++\n\t\treturn true\n\t}\n\tif r.row == nil {\n\t\tr.row = make(parser.DTuple, len(r.resultToStreamColMap))\n\t}\n\tfor i, resIdx := range r.resultToStreamColMap {\n\t\terr := row[resIdx].EnsureDecoded(&r.alloc)\n\t\tif err != nil {\n\t\t\tr.err = err\n\t\t\treturn false\n\t\t}\n\t\tr.row[i] = row[resIdx].Datum\n\t}\n\t// Note that AddRow accounts for the memory used by the Datums.\n\tif _, err := r.rows.AddRow(r.row); err != nil {\n\t\tr.err = err\n\t\treturn false\n\t}\n\treturn true\n}", "func recordToSlice(record Record) []string {\n\tvar recordSlice []string\n\n\trecordSlice = []string{\n\t\tfmt.Sprintf(\"%d\",record.CheeseId), record.CheeseName, record.ManufacturerName, record.ManufacturerProvCode,\n\t\trecord.ManufacturingType, record.WebSite, fmt.Sprintf(\"%.2f\", record.FatContentPercent), \n\t\tfmt.Sprintf(\"%.2f\", record.MoisturePercent), record.Particularities, record.Flavour, \n\t\trecord.Characteristics, record.Ripening, fmt.Sprintf(\"%t\", record.Organic),\n\t\trecord.CategoryType, record.MilkType, record.MilkTreatmentType, record.RindType, record.LastUpdateDate,\n\t}\n\n\treturn recordSlice\n}", "func parseRecord(table string, r *Record) error {\n\t// it's ok if some records don't return a value\n\tif len(r.Value) == 0 {\n\t\treturn nil\n\t}\n\tif r.Table == \"\" {\n\t\tr.Table = table\n\t} else {\n\t\t// TODO: probably never happens\n\t\tpanicIf(r.Table != table)\n\t}\n\n\t// set Block/Space etc. 
based on TableView type\n\tvar pRawJSON *map[string]interface{}\n\tvar obj interface{}\n\tswitch table {\n\tcase TableActivity:\n\t\tr.Activity = &Activity{}\n\t\tobj = r.Activity\n\t\tpRawJSON = &r.Activity.RawJSON\n\tcase TableBlock:\n\t\tr.Block = &Block{}\n\t\tobj = r.Block\n\t\tpRawJSON = &r.Block.RawJSON\n\tcase TableUser:\n\t\tr.User = &User{}\n\t\tobj = r.User\n\t\tpRawJSON = &r.User.RawJSON\n\tcase TableSpace:\n\t\tr.Space = &Space{}\n\t\tobj = r.Space\n\t\tpRawJSON = &r.Space.RawJSON\n\tcase TableCollection:\n\t\tr.Collection = &Collection{}\n\t\tobj = r.Collection\n\t\tpRawJSON = &r.Collection.RawJSON\n\tcase TableCollectionView:\n\t\tr.CollectionView = &CollectionView{}\n\t\tobj = r.CollectionView\n\t\tpRawJSON = &r.CollectionView.RawJSON\n\tcase TableDiscussion:\n\t\tr.Discussion = &Discussion{}\n\t\tobj = r.Discussion\n\t\tpRawJSON = &r.Discussion.RawJSON\n\tcase TableComment:\n\t\tr.Comment = &Comment{}\n\t\tobj = r.Comment\n\t\tpRawJSON = &r.Comment.RawJSON\n\t}\n\tif obj == nil {\n\t\treturn fmt.Errorf(\"unsupported table '%s'\", r.Table)\n\t}\n\tif err := jsonit.Unmarshal(r.Value, pRawJSON); err != nil {\n\t\treturn err\n\t}\n\tid := (*pRawJSON)[\"id\"]\n\tif id != nil {\n\t\tr.ID = id.(string)\n\t}\n\tif err := jsonit.Unmarshal(r.Value, &obj); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}", "func NewRecord(value interface{}) Record {\n\n\tswitch obj := value.(type) {\n\tcase nil:\n\t\treturn Record{\n\t\t\tdropped: true,\n\t\t}\n\tcase map[string]interface{}:\n\t\treturn Record{\n\t\t\tvalues: GuessType(obj).(map[string]interface{}),\n\t\t}\n\t}\n\n\treturn Record{\n\t\tvalues: map[string]interface{}{\n\t\t\t\"value\": GuessType(value),\n\t\t},\n\t}\n}", "func (kvcodec *tableKVEncoder) RemoveRecord(\n\trow []types.Datum,\n\trowID int64,\n\tcolumnPermutation []int,\n) (Row, int, error) {\n\tcols := kvcodec.tbl.Cols()\n\n\tvar value types.Datum\n\tvar err error\n\n\trecord := kvcodec.recordCache\n\tif record == nil {\n\t\trecord = 
make([]types.Datum, 0, len(cols)+1)\n\t}\n\n\tfor i, col := range cols {\n\t\tj := columnPermutation[i]\n\t\tisAutoIncCol := mysql.HasAutoIncrementFlag(col.Flag)\n\t\tswitch {\n\t\tcase j >= 0 && j < len(row):\n\t\t\tvalue, err = table.CastValue(kvcodec.se, row[j], col.ToInfo(), false, false)\n\t\t\tif err == nil {\n\t\t\t\terr = col.HandleBadNull(&value, kvcodec.se.vars.StmtCtx)\n\t\t\t}\n\t\tcase isAutoIncCol:\n\t\t\t// we still need a conversion, e.g. to catch overflow with a TINYINT column.\n\t\t\tvalue, err = table.CastValue(kvcodec.se, types.NewIntDatum(rowID), col.ToInfo(), false, false)\n\t\tdefault:\n\t\t\tvalue, err = table.GetColDefaultValue(kvcodec.se, col.ToInfo())\n\t\t}\n\t\tif err != nil {\n\t\t\treturn nil, 0, errors.Trace(err)\n\t\t}\n\t\trecord = append(record, value)\n\t}\n\terr = kvcodec.tbl.RemoveRecord(kvcodec.se, kv.IntHandle(rowID), record)\n\tif err != nil {\n\t\tlog.Error(\"kv remove record failed\",\n\t\t\tzapRow(\"originalRow\", row),\n\t\t\tzapRow(\"convertedRow\", record),\n\t\t\tzap.Error(err),\n\t\t)\n\t\treturn nil, 0, errors.Trace(err)\n\t}\n\n\tpairs, size := kvcodec.se.takeKvPairs()\n\tkvcodec.recordCache = record[:0]\n\treturn Pairs(pairs), size, nil\n}", "func convert(ent *entry.Entry) pdata.LogRecord {\n\tdest := pdata.NewLogRecord()\n\tconvertInto(ent, dest)\n\treturn dest\n}", "func NewBrokerRowProtoConverter(\n\tnamespace []byte,\n\tenrichedTags tag.Tags,\n\tlimits *models.Limits,\n) (\n\tcvt *BrokerRowProtoConverter,\n\treleaseFunc func(cvt *BrokerRowProtoConverter),\n) {\n\treleaseFunc = func(cvt *BrokerRowProtoConverter) { rowConverterPool.Put(cvt) }\n\titem := rowConverterPool.Get()\n\tif item == nil {\n\t\tcvt = NewProtoConverter(limits)\n\t} else {\n\t\tcvt = item.(*BrokerRowProtoConverter)\n\t}\n\tcvt.Reset()\n\tcvt.namespace = namespace\n\tcvt.enrichedTags = enrichedTags\n\tcvt.limits = limits\n\treturn cvt, releaseFunc\n}", "func SqlRowToDoltRow(ctx context.Context, vrw types.ValueReadWriter, r sql.Row, doltSchema 
schema.Schema) (row.Row, error) {\n\tif schema.IsKeyless(doltSchema) {\n\t\treturn keylessDoltRowFromSqlRow(ctx, vrw, r, doltSchema)\n\t}\n\treturn pkDoltRowFromSqlRow(ctx, vrw, r, doltSchema)\n}", "func (r *Reader) Row() []interface{} {\n\treturn r.row\n}", "func InsertRow(db *sql.DB, tab DBTable, row interface{}) (err error) {\n\tnumRows := reflect.ValueOf(row).Len()\n\t// log.Println(fmt.Sprintf(\"Inserting %s record ...\", tab.name))\n\tinsertSQL := fmt.Sprintf(\"INSERT INTO %s(%s) VALUES\", tab.name, tab.columns)\n\tvaluesSQL := fmt.Sprintf(\" (%s)\", tab.questions)\n\tfor j := 0; j < numRows-1; j++ {\n\t\tvaluesSQL = fmt.Sprintf(\"%s, (%s)\", valuesSQL, tab.questions)\n\t}\n\n\tinsertSQL = fmt.Sprint(insertSQL, valuesSQL)\n\tstatement, err := db.Prepare(insertSQL) // Prepare statement. This is good to avoid SQL injections\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tvar args []interface{}\n\tfor j := 0; j < numRows; j++ {\n\t\trv := reflect.ValueOf(row).Index(j)\n\t\tfor i := 0; i < rv.NumField(); i++ {\n\t\t\targs = append(args, rv.Field(i).Interface())\n\t\t}\n\t}\n\t_, err = statement.Exec(args...)\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}", "func RowToTripOfferings(row *sql.Rows) []TripOffering {\n tripOffering := []TripOffering{}\n for row.Next() {\n var tripNumber int\n var date string\n var scheduledStartTime string\n var scheduledArrivalTime string\n var driverName string\n var busID int\n row.Scan(&tripNumber, &date, &scheduledStartTime, &scheduledArrivalTime, &driverName, &busID)\n tripOffering = append(tripOffering, TripOffering{\n TripNumber: tripNumber,\n Date: date,\n ScheduledStartTime: scheduledStartTime,\n ScheduledArrivalTime: scheduledArrivalTime,\n DriverName: driverName,\n BusID: busID,\n })\n }\n return tripOffering\n}", "func RowToMap(rows *sql.Rows) []map[string]string {\n\tcolumns, _ := rows.Columns()\n\tcount := len(columns)\n\treadCols := make([]interface{}, count)\n\trawCols := make([]interface{}, count)\n\tvar 
records []map[string]string\n\tfor rows.Next() {\n\t\t// resultCols := make(map[string]string, count)\n\t\tfor i := range columns {\n\t\t\treadCols[i] = &rawCols[i]\n\t\t}\n\t\trows.Scan(readCols...)\n\n\t\t// all conver to string\n\t\tresultCols := assertTypeMap(columns, rawCols)\n\n\t\trecords = append(records, resultCols)\n\t}\n\treturn records\n}", "func PrintRow(fields []string, row map[string]interface{}) {\n\ttable := New(fields)\n\t// add row\n\ttable.AddRow(row)\n\t// And display table\n\ttable.Print()\n}", "func RecordToProto(ctx context.Context, dag format.DAGService, rec net.Record) (*pb.Log_Record, error) {\n\tblock, err := rec.GetBlock(ctx, dag)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tevent, ok := block.(*Event)\n\tif !ok {\n\t\tevent, err = EventFromNode(block)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\theader, err := event.GetHeader(ctx, dag, nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tbody, err := event.GetBody(ctx, dag, nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &pb.Log_Record{\n\t\tRecordNode: rec.RawData(),\n\t\tEventNode: block.RawData(),\n\t\tHeaderNode: header.RawData(),\n\t\tBodyNode: body.RawData(),\n\t}, nil\n}", "func GetRecord(db *sql.DB, id int) (Record, error) {\n\tvar record Record\n\terr := db.QueryRow(`SELECT * FROM user_records WHERE id = ($1)`, id).Scan(\n\t\t&record.ID,\n\t\t&record.Weight,\n\t\t&record.Reps,\n\t\t&record.RPE,\n\t\t&record.DatePerformed,\n\t\t&record.ExerciseID,\n\t\t&record.UserID,\n\t)\n\n\tif err != nil {\n\t\treturn record, err\n\t}\n\n\treturn record, nil\n}", "func (writer *Writer) WriteRow(row Row, recordMd *[]Metadata) (e error) {\n\tvar md *Metadata\n\tvar inMd *Metadata\n\tvar rIdx int\n\tvar nRecord = len(row)\n\tvar recV []byte\n\tv := []byte{}\n\n\tfor i := range writer.OutputMetadata {\n\t\tmd = &writer.OutputMetadata[i]\n\n\t\t// find the input index based on name on record metadata.\n\t\trIdx = 0\n\t\tfor y := range (*recordMd) {\n\t\t\tinMd = 
&(*recordMd)[y]\n\n\t\t\tif inMd.Name == md.Name {\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tif ! (*recordMd)[y].Skip {\n\t\t\t\trIdx++\n\t\t\t}\n\t\t}\n\n\t\t// If input column is ignored, continue to next record.\n\t\tif inMd.Skip {\n\t\t\tcontinue\n\t\t}\n\n\t\t// No input metadata matched? skip it too.\n\t\tif rIdx >= nRecord {\n\t\t\tcontinue\n\t\t}\n\n\t\trecV = row[rIdx].ToByte()\n\n\t\tif \"\" != md.LeftQuote {\n\t\t\tv = append (v, []byte (md.LeftQuote)...)\n\t\t}\n\n\t\tv = append (v, recV...)\n\n\t\tif \"\" != md.RightQuote {\n\t\t\tv = append (v, []byte (md.RightQuote)...)\n\t\t}\n\n\t\tif \"\" != md.Separator {\n\t\t\tv = append (v, []byte (md.Separator)...)\n\t\t}\n\t}\n\n\tv = append (v, '\\n')\n\n\t_, e = writer.BufWriter.Write (v)\n\n\tif nil != e {\n\t\treturn e\n\t}\n\n\treturn nil\n}", "func NewRecord() *Record {\n\treturn new(Record)\n}", "func sensorValueFromRow(rows *sql.Rows) (SensorValue, error) {\n\tvar sensorId,\n\t\tip,\n\t\ttyp string\n\tvar t time.Time\n\tvar value float64\n\n\terr := rows.Scan(\n\t\t&sensorId,\n\t\t&typ,\n\t\t&t,\n\t\t&value,\n\t\t&ip)\n\n\treturn SensorValue{\n\t\tSensorId: sensorId,\n\t\tType: typ,\n\t\tTime: t,\n\t\tValue: value,\n\t\tIp: ip,\n\t}, err\n}", "func lineToRecord(line []string) Record {\n\n\t// parse some values from strings\n\tcheeseId, err := strconv.ParseInt(line[0], 10, 64)\n\tif err != nil { cheeseId = 0 }\n\tfatContentPercent, err := strconv.ParseFloat(line[10], 32)\n\tif err != nil { fatContentPercent = 0.0 }\n\tmoisturePercent, err := strconv.ParseFloat(line[11], 32)\n\tif err != nil { moisturePercent = 0.0 }\n\torganic, err := strconv.ParseBool(line[20])\n\tif err != nil { organic = false }\n\n\treturn Record {\n\t\tCheeseId: int(cheeseId),\n\t\tCheeseName: getFirstNonEmptyStringOrNA(line[1], line[2]),\n\t\tManufacturerName: getFirstNonEmptyStringOrNA(line[3], line[4]),\n\t\tManufacturerProvCode: getFirstNonEmptyStringOrNA(line[5], \"??\"),\n\t\tManufacturingType: getFirstNonEmptyStringOrNA(line[6], 
line[7]),\n\t\tWebSite: getFirstNonEmptyStringOrNA(line[8], line[9]),\n\t\tFatContentPercent: float32(fatContentPercent),\n\t\tMoisturePercent: float32(moisturePercent),\n\t\tParticularities: getFirstNonEmptyStringOrNA(line[12], line[13]),\n\t\tFlavour: getFirstNonEmptyStringOrNA(line[14], line[15]),\n\t\tCharacteristics: getFirstNonEmptyStringOrNA(line[16], line[17]),\n\t\tRipening: getFirstNonEmptyStringOrNA(line[18], line[19]),\n\t\tOrganic: organic,\n\t\tCategoryType: getFirstNonEmptyStringOrNA(line[21], line[22]),\n\t\tMilkType: getFirstNonEmptyStringOrNA(line[23], line[24]),\n\t\tMilkTreatmentType: getFirstNonEmptyStringOrNA(line[25], line[26]),\n\t\tRindType: getFirstNonEmptyStringOrNA(line[27], line[28]),\n\t\tLastUpdateDate: line[29],\n\t}\n}", "func (fr *FakeResult) Row(ptr interface{}) error {\n\tif fr.Force == \"true\" {\n\t\treturn errors.New(\"Function Row forced error\")\n\t}\n\tif reflect.TypeOf(ptr).String() == \"**schema.Stat\" {\n\t\tvar stat *schema.Stat\n\t\tvar data string\n\n\t\tswitch count {\n\t\tcase 0:\n\t\t\tdata = `{\n\t\t\t\t\"ProcessOutcome\": \"No Action\",\n\t\t\t\t\"UserClassification\": \"Cancel Subscription\",\n\t\t\t\t\"count\": 25\n\t\t\t}`\n\t\t\tbreak\n\t\tcase 1:\n\t\t\tdata = `{\n\t\t\t\t\"ProcessOutcome\": \"No Action\",\n\t\t\t\t\"UserClassification\": \"Cancel Autorenewal\",\n\t\t\t\t\"count\": 13\n\t\t\t}`\n\t\t\tbreak\n\t\tcase 2:\n\t\t\tdata = `{\n\t\t\t\t\"ProcessOutcome\": \"No Action\",\n\t\t\t\t\"UserClassification\": \"\",\n\t\t\t\t\"count\": 34\n\t\t\t}`\n\t\t\tbreak\n\t\tcase 3:\n\t\t\tdata = `{\n\t\t\t\t\"ProcessOutcome\": \"No Action\",\n\t\t\t\t\"UserClassification\": \"\",\n\t\t\t\t\"count\": 95\n\t\t\t}`\n\t\t\tbreak\n\t\t}\n\t\tjson.Unmarshal([]byte(data), &stat)\n\t\t*ptr.(**schema.Stat) = stat\n\t} else {\n\t\tvar rl *schema.ReportList\n\t\tdata := `{ \"id\": \"096esbpfrk8b3nhdlfhditsmk10gj03g06i3c201.json\",\n \"servisbotstats\": {\n \"EmailClassification\": \"Cancel\",\n \"ProcessOutcome\": \"No 
Action\",\n \"UserClassification\": \"\",\n \"success\": false\n }}`\n\t\tjson.Unmarshal([]byte(data), &rl)\n\t\t*ptr.(**schema.ReportList) = rl\n\t}\n\treturn nil\n}", "func (rc *BrokerRowProtoConverter) ConvertTo(m *protoMetricsV1.Metric, row *BrokerRow) error {\n\tblock, err := rc.MarshalProtoMetricV1(m)\n\tif err != nil {\n\t\treturn err\n\t}\n\trow.FromBlock(block)\n\treturn nil\n}", "func (f *FakeTable) ReadRow(ovs *libovsdb.OvsdbClient, readRowArgs ovsdb.ReadRowArgs) (map[string]interface{}, error) {\n\tm := make(map[string]interface{})\n\treturn m, nil\n}", "func fetchObject(row scannableRow) (*remember.DataObject, error) {\n\tobject := &remember.DataObject{}\n\n\tvar created int64\n\tvar updated int64\n\n\terr := row.Scan(\n\t\t&object.ID,\n\t\t&object.Title,\n\t\t&object.GroupId,\n\t\t&object.Payload,\n\t\tcreated,\n\t\tupdated,\n\t)\n\n\tobject.CreatedAt = time.Unix(created, 0)\n\tobject.UpdatedAt = time.Unix(updated, 0)\n\n\tif nil != err {\n\t\treturn nil, err\n\t}\n\n\treturn object, nil\n}", "func (vt *perfSchemaTable) Row(ctx sessionctx.Context, h int64) ([]types.Datum, error) {\n\treturn nil, table.ErrUnsupportedOp\n}", "func marshalRecord(r common.Record, buff []byte) {\n\tcopy(buff, r.ID)\n\n\tbinary.LittleEndian.PutUint64(buff[16:24], r.Start)\n\tbinary.LittleEndian.PutUint32(buff[24:], r.Length)\n}", "func (c *ConnCtx) InsertRecordsRowByRow(records []*SongRecord) error {\n\ttemplate := c.getInsertQueryTempalte()\n\tstmt, err := c.Conn.PrepareNamed(template)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer stmt.Close()\n\n\tfor _, item := range records {\n\t\tresult, err := stmt.Exec(&item)\n\t\tif err != nil {\n\t\t\tErrorF(fmt.Sprintf(\"execute sql error: %s\", err.Error()))\n\t\t\tcontinue\n\t\t}\n\t\trowsAffected, err := result.RowsAffected()\n\t\tif err != nil {\n\t\t\tErrorF(fmt.Sprintf(\"pg server executes error: %s\", err.Error()))\n\t\t\tcontinue\n\t\t}\n\t\tif rowsAffected != 1 {\n\t\t\tWarningF(fmt.Sprintf(\"insert record affected row 
error: %d\", rowsAffected))\n\t\t}\n\t\tDebugF(\"insert done: %#v\", item)\n\t}\n\n\treturn nil\n}", "func (s SQLite) QueryRow(query string, args ...interface{}) (*sql.Row, error) {\n\tif s.DB == nil {\n\t\treturn nil, fmt.Errorf(\"db is not created\")\n\t}\n\treturn s.DB.QueryRow(query, args...), nil\n}", "func GetRecord(m interface{}) (ret int, ts interface{}, rec map[string]string) {\n\tslice := reflect.ValueOf(m)\n\tt := slice.Index(0).Interface()\n\tdata := slice.Index(1)\n\n\tmapInterfaceData := data.Interface().(map[interface{}]interface{})\n\n\tmapData := make(map[string]string)\n\n\tfor kData, vData := range mapInterfaceData {\n\t\tmapData[kData.(string)] = string(vData.([]uint8))\n\t}\n\n\tmapData[\"id\"] = uuid.NewV4().String()\n\n\treturn 0, t, mapData\n}", "func (m *PgSQL) QueryRow(query string, args ...interface{}) *sql.Row {\n\treturn m.Connection.QueryRow(query, args...)\n}", "func (dht *FullRT) getRecordFromDatastore(ctx context.Context, dskey ds.Key) (*recpb.Record, error) {\n\tbuf, err := dht.datastore.Get(ctx, dskey)\n\tif err == ds.ErrNotFound {\n\t\treturn nil, nil\n\t}\n\tif err != nil {\n\t\tlogger.Errorw(\"error retrieving record from datastore\", \"key\", dskey, \"error\", err)\n\t\treturn nil, err\n\t}\n\trec := new(recpb.Record)\n\terr = proto.Unmarshal(buf, rec)\n\tif err != nil {\n\t\t// Bad data in datastore, log it but don't return an error, we'll just overwrite it\n\t\tlogger.Errorw(\"failed to unmarshal record from datastore\", \"key\", dskey, \"error\", err)\n\t\treturn nil, nil\n\t}\n\n\terr = dht.Validator.Validate(string(rec.GetKey()), rec.GetValue())\n\tif err != nil {\n\t\t// Invalid record in datastore, probably expired but don't return an error,\n\t\t// we'll just overwrite it\n\t\tlogger.Debugw(\"local record verify failed\", \"key\", rec.GetKey(), \"error\", err)\n\t\treturn nil, nil\n\t}\n\n\treturn rec, nil\n}", "func NewSmsLogRow()(*SmsLogRow) {\n m := &SmsLogRow{\n }\n m.backingStore = 
ie8677ce2c7e1b4c22e9c3827ecd078d41185424dd9eeb92b7d971ed2d49a392e.BackingStoreFactoryInstance();\n m.SetAdditionalData(make(map[string]any))\n return m\n}", "func (r *RowCache) Row(uuid string) Model {\n\tr.mutex.RLock()\n\tdefer r.mutex.RUnlock()\n\tif row, ok := r.cache[uuid]; ok {\n\t\treturn row.(Model)\n\t}\n\treturn nil\n}", "func (db *DB) QueryRowx(query string, args ...interface{}) *Row {\n rows, err := db.DB.Query(query, args...)\n return &Row{rows: rows, err: err, unsafe: db.unsafe, Mapper: db.Mapper}\n}", "func (mr MutRow) ToRow() Row {\n\treturn Row(mr)\n}", "func WriteRow(writer io.Writer, ts int64, anyObject ...interface{}) error {\n\tencoded, err := EncodeRow(ts, anyObject...)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"WriteRow encoding error: %v\", err)\n\t}\n\treturn WriteMessage(writer, encoded)\n}", "func RowToStructByNameLax[T any](row CollectableRow) (T, error) {\n\tvar value T\n\terr := row.Scan(&namedStructRowScanner{ptrToStruct: &value, lax: true})\n\treturn value, err\n}", "func RowToStructByName[T any](row CollectableRow) (T, error) {\n\tvar value T\n\terr := row.Scan(&namedStructRowScanner{ptrToStruct: &value})\n\treturn value, err\n}", "func SerializeRecord(data []string) Record {\n\ttimestamp, err := time.Parse(\"2006-01-02 15:04:05\", data[1])\n\tif err != nil {\n\t\tlog.Fatalf(\"could not parse time from %s: %s\", data[1], err)\n\t}\n\treturn Record{\n\t\tID: data[0],\n\t\tTimestamp: timestamp,\n\t\tEmail: data[2],\n\t\tIP: data[3],\n\t\tMac: data[4],\n\t\tCountryCode: data[5],\n\t\tUserAgent: data[6],\n\t}\n}", "func insertRecordToDB(db *sql.DB, r *gosince.APIRecord) error {\n\tctx, cancel := context.WithTimeout(context.Background(), gosince.DBTimeout)\n\tdefer cancel()\n\tresult, err := db.ExecContext(ctx,\n\t\t`INSERT INTO goapis(name, category, version, package_name, description, golang_url)\n\t\tvalues(?, ?, ?, ?, ?, ?)`,\n\t\tr.Name, r.Category, r.Version, r.PackageName, r.Description, r.GolangURL)\n\n\t// Ignore duplicate API 
records. For example.\n\t// pkg log/syslog (darwin-386), const LOG_ALERT = 1\n\t// pkg log/syslog (openbsd-amd64-cgo), const LOG_ALERT = 1\n\tif err != nil {\n\t\tif e, ok := err.(gosqlite3.Error); ok && e.Code == 19 {\n\t\t\treturn nil\n\t\t}\n\t\treturn err\n\t}\n\trows, err := result.RowsAffected()\n\tif err != nil || rows != 1 {\n\t\treturn err\n\t}\n\treturn nil\n}", "func RowToTripStopInfos(row *sql.Rows) []TripStopInfo {\n result := []TripStopInfo{}\n for row.Next() {\n var tripNumber int\n var stopNumber int\n var sequenceNumber int\n var drivingTime float32\n result = append(result, TripStopInfo{\n TripNumber: tripNumber,\n StopNumber: stopNumber,\n SequenceNumber: sequenceNumber,\n DrivingTime: drivingTime,\n })\n }\n return result\n}", "func (p *partitionImpl) GetRow(rowNum int) sif.Row {\n\treturn &rowImpl{rowNum, p}\n}", "func (self *RowsBuffer) ReadRow() (*Row, error) {\n\tself.Lock()\n\tdefer self.Unlock()\n\n\tfor self.Index >= self.RowsNumber {\n\t\tself.ClearValues()\n\t\tif err := self.readRows(); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\trow := RowPool.Get().(*Row)\n\trow.Clear()\n\trow.Vals = make([]interface{}, len(self.ValueBuffers))\n\tfor i, col := range self.ValueBuffers {\n\t\trow.Vals[i] = col[self.Index]\n\t}\n\n\trow.Keys = make([]interface{}, len(self.KeyBuffers))\n\tfor i, col := range self.KeyBuffers {\n\t\trow.Keys[i] = col[self.Index]\n\t}\n\tself.Index++\n\treturn row, nil\n}", "func (d *Db) GetNextRecord(r *Record) (*Record, error) {\n\tnr := &Record{rec: C.wg_get_next_record(d.db, r.rec)}\n\tif nr.rec == nil {\n\t\treturn nil, WDBError(\"Done With DB\")\n\t}\n\treturn nr, nil\n}", "func NewRow(schema typeof.Schema, capacity int) Row {\n\tif schema == nil {\n\t\tschema = make(typeof.Schema, capacity)\n\t}\n\n\treturn Row{\n\t\tValues: make(map[string]interface{}, capacity),\n\t\tSchema: schema,\n\t}\n}", "func (mcs *MemoryCellStore) ReadRow(key string, s *Sheet) (*Row, error) {\n\tr, ok := mcs.rows[key]\n\tif !ok 
{\n\t\treturn nil, NewRowNotFoundError(key, \"No such row\")\n\t}\n\treturn r, nil\n}", "func (r *Report) ReportRow(rowID int) *db.ReportRow {\n\treturn &db.ReportRow{\n\t\tReportID: r.FullID,\n\t\tTitle: r.Title,\n\t\tRowID: rowID,\n\t\tPostDateTime: r.DateTime,\n\t\tRawText: r.rawString,\n\t}\n}", "func rowsToThings(rows *sql.Rows) Things {\n\tvar (\n\t\tt Thing\n\t\tresult Things\n\t\terr error\n\t)\n\n\tcheckRows(\"Things\", rows)\n\n\tfor i := 0; rows.Next(); i++ {\n\t\terr := rows.Scan(&t.ckey, &t.cval, &t.url, &t.data, &t.clockid, &t.tsn)\n\t\tcheckErr(\"scan things\", err)\n\n\t\tresult = append(result, t)\n\t}\n\terr = rows.Err()\n\tcheckErr(\"end reading things loop\", err)\n\n\tfmt.Printf(\"returning things: %d rows\\n\", len(result))\n\treturn result\n}", "func rowToSample(row map[string]bigquery.Value) (prompb.Sample, model.Metric, []*prompb.Label, error) {\n\tvar v interface{}\n\tlabelsJSON := row[\"tags\"].(string)\n\terr := json.Unmarshal([]byte(labelsJSON), &v)\n\tif err != nil {\n\t\treturn prompb.Sample{}, nil, nil, err\n\t}\n\tlabels := v.(map[string]interface{})\n\tlabelPairs := make([]*prompb.Label, 0, len(labels))\n\tmetric := model.Metric{}\n\tfor name, value := range labels {\n\t\tlabelPairs = append(labelPairs, &prompb.Label{\n\t\t\tName: name,\n\t\t\tValue: value.(string),\n\t\t})\n\t\tmetric[model.LabelName(name)] = model.LabelValue(value.(string))\n\t}\n\tlabelPairs = append(labelPairs, &prompb.Label{\n\t\tName: model.MetricNameLabel,\n\t\tValue: row[\"metricname\"].(string),\n\t})\n\t// Make sure we sort the labels, so the test cases won't blow up\n\tsort.Slice(labelPairs, func(i, j int) bool { return labelPairs[i].Name < labelPairs[j].Name })\n\tmetric[model.LabelName(model.MetricNameLabel)] = model.LabelValue(row[\"metricname\"].(string))\n\treturn prompb.Sample{Timestamp: row[\"timestamp\"].(int64), Value: row[\"value\"].(float64)}, metric, labelPairs, nil\n}", "func TestRecord(t *testing.T) {\n\terr := testDbf.GoTo(1)\n\tif err != 
nil {\n\t\tt.Fatal(err)\n\t}\n\n\t// test if the record is deleted\n\tdeleted, err := testDbf.Deleted()\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif !deleted {\n\t\tt.Fatal(\"Record should be deleted\")\n\t}\n\n\t// read the same record using Record() and RecordAt()\n\trecs := [2]*Record{}\n\trecs[0], err = testDbf.Record()\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\trecs[1], err = testDbf.RecordAt(1)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tfor irec, rec := range recs {\n\t\tfor _, want := range wantValues {\n\t\t\tval, err := rec.Field(want.pos)\n\t\t\tif err != nil {\n\t\t\t\tt.Error(err)\n\t\t\t}\n\t\t\tstrval := strings.TrimSpace(fmt.Sprintf(\"%v\", val))\n\t\t\tstrtype := fmt.Sprintf(\"%T\", val)\n\n\t\t\tif want.strval != strval || want.strtype != strtype {\n\t\t\t\tt.Errorf(\"Record %d: Wanted value %s with type %s, have value %s with type %s\", irec, want.strval, want.strtype, strval, strtype)\n\t\t\t}\n\t\t}\n\t}\n}", "func (f *FieldValues) Row(idx int, dest []driver.Value) {\n\tcopy(dest, f.values[idx*f.cols:(idx+1)*f.cols])\n\n\tif f.lobCols == 0 {\n\t\treturn\n\t}\n\n\tfor i, descr := range f.descrs {\n\t\tcol := descr.col\n\t\twriter := dest[col].(lobWriter)\n\t\tf.writers[i] = writer\n\t\tdescr.w = writer\n\t\tdest[col] = lobReadDescrToPointer(descr)\n\t}\n\n\t// last descriptor triggers lob read\n\tf.descrs[f.lobCols-1].fn = func() error {\n\t\treturn f.s.readLobStream(f.writers)\n\t}\n}", "func returnSingleRecord(w http.ResponseWriter, r *http.Request, ps httprouter.Params){\n\n\t//msg := <- requestChannel\n\tw.Header().Set(\"Content-Type\", \"application/json; charset=utf-8\")\n\tid, ok := getID(w, ps)\n\tfmt.Print(\"val\",id,ok)\n\tif !ok {\n\t\trec, ires := mytable.searchByKey(ps.ByName(\"id\"))\n\t\tfmt.Println(rec,ires)\n\t\tif ires == -1{\n\t\t\tjson.NewEncoder(w).Encode(\"No record ith that key\")\n\t\t} else {\n\t\t\tjson.NewEncoder(w).Encode(rec)\n\t\t}\n\t} else {\n\t\trec, ires := 
mytable.searchById(id)\n\t\tfmt.Println(rec,ires)\n\t\tif ires == -1 {\n\t\t\tjson.NewEncoder(w).Encode(\"No value with that id\")\n\n\t\t} else {\n\t\t\tjson.NewEncoder(w).Encode(rec)\n\t\t}\n\t}\n}", "func (w *Wrapper) queryRow(query string, args ...interface{}) *sql.Row {\n\tw.connLock.RLock()\n\tdefer w.connLock.RUnlock()\n\n\treturn w.connection.QueryRow(w.prepare(query), args...)\n}", "func (v *recordingTable) NewRecord() reform.Record {\n\treturn new(Recording)\n}", "func (f *recordingSource) parseRecord(recordNum int) *record {\n\tr, ok := f.recordDecls[recordNum]\n\tif !ok {\n\t\tpanicf(\"record with number %d must exist\", recordNum)\n\t}\n\n\t// Record fields are separated by tabs, with the first field being the name\n\t// of the driver method.\n\tfields := splitString(r, \"\\t\")\n\trecType, ok := strToRecType[fields[0]]\n\tif !ok {\n\t\tpanicf(\"record type %v is not recognized\", fields[0])\n\t}\n\n\t// Remaining fields are record arguments in \"<dataType>:<formattedValue>\"\n\t// format.\n\trec := &record{Typ: recType}\n\tfor i := 1; i < len(fields); i++ {\n\t\tval, err := parseValueWithType(fields[i])\n\t\tif err != nil {\n\t\t\tpanicf(\"error parsing %s: %v\", fields[i], err)\n\t\t}\n\t\trec.Args = append(rec.Args, val)\n\t}\n\treturn rec\n}", "func MarshalRecord(subRecord Record) ([]byte, error) {\n\tbase := X_Record{}\n\n\tswitch subRecord.(type) {\n\tcase *GenesisRecord:\n\t\tbase.Union = &X_Record_Genesis{(*X_GenesisRecord)(subRecord.(*GenesisRecord))}\n\tcase *ChildRecord:\n\t\tbase.Union = &X_Record_Child{(*X_ChildRecord)(subRecord.(*ChildRecord))}\n\tcase *JetRecord:\n\t\tbase.Union = &X_Record_Jet{(*X_JetRecord)(subRecord.(*JetRecord))}\n\tcase *RequestRecord:\n\t\tbase.Union = &X_Record_Request{(*X_RequestRecord)(subRecord.(*RequestRecord))}\n\tcase *ResultRecord:\n\t\tbase.Union = &X_Record_Result{(*X_ResultRecord)(subRecord.(*ResultRecord))}\n\tcase *TypeRecord:\n\t\tbase.Union = 
&X_Record_Type{(*X_TypeRecord)(subRecord.(*TypeRecord))}\n\tcase *CodeRecord:\n\t\tbase.Union = &X_Record_Code{(*X_CodeRecord)(subRecord.(*CodeRecord))}\n\tcase *ObjectActivateRecord:\n\t\tbase.Union = &X_Record_ObjectActivate{(*X_ObjectActivateRecord)(subRecord.(*ObjectActivateRecord))}\n\tcase *ObjectAmendRecord:\n\t\tbase.Union = &X_Record_ObjectAmend{(*X_ObjectAmendRecord)(subRecord.(*ObjectAmendRecord))}\n\tcase *ObjectDeactivateRecord:\n\t\tbase.Union = &X_Record_ObjectDeactivate{(*X_ObjectDeactivateRecord)(subRecord.(*ObjectDeactivateRecord))}\n\tdefault:\n\t\treturn nil, fmt.Errorf(\"__Record.union has unexpected type %T\", subRecord)\n\t}\n\treturn base.Marshal()\n}", "func scanRow(scanner db.Scanner, dest *core.Connection) error {\n\treturn scanner.Scan(\n\t\t&dest.ID,\n\t\t&dest.Name,\n\t\t&dest.PID,\n\t\t&dest.DataBase,\n\t\t&dest.Host,\n\t\t&dest.Port,\n\t\t&dest.User,\n\t\t&dest.Password,\n\t\t&dest.Description,\n\t\t&dest.Created,\n\t\t&dest.Updated,\n\t)\n}", "func (empHandler *EmployeeHandler) storeRecord() []Employee {\n\tvar emp []Employee\n\tdis, err := empHandler.DB.Query(\"select id, name, age, gender, role from employee\")\n\tif err != nil {\n\t\tpanic(err.Error())\n\t}\n\tfor dis.Next() {\n\t\tvar row Employee\n\t\terr = dis.Scan(&row.Id, &row.Name, &row.Age, &row.Gender, &row.Role)\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t\temp = append(emp, row)\n\t}\n\treturn emp\n}", "func (c *Cache) saveRecord(r quandl.Record) {\n\terr := c.DB.Save(&r).Error\n\tif err != nil {\n\t\tpanic(err)\n\t}\n}", "func (d *Database) QueryRow(db DB, dst interface{}, query string, args ...interface{}) error {\n\treturn d.QueryRowContext(context.Background(), db, dst, query, args...)\n}", "func NewRowToRow() AStarConfig {\n\tr2r := &rowToRow{}\n\treturn r2r\n}", "func RowToStructByPos[T any](row CollectableRow) (T, error) {\n\tvar value T\n\terr := row.Scan(&positionalStructRowScanner{ptrToStruct: &value})\n\treturn value, err\n}", "func (db TestDB) 
QueryRow(query string, args ...interface{}) *sql.Row {\n\treturn db.testTx.QueryRow(query, args...)\n}" ]
[ "0.6539114", "0.64575875", "0.61635786", "0.59165096", "0.5815466", "0.576414", "0.5693766", "0.5690296", "0.5623456", "0.56130004", "0.5544808", "0.5532619", "0.5377269", "0.5357239", "0.5338372", "0.5308314", "0.5301193", "0.5299179", "0.52847064", "0.52828956", "0.52663213", "0.5221548", "0.52182883", "0.5197463", "0.5188103", "0.5166546", "0.516225", "0.51385635", "0.5130053", "0.50951684", "0.50754106", "0.5067426", "0.50646496", "0.5041237", "0.5012035", "0.49921605", "0.49876735", "0.49822807", "0.49818763", "0.496961", "0.49623063", "0.49587193", "0.49577093", "0.49284422", "0.49174178", "0.49067694", "0.49048546", "0.48960373", "0.48814198", "0.48755592", "0.48547187", "0.48513025", "0.4851149", "0.4835554", "0.48347515", "0.48285773", "0.48144278", "0.4814123", "0.48133892", "0.47810516", "0.47697294", "0.47667855", "0.47648776", "0.47247112", "0.47129786", "0.46996424", "0.4699345", "0.46989405", "0.4698544", "0.46979606", "0.46959728", "0.4693592", "0.46931618", "0.4658993", "0.46518588", "0.46499443", "0.46494862", "0.46467692", "0.4642963", "0.46422005", "0.4630433", "0.46166307", "0.4610473", "0.4604428", "0.46040887", "0.45966443", "0.45946082", "0.45845747", "0.45771766", "0.45755392", "0.45755172", "0.45690742", "0.4564756", "0.45634598", "0.45623347", "0.45606554", "0.45550045", "0.45541856", "0.4552429", "0.4549263" ]
0.8461538
0
rowsToRecords converts from pgx.Rows to []store.Record
rowsToRecords преобразует pgx.Rows в []store.Record
func (s *sqlStore) rowsToRecords(rows pgx.Rows) ([]*store.Record, error) { var records []*store.Record for rows.Next() { var expiry *time.Time record := &store.Record{} metadata := make(Metadata) if err := rows.Scan(&record.Key, &record.Value, &metadata, &expiry); err != nil { return records, err } // set the metadata record.Metadata = toMetadata(&metadata) if expiry != nil { record.Expiry = time.Until(*expiry) } records = append(records, record) } return records, nil }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (s *sqlStore) rowToRecord(row pgx.Row) (*store.Record, error) {\n\tvar expiry *time.Time\n\trecord := &store.Record{}\n\tmetadata := make(Metadata)\n\n\tif err := row.Scan(&record.Key, &record.Value, &metadata, &expiry); err != nil {\n\t\tif err == sql.ErrNoRows {\n\t\t\treturn record, store.ErrNotFound\n\t\t}\n\t\treturn nil, err\n\t}\n\n\t// set the metadata\n\trecord.Metadata = toMetadata(&metadata)\n\tif expiry != nil {\n\t\trecord.Expiry = time.Until(*expiry)\n\t}\n\n\treturn record, nil\n}", "func RowToArr(rows *sql.Rows) (records [][]string, err error) {\n\tfmt.Printf(\"RowToArr start at %s\", time.Now())\n\tcolumns, err := rows.Columns()\n\tif err != nil {\n\t\treturn\n\t}\n\tcount := len(columns)\n\treadCols := make([]interface{}, count)\n\trawCols := make([]interface{}, count)\n\t//records = make([]interface{}, 0)\n\trecords = append(records, columns) //append row header as 1st row\n\n\t// var resultCols []string\n\tfor rows.Next() {\n\t\t// resultCols = make([]string, count)\n\t\tfor i := range columns {\n\t\t\treadCols[i] = &rawCols[i]\n\t\t}\n\t\terr = rows.Scan(readCols...)\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\n\t\tresultCols := assertTypeArray(columns, rawCols)\n\t\trecords = append(records, resultCols)\n\t}\n\n\tfmt.Printf(\"RowToArr end at %s\", time.Now())\n\treturn records, nil\n}", "func RowToRawData(rows *sql.Rows) (r RawData) {\n\trecord, _ := RowToArr(rows)\n\tr.Header = record[0]\n\tr.Rows = append(r.Rows, record[1:])\n\treturn\n}", "func toRow(pl any) []byte {\n\trt := reflect.TypeOf(pl)\n\n\tenc, err := coder.RowEncoderForStruct(rt)\n\tif err != nil {\n\t\tpanic(fmt.Errorf(\"unable to get row encoder\"))\n\t}\n\tvar buf bytes.Buffer\n\tif err := enc(pl, &buf); err != nil {\n\t\tpanic(fmt.Errorf(\"unable to do row encoding\"))\n\t}\n\treturn buf.Bytes()\n}", "func (a *kinesisFirehoseWriter) toRecords(msg message.Batch) ([]*firehose.Record, error) {\n\tentries := make([]*firehose.Record, msg.Len())\n\n\terr := msg.Iter(func(i 
int, p *message.Part) error {\n\t\tentry := firehose.Record{\n\t\t\tData: p.AsBytes(),\n\t\t}\n\n\t\tif len(entry.Data) > mebibyte {\n\t\t\ta.log.Errorf(\"part %d exceeds the maximum Kinesis Firehose payload limit of 1 MiB\\n\", i)\n\t\t\treturn component.ErrMessageTooLarge\n\t\t}\n\n\t\tentries[i] = &entry\n\t\treturn nil\n\t})\n\n\treturn entries, err\n}", "func rowsToThings(rows *sql.Rows) Things {\n\tvar (\n\t\tt Thing\n\t\tresult Things\n\t\terr error\n\t)\n\n\tcheckRows(\"Things\", rows)\n\n\tfor i := 0; rows.Next(); i++ {\n\t\terr := rows.Scan(&t.ckey, &t.cval, &t.url, &t.data, &t.clockid, &t.tsn)\n\t\tcheckErr(\"scan things\", err)\n\n\t\tresult = append(result, t)\n\t}\n\terr = rows.Err()\n\tcheckErr(\"end reading things loop\", err)\n\n\tfmt.Printf(\"returning things: %d rows\\n\", len(result))\n\treturn result\n}", "func recordToSlice(record Record) []string {\n\tvar recordSlice []string\n\n\trecordSlice = []string{\n\t\tfmt.Sprintf(\"%d\",record.CheeseId), record.CheeseName, record.ManufacturerName, record.ManufacturerProvCode,\n\t\trecord.ManufacturingType, record.WebSite, fmt.Sprintf(\"%.2f\", record.FatContentPercent), \n\t\tfmt.Sprintf(\"%.2f\", record.MoisturePercent), record.Particularities, record.Flavour, \n\t\trecord.Characteristics, record.Ripening, fmt.Sprintf(\"%t\", record.Organic),\n\t\trecord.CategoryType, record.MilkType, record.MilkTreatmentType, record.RindType, record.LastUpdateDate,\n\t}\n\n\treturn recordSlice\n}", "func RowToMap(rows *sql.Rows) []map[string]string {\n\tcolumns, _ := rows.Columns()\n\tcount := len(columns)\n\treadCols := make([]interface{}, count)\n\trawCols := make([]interface{}, count)\n\tvar records []map[string]string\n\tfor rows.Next() {\n\t\t// resultCols := make(map[string]string, count)\n\t\tfor i := range columns {\n\t\t\treadCols[i] = &rawCols[i]\n\t\t}\n\t\trows.Scan(readCols...)\n\n\t\t// all conver to string\n\t\tresultCols := assertTypeMap(columns, rawCols)\n\n\t\trecords = append(records, 
resultCols)\n\t}\n\treturn records\n}", "func databaseRowsToPaginationDataList(rows *sql.Rows, dtFields []dtColumn) ([]map[string]string, error) {\n\tvar dataList []map[string]string\n\n\tcolumns, err := rows.Columns()\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"could not get row.Columns %w\", err)\n\t}\n\n\tvalues := make([]sql.RawBytes, len(columns))\n\t// rows.Scan wants '[]interface{}' as an argument, so we must copy the\n\t// references into such a slice\n\t// See http://code.google.com/p/go-wiki/wiki/InterfaceSlice for details\n\tscanArgs := make([]interface{}, len(values))\n\tfor i := range values {\n\t\tscanArgs[i] = &values[i]\n\t}\n\n\tfor rows.Next() {\n\t\t// get RawBytes from data\n\t\terr = rows.Scan(scanArgs...)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"could not scan rows to 'scanArgs...' %w\", err)\n\t\t}\n\n\t\tvar value string\n\n\t\tfor i, col := range values {\n\t\t\t// Here we can check if the value is nil (NULL value)\n\t\t\tif col == nil {\n\t\t\t\tvalue = \"NULL\"\n\t\t\t} else {\n\t\t\t\tvalue = string(col)\n\t\t\t}\n\n\t\t\tfor _, dtField := range dtFields {\n\t\t\t\tif dtField.dbColumnName == columns[i] {\n\t\t\t\t\tdtObject := map[string]string{dtField.dtColumnName: value}\n\t\t\t\t\tdataList = append(dataList, dtObject)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\treturn dataList, nil\n}", "func RowsToMaps(rows *sql.Rows, geomColumn string) ([]map[string]interface{}, error) {\n\tvar maps []map[string]interface{}\n\n\tcols, err := rows.Columns()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tfor rows.Next() {\n\t\trow := make([]interface{}, len(cols))\n\t\tfor idx, col := range cols {\n\t\t\tif col == geomColumn {\n\t\t\t\trow[idx] = new(wkb.GeometryScanner)\n\t\t\t} else {\n\t\t\t\trow[idx] = new(DumbScanner)\n\t\t\t}\n\t\t}\n\t\terr := rows.Scan(row...)\n\t\tif err != nil {\n\t\t\treturn maps, err\n\t\t}\n\t\tm := make(map[string]interface{})\n\t\tfor idx, col := range cols {\n\t\t\tif geom, isGeomScanner := 
row[idx].(*wkb.GeometryScanner); isGeomScanner {\n\t\t\t\tif geom.Valid {\n\t\t\t\t\tm[col] = geom.Geometry\n\t\t\t\t} else {\n\t\t\t\t\treturn nil, InvalidGeometryErr\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tds := row[idx].(*DumbScanner)\n\t\t\t\tm[col] = ds.Value\n\t\t\t}\n\t\t}\n\t\tmaps = append(maps, m)\n\t}\n\n\treturn maps, nil\n}", "func CollectRows[T any](rows Rows, fn RowToFunc[T]) ([]T, error) {\n\tdefer rows.Close()\n\n\tslice := []T{}\n\n\tfor rows.Next() {\n\t\tvalue, err := fn(rows)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tslice = append(slice, value)\n\t}\n\n\tif err := rows.Err(); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn slice, nil\n}", "func RowsToMap(rows *sql.Rows, typeString string) ([]map[string]interface{}, error) {\n\tarr := make([]map[string]interface{}, 0)\n\n\tcols, err := rows.Columns()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t//Set up valuePointers slice using types from typeString\n\ttypes := strings.Split(typeString, \",\")\n\tvaluePointers := make([]interface{}, len(types))\n\tfor i, t := range types {\n\t\tif t == \"int\" {\n\t\t\tvaluePointers[i] = new(int)\n\t\t} else if t == \"string\" {\n\t\t\tvaluePointers[i] = new(string)\n\t\t} else {\n\t\t\treturn nil, errors.New(\"Unknown type in typeString\")\n\t\t}\n\t}\n\n\tfor rows.Next() {\n\t\t// Scan the result into the value pointers...\n\t\tif err := rows.Scan(valuePointers...); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tm := make(map[string]interface{})\n\t\tfor i, colName := range cols {\n\t\t\tm[colName] = valuePointers[i]\n\t\t}\n\n\t\tarr = append(arr, m)\n\t}\n\n\treturn arr, nil\n}", "func RowToDrivers(row *sql.Rows) []Driver {\n result := []Driver{}\n for row.Next() {\n var driverName string\n var driverTelephoneNumber string\n row.Scan(&driverName, &driverTelephoneNumber)\n result = append(result, Driver{\n DriverName: driverName,\n DriverTelephoneNumber: driverTelephoneNumber,\n })\n }\n return result\n}", "func (a *kinesisWriter) 
toRecords(msg message.Batch) ([]*kinesis.PutRecordsRequestEntry, error) {\n\tentries := make([]*kinesis.PutRecordsRequestEntry, msg.Len())\n\n\terr := msg.Iter(func(i int, p *message.Part) error {\n\t\tpartKey, err := a.partitionKey.String(i, msg)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"partition key interpolation error: %w\", err)\n\t\t}\n\t\tentry := kinesis.PutRecordsRequestEntry{\n\t\t\tData: p.AsBytes(),\n\t\t\tPartitionKey: aws.String(partKey),\n\t\t}\n\n\t\tif len(entry.Data) > mebibyte {\n\t\t\ta.log.Errorf(\"part %d exceeds the maximum Kinesis payload limit of 1 MiB\\n\", i)\n\t\t\treturn component.ErrMessageTooLarge\n\t\t}\n\n\t\thashKey, err := a.hashKey.String(i, msg)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"hash key interpolation error: %w\", err)\n\t\t}\n\t\tif hashKey != \"\" {\n\t\t\tentry.ExplicitHashKey = aws.String(hashKey)\n\t\t}\n\n\t\tentries[i] = &entry\n\t\treturn nil\n\t})\n\n\treturn entries, err\n}", "func RowToTrips(row *sql.Rows) []Trip {\n trips := []Trip{}\n for row.Next() {\n var tripNumber int\n var startLocationName string\n var destinationName string\n row.Scan(&tripNumber, &startLocationName, &destinationName)\n trips = append(trips, Trip{\n TripNumber: tripNumber,\n StartLocationName: startLocationName,\n DestinationName: destinationName,\n })\n }\n return trips\n}", "func NewRows(rs *sql.Rows) (*Rows, error) {\n\tif nil == rs {\n\t\trs = new(sql.Rows)\n\t}\n\tdefer rs.Close()\n\n\tvar err error\n\tvar tmp map[string]string\n\n\tret := &Rows{}\n\tret.currentData = make(map[string]string)\n\tret.data = make([]map[string]string, 0)\n\tret.colnames, err = rs.Columns()\n\tif nil != err {\n\t\treturn nil, err\n\t}\n\n\tfor rs.Next() {\n\t\ttmp, err = fetchMap(rs)\n\t\tif nil != err {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tret.data = append(ret.data, tmp)\n\t\tret.dataLen++\n\t}\n\treturn ret, nil\n}", "func convertFromTsRows(tsRows [][]TsCell) []*riak_ts.TsRow {\n\tvar rows []*riak_ts.TsRow\n\tvar cells 
[]*riak_ts.TsCell\n\tfor _, tsRow := range tsRows {\n\t\tcells = make([]*riak_ts.TsCell, 0)\n\n\t\tfor _, tsCell := range tsRow {\n\t\t\tcells = append(cells, tsCell.cell)\n\t\t}\n\n\t\tif len(rows) < 1 {\n\t\t\trows = make([]*riak_ts.TsRow, 0)\n\t\t}\n\n\t\trows = append(rows, &riak_ts.TsRow{Cells: cells})\n\t}\n\n\treturn rows\n}", "func (r *Rows) row(a ...interface{}) error {\n\tdefer r.Close()\n\n\tfor _, dp := range a {\n\t\tif _, ok := dp.(*sql.RawBytes); ok {\n\t\t\treturn VarTypeError(\"RawBytes isn't allowed on Row()\")\n\t\t}\n\t}\n\n\tif !r.Next() {\n\t\tif err := r.Err(); err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn sql.ErrNoRows\n\t}\n\tif err := r.Scan(a...); err != nil {\n\t\treturn err\n\t}\n\n\treturn r.Close()\n}", "func (res *Result) Rows() [][]interface{} {\n\tifacesSlice := make([][]interface{}, len(res.rows))\n\tfor i := range res.rows {\n\t\tifaces := make([]interface{}, len(res.rows[i]))\n\t\tfor j := range res.rows[i] {\n\t\t\tifaces[j] = res.rows[i][j]\n\t\t}\n\t\tifacesSlice[i] = ifaces\n\t}\n\treturn ifacesSlice\n}", "func (r *Reader) Row() []interface{} {\n\treturn r.row\n}", "func RowsToJSONArray(rows *sql.Rows) (string, error) {\n\tvar ret string\n\tvar err error\n\tret = \"[]\"\n\n\tcolumns, err := rows.Columns()\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\t//Scan requires pointers and []*interface does not work for rows.Scan so this is a workaround\n\t//Since interface can be anything, we create a pointer to another interface in another slice to pass type-check\n\t//https://stackoverflow.com/questions/29102725/go-sql-driver-get-interface-column-values\n\tcolPointers := make([]interface{}, len(columns))\n\tcols := make([]interface{}, len(columns))\n\tfor i := range colPointers {\n\t\tcolPointers[i] = &cols[i]\n\t}\n\n\tcounter := 0\n\tfor rows.Next() {\n\t\terr := rows.Scan(colPointers...)\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t\tfor i, v := range cols {\n\t\t\tpath := fmt.Sprintf(\"%d.%s\", counter, 
columns[i])\n\t\t\tret, err = sjson.Set(ret, path, v)\n\t\t\tif err != nil {\n\t\t\t\treturn \"\", err\n\t\t\t}\n\t\t}\n\t\tcounter++\n\t}\n\treturn ret, nil\n}", "func RowsToStrings(qr *sqltypes.Result) [][]string {\n\tvar result [][]string\n\tfor _, row := range qr.Rows {\n\t\tvar srow []string\n\t\tfor _, cell := range row {\n\t\t\tsrow = append(srow, cell.ToString())\n\t\t}\n\t\tresult = append(result, srow)\n\t}\n\treturn result\n}", "func RowToStops(row *sql.Rows) []Stop {\n result := []Stop{}\n for row.Next() {\n var stopNumber int\n var stopAddress string\n result = append(result, Stop{\n StopNumber: stopNumber,\n StopAddress: stopAddress,\n })\n }\n return result\n}", "func recordToRecord(\n\ttopic string,\n\tpartition int32,\n\tbatch *kmsg.RecordBatch,\n\trecord *kmsg.Record,\n) *Record {\n\th := make([]RecordHeader, 0, len(record.Headers))\n\tfor _, kv := range record.Headers {\n\t\th = append(h, RecordHeader{\n\t\t\tKey: kv.Key,\n\t\t\tValue: kv.Value,\n\t\t})\n\t}\n\n\treturn &Record{\n\t\tKey: record.Key,\n\t\tValue: record.Value,\n\t\tHeaders: h,\n\t\tTimestamp: timeFromMillis(batch.FirstTimestamp + int64(record.TimestampDelta)),\n\t\tTopic: topic,\n\t\tPartition: partition,\n\t\tAttrs: RecordAttrs{uint8(batch.Attributes)},\n\t\tProducerID: batch.ProducerID,\n\t\tProducerEpoch: batch.ProducerEpoch,\n\t\tLeaderEpoch: batch.PartitionLeaderEpoch,\n\t\tOffset: batch.FirstOffset + int64(record.OffsetDelta),\n\t}\n}", "func RowsToQueryResults(rows *sql.Rows, coldefs []database.Column) (QueryResults, error) {\n\tcols := database.Columns(coldefs).Names()\n\tres := []RowData{}\n\tfor rows.Next() {\n\t\tcolumns := make([]interface{}, len(cols))\n\t\tcolumnPointers := make([]interface{}, len(cols))\n\t\tfor i := range columns {\n\t\t\tcolumnPointers[i] = &columns[i]\n\t\t}\n\t\t// Scan the result into the column pointers...\n\t\tif err := rows.Scan(columnPointers...); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\trowData := makeRowDataSet(coldefs)\n\t\tfor 
i, colName := range cols {\n\t\t\tval := columnPointers[i].(*interface{})\n\t\t\trowData[colName] = ColData{Data: val, DataType: rowData[colName].DataType}\n\t\t}\n\n\t\tres = append(res, rowData)\n\t}\n\n\treturn res, nil\n}", "func RowTo[T any](row CollectableRow) (T, error) {\n\tvar value T\n\terr := row.Scan(&value)\n\treturn value, err\n}", "func convertRow(\n\trow *Row,\n\twantsNode bool,\n\twantsTimestamp bool,\n\tdesiredValues []string,\n) *stats.Row {\n\tvar (\n\t\tnode string\n\t\ttimestamp time.Time\n\t)\n\n\tvar resultValues map[string]interface{}\n\tif len(desiredValues) > 0 {\n\t\tresultValues = make(map[string]interface{})\n\t}\n\n\tfor _, v := range desiredValues {\n\t\tresultValues[v] = row.value(v)\n\t}\n\n\tif wantsNode {\n\t\tnode = row.Node\n\t}\n\tif wantsTimestamp {\n\t\ttimestamp = row.Timestamp.UTC()\n\t}\n\n\treturn &stats.Row{\n\t\tNode: node,\n\t\tTimestamp: timestamp,\n\t\tValues: resultValues,\n\t}\n}", "func sqlReceiveRows(rows *sql.Rows,\n\tcolumnTypes []query.GoColumnType,\n\tcolumnNames []string,\n\tbuilder *sqlBuilder,\n\t) []map[string]interface{} {\n\n\tvar values []map[string]interface{}\n\n\tcursor := NewSqlCursor(rows, columnTypes, columnNames, nil)\n\tdefer cursor.Close()\n\tfor v := cursor.Next();v != nil;v = cursor.Next() {\n\t\tvalues = append(values, v)\n\t}\n\tif builder != nil {\n\t\tvalues = builder.unpackResult(values)\n\t}\n\n\treturn values\n}", "func (s Series) Records(force bool) ([]string, error) {\n\tret := make([]string, s.Len())\n\tfor i := 0; i < s.Len(); i++ {\n\t\te := s.elements.Elem(i)\n\t\tval, err := e.String()\n\t\tif err != nil && !force {\n\t\t\treturn nil, err\n\t\t}\n\t\tif err != nil {\n\t\t\tret[i] = \"\"\n\t\t} else {\n\t\t\tret[i] = val\n\t\t}\n\t}\n\treturn ret, nil\n}", "func RowsScan(rows *sql.Rows) (result []map[string]string, err error) {\n\tcolumns, err := rows.Columns()\n\tif err != nil {\n\t\treturn\n\t}\n\n\tvalues := make([]sql.RawBytes, len(columns))\n\tscanArgs := 
make([]interface{}, len(values))\n\t// ret := make(map[string]string, len(scanArgs))\n\n\tfor i := range values {\n\t\tscanArgs[i] = &values[i]\n\t}\n\n\tfor rows.Next() {\n\t\terr = rows.Scan(scanArgs...)\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\n\t\tvar value string\n\t\tret := make(map[string]string, len(scanArgs))\n\n\t\tfor i, col := range values {\n\t\t\tif col == nil {\n\t\t\t\tvalue = \"NULL\"\n\t\t\t} else {\n\t\t\t\tvalue = string(col)\n\t\t\t}\n\t\t\tret[columns[i]] = value\n\t\t}\n\n\t\tresult = append(result, ret)\n\n\t\t// break //get the first row only\n\t}\n\n\treturn\n}", "func (handler *SQLLiteTableHandler) ParseRows(rows *sql.Rows) per.IQueryResult {\n\thandler.Parent.LogDebug(\"ParseRows\", \"Returing empty results - was this function replaced\")\n\treturn NewDataQueryResult(false, []per.IDataItem{})\n}", "func convertFromPbTsRows(tsRows []*riak_ts.TsRow, tsCols []*riak_ts.TsColumnDescription) [][]TsCell {\n\tvar rows [][]TsCell\n\tvar row []TsCell\n\tvar cell TsCell\n\n\tfor _, tsRow := range tsRows {\n\t\trow = make([]TsCell, 0)\n\n\t\tfor i, tsCell := range tsRow.Cells {\n\t\t\ttsColumnType := riak_ts.TsColumnType_VARCHAR\n\t\t\tif tsCols != nil {\n\t\t\t\ttsColumnType = tsCols[i].GetType()\n\t\t\t}\n\t\t\tcell.setCell(tsCell, tsColumnType)\n\t\t\trow = append(row, cell)\n\t\t}\n\n\t\tif len(rows) < 1 {\n\t\t\trows = make([][]TsCell, 0)\n\t\t}\n\n\t\trows = append(rows, row)\n\t}\n\n\treturn rows\n}", "func (rows *Rows) ToMap() ([]map[string]interface{}, error) {\n\n\tcolumns, err := rows.Rows.Columns()\n\tif err != nil {\n\t\treturn nil,err\n\t}\n\n\tvalues := make([]interface{}, len(columns))\n\tfor i := range values {\n\t\tvalues[i] = new(interface{})\n\t}\n\n\trowMaps := make([]map[string]interface{}, 0)\n\n\tfor rows.Rows.Next() {\n\t\terr = rows.Rows.Scan(values...)\n\t\tif err != nil {\n\t\t\treturn nil,err\n\t\t}\n\n\t\tcurrRow := make(map[string]interface{})\n\t\tfor i, name := range columns {\n\t\t\tcurrRow[name] = 
*(values[i].(*interface{}))\n\t\t}\n\t\t// accumulating rowMaps is the easy way out\n\t\trowMaps = append(rowMaps, currRow)\n\t}\n\n\treturn rowMaps,nil\n}", "func getMapFromRows(rows *sql.Rows) (map[string]interface{}, error) {\n\tcols, _ := rows.Columns()\n\tm := make(map[string]interface{})\n\tfor rows.Next() {\n\t\t// Create a slice of interface{}'s to represent each column,\n\t\t// and a second slice to contain pointers to each item in the columns slice.\n\t\tcolumns := make([]interface{}, len(cols))\n\t\tcolumnPointers := make([]interface{}, len(cols))\n\t\tfor i, _ := range columns {\n\t\t\tcolumnPointers[i] = &columns[i]\n\t\t}\n\n\t\t// Scan the result into the column pointers...\n\t\tif err := rows.Scan(columnPointers...); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\t// Create our map, and retrieve the value for each column from the pointers slice,\n\t\t// storing it in the map with the name of the column as the key.\n\t\tfor i, colName := range cols {\n\t\t\tval := columnPointers[i].(*interface{})\n\t\t\tm[colName] = *val\n\t\t}\n\t}\n\treturn m, nil\n}", "func (g *GroupByAggregator) recordsForTable(table map[string]*GroupByRow) []*zng.Record {\n\tvar keys []string\n\tfor k := range table {\n\t\tkeys = append(keys, k)\n\t}\n\tsort.Strings(keys)\n\n\tvar recs []*zng.Record\n\tfor _, k := range keys {\n\t\trow := table[k]\n\t\tvar zv zcode.Bytes\n\t\tif g.TimeBinDuration > 0 {\n\t\t\tzv = zcode.AppendPrimitive(zv, zng.EncodeTime(row.ts))\n\t\t}\n\t\tzv = append(zv, row.keyvals...)\n\t\tfor _, red := range row.reducers.Reducers {\n\t\t\t// a reducer value is never a container\n\t\t\tv := reducer.Result(red)\n\t\t\tif v.IsContainer() {\n\t\t\t\tpanic(\"internal bug: reducer result cannot be a container!\")\n\t\t\t}\n\t\t\tzv = v.Encode(zv)\n\t\t}\n\t\ttyp := g.lookupRowType(row)\n\t\tr := zng.NewRecordTs(typ, row.ts, zv)\n\t\trecs = append(recs, r)\n\t}\n\treturn recs\n}", "func RowToTripOfferings(row *sql.Rows) []TripOffering {\n tripOffering := 
[]TripOffering{}\n for row.Next() {\n var tripNumber int\n var date string\n var scheduledStartTime string\n var scheduledArrivalTime string\n var driverName string\n var busID int\n row.Scan(&tripNumber, &date, &scheduledStartTime, &scheduledArrivalTime, &driverName, &busID)\n tripOffering = append(tripOffering, TripOffering{\n TripNumber: tripNumber,\n Date: date,\n ScheduledStartTime: scheduledStartTime,\n ScheduledArrivalTime: scheduledArrivalTime,\n DriverName: driverName,\n BusID: busID,\n })\n }\n return tripOffering\n}", "func GetAllRecords(client *mongo.Collection) *[]Record {\n\tcursor, err := client.Find(context.TODO(), bson.D{{}})\n\tif err != nil {\n\t\tlog.Fatalln(\"Unable to find any records: %s\", err)\n\t}\n\n\tvar result []Record\n\tfor cursor.Next(context.TODO()) {\n\t\tvar elem Record\n\t\terr := cursor.Decode(&elem)\n\t\tif err != nil {\n\t\t\tlog.Fatalln(err)\n\t\t}\n\t\tresult = append(result, elem)\n\t}\n\tcursor.Close(context.TODO())\n\treturn &result\n}", "func RowToActualStopInfos(row *sql.Rows) []ActualTripStopInfo {\n result := []ActualTripStopInfo{}\n for row.Next() {\n var tripNumber int\n var date string\n var scheduledStartTime string\n var stopNumber int\n var scheduledArrivalTime string\n var actualStartTime string\n var actualArrivalTime string\n var numberOfPassengerIn int\n var numberOfPassengerOut int\n result = append(result, ActualTripStopInfo{\n TripNumber: tripNumber,\n Date: date,\n ScheduledStartTime: scheduledStartTime,\n StopNumber: stopNumber,\n ScheduledArrivalTime: scheduledArrivalTime,\n ActualStartTime: actualStartTime,\n ActualArrivalTime: actualArrivalTime,\n NumberOfPassengerIn: numberOfPassengerIn,\n NumberOfPassengerOut: numberOfPassengerOut,\n })\n }\n return result\n}", "func (m *sparse) Rows() func() *sparseRow {\n\ti := 0\n\tr := &sparseRow{}\n\n\treturn func() *sparseRow {\n\t\tif i == (len(m.ptr) - 1) {\n\t\t\treturn nil\n\t\t}\n\n\t\tstart := m.ptr[i]\n\t\tend := m.ptr[i+1]\n\n\t\tr.index = 
i\n\t\tr.ind = m.ind[start:end]\n\t\tr.val = m.val[start:end]\n\t\ti++\n\n\t\treturn r\n\t}\n}", "func (r *Runner) Rows() (*sql.Rows, error) {\n\tq, err := r.query.Construct()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn r.db.QueryContext(r.ctx, q.Query(), q.Args()...)\n}", "func rowsToResultStrings(ctx *sql.Context, iter sql.RowIter) ([]string, error) {\n\tvar results []string\n\tif iter == nil {\n\t\treturn results, nil\n\t}\n\n\tfor {\n\t\trow, err := iter.Next(ctx)\n\t\tif err == io.EOF {\n\t\t\treturn results, nil\n\t\t} else if err != nil {\n\t\t\tdrainIteratorIgnoreErrors(ctx, iter)\n\t\t\treturn nil, err\n\t\t} else {\n\t\t\tfor _, col := range row {\n\t\t\t\tresults = append(results, toSqlString(col))\n\t\t\t}\n\t\t}\n\t}\n}", "func (s *Statement) Row() (values []interface{}) {\n\tfor i := 0; i < s.Columns(); i++ {\n\t\tvalues = append(values, s.Column(i))\n\t}\n\treturn\n}", "func (m *Message) getRows() Rows {\n\t// Read the column count and column names.\n\tcolumns := make([]string, m.getUint64())\n\n\tfor i := range columns {\n\t\tcolumns[i] = m.getString()\n\t}\n\n\trows := Rows{\n\t\tColumns: columns,\n\t\tmessage: m,\n\t}\n\treturn rows\n}", "func partitionRecords(size int, records []types.Record) [][]types.Record {\n\tnumberOfPartitions := len(records) / size\n\tif len(records)%size != 0 {\n\t\tnumberOfPartitions++\n\t}\n\n\tpartitions := make([][]types.Record, 0, numberOfPartitions)\n\tfor i := 0; i < numberOfPartitions; i++ {\n\t\tstart := size * i\n\t\tend := size * (i + 1)\n\t\tif end > len(records) {\n\t\t\tend = len(records)\n\t\t}\n\n\t\tpartitions = append(partitions, records[start:end])\n\t}\n\n\treturn partitions\n}", "func getRecordWrapper(numFound int, keys [][]byte, pointers []interface{}) (records Records, err error) {\n\tif numFound == 0 {\n\t\treturn nil, ErrScansNoResult\n\t}\n\n\trecords = Records{}\n\tfor i := 0; i < numFound; i++ {\n\t\trecords = append(records, pointers[i].(*Record))\n\t}\n\n\treturn records, nil\n}", 
"func (r *Representer) RepresentationFromRows(rows *sql.Rows) *Table {\n\treturn nil\n}", "func RowToTripStopInfos(row *sql.Rows) []TripStopInfo {\n result := []TripStopInfo{}\n for row.Next() {\n var tripNumber int\n var stopNumber int\n var sequenceNumber int\n var drivingTime float32\n result = append(result, TripStopInfo{\n TripNumber: tripNumber,\n StopNumber: stopNumber,\n SequenceNumber: sequenceNumber,\n DrivingTime: drivingTime,\n })\n }\n return result\n}", "func (c *ConnCtx) InsertRecordsRowByRow(records []*SongRecord) error {\n\ttemplate := c.getInsertQueryTempalte()\n\tstmt, err := c.Conn.PrepareNamed(template)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer stmt.Close()\n\n\tfor _, item := range records {\n\t\tresult, err := stmt.Exec(&item)\n\t\tif err != nil {\n\t\t\tErrorF(fmt.Sprintf(\"execute sql error: %s\", err.Error()))\n\t\t\tcontinue\n\t\t}\n\t\trowsAffected, err := result.RowsAffected()\n\t\tif err != nil {\n\t\t\tErrorF(fmt.Sprintf(\"pg server executes error: %s\", err.Error()))\n\t\t\tcontinue\n\t\t}\n\t\tif rowsAffected != 1 {\n\t\t\tWarningF(fmt.Sprintf(\"insert record affected row error: %d\", rowsAffected))\n\t\t}\n\t\tDebugF(\"insert done: %#v\", item)\n\t}\n\n\treturn nil\n}", "func (f *FakeTable) ReadRows(ovs *libovsdb.OvsdbClient, readRowArgs ovsdb.ReadRowArgs) ([]map[string]interface{}, error) {\n\tif f.ReadRowsFunc != nil {\n\t\treturn f.ReadRowsFunc(ovs, readRowArgs)\n\t}\n\tm := make([]map[string]interface{}, 10)\n\treturn m, nil\n}", "func readUsersFromRows(rows *sql.Rows) ([]*User, error) {\n\tvar users []*User\n\n\tfor rows.Next() {\n\t\tu := User{}\n\t\terr := rows.Scan(\n\t\t\t&u.ID,\n\t\t\t&u.Username,\n\t\t\t&u.Email,\n\t\t\t&u.Bio,\n\t\t\t&u.Password,\n\t\t\t&u.Clicks,\n\t\t\t&u.LastClick,\n\t\t\t&u.IsAdmin,\n\t\t)\n\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tusers = append(users, &u)\n\t}\n\n\treturn users, nil\n}", "func (e *commonFormatEncoder) Row(tp int, row *[]interface{}, seqno uint64) ([]byte, 
error) {\n\tcf := convertRowToCommonFormat(tp, row, e.inSchema, seqno, e.filter)\n\treturn CommonFormatEncode(cf)\n}", "func (t *Table) Rows(fi, li int) []map[string]interface{} {\r\n\r\n\tif fi < 0 || fi >= len(t.header.cols) {\r\n\t\tpanic(tableErrInvRow)\r\n\t}\r\n\tif li < 0 {\r\n\t\tli = len(t.rows) - 1\r\n\t} else if li < 0 || li >= len(t.rows) {\r\n\t\tpanic(tableErrInvRow)\r\n\t}\r\n\tif li < fi {\r\n\t\tpanic(\"Last index less than first index\")\r\n\t}\r\n\tres := make([]map[string]interface{}, li-li+1)\r\n\tfor ri := fi; ri <= li; ri++ {\r\n\t\ttrow := t.rows[ri]\r\n\t\trmap := make(map[string]interface{})\r\n\t\tfor ci := 0; ci < len(t.header.cols); ci++ {\r\n\t\t\tc := t.header.cols[ci]\r\n\t\t\trmap[c.id] = trow.cells[c.order].value\r\n\t\t}\r\n\t\tres = append(res, rmap)\r\n\t}\r\n\treturn res\r\n}", "func RowToBuses(row *sql.Rows) []Bus {\n result := []Bus{}\n for row.Next() {\n var busID int\n var model string\n var year int\n row.Scan(&busID, &model, &year)\n result = append(result, Bus{\n BusID: busID,\n Model: model,\n Year: year,\n })\n }\n return result\n}", "func (serializer *batchSerializer) parseBatchRecord(records []IRecord) (*batchRecord, error) {\n batch := &batchRecord{\n records: make([]*binaryRecord, 0, len(records)),\n }\n\n for _, record := range records {\n bRecord, err := serializer.bSerializer.dhRecord2BinaryRecord(record)\n if err != nil {\n return nil, err\n }\n batch.records = append(batch.records, bRecord)\n }\n return batch, nil\n}", "func MakeRowTrusted(fields []*querypb.Field, row *querypb.Row) []Value {\n\tsqlRow := make([]Value, len(row.Lengths))\n\tvar offset int64\n\tfor i, length := range row.Lengths {\n\t\tif length < 0 {\n\t\t\tcontinue\n\t\t}\n\t\tsqlRow[i] = MakeTrusted(fields[i].Type, row.Values[offset:offset+length])\n\t\toffset += length\n\t}\n\treturn sqlRow\n}", "func (empHandler *EmployeeHandler) storeRecord() []Employee {\n\tvar emp []Employee\n\tdis, err := empHandler.DB.Query(\"select id, name, age, 
gender, role from employee\")\n\tif err != nil {\n\t\tpanic(err.Error())\n\t}\n\tfor dis.Next() {\n\t\tvar row Employee\n\t\terr = dis.Scan(&row.Id, &row.Name, &row.Age, &row.Gender, &row.Role)\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t\temp = append(emp, row)\n\t}\n\treturn emp\n}", "func insertRows(rows []vmparser.Row) error {\n\t// ctx := GetInsertCtx()\n\t// defer PutInsertCtx(ctx)\n\n\tctx := &InsertCtx{mrs: model.MetricRows{}}\n\t// ctx.Reset(len(rows))\n\tfor i := range rows {\n\t\tr := &rows[i]\n\t\tctx.Labels = ctx.Labels[:0]\n\t\tctx.AddLabel(\"\", r.Metric)\n\t\tfor j := range r.Tags {\n\t\t\ttag := &r.Tags[j]\n\t\t\tctx.AddLabel(tag.Key, tag.Value)\n\t\t}\n\t\tctx.WriteDataPoint(nil, ctx.Labels, r.Timestamp, r.Value)\n\t}\n\trowsInserted.Add(len(rows))\n\trowsPerInsert.Update(float64(len(rows)))\n\treturn ctx.FlushBufs()\n}", "func SaveReturningPackageRows(ctx context.Context, db SQLHandle, inputs ...*PackageRow) (err error) {\n\trows := PackageRows(inputs)\n\t_, err = queryWithJSONArgs(ctx, db, rows.ReceiveRows, SQLSaveReturningPackageRows, rows)\n\tif err != nil {\n\t\treturn formatError(\"SaveReturningPackageRows\", err)\n\t}\n\treturn nil\n}", "func FetchRows(rows *sql.Rows, dst interface{}) error {\n\tvar columns []string\n\tvar err error\n\n\t// Destination.\n\tdstv := reflect.ValueOf(dst)\n\n\tif dstv.IsNil() || dstv.Kind() != reflect.Ptr {\n\t\treturn db.ErrExpectingPointer\n\t}\n\n\tif dstv.Elem().Kind() != reflect.Slice {\n\t\treturn db.ErrExpectingSlicePointer\n\t}\n\n\tif dstv.Kind() != reflect.Ptr || dstv.Elem().Kind() != reflect.Slice || dstv.IsNil() {\n\t\treturn db.ErrExpectingSliceMapStruct\n\t}\n\n\tif columns, err = rows.Columns(); err != nil {\n\t\treturn err\n\t}\n\n\tslicev := dstv.Elem()\n\titem_t := slicev.Type().Elem()\n\n\treset(dst)\n\n\tfor rows.Next() {\n\n\t\titem, err := fetchResult(item_t, rows, columns)\n\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tslicev = reflect.Append(slicev, 
reflect.Indirect(item))\n\t}\n\n\trows.Close()\n\n\tdstv.Elem().Set(slicev)\n\n\treturn nil\n}", "func ToTransmissionRecordModels(trs []TransmissionRecord) []models.TransmissionRecord {\n\tmodels := make([]models.TransmissionRecord, len(trs))\n\tfor i, tr := range trs {\n\t\tmodels[i] = ToTransmissionRecordModel(tr)\n\t}\n\treturn models\n}", "func (s *Service) Records(c context.Context, types []int64, mid, stime, etime int64, order, sort string, pn, ps int32) (res []*model.Record, total int32, err error) {\n\tvar midAts []int64\n\tif res, total, err = s.search.RecordPaginate(c, types, mid, stime, etime, order, sort, pn, ps); err != nil {\n\t\tlog.Error(\"s.search.RecordPaginate(%d,%d,%d,%d,%s,%s) error(%v)\", mid, sort, pn, ps, stime, etime, err)\n\t\treturn\n\t}\n\tif res == nil {\n\t\tres = _emptyRecords\n\t\treturn\n\t}\n\tfor _, r := range res {\n\t\tr.Message = template.HTMLEscapeString(r.Message)\n\t\tif len(r.Ats) == 0 {\n\t\t\tcontinue\n\t\t}\n\t\tvar ats []int64\n\t\tif ats, err = xstr.SplitInts(r.Ats); err != nil {\n\t\t\tlog.Error(\"xstr.SplitInts(%s) error(%v)\", r.Ats, err)\n\t\t\terr = nil\n\t\t}\n\t\tmidAts = append(midAts, ats...)\n\t}\n\tif len(midAts) == 0 {\n\t\treturn\n\t}\n\taccMap, _ := s.getAccInfo(c, midAts)\n\tfor _, r := range res {\n\t\tr.FillAts(accMap)\n\t}\n\treturn\n}", "func (sq *testQueryService) addGeneratedRows(from, to int) {\n\tvar rows [][]sqltypes.Value\n\t// ksids has keyspace ids which are covered by the shard key ranges -40 and 40-80.\n\tksids := []uint64{0x2000000000000000, 0x6000000000000000}\n\n\tfor id := from; id < to; id++ {\n\t\t// Only return the rows which are covered by this shard.\n\t\tshardIndex := id % 2\n\t\tif sq.shardCount == 1 || shardIndex == sq.shardIndex {\n\t\t\tidValue := sqltypes.NewInt64(int64(id))\n\n\t\t\trow := []sqltypes.Value{\n\t\t\t\tidValue,\n\t\t\t\tsqltypes.NewVarBinary(fmt.Sprintf(\"Text for %v\", id)),\n\t\t\t}\n\t\t\tif !sq.omitKeyspaceID {\n\t\t\t\trow = append(row, 
sqltypes.NewVarBinary(fmt.Sprintf(\"%v\", ksids[shardIndex])))\n\t\t\t}\n\t\t\trows = append(rows, row)\n\t\t}\n\t}\n\n\tif sq.rows == nil {\n\t\tsq.rows = rows\n\t} else {\n\t\tsq.rows = append(sq.rows, rows...)\n\t}\n}", "func (board Board)Records()(interface{}) {\n if board.Id < 1 {\n return []bool{}\n }\n board_head := BoardHead{}\n Db.Where(\"id = ?\", board.BoardHeadId).First(&board_head)\n if board_head.BoardType == \"sample\" {\n records := []Sample{}\n Db.Where(\"board_id = ?\", board.Id).Find(&records)\n return records\n }\n if board_head.BoardType == \"primer\" {\n records := []Primer{}\n Db.Where(\"board_id = ?\", board.Id).Find(&records)\n return records\n } else {\n rows, _ := Db.Table(\"reactions\").Select(\"reactions.id, reactions.hole, samples.name, primers.name\").Joins(\"INNER JOIN samples ON samples.id = reactions.sample_id INNER JOIN primers ON primers.id = reactions.primer_id\").Where(\"reactions.board_id = ?\", board.Id).Rows()\n result := []map[string]interface{}{}\n for rows.Next() {\n var id int\n var hole, sample, primer string\n rows.Scan(&id, &hole, &sample, &primer)\n d := map[string]interface{}{\n \"id\": id,\n \"sample\": sample,\n \"primer\": primer,\n \"hole\": hole,\n }\n result = append(result, d)\n }\n return result\n }\n}", "func (s *SqliteServer) ReadRecords(hashes []gdp.Hash) ([]gdp.Record, error) {\n\tif len(hashes) == 0 {\n\t\treturn nil, nil\n\t}\n\n\thexHashes := make([]string, 0, len(hashes))\n\tfor _, hash := range hashes {\n\t\thexHashes = append(hexHashes, fmt.Sprintf(\"\\\"%X\\\"\", hash))\n\t}\n\n\tqueryString := fmt.Sprintf(\n\t\t\"SELECT hash, recno, timestamp, accuracy, prevhash, value, sig FROM log_entry WHERE hex(hash) IN (%s)\",\n\t\tstrings.Join(hexHashes, \",\"),\n\t)\n\trows, err := s.db.Query(queryString)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\trecords, err := parseRecordRows(rows)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn records, nil\n}", "func (b *RecordBuffer) Records() 
[]interface{} {\n\treturn b.recordsInBuffer\n}", "func GetRows(currency string) (*sql.Rows, error) {\n\tif !common.ValidateCurrency(currency) {\n\t\treturn nil, errors.New(\"invalid currency\")\n\t}\n\t// TODO: implement date range windowing\n\treturn db.Queryx(fmt.Sprintf(\"SELECT * FROM %s\", currency))\n}", "func (f *fragment) rows(start uint64, filters ...rowFilter) []uint64 {\n\tf.mu.RLock()\n\tdefer f.mu.RUnlock()\n\treturn f.unprotectedRows(start, filters...)\n}", "func RowBatchToVizierRowBatch(rb *schemapb.RowBatchData, tableID string) (*vizierpb.RowBatchData, error) {\n\tcols := make([]*vizierpb.Column, len(rb.Cols))\n\tfor i, col := range rb.Cols {\n\t\tc, err := colToVizierCol(col)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tcols[i] = c\n\t}\n\n\treturn &vizierpb.RowBatchData{\n\t\tTableID: tableID,\n\t\tNumRows: rb.NumRows,\n\t\tEow: rb.Eow,\n\t\tEos: rb.Eos,\n\t\tCols: cols,\n\t}, nil\n}", "func selectRows(db *gorm.DB) ([]uint, error) {\n\n\tids := []uint{}\n\n\trows, err := db.Select(\"DISTINCT products_product.id,products_product.created_at\").Rows()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar (\n\t\tid uint\n\t\tcreatedAt time.Time\n\t)\n\n\tfor rows.Next() {\n\t\terr = rows.Scan(&id, &createdAt)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tids = append(ids, id)\n\t}\n\n\treturn ids, nil\n\n}", "func readRows(db *sql.DB, query string, dataChan chan []sql.RawBytes, quitChan chan bool, goChan chan bool, csvHeader bool) {\n\trows, err := db.Query(query)\n\tdefer rows.Close()\n\tif err != nil {\n\t\tlog.Print(err)\n\t\tos.Exit(1)\n\t}\n\n\tcols, err := rows.Columns()\n\tcheckErr(err)\n\n\t// Write columns as a header line\n\tif csvHeader {\n\t\theaders := make([]sql.RawBytes, len(cols))\n\t\tfor i, col := range cols {\n\t\t\theaders[i] = []byte(col)\n\t\t}\n\t\tdataChan <- headers\n\t\t<-goChan\n\t}\n\n\t// Need to scan into empty interface since we don't know how many columns a query might return\n\tscanVals := 
make([]interface{}, len(cols))\n\tvals := make([]sql.RawBytes, len(cols))\n\tfor i := range vals {\n\t\tscanVals[i] = &vals[i]\n\t}\n\n\tfor rows.Next() {\n\t\terr := rows.Scan(scanVals...)\n\t\tcheckErr(err)\n\n\t\tdataChan <- vals\n\n\t\t// Block and wait for writeRows() to signal back it has consumed the data\n\t\t// This is necessary because sql.RawBytes is a memory pointer and when rows.Next()\n\t\t// loops and change the memory address before writeRows can properly process the values\n\t\t<-goChan\n\t}\n\n\terr = rows.Err()\n\tcheckErr(err)\n\n\tclose(dataChan)\n\tquitChan <- true\n}", "func toRecord(cache airtabledb.DB, src Feature, dst interface{}) {\n\tdV := reflect.ValueOf(dst).Elem().FieldByName(\"Fields\")\n\tsV := reflect.ValueOf(src)\n\tcopyFields(cache, sV, dV)\n}", "func ExtractRecords() (records []record) {\n // Open the input file and create a scanner to parse it\n file, _ := os.Open(\"Input.txt\")\n\tscanner := bufio.NewScanner(file)\n\n // Var to store the various fields which represents a record\n var fields string\n\t\n\tfor scanner.Scan() {\n // If a blank row isn't scan, add the row to the record representation\n\t\tif scanner.Text() != \"\" {\n\t\t\tfields += scanner.Text() + \" \"\n\t\t} else {\n // If a blank row is scan, convert the record representation to an actual record and add it to the records slice\n\t\t\trecords = append(records, ConvertRecord(fields))\n\n // Then reset the representation\n\t\t\tfields = \"\"\n\t\t}\n\t}\n // Add the last record, which is lost due to the end of file\n\trecords = append(records, ConvertRecord(fields))\n\tfile.Close()\n\n return\n}", "func (record) MarshalRecordsToBuffer(records []common.Record, buffer []byte) error {\n\tif len(records)*recordLength > len(buffer) {\n\t\treturn fmt.Errorf(\"buffer %d is not big enough for records %d\", len(buffer), len(records)*recordLength)\n\t}\n\n\tfor i, r := range records {\n\t\tbuff := buffer[i*recordLength : (i+1)*recordLength]\n\n\t\tif 
!validation.ValidTraceID(r.ID) { // todo: remove this check. maybe have a max id size of 128 bits?\n\t\t\treturn errors.New(\"ids must be 128 bit\")\n\t\t}\n\n\t\tmarshalRecord(r, buff)\n\t}\n\n\treturn nil\n}", "func NewRecordset(rows [][]interface{}, fields []string, offset int) *Recordset {\n\treturn &Recordset{\n\t\trows: rows,\n\t\tfields: fields,\n\t\toffset: offset,\n\t}\n}", "func (t *JSONTable) PartitionRows(ctx *sql.Context, partition sql.Partition) (sql.RowIter, error) {\n\treturn t.b.Build(ctx, t, nil)\n}", "func (f *File) FromRows(rows *sql.Rows) error {\n\tvar scanf struct {\n\t\tID int\n\t\tCreateTime sql.NullTime\n\t\tUpdateTime sql.NullTime\n\t\tType sql.NullString\n\t\tName sql.NullString\n\t\tSize sql.NullInt64\n\t\tModifiedAt sql.NullTime\n\t\tUploadedAt sql.NullTime\n\t\tContentType sql.NullString\n\t\tStoreKey sql.NullString\n\t\tCategory sql.NullString\n\t}\n\t// the order here should be the same as in the `file.Columns`.\n\tif err := rows.Scan(\n\t\t&scanf.ID,\n\t\t&scanf.CreateTime,\n\t\t&scanf.UpdateTime,\n\t\t&scanf.Type,\n\t\t&scanf.Name,\n\t\t&scanf.Size,\n\t\t&scanf.ModifiedAt,\n\t\t&scanf.UploadedAt,\n\t\t&scanf.ContentType,\n\t\t&scanf.StoreKey,\n\t\t&scanf.Category,\n\t); err != nil {\n\t\treturn err\n\t}\n\tf.ID = strconv.Itoa(scanf.ID)\n\tf.CreateTime = scanf.CreateTime.Time\n\tf.UpdateTime = scanf.UpdateTime.Time\n\tf.Type = scanf.Type.String\n\tf.Name = scanf.Name.String\n\tf.Size = int(scanf.Size.Int64)\n\tf.ModifiedAt = scanf.ModifiedAt.Time\n\tf.UploadedAt = scanf.UploadedAt.Time\n\tf.ContentType = scanf.ContentType.String\n\tf.StoreKey = scanf.StoreKey.String\n\tf.Category = scanf.Category.String\n\treturn nil\n}", "func ReadRows(r Rows) Stream {\n c, _ := r.(io.Closer)\n return &rowStream{rows: r, maybeCloser: maybeCloser{c: c}}\n}", "func RowsFormater(rows *sql.Rows) {\n\tcols, err := rows.Columns()\n\tif err != nil {\n\t\tglog.Errorln(err)\n\t}\n\ttable := tablewriter.NewWriter(os.Stdout)\n\ttable.SetHeader(cols)\n\tdata 
:= make([][]string, 1)\n\tcount := 0\n\tfor rows.Next() {\n\t\tcolumns := make([]interface{}, len(cols))\n\t\tcolumnPointers := make([]interface{}, len(cols))\n\t\tfor i, _ := range columns {\n\t\t\tcolumnPointers[i] = &columns[i]\n\t\t}\n\n\t\t// Scan the result into the column pointers...\n\t\tif err := rows.Scan(columnPointers...); err != nil {\n\t\t\tfmt.Println(err)\n\t\t}\n\n\t\t// Create our map, and retrieve the value for each column from the pointers slice,\n\t\t// storing it in the map with the name of the column as the key.\n\t\trow := make([]string, 0)\n\t\tfor i, _ := range cols {\n\t\t\tval := columnPointers[i].(*interface{})\n\t\t\trow = append(row, interface2String(*val))\n\t\t}\n\n\t\tdata = append(data, row)\n\t\tcount = count + 1\n\t}\n\tfor _, v := range data {\n\t\ttable.Append(v)\n\t}\n\ttable.Render()\n\tif count > 0 {\n\t\tfmt.Printf(\"(%d rows of records)\\n\", count)\n\t}\n}", "func (r record) MarshalRecords(records []common.Record) ([]byte, error) {\n\trecordBytes := make([]byte, len(records)*recordLength)\n\n\terr := r.MarshalRecordsToBuffer(records, recordBytes)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn recordBytes, nil\n}", "func (r RecordV1) toRecord() Record {\n\treturn Record{\n\t\tType: r.Type,\n\t\tName: r.Name,\n\t\tAppliedAt: r.AppliedAt,\n\t}\n}", "func GetAllRecords(db *sql.DB, id int) ([]Record, error) {\n\trows, err := db.Query(`SELECT * FROM user_records WHERE user_id = $1`, id)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar records []Record\n\tfor rows.Next() {\n\t\tvar record Record\n\t\terr := rows.Scan(\n\t\t\t&record.ID,\n\t\t\t&record.Weight,\n\t\t\t&record.Reps,\n\t\t\t&record.RPE,\n\t\t\t&record.DatePerformed,\n\t\t\t&record.ExerciseID,\n\t\t\t&record.UserID,\n\t\t)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\trecords = append(records, record)\n\t}\n\n\treturn records, nil\n}", "func (s *Store) SortedRecords() []Record {\n\trecords := make([]Record, 0, s.sorted.Len())\n\tfor e := 
s.sorted.Front(); e != nil; e = e.Next() {\n\t\trecord := e.Value.(txRecord)\n\t\trecords = append(records, record.record(s))\n\t}\n\treturn records\n}", "func (sink *influxdbSink) parseRawQueryRow(rawRow influx_models.Row) ([]core.TimestampedMetricValue, error) {\n\tvals := make([]core.TimestampedMetricValue, len(rawRow.Values))\n\twasInt := make(map[string]bool, 1)\n\tfor i, rawVal := range rawRow.Values {\n\t\tval := core.TimestampedMetricValue{}\n\n\t\tif ts, err := time.Parse(time.RFC3339, rawVal[0].(string)); err != nil {\n\t\t\treturn nil, fmt.Errorf(\"Unable to parse timestamp %q in series %q\", rawVal[0].(string), rawRow.Name)\n\t\t} else {\n\t\t\tval.Timestamp = ts\n\t\t}\n\n\t\tif err := tryParseMetricValue(\"value\", rawVal, &val.MetricValue, 1, wasInt); err != nil {\n\t\t\tglog.Errorf(\"Unable to parse field \\\"value\\\" in series %q: %v\", rawRow.Name, err)\n\t\t\treturn nil, fmt.Errorf(\"Unable to parse values in series %q\", rawRow.Name)\n\t\t}\n\n\t\tvals[i] = val\n\t}\n\n\tif wasInt[\"value\"] {\n\t\tfor i := range vals {\n\t\t\tvals[i].MetricValue.ValueType = core.ValueInt64\n\t\t}\n\t} else {\n\t\tfor i := range vals {\n\t\t\tvals[i].MetricValue.ValueType = core.ValueFloat\n\t\t}\n\t}\n\n\treturn vals, nil\n}", "func QueryReturnRows(query string, db *sql.DB, arg ...interface{}) (bool, []string) {\n\trows, err := db.Query(query, arg...)\n\tCheck(err)\n\tdefer rows.Close()\n\n\tvar items []string\n\tfor rows.Next() {\n\t\tvar currentItem string\n\t\terr := rows.Scan(&currentItem)\n\t\tCheck(err)\n\n\t\titems = append(items, currentItem)\n\t}\n\n\tif len(items) < 1 {\n\t\treturn false, []string{}\n\t}\n\n\treturn true, items\n}", "func ToTestRecords(searchResp *frontend.SearchResponse, imgBaseURL string) []*TestRecord {\n\t// Group the results by test.\n\tretMap := map[types.TestName]*TestRecord{}\n\tfor _, oneDigest := range searchResp.Digests {\n\t\ttestNameVal := oneDigest.ParamSet[types.PRIMARY_KEY_FIELD]\n\t\tif len(testNameVal) == 0 
{\n\t\t\tsklog.Errorf(\"Error: Digest '%s' has no primaryKey in paramset\", oneDigest.Digest)\n\t\t\tcontinue\n\t\t}\n\n\t\tdigestInfo := &DigestInfo{\n\t\t\tSRDigest: oneDigest,\n\t\t\tURL: DigestUrl(imgBaseURL, oneDigest.Digest),\n\t\t}\n\n\t\ttestName := types.TestName(oneDigest.ParamSet[types.PRIMARY_KEY_FIELD][0])\n\t\tif found, ok := retMap[testName]; ok {\n\t\t\tfound.Digests = append(found.Digests, digestInfo)\n\t\t} else {\n\t\t\tretMap[testName] = &TestRecord{\n\t\t\t\tTestName: testName,\n\t\t\t\tDigests: []*DigestInfo{digestInfo},\n\t\t\t}\n\t\t}\n\t}\n\n\t// Put the records into an array and return them.\n\tret := make([]*TestRecord, 0, len(retMap))\n\tfor _, oneTestRec := range retMap {\n\t\tret = append(ret, oneTestRec)\n\t}\n\n\treturn ret\n}", "func (f *fragment) rowFromStorage(rowID uint64) *Row {\n\t// Only use a subset of the containers.\n\t// NOTE: The start & end ranges must be divisible by container width.\n\t//\n\t// Note that OffsetRange now returns a new bitmap which uses frozen\n\t// containers which will use copy-on-write semantics. 
The actual bitmap\n\t// and Containers object are new and not shared, but the containers are\n\t// shared.\n\tdata := f.storage.OffsetRange(f.shard*ShardWidth, rowID*ShardWidth, (rowID+1)*ShardWidth)\n\n\trow := &Row{\n\t\tsegments: []rowSegment{{\n\t\t\tdata: data,\n\t\t\tshard: f.shard,\n\t\t\twritable: true,\n\t\t}},\n\t}\n\trow.invalidateCount()\n\n\treturn row\n}", "func (o GetSrvRecordResultOutput) Records() GetSrvRecordRecordArrayOutput {\n\treturn o.ApplyT(func(v GetSrvRecordResult) []GetSrvRecordRecord { return v.Records }).(GetSrvRecordRecordArrayOutput)\n}", "func NewIterFromRows(rows []KTV) Iter {\n\treturn &memIter{rows: rows, index: -1}\n}", "func (mp *inmemoryPart) InitFromRows(rows []rawRow) {\n\tif len(rows) == 0 {\n\t\tlogger.Panicf(\"BUG: Inmemory.InitFromRows must accept at least one row\")\n\t}\n\n\tmp.Reset()\n\trrm := getRawRowsMarshaler()\n\trrm.marshalToInmemoryPart(mp, rows)\n\tputRawRowsMarshaler(rrm)\n\tmp.creationTime = fasttime.UnixTimestamp()\n}", "func (swfs *SurveyWiFiScans) FromRows(rows *sql.Rows) error {\n\tfor rows.Next() {\n\t\tscanswfs := &SurveyWiFiScan{}\n\t\tif err := scanswfs.FromRows(rows); err != nil {\n\t\t\treturn err\n\t\t}\n\t\t*swfs = append(*swfs, scanswfs)\n\t}\n\treturn nil\n}", "func makeRecords(v interface{}, header Header) [][]string {\n\tval := reflect.ValueOf(v)\n\n\tsize := val.Len()\n\tout := make([][]string, size)\n\tfor i := 0; i < size; i++ {\n\t\trecord := makeRecord(val.Index(i).Interface(), header)\n\t\tout[i] = record\n\t}\n\treturn out\n}", "func (l *ImmutableTimestampedLog) LoadRecordsRaw(fromIdx, toIdx uint32, descending bool) ([][]byte, error) {\n\tif fromIdx > toIdx {\n\t\treturn nil, nil\n\t}\n\tret := make([][]byte, 0, toIdx-fromIdx+1)\n\tfromIdxInt := int(fromIdx)\n\ttoIdxInt := int(toIdx)\n\tif !descending {\n\t\tfor i := fromIdxInt; i <= toIdxInt; i++ {\n\t\t\tr, err := l.getRawRecordAtIndex(uint32(i))\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tret = append(ret, 
r)\n\t\t}\n\t} else {\n\t\tfor i := toIdxInt; i >= fromIdxInt; i-- {\n\t\t\tr, err := l.getRawRecordAtIndex(uint32(i))\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tret = append(ret, r)\n\t\t}\n\t}\n\treturn ret, nil\n}", "func RowToQueryResult(row *sql.Row, colDefines []database.Column) (QueryResult, error) {\n\tcols := database.Columns(colDefines).Names()\n\tcolumns := make([]interface{}, len(cols))\n\tcolumnPointers := make([]interface{}, len(cols))\n\tfor i := range columns {\n\t\tcolumnPointers[i] = &columns[i]\n\t}\n\t// Scan the result into the column pointers...\n\tif err := row.Scan(columnPointers...); err != nil {\n\t\treturn nil, err\n\t}\n\n\trowData := makeRowDataSet(colDefines)\n\tfor i, colName := range cols {\n\t\tval := columnPointers[i].(*interface{})\n\t\trowData[colName] = ColData{Data: val, DataType: rowData[colName].DataType}\n\t}\n\n\treturn QueryResult(rowData), nil\n}", "func (r rowsRes) Next(dest []driver.Value) error {\n\terr := r.my.ScanRow(r.row)\n\tif err != nil {\n\t\treturn errFilter(err)\n\t}\n\tfor i, col := range r.row {\n\t\tif col == nil {\n\t\t\tdest[i] = nil\n\t\t\tcontinue\n\t\t}\n\t\tswitch c := col.(type) {\n\t\tcase time.Time:\n\t\t\tdest[i] = c\n\t\t\tcontinue\n\t\tcase mysql.Timestamp:\n\t\t\tdest[i] = c.Time\n\t\t\tcontinue\n\t\tcase mysql.Date:\n\t\t\tdest[i] = c.Localtime()\n\t\t\tcontinue\n\t\t}\n\t\tv := reflect.ValueOf(col)\n\t\tswitch v.Kind() {\n\t\tcase reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:\n\t\t\t// this contains time.Duration to\n\t\t\tdest[i] = v.Int()\n\t\tcase reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:\n\t\t\tu := v.Uint()\n\t\t\tif u > math.MaxInt64 {\n\t\t\t\tpanic(\"Value to large for int64 type\")\n\t\t\t}\n\t\t\tdest[i] = int64(u)\n\t\tcase reflect.Float32, reflect.Float64:\n\t\t\tdest[i] = v.Float()\n\t\tcase reflect.Slice:\n\t\t\tif v.Type().Elem().Kind() == reflect.Uint8 {\n\t\t\t\tdest[i] = 
v.Interface().([]byte)\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tfallthrough\n\t\tdefault:\n\t\t\tpanic(fmt.Sprint(\"Unknown type of column: \", v.Type()))\n\t\t}\n\t}\n\treturn nil\n}", "func queryRows(db *sql.DB) {\n\t// Set the command to execute\n\tvar sql = `\n\t\tselect id, first_name, last_name\n\t\tfrom ` + dbname + `.DemoTable;\n\t`\n\n\t// Get row results\n\tvar rows, rowErr = db.Query(sql)\n\tif rowErr != nil {\n\t\tfmt.Println(rowErr)\n\t\treturn\n\t}\n\tdefer rows.Close()\n\n\t// Iterate over the rowset and output the demo table columns\n\tfor rows.Next() {\n\t\tvar id int\n\t\tvar fname string\n\t\tvar lname string\n\t\tif err := rows.Scan(&id, &fname, &lname); err != nil {\n\t\t\tfmt.Println(err)\n\t\t}\n\t\tfmt.Printf(\"id: %d, fname: %s, lname: %s\\n\", id, fname, lname)\n\t}\n}", "func (r *Relay) convertMessagesToKafkaSinkRecords(messages []interface{}) ([]*records.KafkaSinkRecord, error) {\n\tsinkRecords := make([]*records.KafkaSinkRecord, 0)\n\n\tfor i, v := range messages {\n\t\trelayMessage, ok := v.(*types.RelayMessage)\n\t\tif !ok {\n\t\t\treturn nil, fmt.Errorf(\"unable to type assert incoming message as RelayMessage (index: %d)\", i)\n\t\t}\n\n\t\tif err := r.validateKafkaRelayMessage(relayMessage); err != nil {\n\t\t\treturn nil, fmt.Errorf(\"unable to validate kafka relay message (index: %d): %s\", i, err)\n\t\t}\n\n\t\tsinkRecords = append(sinkRecords, &records.KafkaSinkRecord{\n\t\t\tTopic: relayMessage.Value.Topic,\n\t\t\tKey: relayMessage.Value.Key,\n\t\t\tValue: relayMessage.Value.Value,\n\t\t\tTimestamp: time.Now().UTC().UnixNano(),\n\t\t\tOffset: relayMessage.Value.Offset,\n\t\t\tPartition: int32(relayMessage.Value.Partition),\n\t\t\tHeaders: convertKafkaHeaders(relayMessage.Value.Headers),\n\t\t})\n\t}\n\n\treturn sinkRecords, nil\n}", "func (e *LoadDataWorker) GetRows() [][]types.Datum {\n\treturn e.rows\n}", "func toArray(row []interface{}) ([]string, []bool) {\n\tstrRow := make([]string, len(row))\n\n\tquotes := make([]bool, 
len(row))\n\n\tfor idx, colVal := range row {\n\t\tif colVal == nil {\n\t\t\tlog.Warn(\"column[\", idx, \"] is null\")\n\t\t\tcontinue\n\t\t}\n\t\tswitch reflect.TypeOf(colVal).Kind() {\n\t\tcase reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:\n\t\t\tstrRow[idx] = fmt.Sprintf(\"%d\", colVal)\n\t\t\tquotes[idx] = false\n\n\t\tcase reflect.Float32, reflect.Float64:\n\t\t\tstrRow[idx] = fmt.Sprintf(\"%f\", colVal)\n\t\t\tquotes[idx] = false\n\n\t\tcase reflect.Slice:\n\t\t\tv := colVal.([]uint8)\n\t\t\tbuffer := bytes.NewBuffer([]byte(\"0x\"))\n\t\t\tfor i := 0; i < len(v); i++ {\n\t\t\t\tbuffer.WriteString(fmt.Sprintf(\"%.2x\", v[i]))\n\t\t\t}\n\t\t\tstrRow[idx] = buffer.String()\n\n\t\tcase reflect.String:\n\t\t\tv := mysql.Escape(colVal.(string))\n\t\t\tstrRow[idx] = v\n\t\t\tquotes[idx] = true\n\t\t}\n\t}\n\n\treturn strRow, quotes\n}", "func getRecords(res *RecordsResp, qntString string) {\n\t//Setting the default value of the query status to false.\n\t//If the query succeeds, at the end, we cange this status to true.\n\tres.Status = false\n\n\tqnt, err := strconv.Atoi(qntString)\n\tif err != nil {\n\t\tlog.Printf(\"Function getRecords: Something went wrong when converting the quantity of records from string to int.\\n %v\\n\", err)\n\t\treturn\n\t}\n\t\n\t// Connecting to the database\n session, err := mgo.Dial(\"localhost\");\n if err != nil {\n \tlog.Printf(\"Function getRecords: Error when opening connection to database.\\n %v\\n\", err)\n \treturn\n }\n defer session.Close()\n \n // Querying the database\n conn := session.DB(DATABASE_NAME).C(RECORDS_COLLECTION)\n if err := conn.Find(nil).Limit(qnt).All(&res.Records); err != nil {\n \tlog.Printf(\"Function getRecords: Error when querying database.\\n %v\\n\", err)\n \treturn\n }\n \n // Getting the User Data\n conn = session.DB(DATABASE_NAME).C(USERS_COLLECTION)\n for i, _ := range res.Records {\n \tif err := conn.FindId(res.Records[i].UserId).One(&res.Records[i].UserData); err != nil {\n 
\t\tlog.Printf(\"Function getRecords: Error when getting user data\\n %v\\n\", err)\n \t\treturn\n \t}\n }\n \n //Query succeeded\n res.Status = true\n}", "func TransactionArrayMarshalling(rows *sqlx.Rows) []byte {\n\tvar t types.TransactionPayload\n\tvar txs []byte\n\n\tfor rows.Next() {\n\t\ttx := types.Transaction{}\n\t\terr := rows.StructScan(&tx)\n\t\tif err != nil {\n\t\t\tlogger.Warn(\"Unable to retrieve rows: \" + err.Error())\n\t\t\treturn nil\n\t\t}\n\t\tt.Payload = append(t.Payload, tx)\n\t\tserializedPayload, err := json.Marshal(t.Payload)\n\t\tif err != nil {\n\t\t\tlogger.Warn(\"Unable to serialize payload: \" + err.Error())\n\t\t}\n\t\ttxs = serializedPayload\n\t}\n\tif err := rows.Close(); err != nil {\n\t\tlogger.Warn(\"Unable to close row connection: \" + err.Error())\n\t}\n\n\treturn txs\n}" ]
[ "0.6726777", "0.67042124", "0.62163883", "0.6109386", "0.6026409", "0.59923404", "0.58899695", "0.58857733", "0.5779613", "0.5778647", "0.5747893", "0.56930035", "0.56913865", "0.56230724", "0.5609854", "0.55732936", "0.5567313", "0.5530029", "0.551342", "0.5497782", "0.545771", "0.5378807", "0.5364863", "0.53523624", "0.53421175", "0.53201264", "0.5316915", "0.53010964", "0.53002036", "0.52639854", "0.52572703", "0.52417034", "0.51732534", "0.5162114", "0.5157685", "0.5157179", "0.51047903", "0.51037806", "0.51002896", "0.5094491", "0.5087671", "0.50853723", "0.5080812", "0.50781035", "0.5074195", "0.5074055", "0.5068736", "0.50538856", "0.5050229", "0.5048929", "0.50475025", "0.5039514", "0.5023499", "0.50179094", "0.500229", "0.4999025", "0.49928015", "0.49907756", "0.498282", "0.49794254", "0.49781203", "0.49693242", "0.49626142", "0.49580267", "0.49336803", "0.49315596", "0.4931276", "0.49109823", "0.49061212", "0.49049345", "0.49030268", "0.48838064", "0.48771086", "0.48680255", "0.4863948", "0.48585615", "0.48582536", "0.48531842", "0.48521975", "0.48473343", "0.4846733", "0.48464224", "0.4841058", "0.48286074", "0.48098356", "0.48066762", "0.4783066", "0.47803703", "0.47787476", "0.4776244", "0.47708806", "0.47680292", "0.47670737", "0.47660163", "0.47640884", "0.47627366", "0.4759317", "0.4747613", "0.47426212", "0.47393155" ]
0.8269437
0
Add adds one or more previously unadded urls to crawler to visit. source can be nil to indicate root. Returns a list of errors if any occured.
Add добавляет один или несколько ранее не добавленных URL для посещения в краулер. source может быть nil, чтобы указать корень. Возвращает список ошибок, если произошли какие-либо ошибки.
func (c *Crawler) Add(source *url.URL, uri ...*url.URL) []error { var errs []error for _, u := range uri { var err error u := u u.Fragment = "" // reset fragment, we don't want it messing our visited list if source != nil { u = source.ResolveReference(u) } if u.Scheme != "http" && u.Scheme != "https" { err = ErrUnsupportedScheme } else if err == nil && c.filter != nil && !c.filter(u) { err = ErrFilteredOut } us := u.String() // For the already-visited test we need to clean up each URL a bit vkey := strings.TrimRight(us[strings.Index(us, ":")+1:], "/") // Remove scheme and trailing slash if err == nil { c.toVisitMu.RLock() if _, ok := c.toVisit[vkey]; ok { err = ErrAlreadyInList } c.toVisitMu.RUnlock() } if err == nil { c.logger.Debugf("Add(%v %v): OK", source, us) atomic.AddUint64(&c.numQueued, 1) } else if err != nil { //c.logger.Warnf("Add(%v %v): %v", source, us, err) atomic.AddUint64(&c.numEncountered, 1) errs = append(errs, errors.Wrapf(err, "Invalid URL %v", u)) continue } c.toVisitMu.Lock() c.toVisit[vkey] = struct{}{} c.toVisitMu.Unlock() { uu := *u uu.Scheme = "" if source != nil && source.Host == uu.Host { uu.Host = "" } if source == nil { c.mapper.Add("<root>", uu.String()) } else { c.mapper.Add(source.String(), uu.String()) } } v := visit{ source: source, target: u, } select { case c.visitChan <- v: case <-c.ctx.Done(): return append(errs, c.ctx.Err()) } } return errs }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (os *OriginChecker) AddRawURLs(urls []string) {\n\tos.Lock()\n\tdefer os.Unlock()\n\n\tfor _, u := range urls {\n\t\tclean, err := cleanOrigin(u)\n\t\tif err == nil {\n\t\t\tos.origins[clean] = true\n\t\t}\n\t}\n}", "func (r *RssFeedEmitter) Add(url string) {\n\tfor _, feed := range r.feeds {\n\t\tif feed.Link == url {\n\t\t\treturn\n\t\t}\n\t}\n\tnewFeed, err := r.parser.ParseURL(url)\n\tif err != nil {\n\t\treturn\n\t}\n\tr.feeds = append(r.feeds, *newFeed)\n}", "func (me *Crawler) AddUrl(URL string) (err error) {\n\tfor range only.Once {\n\t\tvar u *url.URL\n\t\tu, err = url.Parse(URL)\n\t\tif err != nil {\n\t\t\tbreak\n\t\t}\n\t\tif u.Path == \"\" {\n\t\t\tu.Path = \"/\"\n\t\t}\n\t\tr := &colly.Request{\n\t\t\tURL: u,\n\t\t\tMethod: \"GET\",\n\t\t}\n\t\tvar b []byte\n\t\tb, err = r.Marshal()\n\t\tif err != nil {\n\t\t\tbreak\n\t\t}\n\t\terr = me.Storage.AddRequest(b)\n\t\tif err != nil {\n\t\t\tbreak\n\t\t}\n\t}\n\treturn err\n}", "func (h *CrawlHandler) AddCrawl(url string, statusCode int) {\n\th.mu.Lock()\n\tdefer h.mu.Unlock()\n\n\th.crawls[url] = statusCode\n}", "func (f *frontier) Add(uri ...string) {\n\tfor _, i := range uri {\n\t\tu, err := f.filter(f, i)\n\t\tif err != nil {\n\t\t\tcontinue // do nothing\n\t\t}\n\t\tf.lk.Lock()\n\t\tf.nbs = append(f.nbs, &visitable{uri: u})\n\t\tf.lk.Unlock()\n\t}\n}", "func (r *result) Add(url, body string) {\n\tr.mux.Lock()\n\tr.Sites[url] = body\n\tr.give(url)\n\tr.mux.Unlock()\n}", "func (r *Repository) AddImages(urls []string) []error {\n\tvar errors []error\n\terrChan := make(chan error, len(urls))\n\tvar wg sync.WaitGroup\n\tfor _, url := range urls {\n\t\turl := url\n\t\twg.Add(1)\n\t\tgo func() {\n\t\t\tdefer wg.Done()\n\t\t\terr := r.addImage(url)\n\t\t\terrChan <- err\n\t\t}()\n\t}\n\n\twg.Wait()\n\tclose(errChan)\n\n\tfor err := range errChan {\n\t\tif err != nil {\n\t\t\terrors = append(errors, err)\n\t\t}\n\t}\n\n\treturn errors\n}", "func (self *errorList) Add(err error) {\n\tif err != nil 
{\n\t\tself.list = append(self.list, err.Error())\n\t}\n\t//return err\n}", "func (s *FeedService) AddFeed(url string) error {\n\texists, err := s.dbClient.CheckWhetherSourceExist(url)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif !exists {\n\t\treturn fmt.Errorf(\"api: feed %s already exists\", url)\n\t}\n\tfp := gofeed.NewParser()\n\tfp.Client = s.httpClient\n\tf, err := fp.ParseURL(url)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"add: cannot parse URL: %v\", err)\n\t}\n\tfavIcon, err := getFavIcon(f.Link)\n\tif err != nil {\n\t\treturn err\n\t}\n\titem := &db.FeedSource{\n\t\tTitle: f.Title,\n\t\tUrlSource: url,\n\t\tDescription: f.Description,\n\t\tLastUpdated: f.UpdatedParsed,\n\t\tActive: true,\n\t\tLastChecked: time.Now(),\n\t\tFavIcon: favIcon,\n\t}\n\tfeedID, err := s.dbClient.AddFeed(item)\n\tif err != nil {\n\t\treturn err\n\t}\n\tfor _, i := range f.Items {\n\t\tfixFeedItem(i)\n\t\terr := s.dbClient.AddNews(feedID, db.ToFeedItem(item.Id, i))\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\tlog.Printf(\"Added %d items\", len(f.Items))\n\treturn err\n}", "func (a *Agent) addSources(sources []string) {\n\tfor _, source := range sources {\n\t\tswitch source {\n\t\tcase \"webscan\":\n\t\t\ta.sources[source] = &webscan.Source{}\n\t\tcase \"hackertarget\":\n\t\t\ta.sources[source] = &hackertarget.Source{}\n\t\tcase \"dnsgrep\":\n\t\t\ta.sources[source] = &dnsgrep.Source{}\n\t\tcase \"rapiddns\":\n\t\t\ta.sources[source] = &rapiddns.Source{}\n\t\tcase \"c99\":\n\t\t\ta.sources[source] = &c99.Source{}\n\t\tcase \"ip138\":\n\t\t\ta.sources[source] = &ip138.Source{}\n\t\tcase \"aizhan\":\n\t\t\ta.sources[source] = &aizhan.Source{}\n\t\tcase \"omnisint\":\n\t\t\ta.sources[source] = &omnisint.Source{}\n\t\tcase \"viewdns\":\n\t\t\ta.sources[source] = &viewdns.Source{}\n\t\tcase \"bugscaner\":\n\t\t\ta.sources[source] = &bugscaner.Source{}\n\t\tcase \"dnslytics\":\n\t\t\ta.sources[source] = &dnslytics.Source{}\n\t\tcase \"domaintools\":\n\t\t\ta.sources[source] 
= &domaintools.Source{}\n\t\tcase \"yougetsignal\":\n\t\t\ta.sources[source] = &yougetsignal.Source{}\n\t\tcase \"chinaz\":\n\t\t\ta.sources[source] = &chinaz.Source{}\n\t\tcase \"securitytrails\":\n\t\t\ta.sources[source] = &securitytrails.Source{}\n\t\t}\n\t}\n}", "func (c *Cache) Add(cr CrawlResult) {\n\tc.mutex.Lock()\n\tc.c[cr.url] = cr\n\tc.mutex.Unlock()\n}", "func (e *Errors) Add(errs ...error) {\n\t*e = append(*e, errs...)\n}", "func (errs Errors) Add(newErrors ...error) Errors {\n\tfor _, err := range newErrors {\n\t\tif err == nil {\n\t\t\tcontinue\n\t\t}\n\n\t\tif errors, ok := err.(Errors); ok {\n\t\t\terrs = errs.Add(errors...)\n\t\t} else {\n\t\t\tok = true\n\t\t\tfor _, e := range errs {\n\t\t\t\tif err == e {\n\t\t\t\t\tok = false\n\t\t\t\t}\n\t\t\t}\n\t\t\tif ok {\n\t\t\t\terrs = append(errs, err)\n\t\t\t}\n\t\t}\n\t}\n\treturn errs\n}", "func (errs Errors) Add(newErrors ...error) Errors {\n\tfor _, err := range newErrors {\n\t\tif err == nil {\n\t\t\tcontinue\n\t\t}\n\n\t\tif errors, ok := err.(Errors); ok {\n\t\t\terrs = errs.Add(errors...)\n\t\t} else {\n\t\t\tok = true\n\t\t\tfor _, e := range errs {\n\t\t\t\tif err == e {\n\t\t\t\t\tok = false\n\t\t\t\t}\n\t\t\t}\n\t\t\tif ok {\n\t\t\t\terrs = append(errs, err)\n\t\t\t}\n\t\t}\n\t}\n\treturn errs\n}", "func (s *Spider) addSkippedURL(u *url.URL) {\n\ts.Lock()\n\ts.skippedURLs[u.Path] = struct{}{}\n\ts.Unlock()\n}", "func (v *ValidationErrors) Add(err ...string) {\n\t*v = append(*v, err...)\n}", "func (results *Results) Add(result *Result) {\n\tif !result.Passed || result.Error != nil {\n\t\tresults.Passed = false\n\t}\n\tresults.List = append(results.List, result)\n}", "func (target *LinkStatistics) Add(source LinkStatistics) {\n\tif target.Name == \"\" {\n\t\ttarget.Name = source.Name\n\t}\n\tif target.URI == \"\" {\n\t\ttarget.URI = source.URI\n\t}\n\ttarget.AnnotatedValuesWaiting += source.AnnotatedValuesWaiting\n\ttarget.AnnotatedValuesInProgress += 
source.AnnotatedValuesInProgress\n\ttarget.AnnotatedValuesAcknowledged += source.AnnotatedValuesAcknowledged\n}", "func (m refCountedUrlSet) addUrl(urlStr string) bool {\n\tm[urlStr]++\n\treturn m[urlStr] == 1\n}", "func (f *FFS) Addrs(ctx context.Context) ([]api.AddrInfo, error) {\n\tresp, err := f.client.Addrs(ctx, &rpc.AddrsRequest{})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\taddrs := make([]api.AddrInfo, len(resp.Addrs))\n\tfor i, addr := range resp.Addrs {\n\t\taddrs[i] = api.AddrInfo{\n\t\t\tName: addr.Name,\n\t\t\tAddr: addr.Addr,\n\t\t\tType: addr.Type,\n\t\t}\n\t}\n\treturn addrs, nil\n}", "func SetTotalURLs(total int) (err error) {\n\tclient := getClient()\n\terr = client.Set(\"TotalUrls\", total, 0).Err()\n\treturn\n}", "func (list *ValidationErrors) Add(err *FieldError) {\n\t*list = append(*list, err)\n}", "func updateSource(source *Source, newSource *Source) {\n\tfor _, newEntry := range newSource.Entries {\n\t\tvar exists = false\n\t\tfor _, entry := range source.Entries {\n\t\t\tif entry.Url == newEntry.Url {\n\t\t\t\texists = true\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\tif !exists {\n\t\t\tsource.Entries = append(source.Entries, newEntry)\n\t\t}\n\t}\n}", "func (m *MultiError) Add(err error) {\n\tif err != nil {\n\t\tm.errors = append(m.errors, err)\n\t}\n}", "func (qi *Items) Add(r *pageloader.Request) {\n\n\tif qi.haveSeen(r.URL) {\n\t\treturn\n\t}\n\n\tqi.Lock()\n\tlog.Println(\"adding request to the queue\")\n\tqi.Stack = append(qi.Stack, r)\n\tqi.Seen[r.URL] = true\n\tqi.Length++\n\tlog.Printf(\"queue length now: %d\\n\", qi.Length)\n\tqi.Unlock()\n}", "func addUrl(writer http.ResponseWriter, request *http.Request) {\n\tvars := mux.Vars(request)\n\thostname := vars[\"hostname\"]\n\tquerypath := vars[\"querypath\"]\n\n\tresponse := APIResponse{}\n\terr := utils.ValidateUrl(hostname)\n\tif err != nil {\n\t\tresponse.BadRequest(err)\n\t\thttp_respond(response, writer)\n\t\treturn\n\t}\n\n\tdecodedPath, err := utils.URLDecode(querypath)\n\tif err 
!= nil {\n\t\tresponse.BadRequest(err)\n\t\thttp_respond(response, writer)\n\t\treturn\n\t}\n\n\t// Generate URL service for querying the URL\n\turlService, err := services.NewUrlService(hostname, decodedPath, config.DBType, config.CacheType)\n\tif err != nil {\n\t\tutils.LogError(utils.LogFields{\"hostname\": hostname, \"path\": decodedPath}, err, \"Error getting URL\")\n\t\tresponse.InternalError(errors.New(\"An error occurred\"))\n\t\thttp_respond(response, writer)\n\t\treturn\n\t}\n\n\terr = urlService.AddUrl()\n\tif err != nil {\n\t\tresponse.BadRequest(err)\n\t} else {\n\t\tresponse.Success(StringData{Message: \"Successfully added URL\"})\n\t}\n\n\thttp_respond(response, writer)\n}", "func (prCtx *ParseResultContext) AddResults(source string, parsedResList []*ParseResult) {\n\t// If there is no source, the configMap is probably a platform scan map, in that case\n\t// treat all the results as consistent.\n\tif source == \"\" {\n\t\tprCtx.addConsistentResults(source, parsedResList)\n\t\treturn\n\t}\n\n\t// Treat the first batch of results as consistent\n\tif len(prCtx.inconsistent) == 0 && len(prCtx.consistent) == 0 {\n\t\tprCtx.addConsistentResults(source, parsedResList)\n\t} else {\n\t\tprCtx.addParsedResults(source, parsedResList)\n\t}\n}", "func ParseSyndicationSource(ctx context.Context, repos *repository.Repositories, r *http.Result, s *syndication.Source) ([]*url.URL, error) {\n\tvar urls []*url.URL\n\n\tif err := handleFeedHTTPErrors(ctx, repos, r, s); err != nil {\n\t\treturn urls, err\n\t}\n\n\t// We only want successful requests at this point\n\tif !r.RequestWasSuccessful() {\n\t\treturn urls, fmt.Errorf(\"%s\", r.GetFailureReason())\n\t}\n\n\tif r.RequestWasRedirected() {\n\t\tvar err error\n\t\ts, err = handleDuplicateFeed(ctx, repos, r.FinalURI, s)\n\t\tif err != nil {\n\t\t\treturn urls, err\n\t\t}\n\t}\n\n\tpr, err := repos.Botlogs.FindPreviousByURL(ctx, s.URL, r)\n\tif err != nil && err != sql.ErrNoRows {\n\t\treturn urls, err\n\t}\n\n\tif 
r.IsContentDifferent(pr) {\n\t\tvar c *gofeed.Feed\n\t\tc, err = gofeed.NewParser().Parse(r.Content)\n\t\tif err != nil {\n\t\t\treturn urls, fmt.Errorf(\"Parsing error: %s - URL %s\", err, s.URL)\n\t\t}\n\n\t\tif c.Title != \"\" {\n\t\t\tif s.Title == \"\" || s.Title == syndication.DefaultWPFeedTitle {\n\t\t\t\ts.Title = c.Title\n\t\t\t}\n\t\t}\n\n\t\tif c.Link != \"\" {\n\t\t\tvar l *url.URL\n\t\t\tl, err = url.FromRawURL(c.Link)\n\t\t\tif err == nil {\n\t\t\t\ts.Domain = l\n\t\t\t}\n\t\t}\n\n\t\tif s.Type == \"\" {\n\t\t\tvar feedType syndication.Type\n\t\t\tfeedType, err = syndication.FromGoFeedType(c.FeedType)\n\t\t\tif err == nil {\n\t\t\t\ts.Type = feedType\n\t\t\t} else {\n\t\t\t\tlogger.Error(err)\n\t\t\t}\n\t\t}\n\n\t\tfor _, item := range c.Items {\n\t\t\tvar u *url.URL\n\t\t\tu, err = url.FromRawURL(item.Link)\n\t\t\tif err != nil {\n\t\t\t\tlogger.Error(err)\n\t\t\t\tcontinue // Just skip invalid URLs\n\t\t\t}\n\n\t\t\t// @TODO Add a list of Source proxy and resolve source's URLs before pushing to the queue\n\t\t\tvar b bool\n\t\t\tb, err = repos.Documents.ExistWithURL(ctx, u)\n\t\t\tif err != nil {\n\t\t\t\tlogger.Error(err)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif !b {\n\t\t\t\tlogger.Info(fmt.Sprintf(\"Adding URL [%s]\", u))\n\t\t\t\turls = append(urls, u)\n\t\t\t} else {\n\t\t\t\tlogger.Info(fmt.Sprintf(\"URL [%s] already exists\", u))\n\t\t\t}\n\t\t}\n\t} else {\n\t\tlogger.Info(\"Feed content has not changed\")\n\t}\n\n\t// Reverse results\n\tfor l, r := 0, len(urls)-1; l < r; l, r = l+1, r-1 {\n\t\turls[l], urls[r] = urls[r], urls[l]\n\t}\n\n\tvar results []*http.Result\n\tresults, err = repos.Botlogs.FindByURL(ctx, s.URL)\n\tif err != nil {\n\t\treturn urls, err\n\t}\n\n\tf := http.CalculateFrequency(results)\n\tlogger.Warn(fmt.Sprintf(\"Source frequency: [%s], previous: [%s]\", f, s.Frequency))\n\n\ts.Frequency = f\n\ts.ParsedAt = time.Now()\n\n\tif err := repos.Syndication.Update(ctx, s); err != nil {\n\t\treturn urls, err\n\t}\n\n\treturn urls, 
nil\n}", "func addNewFeed(ctx *web.Context) string {\n\turl := ctx.Params[\"url\"]\n\tsource, err := loadFeed(url)\n\tif err != nil {\n\t\treturn err.Error()\n\t}\n\n\tlock.Lock()\n\tfolders[\"uncategorized\"] = append(folders[\"uncategorized\"], source)\n\tlock.Unlock()\n\n\tctx.Redirect(303, \"/\")\n\treturn \"\"\n}", "func (c *CrawlerState) AddURL(url string) bool {\n\tc.Lock()\n\tif _, ok := c.urlMap[url]; ok {\n\t\t// URL already present. Return false indicating the new url was already present\n\t\tc.Unlock()\n\t\treturn false\n\t}\n\tc.urlMap[url] = struct{}{}\n\tc.seenURLCount++\n\tc.urls = append(c.urls, url)\n\tc.Unlock()\n\treturn true\n}", "func (i *Index) AddSrc(src Src) bool {\n\tfor _, s := range i.Srcs {\n\t\tif s.SrcID == src.SrcID {\n\t\t\treturn false\n\t\t}\n\t}\n\ti.Srcs = append(i.Srcs, src)\n\treturn true\n}", "func (tr *TestRunner) Add(addrs ...*net.UDPAddr) {\n\ttr.mutex.Lock()\n\tdefer tr.mutex.Unlock()\n\ttr.targets = append(tr.targets, addrs...)\n}", "func (e *Errors) Add(err error) bool {\n\tif err != nil {\n\t\te.lock.Lock()\n\t\tdefer e.lock.Unlock()\n\t\te.errors = append(e.errors, err)\n\t\treturn true\n\t}\n\treturn false\n}", "func (m *Metadata) Addrs() []string {\n\taddrs := make([]string, 0, len(m.addrs))\n\tfor addr := range m.addrs {\n\t\taddrs = append(addrs, addr)\n\t}\n\treturn addrs\n}", "func (e *ValidationError) Add(s string) {\n\te.ErrorList = append(e.ErrorList, s)\n}", "func (t *targetBuilder) addSrcs(srcs *treeset.Set) *targetBuilder {\n\tit := srcs.Iterator()\n\tfor it.Next() {\n\t\tt.srcs.Add(it.Value().(string))\n\t}\n\treturn t\n}", "func (t *AuroraTask) AddURIs(extract bool, cache bool, values ...string) *AuroraTask {\n\tfor _, value := range values {\n\t\tt.task.MesosFetcherUris = append(\n\t\t\tt.task.MesosFetcherUris,\n\t\t\t&aurora.MesosFetcherURI{Value: value, Extract: &extract, Cache: &cache})\n\t}\n\treturn t\n}", "func (ps *PeerStore) Addrs() []string {\n\tps.lock.RLock()\n\tdefer 
ps.lock.RUnlock()\n\taddrs := make([]string, 0)\n\tfor addr := range ps.peers {\n\t\taddrs = append(addrs, addr)\n\t}\n\treturn addrs\n}", "func newURLs(isRaw bool, versionID string, isAllVersions bool, sources ...string) ([]*url.URL, error) {\n\tvar urls []*url.URL\n\tfor _, src := range sources {\n\t\tsrcurl, err := url.New(src, url.WithRaw(isRaw), url.WithVersion(versionID),\n\t\t\turl.WithAllVersions(isAllVersions))\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tif err := checkVersinoningURLRemote(srcurl); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\turls = append(urls, srcurl)\n\t}\n\treturn urls, nil\n}", "func (r *Paths) Add(url string, handler interface{}) error {\n\tif handler == nil {\n\t\treturn errors.New(\"nil handler\")\n\t}\n\tparts := splitter(url) // нормализуем путь и разбиваем его на части\n\t// проверяем, что количество получившихся частей не превышает максимально\n\t// поддерживаемое количество\n\tlevel := uint16(len(parts)) // всего элементов пути\n\tif level > (1<<15 - 1) {\n\t\treturn fmt.Errorf(\"path parts overflow: %d\", len(parts))\n\t}\n\t// считаем количество параметров в определении пути\n\tvar params uint16\n\tfor i, value := range parts {\n\t\tif strings.HasPrefix(value, NamedParamFlag) {\n\t\t\tparams++ // увеличиваем счетчик параметров\n\t\t} else if strings.HasPrefix(value, CatchAllParamFlag) {\n\t\t\t// такой параметр должен быть самым последним в определении путей\n\t\t\tif uint16(i) != level-1 {\n\t\t\t\treturn errors.New(\"catch-all parameter must be last\")\n\t\t\t}\n\t\t\tparams |= 1 << 15 // взводим флаг динамического параметра\n\t\t\t// запоминаем позицию самого раннего встреченного динамического\n\t\t\t// параметра во всех добавленных путях\n\t\t\tif r.catchAll == 0 || r.catchAll > level {\n\t\t\t\tr.catchAll = level\n\t\t\t}\n\t\t}\n\t}\n\t// если в пути нет параметров, то добавляем в статические обработчики\n\tif params == 0 {\n\t\tif r.static == nil {\n\t\t\tr.static = 
make(map[string]interface{})\n\t\t}\n\t\tr.static[strings.Join(parts, PathDelimeter)] = handler\n\t\treturn nil\n\t}\n\t// запоминаем максимальное количество элементов пути во всех определениях\n\tif r.maxParts < level {\n\t\tr.maxParts = level\n\t}\n\t// инициализируем динамические пути, если не сделали этого раньше\n\tif r.fields == nil {\n\t\tr.fields = make(map[uint16]records)\n\t}\n\t// добавляем в массив обработчиков с таким же количеством параметров\n\tr.fields[level] = append(r.fields[level], &record{params, parts, handler})\n\tsort.Stable(r.fields[level]) // сортируем по количеству параметров\n\treturn nil\n}", "func (errors *Errors) Add(u uint64) uint64 {\n\terrCount := errors.counter.Add(u)\n\terrors.checkMaxError(errCount)\n\treturn errCount\n}", "func (l *errList) Push(pc uintptr, err error, file string, line int) {\n\titem := &Node{\n\t\tPC: pc,\n\t\tErr: err,\n\t\tFile: file,\n\t\tLine: line,\n\t}\n\n\tif l.head == nil {\n\t\tl.head = item\n\t}\n\n\tif l.tail != nil {\n\t\tl.tail.next = item\n\t}\n\n\tl.tail = item\n\tl.len++\n}", "func (sdr *CSdRegistry) AddSources(vt *vtree.Vtree) {\n\tC.sd_registry_add_sources(sdr.sdr, node(vt))\n}", "func (g *Graph) Add(addr *url.URL) {\n\ta := *addr\n\tsum := urlsum(a)\n\tg.Lock()\n\tdefer g.Unlock()\n\tif _, ok := g.nodes[sum]; !ok {\n\t\tg.nodes[sum] = &Page{\n\t\t\tweight: 0,\n\t\t\toutbound: 0,\n\t\t\taddr: a,\n\t\t}\n\t}\n}", "func (s *Scraper) addRequest(rt *requestTracker) {\n\ts.lock.Lock()\n\tdefer s.lock.Unlock()\n\ts.requests = append(s.requests, rt)\n}", "func (r *Robots) add(rule *Rule) {\n\tif rule.Length > 0 {\n\t\tr.Rules = append(r.Rules, rule)\n\t}\n}", "func (e *ErrorList) Push(err error) {\n\tif err == nil {\n\t\treturn\n\t}\n\n\tswitch v := err.(type) {\n\tcase *ErrorList:\n\t\te.errs = append(e.errs, v.errs...)\n\tdefault:\n\t\te.errs = append(e.errs, err)\n\t}\n}", "func (s *Service) AddURL(url string) error {\n\tindex := s.indexOfURL(url)\n\n\tif index != -1 {\n\t\treturn fmt.Errorf(\"URL 
'%s' is already registered\", url)\n\t}\n\n\ts.URLs = append(s.URLs, url)\n\treturn nil\n}", "func (collection *RemoteRepoCollection) Add(repo *RemoteRepo) error {\n\tfor _, r := range collection.list {\n\t\tif r.Name == repo.Name {\n\t\t\treturn fmt.Errorf(\"mirror with name %s already exists\", repo.Name)\n\t\t}\n\t}\n\n\terr := collection.Update(repo)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tcollection.list = append(collection.list, repo)\n\treturn nil\n}", "func (be *Batch) Add(errs ...error) {\n\tfor _, err := range errs {\n\t\tif err == nil {\n\t\t\tcontinue\n\t\t}\n\n\t\tvar batch Batch\n\t\tif errors.As(err, &batch) {\n\t\t\tbe.addBatch(batch)\n\t\t} else {\n\t\t\tbe.errors = append(be.errors, err)\n\t\t}\n\t}\n}", "func (r *URef) AddSrc(src SrcItem) bool {\n\tfor i, s := range r.Srcs {\n\t\tif s.EqualKey(src) {\n\t\t\tif s.Equal(src) {\n\t\t\t\treturn false\n\t\t\t}\n\t\t\t// Update mutable attributes.\n\t\t\tr.Srcs[i] = src\n\t\t\treturn true\n\t\t}\n\t}\n\tr.Srcs = append(r.Srcs, src)\n\treturn true\n}", "func (s *basicService) PostURLs(urls []string) ([]crawl.Result, error) {\n\tif len(urls) == 0 {\n\t\treturn nil, ErrEmptyURLs\n\t}\n\tcrawlResult := []crawl.Result{}\n\n\turlLen := len(urls)\n\n\tjobs := make(chan string, urlLen)\n\tjobResults := make(chan jobResult, urlLen)\n\n\tworkers := urlLen\n\tif workers > s.maxWorkers {\n\t\tworkers = s.maxWorkers\n\t}\n\n\tfor w := 1; w <= workers; w++ {\n\t\tgo s.worker(jobs, jobResults)\n\t}\n\n\tfor _, url := range urls {\n\t\tjobs <- url\n\t}\n\tclose(jobs)\n\n\tfor i := 1; i <= urlLen; i++ {\n\t\tjobResult := <-jobResults\n\t\tif jobResult.err != nil {\n\t\t\tcontinue\n\t\t}\n\t\tcrawlResult = append(crawlResult, jobResult.result)\n\t}\n\n\treturn crawlResult, nil\n}", "func ParseURLs(content string) []string { return parseURLsMax(content, -1) }", "func (list *ErrorList) Collect(args ...interface{}) {\n\tfor _, a := range args {\n\t\tif err, _ := a.(error); err != nil {\n\t\t\t*list = append(*list, 
err)\n\t\t}\n\t}\n}", "func (m *Mux) Add(ds *discordgo.Session, dm *discordgo.Message, ctx *Context) {\n\tif !authorized(dm) {\n\t\tds.ChannelMessageSend(dm.ChannelID, \"Only the bot owner can do that.\")\n\t\treturn\n\t}\n\n\t//http://code.9front.org/hg/plan9front/rss-log\n\tresp := \"```\\n\"\n\t// URL to feed should be last item\n\turl := ctx.Fields[len(ctx.Fields) -1]\n\tfmt.Println(\"Proposed addition for: \", url)\n\t\n\tfor _, v := range Config.Feeds {\n\t\t// this is bad matching, can't have two bitbucket url's?\n\t\tif strings.Contains(url, v.Feed.UpdateURL) {\n\t\t\t//fmt.Println(url)\n\t\t\t//fmt.Println(v.Link)\n\t\t\tresp += \"Denied! Feed already subscribed to.\"\n\t\t\tresp += \"```\\n\"\n\t\t\tds.ChannelMessageSend(dm.ChannelID, resp)\n\t\t\treturn\n\t\t}\n\t}\n\t\n\tfeed, err := rss.Fetch(url)\n\n\tif err != nil {\n\t\tfmt.Println(\"Error in reading RSS feed, see: x/mux/commits.go\")\n\t\tfmt.Printf(\"%s\\n\\n\", err)\n\t\tresp += \"Denied! Could not parse feed.\"\n\t\tresp += \"```\\n\"\n\t\tds.ChannelMessageSend(dm.ChannelID, resp)\n\t\treturn\n\t}\n\t\n\t// Might not be thread safe\n\tvar tmpFeed Feed\n\ttmpFeed.Feed = *feed\n\t// Maybe make the size here a Config variable\n\ttmpFeed.Recent = make([]string, 3)\n\tConfig.Feeds = append(Config.Feeds, tmpFeed)\n\tresp += \"Added.\"\n\t\n\tresp += \"```\\n\"\n\tds.ChannelMessageSend(dm.ChannelID, resp)\n\n\treturn\n}", "func (c *cache) add(stories []item) {\n\tsize := c.curSize\n\tfor _, story := range stories {\n\t\tfound := false\n\t\tfor i := 0; i < size; i++ {\n\t\t\tif c.stories[i].id == story.id {\n\t\t\t\tfound = true\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\tif !found {\n\t\t\tif c.curSize < c.maxSize {\n\t\t\t\tc.mutx.Lock()\n\t\t\t\tc.stories[c.curSize] = story\n\t\t\t\tc.curSize++\n\t\t\t\tc.mutx.Unlock()\n\t\t\t}\n\t\t\tif c.curSize >= c.maxSize {\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}\n}", "func (p *Manager) AddFromURL(purl string) {\n\tproxy := p.parseURL(purl)\n\tp.Add(proxy)\n}", "func 
CrawlFromSource(w http.ResponseWriter, r *http.Request) {\n\tsubBuf := new(bytes.Buffer)\n\tappConfig := config.ReadConfig()\n\tpubSubConfig := appConfig.PubSubConfig\n\n\terrGettingSrc := subscriber.PullCrawlFromSourceMsgs(subBuf, pubSubConfig.ProjectID, pubSubConfig.Topics.UpsertLink)\n\tif errGettingSrc != nil {\n\t\tfmt.Println(\"Epic failure, you should probably look into it: \", errGettingSrc)\n\t}\n\n\tfmt.Println(\"LINKS PUBLISHED IN GOOGLE CLOUD PLATFORM'S PUB/SUB `upsert_link` TASK\")\n}", "func (ucc *UpdateClientConfig) Add(url string) error {\n\tparts := repoRegexp.FindStringSubmatch(url)\n\tif len(parts) != 6 {\n\t\treturn ErrorsUCInvalidURL\n\t}\n\n\tvar err error\n\tif !ucc.exist() {\n\t\terr = ucc.Init()\n\t} else {\n\t\terr = ucc.Load()\n\t}\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfor _, repo := range ucc.Repos {\n\t\tif repo == url {\n\t\t\treturn ErrorsUCRepoExist\n\t\t}\n\t}\n\tucc.Repos = append(ucc.Repos, url)\n\n\treturn ucc.save()\n}", "func Addrs(addrs []string) ([]*net.TCPAddr, error) {\n\tnetAddrs := make([]*net.TCPAddr, 0, len(addrs))\n\tnErrs := 0\n\tfor _, a := range addrs {\n\t\tnetAddr, err := net.ResolveTCPAddr(\"tcp4\", a)\n\t\tif err != nil {\n\t\t\tnErrs++\n\t\t\tif nErrs == len(addrs) {\n\t\t\t\t// bail if none of the addrs could be parsed\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tcontinue\n\t\t}\n\t\tnetAddrs = append(netAddrs, netAddr)\n\t}\n\treturn netAddrs, nil\n}", "func (p *Pipeline) Add(nodes ...Node) {\n\tfor _, node := range nodes {\n\t\tif l := len(p.nodes); (l == 0) || !p.nodes[l-1].TryMerge(node) {\n\t\t\tp.nodes = append(p.nodes, node)\n\t\t}\n\t}\n}", "func (p *Pipeline) Add(nodes ...Node) {\n\tfor _, node := range nodes {\n\t\tif l := len(p.nodes); (l == 0) || !p.nodes[l-1].TryMerge(node) {\n\t\t\tp.nodes = append(p.nodes, node)\n\t\t}\n\t}\n}", "func (e *RateLimitErrorLogging) AddEvents(ctx context.Context, points []*event.Event, next Sink) error {\n\terr := next.AddEvents(ctx, points)\n\tif err != nil 
{\n\t\tnow := time.Now()\n\t\tlastLogTimeNs := atomic.LoadInt64(&e.lastLogTimeNs)\n\t\tsinceLastLogNs := now.UnixNano() - lastLogTimeNs\n\t\tif sinceLastLogNs > e.LogThrottle.Nanoseconds() {\n\t\t\tnowUnixNs := now.UnixNano()\n\t\t\tif atomic.CompareAndSwapInt64(&e.lastLogTimeNs, lastLogTimeNs, nowUnixNs) {\n\t\t\t\te.Callback(err)\n\t\t\t}\n\t\t}\n\t}\n\treturn err\n}", "func (j *AuroraJob) AddURIs(extract bool, cache bool, values ...string) Job {\n\tfor _, value := range values {\n\t\tj.jobConfig.TaskConfig.MesosFetcherUris = append(j.jobConfig.TaskConfig.MesosFetcherUris,\n\t\t\t&aurora.MesosFetcherURI{Value: value, Extract: &extract, Cache: &cache})\n\t}\n\treturn j\n}", "func (s *MemStateStore) Add(state, url string) error {\n\ts.mutex.Lock()\n\tdefer s.mutex.Unlock()\n\n\tif state == \"\" {\n\t\treturn fmt.Errorf(\"State argument not provided\")\n\t}\n\n\ts.states[state] = url\n\n\treturn nil\n}", "func (ucc *UpdateClientConfig) Add(url string) error {\n\tif url == \"\" {\n\t\treturn ErrorsUCEmptyURL\n\t}\n\n\tvar err error\n\tif !ucc.exist() {\n\t\terr = ucc.Init()\n\t} else {\n\t\terr = ucc.Load()\n\t}\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfor _, repo := range ucc.Repos {\n\t\tif repo == url {\n\t\t\treturn ErrorsUCRepoExist\n\t\t}\n\t}\n\tucc.Repos = append(ucc.Repos, url)\n\n\treturn ucc.save()\n}", "func (c *concurrentStorage) add(u url.URL) (bool) {\n\tc.Lock()\n\tdefer c.Unlock()\n\tif _, ok := c.urls[u]; ok{\n\t\treturn false\n\t}\n\tc.urls[u] = true\n\tc.urlsSize++\n\treturn true\n}", "func visit(url string) ([]string, error) {\n\tvar links []string\n\n\tif !urlRegex.MatchString(url) {\n\t\treturn links, fmt.Errorf(\"Not a valid url to visit : %s\", url)\n\t}\n\n\tresp, err := http.Get(url)\n\tif err != nil {\n\t\treturn links, err\n\t}\n\n\tif resp.StatusCode != http.StatusOK {\n\t\tresp.Body.Close()\n\t\treturn links, fmt.Errorf(\"Received status code %s when fetching url %s\", resp.Status, url)\n\t}\n\n\tdoc, err := 
html.Parse(resp.Body)\n\tresp.Body.Close()\n\tif err != nil {\n\t\treturn links, err\n\t}\n\n\tlinks = traverse(doc, nil)\n\treturn links, nil\n}", "func ParseHTMLFromSource(r io.Reader) ([]HTMLhrefEntries, error) {\n\thtmlReader := html.NewTokenizer(r)\n\tvar found = []HTMLhrefEntries{}\n\tfound, err := findHrefs(found, \"\", htmlReader)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn found, nil\n}", "func (h *Handler) Add(from, to string, code Code) *Handler {\n\th.Redirects[normalise(from)] = redirect{\n\t\tURL: to,\n\t\tCode: code,\n\t}\n\treturn h\n}", "func Addrs(addrs ...string) Option {\n\treturn func(o *Options) {\n\t\to.Addrs = addrs\n\t}\n}", "func Addrs(addrs ...string) Option {\n\treturn func(o *Options) {\n\t\to.Addrs = addrs\n\t}\n}", "func Addrs(addrs ...string) Option {\n\treturn func(o *Options) {\n\t\to.Addrs = addrs\n\t}\n}", "func Addrs(addrs ...string) Option {\n\treturn func(o *Options) {\n\t\to.Addrs = addrs\n\t}\n}", "func Addrs(addrs ...string) Option {\n\treturn func(o *Options) {\n\t\to.Addrs = addrs\n\t}\n}", "func appendSources(srcs [][]byte) YAMLOption {\n\treturn optionFunc(func(c *config) {\n\t\tfor _, src := range srcs {\n\t\t\tc.sources = append(c.sources, source{bytes: src})\n\t\t}\n\t})\n}", "func AddLastErr(source, msg string) {\n\tlastErrorVec.WithLabelValues(source, msg).Set(float64(time.Now().Unix()))\n\tlastErrorCountVec.WithLabelValues(source).Inc()\n\n\tif atomic.AddInt64(&lastErrorCount, int64(1))%MaxLastErrorCount == 0 { // clean\n\t\tResetLastErrors()\n\t}\n}", "func (d *Sources) Add(ctx context.Context, source chronograf.Source) (chronograf.Source, error) {\n\tgenID, err := d.IDs.Generate()\n\tif err != nil {\n\t\td.Logger.\n\t\t\tWithField(\"component\", \"source\").\n\t\t\tError(\"Unable to generate ID\")\n\t\treturn chronograf.Source{}, err\n\t}\n\n\tid, err := strconv.Atoi(genID)\n\tif err != nil {\n\t\td.Logger.\n\t\t\tWithField(\"component\", \"source\").\n\t\t\tError(\"Unable to convert ID\")\n\t\treturn 
chronograf.Source{}, err\n\t}\n\n\tsource.ID = id\n\n\tfile := sourceFile(d.Dir, source)\n\tif err = d.Create(file, source); err != nil {\n\t\tif err == chronograf.ErrSourceInvalid {\n\t\t\td.Logger.\n\t\t\t\tWithField(\"component\", \"source\").\n\t\t\t\tWithField(\"name\", file).\n\t\t\t\tError(\"Invalid Source: \", err)\n\t\t} else {\n\t\t\td.Logger.\n\t\t\t\tWithField(\"component\", \"source\").\n\t\t\t\tWithField(\"name\", file).\n\t\t\t\tError(\"Unable to write source:\", err)\n\t\t}\n\t\treturn chronograf.Source{}, err\n\t}\n\treturn source, nil\n}", "func (c *Controller) AddNewURLEntry(urlEntry *URLEntry) {\n\tselect {\n\tcase <-c.ctx.Done():\n\t\treturn\n\tcase c.subTree <- urlEntry:\n\t}\n}", "func (sr *ScraperRegistry) Add(s *types.Scraper) {\n\t// Initialize the Prometheus metric pointer\n\ts.InitializeMetrics()\n\t// Append initialized scraper to the slice of all scrapers\n\tsr.Scrapers = append(sr.Scrapers, s)\n}", "func (y *YggdrasilAdminAPI) AddPeer(uri string) ([]string, error) {\n\treq := fmt.Sprintf(`{\"keepalive\":true, \"request\":\"addpeer\", \"uri\":\"%s\"}`, uri)\n\tresp, err := y.execReq(req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tfmt.Println(string(resp.Response))\n\tadded := struct {\n\t\tAdded []string `json:\"added\"`\n\t}{}\n\tif err := json.Unmarshal(resp.Response, &added); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn added.Added, nil\n}", "func (s internalSelector) Add(reqs ...Requirement) Selector {\n\tret := make(internalSelector, 0, len(s)+len(reqs))\n\tret = append(ret, s...)\n\tret = append(ret, reqs...)\n\tsort.Sort(ByKey(ret))\n\treturn ret\n}", "func main() {\n\tsource := make(chan interface{}, 0)\n\n\t// flag.Parse()\n\tfmt.Fprintln(os.Stderr, \"starting up crawler\")\n\n\tresults := output.NewStorage()\n\n\tsteps := []pipeline.Handler{\n\t\tprocess.FetchUrl,\n\t\tprocess.ParseHTML,\n\t\tprocess.CompileNodeInfo,\n\t\tprocess.FilterLinks,\n\t}\n\n\tunique := pipeline.NewPipeline(source, 1, results.IsUnique, 
process.MaxDepth(5))\n\trest := pipeline.NewPipeline(unique.Output(), 4, steps...)\n\n\tpending := 0\n\tfor _, arg := range os.Args[1:] {\n\t\tURL, err := url.Parse(arg)\n\t\tif err == nil && (URL.Scheme == \"http\" || URL.Scheme == \"https\") {\n\t\t\tpending++\n\t\t\tif URL.Path == \"\" {\n\t\t\t\tURL.Path = \"/\"\n\t\t\t}\n\t\t\tsource <- URL.String()\n\t\t\tbreak\n\t\t}\n\t}\n\tif pending == 0 {\n\t\tfmt.Fprintln(os.Stderr, \"a valid http url was not provided\")\n\t\treturn\n\t}\n\n\thalt := make(chan os.Signal, 0)\n\tsignal.Notify(halt, os.Interrupt)\n\tfinish := make(chan interface{}, 0)\n\tgo func() {\n\t\t<-halt\n\t\tfmt.Fprintln(os.Stderr, \"waiting for current jobs to finish...\")\n\t\tclose(finish)\n\t}()\n\n\tfor pending > 0 {\n\t\tselect {\n\t\tcase <-unique.Err():\n\t\t\t// if we already have visited the link, we don't care about the error\n\t\tcase err := <-rest.Err():\n\t\t\t// other errors cause the program to exit, these could be closed connections etc.\n\t\t\tfmt.Fprintln(os.Stderr, \"unable to continue: \", err)\n\t\t\treturn\n\t\tcase out, open := <-rest.Output():\n\t\t\tif !open {\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tnewLinks := out.([]string)\n\t\t\tpending += len(newLinks)\n\t\t\t// we don't want to block the pipeline so we do this in a goroutine\n\t\t\tgo func() {\n\t\t\t\tfor _, link := range newLinks {\n\t\t\t\t\tselect {\n\t\t\t\t\tcase <-finish:\n\t\t\t\t\t\tpending--\n\t\t\t\t\tcase source <- link:\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}()\n\t\t}\n\t\tpending--\n\t}\n\tclose(source)\n\terr := results.Dump(\"dot\", os.Stdout)\n\tif err != nil {\n\t\tfmt.Fprintln(os.Stderr, \"Unable to write:\", err)\n\t}\n}", "func (w *Watcher) Add(sr *ldap.SearchRequest, c Checker) (Watch, error) {\n\twatch := Watch{\n\t\twatcher: w,\n\t\tsearchRequest: sr,\n\t\tchecker: c,\n\t\tdone: make(chan struct{}),\n\t}\n\tw.watches = append(w.watches, &watch)\n\treturn watch, nil\n}", "func (m *EdiscoverySearch) SetAdditionalSources(value []DataSourceable)() {\n 
m.additionalSources = value\n}", "func (h *History) Add(action []*Action) {\n\th.actions = append(h.actions, action)\n\th.head++\n}", "func (r *SyncParseQueue) AddSyncUrl(url string, dest string) {\n\n\tr.syncUrlsMutex.Lock()\n\tdefer r.syncUrlsMutex.Unlock()\n\tdefer func() {\n\t\tbelogs.Debug(\"AddSyncUrl():defer rpQueue.SyncingAndParsingCount:\", atomic.LoadInt64(&r.SyncingAndParsingCount))\n\t\tif atomic.LoadInt64(&r.SyncingAndParsingCount) == 0 {\n\t\t\tr.SyncAndParseEndChan <- SyncAndParseEndChan{}\n\t\t}\n\t}()\n\tbelogs.Debug(\"AddSyncUrl():url:\", url, \" dest:\", dest)\n\tif len(url) == 0 || len(dest) == 0 {\n\t\tbelogs.Error(\"AddSyncUrl():len(url) == 0 || len(dest) == 0, before SyncingAndParsingCount-1:\", atomic.LoadInt64(&r.SyncingAndParsingCount))\n\t\tatomic.AddInt64(&r.SyncingAndParsingCount, -1)\n\t\tbelogs.Debug(\"AddSyncUrl():len(url) == 0 || len(dest) == 0, after SyncingAndParsingCount-1:\", atomic.LoadInt64(&r.SyncingAndParsingCount))\n\t\treturn\n\t}\n\n\te := r.syncUrls.Front()\n\tfor e != nil {\n\t\tif strings.Contains(url, e.Value.(SyncChan).Url) {\n\t\t\tbelogs.Debug(\"AddSyncUrl():have existed:\", url, \" in \", e.Value.(SyncChan).Url,\n\t\t\t\t\" len(r.SyncChan):\", len(r.SyncChan))\n\t\t\tbelogs.Debug(\"AddSyncUrl():have existed, before SyncingAndParsingCount-1:\", atomic.LoadInt64(&r.SyncingAndParsingCount))\n\t\t\tatomic.AddInt64(&r.SyncingAndParsingCount, -1)\n\t\t\tbelogs.Debug(\"AddSyncUrl():have existed, after SyncingAndParsingCount-1:\", atomic.LoadInt64(&r.SyncingAndParsingCount))\n\t\t\treturn\n\t\t}\n\t\te = e.Next()\n\t}\n\n\tsyncChan := SyncChan{Url: url, Dest: dest}\n\te = r.syncUrls.PushBack(syncChan)\n\tbelogs.Info(\"AddSyncUrl():will send to syncChan:\", syncChan,\n\t\t\" len(syncUrls):\", r.syncUrls.Len())\n\tr.SyncChan <- syncChan\n\tbelogs.Debug(\"AddSyncUrl():after send to syncChan:\", syncChan,\n\t\t\" syncUrls:\", r.syncUrls)\n\treturn\n}", "func CrawlEachURLFound(url string, fetcher Fetcher, ch chan []string) 
{\n\tbody, urls, err := fetcher.Fetch(url)\n\tif err != nil {\n\t\tfmt.Println(err)\n\t} else {\n\t\tfmt.Printf(\"Found: %s %q\\n\", url, body)\n\t}\n\tch <- urls\n}", "func (m *MockDiscovery) Addrs(id peer.ID) []ma.Multiaddr {\n\treturn m.Peers[id]\n}", "func GetTotalURLs() (total int, err error) {\n\tclient := getClient()\n\tv, err := client.Get(\"TotalUrls\").Result()\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\ttotal, err = strconv.Atoi(v)\n\n\treturn\n}", "func AddHTTP(src string) string {\n\tre := regexp.MustCompile(\"^https?://\")\n\tif re.MatchString(src) {\n\t\treturn src\n\t}\n\n\treturn fmt.Sprintf(\"http://%s\", src)\n}", "func (r *HelpResolver) Add(target string) error {\n\tr.Lock()\n\tdefer r.Unlock()\n\n\tfor _, addr := range r.addrs {\n\t\tif addr == target {\n\t\t\treturn errors.New(\"target is existed\")\n\t\t}\n\t}\n\n\tupdates := []*naming.Update{&naming.Update{Op: naming.Add, Addr: target}}\n\tr.watcher.updatesChan <- updates\n\treturn nil\n}", "func (mr *MockHostMockRecorder) Addrs() *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"Addrs\", reflect.TypeOf((*MockHost)(nil).Addrs))\n}", "func (sc *SourceCreate) AddChildren(s ...*Source) *SourceCreate {\n\tids := make([]int, len(s))\n\tfor i := range s {\n\t\tids[i] = s[i].ID\n\t}\n\treturn sc.AddChildIDs(ids...)\n}", "func (prCtx *ParseResultContext) addParsedResults(source string, newResults []*ParseResult) {\n\tfor _, consistentResult := range prCtx.consistent {\n\t\tconsistentResult.processed = false\n\t}\n\n\tfor _, pr := range newResults {\n\t\tconsistentPr, ok := prCtx.consistent[pr.Id]\n\t\tif !ok {\n\t\t\t// This either already inconsistent result or an extra\n\t\t\t// this batch has an extra item, save it as a diff with (only so far) this source\n\t\t\tprCtx.addInconsistentResult(pr.Id, pr, source)\n\t\t\tcontinue\n\t\t}\n\t\tconsistentPr.processed = true\n\n\t\tok = diffChecks(consistentPr.CheckResult, pr.CheckResult) && 
diffRemediations(consistentPr.Remediations, pr.Remediations)\n\t\tif !ok {\n\t\t\t// remove the check from consistent, add it to diff, but TWICE\n\t\t\t// once for the sources from the consistent list and once for the new source\n\t\t\tprCtx.addInconsistentResult(pr.Id, &consistentPr.ParseResult, consistentPr.sources...)\n\t\t\tdelete(prCtx.consistent, pr.Id)\n\t\t\tprCtx.addInconsistentResult(pr.Id, pr, source)\n\t\t\tcontinue\n\t\t}\n\n\t\t// OK, same as a previous result in consistent, just append the source\n\t\tconsistentPr.sources = append(consistentPr.sources, source)\n\t}\n\n\t// Make sure all previously consistent items were touched, IOW we didn't receive\n\t// fewer items by moving all previously untouched items to the inconsistent list\n\tfor _, consistentResult := range prCtx.consistent {\n\t\tif consistentResult.processed == true {\n\t\t\tcontinue\n\t\t}\n\t\t// Deleting an item from a map while iterating over it is safe, see https://golang.org/doc/effective_go.html#for\n\t\tprCtx.addInconsistentResult(consistentResult.Id, &consistentResult.ParseResult, consistentResult.sources...)\n\t\tdelete(prCtx.consistent, consistentResult.Id)\n\t}\n}", "func (c *Closer) Add(closers ...io.Closer) {\n\tc.closers = append(c.closers, closers...)\n}", "func (h CRConfigHistoryThreadsafe) Add(i *CRConfigStat) {\n\th.m.Lock()\n\tdefer h.m.Unlock()\n\n\tif *h.length != 0 {\n\t\tlast := (*h.hist)[(*h.pos-1)%*h.limit]\n\t\tdatesEqual := (i.Stats.DateUnixSeconds == nil && last.Stats.DateUnixSeconds == nil) || (i.Stats.DateUnixSeconds != nil && last.Stats.DateUnixSeconds != nil && *i.Stats.DateUnixSeconds == *last.Stats.DateUnixSeconds)\n\t\tcdnsEqual := (i.Stats.CDNName == nil && last.Stats.CDNName == nil) || (i.Stats.CDNName != nil && last.Stats.CDNName != nil && *i.Stats.CDNName == *last.Stats.CDNName)\n\t\treqAddrsEqual := i.ReqAddr == last.ReqAddr\n\t\tif reqAddrsEqual && datesEqual && cdnsEqual {\n\t\t\treturn\n\t\t}\n\t}\n\n\t(*h.hist)[*h.pos] = *i\n\t*h.pos = (*h.pos 
+ 1) % *h.limit\n\tif *h.length < *h.limit {\n\t\t*h.length++\n\t}\n}", "func (ec *ErrorCollection) addError(err error) {\n\t\n\tif err == nil {\n\t\treturn\n\t}\n\t\n\tif ec.DuplicatationOptions != AllowDuplicates {\n\t\t//Don't append if err is a duplicate\n\t\tfor i, containedErr := range ec.Errors {\n\n\t\t\tvar je1 *JE\n\t\t\tvar je2 *JE\n\n\t\t\ts, ok := err.(JE)\n\t\t\tif ok {\n\t\t\t\tje1 = &s\n\t\t\t} else {\n\t\t\t\ts, ok := err.(*JE)\n\t\t\t\tif ok {\n\t\t\t\t\tje1 = s\n\t\t\t\t}\n\t\t\t}\n\n\t\t\t_, ok = containedErr.(JE)\n\t\t\tif ok {\n\t\t\t\tt := (ec.Errors[i]).(JE)\n\t\t\t\tje2 = &t\n\t\t\t} else {\n\t\t\t\t_, ok := containedErr.(*JE)\n\t\t\t\tif ok {\n\t\t\t\t\tje2 = (ec.Errors[i]).(*JE)\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tif je1 != nil && je2 != nil {\n\t\t\t\t//Don't use Reflection since both are JE structs\n\t\t\t\tif (*je1).Code == (*je2).Code && (*je1).Domain == (*je2).Domain && (*je1).error == (*je2).error && (*je1).message == (*je2).message {\n\t\t\t\t\tif ec.DuplicatationOptions == RejectDuplicates {\n\t\t\t\t\t\tif (*je1).time.Equal((*je2).time) {\n\t\t\t\t\t\t\t//Both JE structs are 100% identical including timestamp\n\t\t\t\t\t\t\treturn\n\t\t\t\t\t\t}\n\t\t\t\t\t} else {\n\t\t\t\t\t\t//We don't care about timestamps\n\t\t\t\t\t\treturn\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\t//Use Reflection\n\t\t\t\tif reflect.DeepEqual(containedErr, err) {\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\tec.Errors = append(ec.Errors, err)\n}", "func (fs *FileSystemWatch) Add(ms ...string) error {\n\tif fs.notifier == nil {\n\t\tfs.files = append(fs.files, ms...)\n\t\treturn nil\n\t}\n\n\tfs.files = append(fs.files, ms...)\n\n\tfor _, file := range ms {\n\t\tif err := fs.notifier.Add(file); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}", "func (pm *PoolManager) Add(ScrapeHost, ScrapePath, cacheScriptPath string) Pool {\n\tp := Pool{\n\t\tScrapeHost: ScrapeHost,\n\t\tScrapePath: ScrapePath,\n\t\tCacheScriptPath: 
cacheScriptPath,\n\t}\n\tpm.Pools = append(pm.Pools, p)\n\treturn p\n}" ]
[ "0.5309802", "0.52734095", "0.52264124", "0.5196098", "0.51324165", "0.5123398", "0.5056944", "0.496427", "0.49161023", "0.48829758", "0.48795125", "0.4875261", "0.48127764", "0.48127764", "0.48080775", "0.47849753", "0.4765869", "0.47221217", "0.47214398", "0.47201884", "0.47096452", "0.46895826", "0.4675403", "0.46564314", "0.4654779", "0.4653746", "0.4650115", "0.4638114", "0.4633953", "0.46239084", "0.46201015", "0.4606775", "0.46022898", "0.45853668", "0.45792556", "0.45748436", "0.45697913", "0.45678234", "0.4567757", "0.45554778", "0.4555303", "0.4541003", "0.45368978", "0.45353812", "0.45289403", "0.45092964", "0.448991", "0.4480876", "0.44708228", "0.44641632", "0.4453543", "0.44437236", "0.44415545", "0.44315085", "0.44297802", "0.4421511", "0.44206128", "0.44184056", "0.44171345", "0.44154152", "0.44140923", "0.44140923", "0.4410008", "0.44088596", "0.44068736", "0.44067925", "0.4398198", "0.4389686", "0.43884915", "0.43754396", "0.43638223", "0.43638223", "0.43638223", "0.43638223", "0.43638223", "0.43544486", "0.43536243", "0.43451083", "0.43285382", "0.4322499", "0.43134764", "0.43131253", "0.4308513", "0.4306928", "0.42859128", "0.42849472", "0.4284435", "0.42826417", "0.4271841", "0.4271731", "0.42658633", "0.42613795", "0.42565775", "0.42537677", "0.42500865", "0.42478463", "0.4225554", "0.4220542", "0.42124784", "0.42116183" ]
0.7516235
0
getSourcegraphVersion queries the Sourcegraph GraphQL API to get the current version of the Sourcegraph instance.
getSourcegraphVersion запрашивает GraphQL-интерфейс Sourcegraph, чтобы получить текущую версию экземпляра Sourcegraph.
func (svc *Service) getSourcegraphVersion(ctx context.Context) (string, error) { var result struct { Site struct { ProductVersion string } } ok, err := svc.client.NewQuery(sourcegraphVersionQuery).Do(ctx, &result) if err != nil || !ok { return "", err } return result.Site.ProductVersion, err }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (obj *Edge) GetVersion() int {\n\treturn obj.getVersion()\n}", "func GetVersion() string {\n\treturn version\n}", "func GetVersion() string {\n\treturn version\n}", "func (c *Context) GetVersion() string { // 获取版本号\n\treturn c.GetGinCtx().Param(\"version\")\n}", "func (o VirtualDatabaseSpecBuildSourceOutput) Version() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v VirtualDatabaseSpecBuildSource) *string { return v.Version }).(pulumi.StringPtrOutput)\n}", "func (a *BaseAggregateSourced) GetVersion() int {\n\treturn a.Version\n}", "func GetVersion() string {\n\treturn version.GetVersion()\n}", "func getVersion(agentInstall DotNetAgentInstall) (result tasks.Result) {\n\n\tagentVersion, err := tasks.GetFileVersion(agentInstall.AgentPath)\n\n\tif err != nil {\n\t\tresult.Status = tasks.Error\n\t\tresult.Summary = \"Error finding .Net Agent version\"\n\t\tlog.Info(\"Error finding .Net Agent version. The error is \", err)\n\t\treturn result\n\t}\n\n\tresult.Status = tasks.Info\n\tresult.Summary = agentVersion\n\tresult.Payload = agentVersion\n\treturn result\n\n}", "func (_Bridge *BridgeCallerSession) GetVersion() (string, error) {\n\treturn _Bridge.Contract.GetVersion(&_Bridge.CallOpts)\n}", "func SourceVersion() string {\n\treturn fmt.Sprintf(\"%s commit: %s / nearest-git-\"+\n\t\t\"tag: %s / branch: %s / %s\\n\",\n\t\tProgramName, LAST_GIT_COMMIT_HASH,\n\t\tNEAREST_GIT_TAG, GIT_BRANCH, GO_VERSION)\n}", "func GetVersion() string {\n\treturn Version\n}", "func GetVersion() string {\n\treturn Version\n}", "func (o VirtualDatabaseSpecBuildSourcePtrOutput) Version() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v *VirtualDatabaseSpecBuildSource) *string {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn v.Version\n\t}).(pulumi.StringPtrOutput)\n}", "func CurrentSourceVersion() string {\n\tif environ.HasValue(\"SOURCE_VERSION_OVERRIDE\") {\n\t\treturn environ.GetValueStr(\"SOURCE_VERSION_OVERRIDE\")\n\t}\n\n\tmanifestPath := path.Join(RootDir(), 
\"src\", \"appengine\", \"resources\", \"clusterfuzz-source.manifest\")\n\tresult, err := ioutil.ReadFile(manifestPath)\n\n\tif err != nil {\n\t\tlogs.Panicf(\"Failed to get current source version: %v\", err)\n\t}\n\n\treturn string(result)\n}", "func (u UserInviteCodeServiceServer) GetVersion(_ context.Context, _ *rfpb.VersionReq) (*rfpb.VersionRes, error) {\n\n\t_handleSimLoadLatency()\n\treturn &rfpb.VersionRes{Version: fmt.Sprintf(\"v%s\", metaServiceVersion)}, nil\n}", "func GetVersion() string {\n\treturn VersionString\n}", "func (_Bridge *BridgeCaller) GetVersion(opts *bind.CallOpts) (string, error) {\n\tvar (\n\t\tret0 = new(string)\n\t)\n\tout := ret0\n\terr := _Bridge.contract.Call(opts, out, \"getVersion\")\n\treturn *ret0, err\n}", "func (_Bridge *BridgeSession) GetVersion() (string, error) {\n\treturn _Bridge.Contract.GetVersion(&_Bridge.CallOpts)\n}", "func getVersion() string {\n\tif metadata == \"\" {\n\t\treturn version\n\t}\n\treturn version + \"-\" + metadata\n}", "func (m *GraphDef) GetVersion() int32 {\n\tif m != nil {\n\t\treturn m.Version\n\t}\n\treturn 0\n}", "func Version() string {\n\treturn getData().Version\n}", "func (m *SynchronizationSchema) GetVersion()(*string) {\n val, err := m.GetBackingStore().Get(\"version\")\n if err != nil {\n panic(err)\n }\n if val != nil {\n return val.(*string)\n }\n return nil\n}", "func GetVersion() string {\n\tif metadata == \"\" {\n\t\treturn version\n\t}\n\treturn version + \"+\" + metadata\n}", "func (_PlasmaFramework *PlasmaFrameworkCallerSession) GetVersion() (string, error) {\n\treturn _PlasmaFramework.Contract.GetVersion(&_PlasmaFramework.CallOpts)\n}", "func getVersion(driver *neo4j.Driver) (Version, error) {\n\tversion := Version{}\n\tsession := (*driver).NewSession(neo4j.SessionConfig{})\n\tdefer session.Close()\n\n\tresult, err := session.Run(VERSION_QUERY, nil)\n\tif err != nil {\n\t\treturn version, nil\n\t}\n\n\trecord, err := result.Single()\n\tif err != nil {\n\t\treturn version, 
nil\n\t}\n\n\tval, found := record.Get(\"version\")\n\tif !found {\n\t\treturn version, errors.New(\"couldn't find 'version' in query results\")\n\t}\n\tdata, ok := val.([]interface{})\n\tif !ok {\n\t\treturn version, errors.New(\"'version' isn't an array\")\n\t}\n\tif len(data) < 2 {\n\t\treturn version, errors.New(\"'version' array is empty or too small\")\n\t}\n\n\tval, found = record.Get(\"extra\")\n\tif !found {\n\t\treturn version, errors.New(\"couldn't find 'extra' version info\")\n\t}\n\textra, ok := val.(string)\n\tif !ok {\n\t\treturn version, errors.New(\"'extra' value isn't a string\")\n\t}\n\n\t// yolo for now\n\tversion.Major = uint8(data[0].(int64))\n\tversion.Minor = uint8(data[1].(int64))\n\n\tif len(data) > 2 {\n\t\tversion.Patch = uint8(data[2].(int64))\n\t}\n\tversion.Extra = extra\n\n\treturn version, nil\n}", "func (o *VirtualizationIweHost) GetVersion() string {\n\tif o == nil || o.Version == nil {\n\t\tvar ret string\n\t\treturn ret\n\t}\n\treturn *o.Version\n}", "func (_PlasmaFramework *PlasmaFrameworkCaller) GetVersion(opts *bind.CallOpts) (string, error) {\n\tvar (\n\t\tret0 = new(string)\n\t)\n\tout := ret0\n\terr := _PlasmaFramework.contract.Call(opts, out, \"getVersion\")\n\treturn *ret0, err\n}", "func (f *Features) getVersion(ctx context.Context, adminDB *mongo.Database) {\n\tcmd := bson.D{\n\t\t{\n\t\t\tKey: \"buildInfo\",\n\t\t\tValue: 1,\n\t\t},\n\t}\n\tvar result buildInfo\n\terr := adminDB.RunCommand(ctx, cmd).Decode(&result)\n\tif err != nil {\n\t\tf.MongoVersion = &semver.Version{}\n\t\treturn\n\t}\n\n\tf.MongoVersion = semver.MustParse(result.Version)\n}", "func (pr LocalPackageReference) GeneratorVersion() string {\n\treturn pr.generatorVersion\n}", "func (c *Connection) Version(ctx context.Context) (string, error) {\n\tresp, err := c.Request(ctx).\n\t\tSetResult(&api.VersionResponse{}).\n\t\tGet(\"/version\")\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn resp.Result().(*api.VersionResponse).Version, nil\n}", "func 
(s *Store) GetVersion(ctx context.Context) (string, error) {\n\t// We treat the existence of cells_subscriptions as running on the initial\n\t// version, 1.0.0\n\tconst query = `\n\t\tSELECT EXISTS (\n \t\tSELECT *\n\t\t FROM information_schema.tables \n \t\tWHERE table_name = 'cells_subscriptions'\n )`\n\trow := s.db.QueryRowContext(ctx, query)\n\tvar ret bool\n\terr := row.Scan(&ret)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tif ret {\n\t\t// Base version\n\t\treturn \"v1.0.0\", nil\n\t}\n\t// Version without cells joins table.\n\t// TODO: leverage proper migrations and use something like the query below.\n\treturn \"v2.0.0\", nil\n}", "func (_PlasmaFramework *PlasmaFrameworkSession) GetVersion() (string, error) {\n\treturn _PlasmaFramework.Contract.GetVersion(&_PlasmaFramework.CallOpts)\n}", "func GetVersion() string {\n\treturn __VERSION__\n}", "func (c *Container) GetVersion(ctx echo.Context) error {\n tabletServersFuture := make(chan helpers.TabletServersFuture)\n go helpers.GetTabletServersFuture(helpers.HOST, tabletServersFuture)\n\n // Get response from tabletServersFuture\n tabletServersResponse := <-tabletServersFuture\n if tabletServersResponse.Error != nil {\n return ctx.String(http.StatusInternalServerError,\n tabletServersResponse.Error.Error())\n }\n nodeList := helpers.GetNodesList(tabletServersResponse)\n versionInfoFutures := []chan helpers.VersionInfoFuture{}\n for _, nodeHost := range nodeList {\n versionInfoFuture := make(chan helpers.VersionInfoFuture)\n versionInfoFutures = append(versionInfoFutures, versionInfoFuture)\n go helpers.GetVersionFuture(nodeHost, versionInfoFuture)\n }\n smallestVersion := helpers.GetSmallestVersion(versionInfoFutures)\n return ctx.JSON(http.StatusOK, models.VersionInfo{\n Version: smallestVersion,\n })\n}", "func GetVersion() string {\n\tif len(Version) == 0 {\n\t\treturn \"dev\"\n\t}\n\treturn Version\n}", "func GetVersion() string {\n\tif len(Version) == 0 {\n\t\treturn \"dev\"\n\t}\n\treturn 
Version\n}", "func (d TinkDB) GetWorkflowDataVersion(ctx context.Context, workflowID string) (int32, error) {\n\treturn getLatestVersionWfData(ctx, d.instance, workflowID)\n}", "func (o *MicrosoftGraphSharedPcConfiguration) GetVersion() int32 {\n\tif o == nil || o.Version == nil {\n\t\tvar ret int32\n\t\treturn ret\n\t}\n\treturn *o.Version\n}", "func GetVersion(c *gin.Context) {\n\tc.JSON(http.StatusOK, gin.H{\n\t\t\"version\": Version,\n\t\t\"merchant_name\": MerchantName,\n\t})\n}", "func (dataChannel *DataChannel) GetAgentVersion() string {\n\treturn dataChannel.agentVersion\n}", "func Version(node client.ABCIClient) (\n\tstring, *rpctypes.ResultABCIQuery, error,\n) {\n\t// perform the query\n\tres, err := node.ABCIQuery(query.VersionEndpoint, []byte{})\n\tif err != nil {\n\t\treturn \"\", res, err\n\t}\n\treturn string(res.Response.GetValue()), res, err\n}", "func (o InstanceS3ImportOutput) SourceEngineVersion() pulumi.StringOutput {\n\treturn o.ApplyT(func(v InstanceS3Import) string { return v.SourceEngineVersion }).(pulumi.StringOutput)\n}", "func (r Source) GetAPIVersion() string {\n\treturn r.APIVersion\n}", "func Version() string {\n\t// TODO: Implement version tracking\n\treturn \"0.0.1\"\n}", "func (store *GSStore) GetVersion() int {\n\treturn store.Version\n}", "func (m *GroupPolicyDefinition) GetVersion()(*string) {\n val, err := m.GetBackingStore().Get(\"version\")\n if err != nil {\n panic(err)\n }\n if val != nil {\n return val.(*string)\n }\n return nil\n}", "func GetCurrentUsingVersion() (*string, error) {\n\tdenoFilepath := getInstalledDenoFilepath()\n\n\tif denoFilepath == \"\" {\n\t\treturn nil, nil\n\t}\n\n\targs := []string{\"--version\"}\n\tcmd := exec.Command(denoFilepath, args...)\n\n\toutput, err := cmd.CombinedOutput()\n\n\tif err != nil {\n\t\treturn nil, errors.Wrapf(err, \"`deno --version` failed\\n%s\", string(output))\n\t}\n\n\tif cmd.ProcessState.ExitCode() != 0 {\n\t\treturn nil, errors.New(string(output))\n\t}\n\n\tarr := 
strings.Split(strings.Split(string(output), \"\\n\")[0], \" \")\n\n\tversion := strings.TrimSpace(\"v\" + strings.TrimSpace(arr[1]))\n\n\treturn &version, nil\n}", "func (o *SoftwareTechs) GetVersion() string {\n\tif o == nil || o.Version == nil {\n\t\tvar ret string\n\t\treturn ret\n\t}\n\treturn *o.Version\n}", "func GetImageVersion(image string) (string, error) {\n\treturn getImageLabel(image, alterVersionLabelKey)\n}", "func (o ClusterS3ImportOutput) SourceEngineVersion() pulumi.StringOutput {\n\treturn o.ApplyT(func(v ClusterS3Import) string { return v.SourceEngineVersion }).(pulumi.StringOutput)\n}", "func (d Dispatcher) Version() (string, error) {\n\theight, err := d.GetBC().GetLatestHeight()\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\thashes, err := d.GetBC().GetBlockHashesHex()\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tversionBytes, err := helpers.Serialize(NewVersion(GizoVersion, height, hashes))\n\treturn string(versionBytes), nil\n}", "func Version() string {\n\treturn version\n}", "func Version() string {\n\treturn version\n}", "func Version() string {\n\treturn version\n}", "func Version() string {\n\treturn version\n}", "func Version() string {\n\treturn version\n}", "func Version() string {\n\treturn version\n}", "func Version() string {\n\treturn version\n}", "func Version() string {\n\treturn version\n}", "func Version() string {\n\treturn versionNumber\n}", "func (p OpenFlow10Protocol) GetVersion() uint8 {\n\treturn goloxi.VERSION_1_0\n}", "func GitVersion() string { return gitVersion }", "func (o InstanceS3ImportPtrOutput) SourceEngineVersion() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v *InstanceS3Import) *string {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn &v.SourceEngineVersion\n\t}).(pulumi.StringPtrOutput)\n}", "func (api *API) Version(ctx context.Context) (string, error) {\n\taddr := api.host + \":\" + api.port\n\treturn version(addr)\n}", "func (o ClusterS3ImportPtrOutput) SourceEngineVersion() 
pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v *ClusterS3Import) *string {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn &v.SourceEngineVersion\n\t}).(pulumi.StringPtrOutput)\n}", "func (m *DeviceManagementConfigurationSettingDefinition) GetVersion()(*string) {\n val, err := m.GetBackingStore().Get(\"version\")\n if err != nil {\n panic(err)\n }\n if val != nil {\n return val.(*string)\n }\n return nil\n}", "func (o *LoraNetworkTrigger) GetVersion() int32 {\n\tif o == nil {\n\t\tvar ret int32\n\t\treturn ret\n\t}\n\n\treturn o.Version\n}", "func (c *ServerConfig) GetVersionEndpoint() string {\n\tnurl := *c.ParsedEndpoint\n\tnurl.Path = path.Join(nurl.Path, c.APIPaths.Version)\n\treturn nurl.String()\n}", "func (o ApplicationStatusWorkflowContextbackendOutput) ResourceVersion() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v ApplicationStatusWorkflowContextbackend) *string { return v.ResourceVersion }).(pulumi.StringPtrOutput)\n}", "func (o *ConnectorTypeAllOf) GetVersion() string {\n\tif o == nil || o.Version == nil {\n\t\tvar ret string\n\t\treturn ret\n\t}\n\treturn *o.Version\n}", "func (o *ConnectorTypeAllOf) GetVersion() string {\n\tif o == nil || o.Version == nil {\n\t\tvar ret string\n\t\treturn ret\n\t}\n\treturn *o.Version\n}", "func (self PostgresDatabase) getDBVersion() (version int) {\n var val string\n var vers int64\n err := self.conn.QueryRow(\"SELECT value FROM Settings WHERE name = $1\", \"version\").Scan(&val)\n if err == nil {\n vers, err = strconv.ParseInt(val, 10, 32)\n if err == nil {\n version = int(vers)\n } else {\n log.Fatal(\"cannot figure out db version\", err)\n }\n } else {\n version = -1\n }\n return\n}", "func (r *Resolver) Version() VersionResolver { return &versionResolver{r} }", "func (o *SchemaDefinitionRestDto) GetVersion() string {\n\tif o == nil || o.Version == nil {\n\t\tvar ret string\n\t\treturn ret\n\t}\n\treturn *o.Version\n}", "func (o *Version) GetVersion() string {\n\tif o == nil || o.Version == nil 
{\n\t\tvar ret string\n\t\treturn ret\n\t}\n\treturn *o.Version\n}", "func (o *Ga4ghExternalIdentifier) GetVersion() string {\n\tif o == nil || o.Version == nil {\n\t\tvar ret string\n\t\treturn ret\n\t}\n\treturn *o.Version\n}", "func (o ApplicationStatusWorkflowContextbackendPtrOutput) ResourceVersion() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v *ApplicationStatusWorkflowContextbackend) *string {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn v.ResourceVersion\n\t}).(pulumi.StringPtrOutput)\n}", "func GetVersion() string {\n\tv := Map[\"version\"]\n\treturn v\n}", "func Version() (version string) {\n\treturn GetVersion()\n}", "func (p Plugin) GetVersion() string {\n\treturn \"v0.0.0\"\n}", "func getVersion() string {\n\tslurp, err := ioutil.ReadFile(filepath.Join(camRoot, \"VERSION\"))\n\tif err == nil {\n\t\treturn strings.TrimSpace(string(slurp))\n\t}\n\treturn gitVersion()\n}", "func (r *SoftwareVolumeResource) GetVersion() (string, error) {\n\tvar list SoftwareVolumeConfigList\n\tif err := r.c.ReadQuery(BasePath+SoftwareVolumeEndpoint, &list); err != nil {\n\t\treturn \"\", err\n\t}\n\n\tvar version string\n\n\tfor _, vol := range list.Items {\n\t\tif vol.Active == true {\n\t\t\tversion = vol.Version + \" \" + vol.Build\n\t\t}\n\t}\n\n\treturn version, nil\n}", "func (o *SoftwarerepositoryCategoryMapper) GetVersion() string {\n\tif o == nil || o.Version == nil {\n\t\tvar ret string\n\t\treturn ret\n\t}\n\treturn *o.Version\n}", "func (terraformSource Source) EncodeSourceVersion() (string, error) {\n\tif IsLocalSource(terraformSource.CanonicalSourceURL) {\n\t\tsourceHash := sha256.New()\n\t\tsourceDir := filepath.Clean(terraformSource.CanonicalSourceURL.Path)\n\n\t\terr := filepath.Walk(sourceDir, func(path string, info os.FileInfo, err error) error {\n\t\t\tif err != nil {\n\t\t\t\t// If we've encountered an error while walking the tree, give up\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\tif info.IsDir() {\n\t\t\t\t// We don't use any info from 
directories to calculate our hash\n\t\t\t\treturn nil\n\t\t\t}\n\t\t\t// avoid checking files in .terragrunt-cache directory since contents is auto-generated\n\t\t\tif strings.Contains(path, util.TerragruntCacheDir) {\n\t\t\t\treturn nil\n\t\t\t}\n\t\t\t// avoid checking files in .terraform directory since contents is auto-generated\n\t\t\tif info.Name() == util.TerraformLockFile {\n\t\t\t\treturn nil\n\t\t\t}\n\n\t\t\tfileModified := info.ModTime().UnixMicro()\n\t\t\thashContents := fmt.Sprintf(\"%s:%d\", path, fileModified)\n\t\t\tsourceHash.Write([]byte(hashContents))\n\n\t\t\treturn nil\n\t\t})\n\n\t\tif err == nil {\n\t\t\thash := fmt.Sprintf(\"%x\", sourceHash.Sum(nil))\n\n\t\t\treturn hash, nil\n\t\t}\n\n\t\tterraformSource.Logger.WithError(err).Warningf(\"Could not encode version for local source\")\n\t\treturn \"\", err\n\t}\n\n\treturn util.EncodeBase64Sha1(terraformSource.CanonicalSourceURL.Query().Encode()), nil\n}", "func (o *TeamConfiguration) GetVersion() string {\n\tif o == nil {\n\t\tvar ret string\n\t\treturn ret\n\t}\n\n\treturn o.Version\n}", "func (s *Structured) GetVersion() string {\n\treturn s.cloudEvent.CloudEventsVersion\n}", "func (o *DeviceResourceVersionValueWeb) GetVersion() string {\n\tif o == nil || o.Version == nil {\n\t\tvar ret string\n\t\treturn ret\n\t}\n\treturn *o.Version\n}", "func (pr LocalPackageReference) Version() string {\n\treturn pr.version\n}", "func (v *VersionSelector) getUpgradeVersion() (string, string, error) {\n\tvar selectedVersionSelector upgradeselectors.Interface = nil\n\n\tcurPriority := math.MinInt32\n\n\tversionSelectors := upgradeselectors.GetVersionSelectors()\n\n\tfor _, versionSelector := range versionSelectors {\n\t\tif versionSelector.ShouldUse() && versionSelector.Priority() > curPriority {\n\t\t\tselectedVersionSelector = versionSelector\n\t\t\tcurPriority = versionSelector.Priority()\n\t\t}\n\t}\n\n\t// If no version selector has been found for an upgrade, assume that an upgrade is not being 
asked for.\n\tif selectedVersionSelector == nil {\n\t\treturn \"\", \"\", nil\n\t}\n\n\trelease, selector, err := selectedVersionSelector.SelectVersion(spi.NewVersionBuilder().Version(v.clusterVersion).Build(), v.versionList)\n\n\tif release == nil || release.Version().Original() == \"\" {\n\t\tif err != nil {\n\t\t\tlog.Printf(\"Error selecting version: %s\", err.Error())\n\t\t}\n\t\treturn util.NoVersionFound, \"\", err\n\t}\n\n\topenshiftRelease := fmt.Sprintf(\"openshift-v%s\", release.Version().Original())\n\n\tlog.Printf(\"Selected %s using selector `%s`\", openshiftRelease, selector)\n\n\treturn openshiftRelease, \"\", err\n}", "func (native *OpenGL) GetShadingLanguageVersion() string {\n\treturn gl.GoStr(gl.GetString(gl.SHADING_LANGUAGE_VERSION))\n}", "func (o *NetworkElementSummaryAllOf) GetVersion() string {\n\tif o == nil || o.Version == nil {\n\t\tvar ret string\n\t\treturn ret\n\t}\n\treturn *o.Version\n}", "func detectClientVersion(gql *graphql.Client) (clientVersion, error) {\n\thandlesOwnerId, err := orbQueryHandleOwnerId(gql)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tif !handlesOwnerId {\n\t\treturn v1_string, nil\n\t}\n\treturn v2_string, nil\n}", "func (e *Event) GetVersion() string {\n\treturn e.BaseEvent.Version\n}", "func (_PlasmaFramework *PlasmaFrameworkCallerSession) Version() (string, error) {\n\treturn _PlasmaFramework.Contract.Version(&_PlasmaFramework.CallOpts)\n}", "func (s *AzsbSource) GetGroupVersionKind() schema.GroupVersionKind {\n\treturn SchemeGroupVersion.WithKind(\"AzsbSource\")\n}", "func Version() string {\n\treturn \"1.0.6\"\n}", "func (vdb *VspDatabase) Version() (uint32, error) {\n\tvar version uint32\n\terr := vdb.db.View(func(tx *bolt.Tx) error {\n\t\tbytes := tx.Bucket(vspBktK).Get(versionK)\n\t\tversion = bytesToUint32(bytes)\n\t\treturn nil\n\t})\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\n\treturn version, nil\n}", "func (l *Libvirt) ConnectGetVersion() (rHvVer uint64, err error) {\n\tvar buf []byte\n\n\tvar 
r response\n\tr, err = l.requestStream(4, constants.Program, buf, nil, nil)\n\tif err != nil {\n\t\treturn\n\t}\n\n\t// Return value unmarshaling\n\ttpd := typedParamDecoder{}\n\tct := map[string]xdr.TypeDecoder{\"libvirt.TypedParam\": tpd}\n\trdr := bytes.NewReader(r.Payload)\n\tdec := xdr.NewDecoderCustomTypes(rdr, 0, ct)\n\t// HvVer: uint64\n\t_, err = dec.Decode(&rHvVer)\n\tif err != nil {\n\t\treturn\n\t}\n\n\treturn\n}", "func (c *Client) GetVersion(ctx context.Context) (rpc.GetVersionResult, error) {\n\tres, err := c.RpcClient.GetVersion(ctx)\n\terr = checkRpcResult(res.GeneralResponse, err)\n\tif err != nil {\n\t\treturn rpc.GetVersionResult{}, err\n\t}\n\treturn res.Result, nil\n}", "func (db *DB) Version() (string, error) {\n\tvar v string\n\tif _, err := db.QueryOne(pg.Scan(&v), \"select version()\"); err != nil {\n\t\treturn \"\", err\n\t}\n\n\treturn v, nil\n}" ]
[ "0.5781868", "0.5636606", "0.5636606", "0.5576799", "0.55693847", "0.5562794", "0.55050653", "0.549268", "0.54771435", "0.5471268", "0.5450064", "0.5450064", "0.5440955", "0.5429904", "0.541623", "0.5398498", "0.5388633", "0.53843856", "0.53595656", "0.5346013", "0.53183573", "0.5314673", "0.52918404", "0.52900976", "0.5273753", "0.52654076", "0.52536184", "0.524013", "0.5227394", "0.52224934", "0.5217413", "0.5206825", "0.51875746", "0.517901", "0.51703006", "0.51703006", "0.5136207", "0.5130812", "0.5127151", "0.51125", "0.5106135", "0.50981164", "0.50977176", "0.5088438", "0.50795656", "0.5071805", "0.50662947", "0.5063983", "0.50634223", "0.5062896", "0.5058294", "0.50548035", "0.50548035", "0.50548035", "0.50548035", "0.50548035", "0.50548035", "0.50548035", "0.50548035", "0.5053434", "0.5049862", "0.50472075", "0.5035477", "0.5031258", "0.502338", "0.5020306", "0.50062615", "0.5000556", "0.49989983", "0.49975413", "0.49975413", "0.49969625", "0.49957564", "0.49889177", "0.49723837", "0.49667087", "0.49591374", "0.4958394", "0.49517083", "0.49514994", "0.49500775", "0.49481383", "0.4939891", "0.49380833", "0.49339134", "0.49336314", "0.4931089", "0.4927297", "0.49255714", "0.49226543", "0.49190333", "0.4915234", "0.4915011", "0.49137366", "0.49108097", "0.49107507", "0.49097213", "0.4904745", "0.49013713", "0.49012962" ]
0.80999446
0
DetermineFeatureFlags fetches the version of the configured Sourcegraph instance and then sets flags on the Service itself to use features available in that version, e.g. gzip compression.
DetermineFeatureFlags получает версию настроенной инстанции Sourcegraph и затем устанавливает флаги на самом Service для использования функций, доступных в этой версии, например, сжатия gzip.
func (svc *Service) DetermineFeatureFlags(ctx context.Context) error { version, err := svc.getSourcegraphVersion(ctx) if err != nil { return errors.Wrap(err, "failed to query Sourcegraph version to check for available features") } return svc.features.setFromVersion(version) }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func InitFeatureFlags(flag *pflag.FlagSet) {\n\tflag.Bool(FeatureFlagAccessCode, false, \"Flag (bool) to enable requires-access-code\")\n\tflag.Bool(FeatureFlagRoleBasedAuth, false, \"Flag (bool) to enable role-based-auth\")\n\tflag.Bool(FeatureFlagConvertPPMsToGHC, false, \"Flag (bool) to enable convert-ppms-to-ghc\")\n}", "func (o *Options) AddFlags(fs *pflag.FlagSet) {\n\tfs.BoolVar(&o.InstallCRDs, \"install-crds\", true, \"install the CRDs used by the controller as part of startup\")\n\tfs.Var(flagutil.NewMapStringBool(&o.FeatureGates), \"feature-gates\", \"A set of key=value pairs that describe feature gates for alpha/experimental features. \"+\n\t\t\"Options are:\\n\"+strings.Join(utilfeature.DefaultFeatureGate.KnownFeatures(), \"\\n\"))\n\n\tfs.StringVar(&o.Config.FederationNamespace, \"federation-namespace\", util.DefaultFederationSystemNamespace, \"The namespace the federation control plane is deployed in.\")\n\tfs.StringVar(&o.Config.ClusterNamespace, \"registry-namespace\", util.MulticlusterPublicNamespace, \"The cluster registry namespace.\")\n\tfs.DurationVar(&o.Config.ClusterAvailableDelay, \"cluster-available-delay\", util.DefaultClusterAvailableDelay, \"Time to wait before reconciling on a healthy cluster.\")\n\tfs.DurationVar(&o.Config.ClusterUnavailableDelay, \"cluster-unavailable-delay\", util.DefaultClusterUnavailableDelay, \"Time to wait before giving up on an unhealthy cluster.\")\n\n\tfs.BoolVar(&o.LimitedScope, \"limited-scope\", false, \"Whether the federation namespace will be the only target for federation.\")\n\tfs.DurationVar(&o.ClusterMonitorPeriod, \"cluster-monitor-period\", time.Second*40, \"How often to monitor the cluster health\")\n\n\tfs.DurationVar(&o.LeaderElection.LeaseDuration, \"leader-elect-lease-duration\", util.DefaultLeaderElectionLeaseDuration, \"\"+\n\t\t\"The duration that non-leader candidates will wait after observing a leadership \"+\n\t\t\"renewal until attempting to acquire leadership of a led but unrenewed 
leader \"+\n\t\t\"slot. This is effectively the maximum duration that a leader can be stopped \"+\n\t\t\"before it is replaced by another candidate. This is only applicable if leader \"+\n\t\t\"election is enabled.\")\n\tfs.DurationVar(&o.LeaderElection.RenewDeadline, \"leader-elect-renew-deadline\", util.DefaultLeaderElectionRenewDeadline, \"\"+\n\t\t\"The interval between attempts by the acting master to renew a leadership slot \"+\n\t\t\"before it stops leading. This must be less than or equal to the lease duration. \"+\n\t\t\"This is only applicable if leader election is enabled.\")\n\tfs.DurationVar(&o.LeaderElection.RetryPeriod, \"leader-elect-retry-period\", util.DefaultLeaderElectionRetryPeriod, \"\"+\n\t\t\"The duration the clients should wait between attempting acquisition and renewal \"+\n\t\t\"of a leadership. This is only applicable if leader election is enabled.\")\n\tfs.StringVar(&o.LeaderElection.ResourceLock, \"leader-elect-resource-lock\", \"configmaps\", \"\"+\n\t\t\"The type of resource object that is used for locking during \"+\n\t\t\"leader election. 
Supported options are `configmaps` (default) and `endpoints`.\")\n}", "func (a *AdminApiService) GetAllFeatureFlags(ctx _context.Context) (FeatureFlag, *_nethttp.Response, error) {\n\tvar (\n\t\tlocalVarHTTPMethod = _nethttp.MethodGet\n\t\tlocalVarPostBody interface{}\n\t\tlocalVarFormFileName string\n\t\tlocalVarFileName string\n\t\tlocalVarFileBytes []byte\n\t\tlocalVarReturnValue FeatureFlag\n\t)\n\n\t// create path and map variables\n\tlocalVarPath := a.client.cfg.BasePath + \"/admin/feature-flag\"\n\tlocalVarHeaderParams := make(map[string]string)\n\tlocalVarQueryParams := _neturl.Values{}\n\tlocalVarFormParams := _neturl.Values{}\n\n\t// to determine the Content-Type header\n\tlocalVarHTTPContentTypes := []string{}\n\n\t// set Content-Type header\n\tlocalVarHTTPContentType := selectHeaderContentType(localVarHTTPContentTypes)\n\tif localVarHTTPContentType != \"\" {\n\t\tlocalVarHeaderParams[\"Content-Type\"] = localVarHTTPContentType\n\t}\n\n\t// to determine the Accept header\n\tlocalVarHTTPHeaderAccepts := []string{\"application/json\"}\n\n\t// set Accept header\n\tlocalVarHTTPHeaderAccept := selectHeaderAccept(localVarHTTPHeaderAccepts)\n\tif localVarHTTPHeaderAccept != \"\" {\n\t\tlocalVarHeaderParams[\"Accept\"] = localVarHTTPHeaderAccept\n\t}\n\tr, err := a.client.prepareRequest(ctx, localVarPath, localVarHTTPMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFormFileName, localVarFileName, localVarFileBytes)\n\tif err != nil {\n\t\treturn localVarReturnValue, nil, err\n\t}\n\n\tlocalVarHTTPResponse, err := a.client.callAPI(r)\n\tif err != nil || localVarHTTPResponse == nil {\n\t\treturn localVarReturnValue, localVarHTTPResponse, err\n\t}\n\n\tlocalVarBody, err := _ioutil.ReadAll(localVarHTTPResponse.Body)\n\tlocalVarHTTPResponse.Body.Close()\n\tif err != nil {\n\t\treturn localVarReturnValue, localVarHTTPResponse, err\n\t}\n\n\tif localVarHTTPResponse.StatusCode >= 300 {\n\t\tnewErr := 
GenericOpenAPIError{\n\t\t\tbody: localVarBody,\n\t\t\terror: localVarHTTPResponse.Status,\n\t\t}\n\t\tif localVarHTTPResponse.StatusCode == 401 {\n\t\t\tvar v Error\n\t\t\terr = a.client.decode(&v, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t\t\t}\n\t\t\tnewErr.model = v\n\t\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t\t}\n\t\tif localVarHTTPResponse.StatusCode == 403 {\n\t\t\tvar v Error\n\t\t\terr = a.client.decode(&v, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t\t\t}\n\t\t\tnewErr.model = v\n\t\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t\t}\n\t\tif localVarHTTPResponse.StatusCode == 404 {\n\t\t\tvar v Error\n\t\t\terr = a.client.decode(&v, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t\t\t}\n\t\t\tnewErr.model = v\n\t\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t\t}\n\t\tif localVarHTTPResponse.StatusCode == 500 {\n\t\t\tvar v Error\n\t\t\terr = a.client.decode(&v, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t\t\t}\n\t\t\tnewErr.model = v\n\t\t}\n\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t}\n\n\terr = a.client.decode(&localVarReturnValue, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\tif err != nil {\n\t\tnewErr := GenericOpenAPIError{\n\t\t\tbody: localVarBody,\n\t\t\terror: err.Error(),\n\t\t}\n\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t}\n\n\treturn 
localVarReturnValue, localVarHTTPResponse, nil\n}", "func InitFlags(fs *pflag.FlagSet) {\n\tfs.StringVar(&metricsBindAddr, \"metrics-bind-addr\", \":8080\",\n\t\t\"The address the metric endpoint binds to.\")\n\n\tfs.BoolVar(&enableLeaderElection, \"leader-elect\", false,\n\t\t\"Enable leader election for controller manager. Enabling this will ensure there is only one active controller manager.\")\n\n\tfs.StringVar(&watchNamespace, \"namespace\", \"\",\n\t\t\"Namespace that the controller watches to reconcile objects. If unspecified, the controller watches for objects across all namespaces.\")\n\n\tfs.StringVar(&profilerAddress, \"profiler-address\", \"\",\n\t\t\"Bind address to expose the pprof profiler (e.g. localhost:6060)\")\n\n\tfs.IntVar(&eksControlPlaneConcurrency, \"ekscontrolplane-concurrency\", 10,\n\t\t\"Number of EKS control planes to process simultaneously\")\n\n\tfs.DurationVar(&syncPeriod, \"sync-period\", 10*time.Minute,\n\t\t\"The minimum interval at which watched resources are reconciled (e.g. 15m)\")\n\n\tfs.IntVar(&webhookPort, \"webhook-port\", 9443,\n\t\t\"Webhook Server port, disabled by default. When enabled, the manager will only work as webhook server, no reconcilers are installed.\")\n\n\tfs.StringVar(&webhookCertDir, \"webhook-cert-dir\", \"/tmp/k8s-webhook-server/serving-certs/\",\n\t\t\"Webhook cert dir, only used when webhook-port is specified.\")\n\n\tfs.StringVar(&serviceEndpoints, \"service-endpoints\", \"\",\n\t\t\"Set custom AWS service endpoins in semi-colon separated format: ${SigningRegion1}:${ServiceID1}=${URL},${ServiceID2}=${URL};${SigningRegion2}...\")\n\n\tfs.StringVar(\n\t\t&watchFilterValue,\n\t\t\"watch-filter\",\n\t\t\"\",\n\t\tfmt.Sprintf(\"Label value that the controller watches to reconcile cluster-api objects. Label key is always %s. 
If unspecified, the controller watches for all cluster-api objects.\", clusterv1.WatchLabel),\n\t)\n\n\tfeature.MutableGates.AddFlag(fs)\n}", "func InitializeFeatures(featuresClient managementv3.FeatureClient, featureArgs string) {\n\t// applies any default values assigned in --features flag to feature map\n\tif err := applyArgumentDefaults(featureArgs); err != nil {\n\t\tlogrus.Errorf(\"failed to apply feature args: %v\", err)\n\t}\n\n\tif featuresClient == nil {\n\t\treturn\n\t}\n\n\t// creates any features in map that do not exist, updates features with new default value\n\tfor key, f := range features {\n\t\tfeatureState, err := featuresClient.Get(key, metav1.GetOptions{})\n\t\tif err != nil {\n\t\t\tif !errors.IsNotFound(err) {\n\t\t\t\tlogrus.Errorf(\"unable to retrieve feature %s in initialize features: %v\", f.name, err)\n\t\t\t}\n\n\t\t\tif f.install {\n\t\t\t\t// value starts off as nil, that way rancher can determine if value has been manually assigned\n\t\t\t\tnewFeature := &v3.Feature{\n\t\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\t\tName: f.name,\n\t\t\t\t\t},\n\t\t\t\t\tSpec: v3.FeatureSpec{\n\t\t\t\t\t\tValue: nil,\n\t\t\t\t\t},\n\t\t\t\t\tStatus: v3.FeatureStatus{\n\t\t\t\t\t\tDefault: f.def,\n\t\t\t\t\t\tDynamic: f.dynamic,\n\t\t\t\t\t\tDescription: f.description,\n\t\t\t\t\t},\n\t\t\t\t}\n\n\t\t\t\tif _, err := featuresClient.Create(newFeature); err != nil {\n\t\t\t\t\tlogrus.Errorf(\"unable to create feature %s in initialize features: %v\", f.name, err)\n\t\t\t\t}\n\t\t\t}\n\t\t} else {\n\t\t\tnewFeatureState := featureState.DeepCopy()\n\t\t\t// checks if default value has changed\n\t\t\tif featureState.Status.Default != f.def {\n\t\t\t\tnewFeatureState.Status.Default = f.def\n\t\t\t}\n\n\t\t\t// checks if developer has changed dynamic value from previous rancher version\n\t\t\tif featureState.Status.Dynamic != f.dynamic {\n\t\t\t\tnewFeatureState.Status.Dynamic = f.dynamic\n\t\t\t}\n\n\t\t\t// checks if developer has changed description 
value from previous rancher version\n\t\t\tif featureState.Status.Description != f.description {\n\t\t\t\tnewFeatureState.Status.Description = f.description\n\t\t\t}\n\n\t\t\tnewFeatureState, err = featuresClient.Update(newFeatureState)\n\t\t\tif err != nil {\n\t\t\t\tlogrus.Errorf(\"unable to update feature %s in initialize features: %v\", f.name, err)\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tif newFeatureState.Status.LockedValue != nil {\n\t\t\t\tf.Set(*newFeatureState.Status.LockedValue)\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tif featureState.Spec.Value == nil {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tif *featureState.Spec.Value == f.val {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tf.Set(*featureState.Spec.Value)\n\t\t}\n\t}\n}", "func BuildServerFlags(cmd *cobra.Command, srv *server.Command) {\n\tflags := cmd.Flags()\n\tflags.StringVar(&srv.Config.Name, \"name\", srv.Config.Name, \"Name of the node in the cluster.\")\n\tflags.StringVarP(&srv.Config.DataDir, \"data-dir\", \"d\", srv.Config.DataDir, \"Directory to store FeatureBase data files.\")\n\tflags.StringVarP(&srv.Config.Bind, \"bind\", \"b\", srv.Config.Bind, \"Default URI on which FeatureBase should listen.\")\n\tflags.StringVar(&srv.Config.BindGRPC, \"bind-grpc\", srv.Config.BindGRPC, \"URI on which FeatureBase should listen for gRPC requests.\")\n\tflags.StringVar(&srv.Config.Advertise, \"advertise\", srv.Config.Advertise, \"Address to advertise externally.\")\n\tflags.StringVar(&srv.Config.AdvertiseGRPC, \"advertise-grpc\", srv.Config.AdvertiseGRPC, \"Address to advertise externally for gRPC.\")\n\tflags.IntVar(&srv.Config.MaxWritesPerRequest, \"max-writes-per-request\", srv.Config.MaxWritesPerRequest, \"Number of write commands per request.\")\n\tflags.StringVar(&srv.Config.LogPath, \"log-path\", srv.Config.LogPath, \"Log path\")\n\tflags.BoolVar(&srv.Config.Verbose, \"verbose\", srv.Config.Verbose, \"Enable verbose logging\")\n\tflags.Uint64Var(&srv.Config.MaxMapCount, \"max-map-count\", srv.Config.MaxMapCount, 
\"Limits the maximum number of active mmaps. FeatureBase will fall back to reading files once this is exhausted. Set below your system's vm.max_map_count.\")\n\tflags.Uint64Var(&srv.Config.MaxFileCount, \"max-file-count\", srv.Config.MaxFileCount, \"Soft limit on the maximum number of fragment files FeatureBase keeps open simultaneously.\")\n\tflags.DurationVar((*time.Duration)(&srv.Config.LongQueryTime), \"long-query-time\", time.Duration(srv.Config.LongQueryTime), \"Duration that will trigger log and stat messages for slow queries. Zero to disable.\")\n\tflags.IntVar(&srv.Config.QueryHistoryLength, \"query-history-length\", srv.Config.QueryHistoryLength, \"Number of queries to remember in history.\")\n\tflags.Int64Var(&srv.Config.MaxQueryMemory, \"max-query-memory\", srv.Config.MaxQueryMemory, \"Maximum memory allowed per Extract() or SELECT query.\")\n\n\t// TLS\n\tSetTLSConfig(flags, \"\", &srv.Config.TLS.CertificatePath, &srv.Config.TLS.CertificateKeyPath, &srv.Config.TLS.CACertPath, &srv.Config.TLS.SkipVerify, &srv.Config.TLS.EnableClientVerification)\n\n\t// Handler\n\tflags.StringSliceVar(&srv.Config.Handler.AllowedOrigins, \"handler.allowed-origins\", []string{}, \"Comma separated list of allowed origin URIs (for CORS/Web UI).\")\n\n\t// Cluster\n\tflags.IntVar(&srv.Config.Cluster.ReplicaN, \"cluster.replicas\", 1, \"Number of hosts each piece of data should be stored on.\")\n\tflags.DurationVar((*time.Duration)(&srv.Config.Cluster.LongQueryTime), \"cluster.long-query-time\", time.Duration(srv.Config.Cluster.LongQueryTime), \"RENAMED TO 'long-query-time': Duration that will trigger log and stat messages for slow queries.\") // negative duration indicates invalid value because 0 is meaningful\n\tflags.StringVar(&srv.Config.Cluster.Name, \"cluster.name\", srv.Config.Cluster.Name, \"Human-readable name for the cluster.\")\n\tflags.StringVar(&srv.Config.Cluster.PartitionToNodeAssignment, \"cluster.partition-to-node-assignment\", 
srv.Config.Cluster.PartitionToNodeAssignment, \"How to assign partitions to nodes. jmp-hash or modulus\")\n\n\t// Translation\n\tflags.StringVar(&srv.Config.Translation.PrimaryURL, \"translation.primary-url\", srv.Config.Translation.PrimaryURL, \"DEPRECATED: URL for primary translation node for replication.\")\n\tflags.IntVar(&srv.Config.Translation.MapSize, \"translation.map-size\", srv.Config.Translation.MapSize, \"Size in bytes of mmap to allocate for key translation.\")\n\n\t// Etcd\n\t// Etcd.Name used Config.Name for its value.\n\tflags.StringVar(&srv.Config.Etcd.Dir, \"etcd.dir\", srv.Config.Etcd.Dir, \"Directory to store etcd data files. If not provided, a directory will be created under the main data-dir directory.\")\n\t// Etcd.ClusterName uses Cluster.Name for its value\n\tflags.StringVar(&srv.Config.Etcd.LClientURL, \"etcd.listen-client-address\", srv.Config.Etcd.LClientURL, \"Listen client address.\")\n\tflags.StringVar(&srv.Config.Etcd.AClientURL, \"etcd.advertise-client-address\", srv.Config.Etcd.AClientURL, \"Advertise client address. If not provided, uses the listen client address.\")\n\tflags.StringVar(&srv.Config.Etcd.LPeerURL, \"etcd.listen-peer-address\", srv.Config.Etcd.LPeerURL, \"Listen peer address.\")\n\tflags.StringVar(&srv.Config.Etcd.APeerURL, \"etcd.advertise-peer-address\", srv.Config.Etcd.APeerURL, \"Advertise peer address. 
If not provided, uses the listen peer address.\")\n\tflags.StringVar(&srv.Config.Etcd.ClusterURL, \"etcd.cluster-url\", srv.Config.Etcd.ClusterURL, \"Cluster URL to join.\")\n\tflags.StringVar(&srv.Config.Etcd.InitCluster, \"etcd.initial-cluster\", srv.Config.Etcd.InitCluster, \"Initial cluster name1=apurl1,name2=apurl2\")\n\tflags.Int64Var(&srv.Config.Etcd.HeartbeatTTL, \"etcd.heartbeat-ttl\", srv.Config.Etcd.HeartbeatTTL, \"Timeout used to determine cluster status\")\n\n\tflags.StringVar(&srv.Config.Etcd.Cluster, \"etcd.static-cluster\", srv.Config.Etcd.Cluster, \"EXPERIMENTAL static featurebase cluster name1=apurl1,name2=apurl2\")\n\t_ = flags.MarkHidden(\"etcd.static-cluster\")\n\tflags.StringVar(&srv.Config.Etcd.EtcdHosts, \"etcd.etcd-hosts\", srv.Config.Etcd.EtcdHosts, \"EXPERIMENTAL etcd server host:port comma separated list\")\n\t_ = flags.MarkHidden(\"etcd.etcd-hosts\") // TODO (twg) expose when ready for public consumption\n\n\t// External postgres database for ExternalLookup\n\tflags.StringVar(&srv.Config.LookupDBDSN, \"lookup-db-dsn\", \"\", \"external (postgres) database DSN to use for ExternalLookup calls\")\n\n\t// AntiEntropy\n\tflags.DurationVar((*time.Duration)(&srv.Config.AntiEntropy.Interval), \"anti-entropy.interval\", (time.Duration)(srv.Config.AntiEntropy.Interval), \"Interval at which to run anti-entropy routine.\")\n\n\t// Metric\n\tflags.StringVar(&srv.Config.Metric.Service, \"metric.service\", srv.Config.Metric.Service, \"Where to send stats: can be expvar (in-memory served at /debug/vars), prometheus, statsd or none.\")\n\tflags.StringVar(&srv.Config.Metric.Host, \"metric.host\", srv.Config.Metric.Host, \"URI to send metrics when metric.service is statsd.\")\n\tflags.DurationVar((*time.Duration)(&srv.Config.Metric.PollInterval), \"metric.poll-interval\", (time.Duration)(srv.Config.Metric.PollInterval), \"Polling interval metrics.\")\n\tflags.BoolVar((&srv.Config.Metric.Diagnostics), \"metric.diagnostics\", srv.Config.Metric.Diagnostics, 
\"Enabled diagnostics reporting.\")\n\n\t// Tracing\n\tflags.StringVar(&srv.Config.Tracing.AgentHostPort, \"tracing.agent-host-port\", srv.Config.Tracing.AgentHostPort, \"Jaeger agent host:port.\")\n\tflags.StringVar(&srv.Config.Tracing.SamplerType, \"tracing.sampler-type\", srv.Config.Tracing.SamplerType, \"Jaeger sampler type (remote, const, probabilistic, ratelimiting) or 'off' to disable tracing completely.\")\n\tflags.Float64Var(&srv.Config.Tracing.SamplerParam, \"tracing.sampler-param\", srv.Config.Tracing.SamplerParam, \"Jaeger sampler parameter.\")\n\n\t// Profiling\n\tflags.IntVar(&srv.Config.Profile.BlockRate, \"profile.block-rate\", srv.Config.Profile.BlockRate, \"Sampling rate for goroutine blocking profiler. One sample per <rate> ns.\")\n\tflags.IntVar(&srv.Config.Profile.MutexFraction, \"profile.mutex-fraction\", srv.Config.Profile.MutexFraction, \"Sampling fraction for mutex contention profiling. Sample 1/<rate> of events.\")\n\n\tflags.StringVar(&srv.Config.Storage.Backend, \"storage.backend\", storage.DefaultBackend, \"Storage backend to use: 'rbf' is only supported value.\")\n\tflags.BoolVar(&srv.Config.Storage.FsyncEnabled, \"storage.fsync\", true, \"enable fsync fully safe flush-to-disk\")\n\n\t// RBF specific flags. See pilosa/rbf/cfg/cfg.go for definitions.\n\tsrv.Config.RBFConfig.DefineFlags(flags)\n\n\tflags.BoolVar(&srv.Config.SQL.EndpointEnabled, \"sql.endpoint-enabled\", srv.Config.SQL.EndpointEnabled, \"Enable FeatureBase SQL /sql endpoint (default false)\")\n\n\t// Future flags.\n\tflags.BoolVar(&srv.Config.Future.Rename, \"future.rename\", false, \"Present application name as FeatureBase. 
Defaults to false, will default to true in an upcoming release.\")\n\n\t// OAuth2.0 identity provider configuration\n\tflags.BoolVar(&srv.Config.Auth.Enable, \"auth.enable\", false, \"Enable AuthN/AuthZ of featurebase, disabled by default.\")\n\tflags.StringVar(&srv.Config.Auth.ClientId, \"auth.client-id\", srv.Config.Auth.ClientId, \"Identity Provider's Application/Client ID.\")\n\tflags.StringVar(&srv.Config.Auth.ClientSecret, \"auth.client-secret\", srv.Config.Auth.ClientSecret, \"Identity Provider's Client Secret.\")\n\tflags.StringVar(&srv.Config.Auth.AuthorizeURL, \"auth.authorize-url\", srv.Config.Auth.AuthorizeURL, \"Identity Provider's Authorize URL.\")\n\tflags.StringVar(&srv.Config.Auth.RedirectBaseURL, \"auth.redirect-base-url\", srv.Config.Auth.RedirectBaseURL, \"Base URL of the featurebase instance used to redirect IDP.\")\n\tflags.StringVar(&srv.Config.Auth.TokenURL, \"auth.token-url\", srv.Config.Auth.TokenURL, \"Identity Provider's Token URL.\")\n\tflags.StringVar(&srv.Config.Auth.GroupEndpointURL, \"auth.group-endpoint-url\", srv.Config.Auth.GroupEndpointURL, \"Identity Provider's Group endpoint URL.\")\n\tflags.StringVar(&srv.Config.Auth.LogoutURL, \"auth.logout-url\", srv.Config.Auth.LogoutURL, \"Identity Provider's Logout URL.\")\n\tflags.StringSliceVar(&srv.Config.Auth.Scopes, \"auth.scopes\", srv.Config.Auth.Scopes, \"Comma separated list of scopes obtained from IdP\")\n\tflags.StringVar(&srv.Config.Auth.SecretKey, \"auth.secret-key\", srv.Config.Auth.SecretKey, \"Secret key used for auth.\")\n\tflags.StringVar(&srv.Config.Auth.PermissionsFile, \"auth.permissions\", srv.Config.Auth.PermissionsFile, \"Permissions' file with group authorization.\")\n\tflags.StringVar(&srv.Config.Auth.QueryLogPath, \"auth.query-log-path\", srv.Config.Auth.QueryLogPath, \"Path to log user queries\")\n\tflags.StringSliceVar(&srv.Config.Auth.ConfiguredIPs, \"auth.configured-ips\", srv.Config.Auth.ConfiguredIPs, \"List of configured IPs allowed for 
ingest\")\n\n\tflags.BoolVar(&srv.Config.DataDog.Enable, \"datadog.enable\", false, \"enable continuous profiling with DataDog cloud service, Note you must have DataDog agent installed\")\n\tflags.StringVar(&srv.Config.DataDog.Service, \"datadog.service\", \"default-service\", \"The Datadog service name, for example my-web-app\")\n\tflags.StringVar(&srv.Config.DataDog.Env, \"datadog.env\", \"default-env\", \"The Datadog environment name, for example, production\")\n\tflags.StringVar(&srv.Config.DataDog.Version, \"datadog.version\", \"default-version\", \"The version of your application\")\n\tflags.StringVar(&srv.Config.DataDog.Tags, \"datadog.tags\", \"molecula\", \"The tags to apply to an uploaded profile. Must be a list of in the format <KEY1>:<VALUE1>,<KEY2>:<VALUE2>\")\n\tflags.BoolVar(&srv.Config.DataDog.CPUProfile, \"datadog.cpu-profile\", true, \"golang pprof cpu profile \")\n\tflags.BoolVar(&srv.Config.DataDog.HeapProfile, \"datadog.heap-profile\", true, \"golang pprof heap profile\")\n\tflags.BoolVar(&srv.Config.DataDog.MutexProfile, \"datadog.mutex-profile\", false, \"golang pprof mutex profile\")\n\tflags.BoolVar(&srv.Config.DataDog.GoroutineProfile, \"datadog.goroutine-profile\", false, \"golang pprof goroutine profile\")\n\tflags.BoolVar(&srv.Config.DataDog.BlockProfile, \"datadog.block-profile\", false, \"golang pprof goroutine \")\n}", "func (b *AdapterBase) InstallFlags() {\n\tb.initFlagSet()\n\tb.flagOnce.Do(func() {\n\t\tif b.CustomMetricsAdapterServerOptions == nil {\n\t\t\tb.CustomMetricsAdapterServerOptions = server.NewCustomMetricsAdapterServerOptions()\n\t\t\tb.CustomMetricsAdapterServerOptions.OpenAPIConfig = b.OpenAPIConfig\n\t\t}\n\n\t\tb.SecureServing.AddFlags(b.FlagSet)\n\t\tb.Authentication.AddFlags(b.FlagSet)\n\t\tb.Authorization.AddFlags(b.FlagSet)\n\t\tb.Audit.AddFlags(b.FlagSet)\n\t\tb.Features.AddFlags(b.FlagSet)\n\n\t\tb.FlagSet.StringVar(&b.RemoteKubeConfigFile, \"lister-kubeconfig\", b.RemoteKubeConfigFile,\n\t\t\t\"kubeconfig 
file pointing at the 'core' kubernetes server with enough rights to list \"+\n\t\t\t\t\"any described objects\")\n\t\tb.FlagSet.DurationVar(&b.DiscoveryInterval, \"discovery-interval\", b.DiscoveryInterval,\n\t\t\t\"interval at which to refresh API discovery information\")\n\t})\n}", "func (s *VMTServer) AddFlags(fs *pflag.FlagSet) {\n\tfs.StringVar(&s.ClusterKeyInjected, \"cluster-key-injected\", \"\", \"Injected cluster key to enable pod move across cluster\")\n\tfs.IntVar(&s.Port, \"port\", s.Port, \"The port that kubeturbo's http service runs on.\")\n\tfs.StringVar(&s.Address, \"ip\", s.Address, \"the ip address that kubeturbo's http service runs on.\")\n\t// TODO: The flagset that is included by vendoring k8s uses the same names i.e. \"master\" and \"kubeconfig\".\n\t// This for some reason conflicts with the names introduced by kubeturbo after upgrading the k8s vendored code\n\t// to version 1.19.1. Right now we have changed the names of kubeturbo flags as a quick fix. These flags are\n\t// not user facing and are useful only when running kubeturbo outside the cluster. 
Find a better solution\n\t// when need be.\n\tfs.StringVar(&s.Master, \"k8s-master\", s.Master, \"The address of the Kubernetes API server (overrides any value in kubeconfig).\")\n\tfs.StringVar(&s.K8sTAPSpec, \"turboconfig\", s.K8sTAPSpec, \"Path to the config file.\")\n\tfs.StringVar(&s.TestingFlagPath, \"testingflag\", s.TestingFlagPath, \"Path to the testing flag.\")\n\tfs.StringVar(&s.KubeConfig, \"k8s-kubeconfig\", s.KubeConfig, \"Path to kubeconfig file with authorization and master location information.\")\n\tfs.BoolVar(&s.EnableProfiling, \"profiling\", false, \"Enable profiling via web interface host:port/debug/pprof/.\")\n\tfs.BoolVar(&s.UseUUID, \"stitch-uuid\", true, \"Use VirtualMachine's UUID to do stitching, otherwise IP is used.\")\n\tfs.IntVar(&s.KubeletPort, \"kubelet-port\", DefaultKubeletPort, \"The port of the kubelet runs on.\")\n\tfs.BoolVar(&s.EnableKubeletHttps, \"kubelet-https\", DefaultKubeletHttps, \"Indicate if Kubelet is running on https server.\")\n\tfs.BoolVar(&s.UseNodeProxyEndpoint, \"use-node-proxy-endpoint\", false, \"Indicate if Kubelet queries should be routed through APIServer node proxy endpoint.\")\n\tfs.BoolVar(&s.ForceSelfSignedCerts, \"kubelet-force-selfsigned-cert\", true, \"Indicate if we must use self-signed cert.\")\n\tfs.BoolVar(&s.FailVolumePodMoves, \"fail-volume-pod-moves\", true, \"Indicate if kubeturbo should fail to move pods which have volumes attached. Default is set to true.\")\n\tfs.BoolVar(&s.UpdateQuotaToAllowMoves, \"update-quota-to-allow-moves\", true, \"Indicate if kubeturbo should try to update namespace quotas to allow pod moves when quota(s) is/are full. 
Default is set to true.\")\n\tfs.StringVar(&k8sVersion, \"k8sVersion\", k8sVersion, \"[deprecated] the kubernetes server version; for openshift, it is the underlying Kubernetes' version.\")\n\tfs.StringVar(&noneSchedulerName, \"noneSchedulerName\", noneSchedulerName, \"[deprecated] a none-exist scheduler name, to prevent controller to create Running pods during move Action.\")\n\tfs.IntVar(&s.DiscoveryIntervalSec, \"discovery-interval-sec\", defaultDiscoveryIntervalSec, \"The discovery interval in seconds.\")\n\tfs.IntVar(&s.ValidationWorkers, \"validation-workers\", DefaultValidationWorkers, \"The validation workers\")\n\tfs.IntVar(&s.ValidationTimeout, \"validation-timeout-sec\", DefaultValidationTimeout, \"The validation timeout in seconds.\")\n\tfs.IntVar(&s.DiscoveryWorkers, \"discovery-workers\", DefaultDiscoveryWorkers, \"The number of discovery workers.\")\n\tfs.IntVar(&s.DiscoveryTimeoutSec, \"discovery-timeout-sec\", DefaultDiscoveryTimeoutSec, \"The discovery timeout in seconds for each discovery worker.\")\n\tfs.IntVar(&s.DiscoverySamples, \"discovery-samples\", DefaultDiscoverySamples, \"The number of resource usage data samples to be collected from kubelet in each full discovery cycle. This should be no larger than 60.\")\n\tfs.IntVar(&s.DiscoverySampleIntervalSec, \"discovery-sample-interval\", DefaultDiscoverySampleIntervalSec, \"The discovery interval in seconds to collect additional resource usage data samples from kubelet. This should be no smaller than 10 seconds.\")\n\tfs.IntVar(&s.GCIntervalMin, \"garbage-collection-interval\", DefaultGCIntervalMin, \"The garbage collection interval in minutes for possible leaked pods from actions failed because of kubeturbo restarts. 
Default value is 20 mins.\")\n\tfs.IntVar(&s.ItemsPerListQuery, \"items-per-list-query\", 0, \"Number of workload controller items the list api call should request for.\")\n\tfs.StringSliceVar(&s.sccSupport, \"scc-support\", defaultSccSupport, \"The SCC list allowed for executing pod actions, e.g., --scc-support=restricted,anyuid or --scc-support=* to allow all. Default allowed scc is [*].\")\n\t// So far we have noticed cluster api support only in openshift clusters and our implementation works only for openshift\n\t// It thus makes sense to have openshifts machine api namespace as our default cluster api namespace\n\tfs.StringVar(&s.ClusterAPINamespace, \"cluster-api-namespace\", \"openshift-machine-api\", \"The Cluster API namespace.\")\n\tfs.StringVar(&s.BusyboxImage, \"busybox-image\", \"busybox\", \"The complete image uri used for fallback node cpu frequency getter job.\")\n\tfs.StringVar(&s.BusyboxImagePullSecret, \"busybox-image-pull-secret\", \"\", \"The name of the secret that stores the image pull credentials for busybox image.\")\n\tfs.StringVar(&s.CpufreqJobExcludeNodeLabels, \"cpufreq-job-exclude-node-labels\", \"\", \"The comma separated list of key=value node label pairs for the nodes (for example windows nodes) to be excluded from running job based cpufrequency getter.\")\n\tfs.StringVar(&s.containerUtilizationDataAggStrategy, \"cnt-utilization-data-agg-strategy\", agg.DefaultContainerUtilizationDataAggStrategy, \"Container utilization data aggregation strategy.\")\n\tfs.StringVar(&s.containerUsageDataAggStrategy, \"cnt-usage-data-agg-strategy\", agg.DefaultContainerUsageDataAggStrategy, \"Container usage data aggregation strategy.\")\n\tfs.IntVar(&s.readinessRetryThreshold, \"readiness-retry-threshold\", DefaultReadinessRetryThreshold, \"When the pod readiness check fails, Kubeturbo will try readinessRetryThreshold times before giving up. 
Defaults to 60.\")\n\t// Flags for gitops based action execution\n\tfs.StringVar(&s.gitConfig.GitSecretNamespace, \"git-secret-namespace\", \"\", \"The namespace of the secret which holds the git credentials.\")\n\tfs.StringVar(&s.gitConfig.GitSecretName, \"git-secret-name\", \"\", \"The name of the secret which holds the git credentials.\")\n\tfs.StringVar(&s.gitConfig.GitUsername, \"git-username\", \"\", \"The user name to be used to push changes to git.\")\n\tfs.StringVar(&s.gitConfig.GitEmail, \"git-email\", \"\", \"The email to be used to push changes to git.\")\n\tfs.StringVar(&s.gitConfig.CommitMode, \"git-commit-mode\", \"direct\", \"The commit mode that should be used for git action executions. One of request|direct. Defaults to direct.\")\n\t// CpuFreqGetter image and secret\n\tfs.StringVar(&s.CpuFrequencyGetterImage, \"cpufreqgetter-image\", \"icr.io/cpopen/turbonomic/cpufreqgetter\", \"The complete cpufreqgetter image uri used for fallback node cpu frequency getter job.\")\n\tfs.StringVar(&s.CpuFrequencyGetterPullSecret, \"cpufreqgetter-image-pull-secret\", \"\", \"The name of the secret that stores the image pull credentials for cpufreqgetter image.\")\n\tfs.BoolVar(&s.CleanupSccRelatedResources, \"cleanup-scc-impersonation-resources\", true, \"Enable cleanup the resources for scc impersonation.\")\n}", "func (o *FailoverAgentOptions) AddFlags(fs *pflag.FlagSet) {\n\tfs.StringVar(&o.ClusterName, \"cluster-name\", o.ClusterName,\n\t\t\"If non-empty, will use as cluster name instead of generated random name.\")\n\tfs.StringVar(&o.BootstrapKubeconfig, \"bootstrap-kubeconfig\", o.BootstrapKubeconfig,\n\t\t\"The path of the kubeconfig file for agent bootstrap.\")\n\tfs.StringVar(&o.HubKubeconfigSecret, \"hub-kubeconfig-secret\", o.HubKubeconfigSecret,\n\t\t\"The name of secret in component namespace storing kubeconfig for hub.\")\n\tfs.StringVar(&o.HubKubeconfigDir, \"hub-kubeconfig-dir\", o.HubKubeconfigDir,\n\t\t\"The mount path of hub-kubeconfig-secret 
in the container.\")\n\tfs.StringArrayVar(&o.SpokeExternalServerURLs, \"spoke-external-server-urls\", o.SpokeExternalServerURLs,\n\t\t\"A list of reachable spoke cluster api server URLs for hub cluster.\")\n\tfs.DurationVar(&o.ClusterHealthCheckPeriod, \"cluster-healthcheck-period\", o.ClusterHealthCheckPeriod,\n\t\t\"The period to check managed cluster kube-apiserver health\")\n\tfs.IntVar(&o.MaxCustomClusterClaims, \"max-custom-cluster-claims\", o.MaxCustomClusterClaims,\n\t\t\"The max number of custom cluster claims to expose.\")\n}", "func (c *client) GetFeatureFlags() (map[string]string, error) {\n\tjsonFile, err := os.Open(c.featureFlagConfigPath)\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"failed to open feature flag file\")\n\t}\n\tdefer jsonFile.Close()\n\tbyteValue, err := io.ReadAll(jsonFile)\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"failed to read feature flag file\")\n\t}\n\tresult := make(map[string]string)\n\terr = json.Unmarshal(byteValue, &result)\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"failed to unmarshal feature flag json data\")\n\t}\n\treturn result, nil\n}", "func (s *VMTServer) AddFlags(fs *pflag.FlagSet) {\n\tfs.IntVar(&s.Port, \"port\", s.Port, \"The port that kubeturbo's http service runs on\")\n\tfs.StringVar(&s.Address, \"ip\", s.Address, \"the ip address that kubeturbo's http service runs on\")\n\tfs.IntVar(&s.CAdvisorPort, \"cadvisor-port\", K8sCadvisorPort, \"The port of the cadvisor service runs on\")\n\tfs.StringVar(&s.Master, \"master\", s.Master, \"The address of the Kubernetes API server (overrides any value in kubeconfig)\")\n\tfs.StringVar(&s.K8sTAPSpec, \"turboconfig\", s.K8sTAPSpec, \"Path to the config file.\")\n\tfs.StringVar(&s.TestingFlagPath, \"testingflag\", s.TestingFlagPath, \"Path to the testing flag.\")\n\tfs.StringVar(&s.KubeConfig, \"kubeconfig\", s.KubeConfig, \"Path to kubeconfig file with authorization and master location information.\")\n\tfs.BoolVar(&s.EnableProfiling, 
\"profiling\", false, \"Enable profiling via web interface host:port/debug/pprof/.\")\n\tfs.BoolVar(&s.UseVMWare, \"usevmware\", false, \"If the underlying infrastructure is VMWare.\")\n\tfs.IntVar(&s.KubeletPort, \"kubelet-port\", kubelet.DefaultKubeletPort, \"The port of the kubelet runs on\")\n\tfs.BoolVar(&s.EnableKubeletHttps, \"kubelet-https\", kubelet.DefaultKubeletHttps, \"Indicate if Kubelet is running on https server\")\n\tfs.StringVar(&s.K8sVersion, \"k8sVersion\", executor.HigherK8sVersion, \"the kubernetes server version; for openshift, it is the underlying Kubernetes' version.\")\n\tfs.StringVar(&s.NoneSchedulerName, \"noneSchedulerName\", executor.DefaultNoneExistSchedulerName, \"a none-exist scheduler name, to prevent controller to create Running pods during move Action.\")\n\n\t//leaderelection.BindFlags(&s.LeaderElection, fs)\n}", "func (a *AssembliesApiService) GetFeatures(ctx _context.Context, did string, wvm string, wvmid string, eid string, localVarOptionals *GetFeaturesOpts) (BtAssemblyFeatureListResponse1174, *_nethttp.Response, error) {\n\tvar (\n\t\tlocalVarHTTPMethod = _nethttp.MethodGet\n\t\tlocalVarPostBody interface{}\n\t\tlocalVarFormFileName string\n\t\tlocalVarFileName string\n\t\tlocalVarFileBytes []byte\n\t\tlocalVarReturnValue BtAssemblyFeatureListResponse1174\n\t)\n\n\t// create path and map variables\n\tlocalVarPath := a.client.cfg.BasePath + \"/api/assemblies/d/{did}/{wvm}/{wvmid}/e/{eid}/features\"\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"did\"+\"}\", _neturl.QueryEscape(parameterToString(did, \"\")) , -1)\n\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"wvm\"+\"}\", _neturl.QueryEscape(parameterToString(wvm, \"\")) , -1)\n\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"wvmid\"+\"}\", _neturl.QueryEscape(parameterToString(wvmid, \"\")) , -1)\n\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"eid\"+\"}\", _neturl.QueryEscape(parameterToString(eid, \"\")) , -1)\n\n\tlocalVarHeaderParams 
:= make(map[string]string)\n\tlocalVarQueryParams := _neturl.Values{}\n\tlocalVarFormParams := _neturl.Values{}\n\n\tif localVarOptionals != nil && localVarOptionals.FeatureId.IsSet() {\n\t\tt:=localVarOptionals.FeatureId.Value()\n\t\tif reflect.TypeOf(t).Kind() == reflect.Slice {\n\t\t\ts := reflect.ValueOf(t)\n\t\t\tfor i := 0; i < s.Len(); i++ {\n\t\t\t\tlocalVarQueryParams.Add(\"featureId\", parameterToString(s.Index(i), \"multi\"))\n\t\t\t}\n\t\t} else {\n\t\t\tlocalVarQueryParams.Add(\"featureId\", parameterToString(t, \"multi\"))\n\t\t}\n\t}\n\tif localVarOptionals != nil && localVarOptionals.LinkDocumentId.IsSet() {\n\t\tlocalVarQueryParams.Add(\"linkDocumentId\", parameterToString(localVarOptionals.LinkDocumentId.Value(), \"\"))\n\t}\n\t// to determine the Content-Type header\n\tlocalVarHTTPContentTypes := []string{}\n\n\t// set Content-Type header\n\tlocalVarHTTPContentType := selectHeaderContentType(localVarHTTPContentTypes)\n\tif localVarHTTPContentType != \"\" {\n\t\tlocalVarHeaderParams[\"Content-Type\"] = localVarHTTPContentType\n\t}\n\n\t// to determine the Accept header\n\tlocalVarHTTPHeaderAccepts := []string{\"application/vnd.onshape.v2+json;charset=UTF-8;qs=0.2\"}\n\n\t// set Accept header\n\tlocalVarHTTPHeaderAccept := selectHeaderAccept(localVarHTTPHeaderAccepts)\n\tif localVarHTTPHeaderAccept != \"\" {\n\t\tlocalVarHeaderParams[\"Accept\"] = localVarHTTPHeaderAccept\n\t}\n\tr, err := a.client.prepareRequest(ctx, localVarPath, localVarHTTPMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFormFileName, localVarFileName, localVarFileBytes)\n\tif err != nil {\n\t\treturn localVarReturnValue, nil, err\n\t}\n\n\tlocalVarHTTPResponse, err := a.client.callAPI(r)\n\tif err != nil || localVarHTTPResponse == nil {\n\t\treturn localVarReturnValue, localVarHTTPResponse, err\n\t}\n\n\tlocalVarBody, err := _ioutil.ReadAll(localVarHTTPResponse.Body)\n\tlocalVarHTTPResponse.Body.Close()\n\tif err != nil 
{\n\t\treturn localVarReturnValue, localVarHTTPResponse, err\n\t}\n\n\tif localVarHTTPResponse.StatusCode >= 300 {\n\t\tnewErr := GenericOpenAPIError{\n\t\t\tbody: localVarBody,\n\t\t\terror: localVarHTTPResponse.Status,\n\t\t}\n\t\tif localVarHTTPResponse.StatusCode == 200 {\n\t\t\tvar v BtAssemblyFeatureListResponse1174\n\t\t\terr = a.client.decode(&v, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t\t\t}\n\t\t\tnewErr.model = v\n\t\t}\n\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t}\n\n\terr = a.client.decode(&localVarReturnValue, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\tif err != nil {\n\t\tnewErr := GenericOpenAPIError{\n\t\t\tbody: localVarBody,\n\t\t\terror: err.Error(),\n\t\t}\n\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t}\n\n\treturn localVarReturnValue, localVarHTTPResponse, nil\n}", "func ParseFeatures(queryString string) error {\n\tfeatureMutex.Lock()\n\tdefer featureMutex.Unlock()\n\n\tfeatures := map[Feature]bool{}\n\t// copy the defaults into this map\n\tfor k, v := range featureDefaults {\n\t\tfeatures[k] = v\n\t}\n\n\tvalues, err := url.ParseQuery(queryString)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"error parsing query string for feature gates\")\n\t}\n\n\tfor k := range values {\n\t\tf := Feature(k)\n\n\t\tif _, ok := featureDefaults[f]; !ok {\n\t\t\treturn errors.Errorf(\"Feature Gate %q is not a valid Feature Gate\", f)\n\t\t}\n\n\t\tb, err := strconv.ParseBool(values.Get(k))\n\t\tif err != nil {\n\t\t\treturn errors.Wrapf(err, \"error parsing bool value from flag %s \", k)\n\t\t}\n\t\tfeatures[f] = b\n\t}\n\n\tfeatureGates = features\n\treturn nil\n}", "func (o *Options) InitFlags(fs *flag.FlagSet) {\n\tif fs == nil {\n\t\tfs = 
flag.CommandLine\n\t}\n\n\tflag.StringVar(\n\t\t&o.MetricsAddr,\n\t\t\"metrics-addr\",\n\t\t\":8080\",\n\t\t\"The address the metric endpoint binds to.\")\n\tflag.BoolVar(\n\t\t&o.LeaderElectionEnabled,\n\t\t\"enable-leader-election\",\n\t\ttrue,\n\t\t\"Enable leader election for controller manager. Enabling this will ensure there is only one active controller manager.\")\n\tflag.StringVar(\n\t\t&o.LeaderElectionID,\n\t\t\"leader-election-id\",\n\t\t\"\",\n\t\t\"Name of the config map to use as the locking resource when configuring leader election.\")\n\tflag.StringVar(\n\t\t&o.LeaderElectionNamespace,\n\t\t\"leader-election-namespace\",\n\t\t\"\",\n\t\t\"Name of the namespace to use for the configmap locking resource when configuring leader election.\")\n\tflag.StringVar(\n\t\t&o.WatchNamespace,\n\t\t\"namespace\",\n\t\t\"\",\n\t\t\"Namespace that the controller watches to reconcile cluster-api objects. If unspecified, the controller watches for cluster-api objects across all namespaces.\")\n\tflag.DurationVar(\n\t\t&o.SyncPeriod,\n\t\t\"sync-period\",\n\t\tDefaultSyncPeriod,\n\t\t\"The interval at which cluster-api objects are synchronized\")\n\tflag.IntVar(\n\t\t&o.MaxConcurrentReconciles,\n\t\t\"max-concurrent-reconciles\",\n\t\t10,\n\t\t\"The maximum number of allowed, concurrent reconciles.\")\n\tflag.StringVar(\n\t\t&o.PodNameSuffix,\n\t\t\"pod-name-suffix\",\n\t\t\"controller-manager\",\n\t\t\"The suffix name of the pod running the controller manager.\")\n\tflag.StringVar(\n\t\t&o.PodNamespaceSuffix,\n\t\t\"pod-namespace-suffix\",\n\t\t\"controller-manager\",\n\t\t\"The suffix name of the pod namespace running the controller manager.\")\n\tflag.IntVar(\n\t\t&o.WebhookPort,\n\t\t\"webhook-port\",\n\t\tDefaultWebhookServiceContainerPort,\n\t\t\"Webhook Server port (set to 0 to disable)\")\n\tflag.StringVar(\n\t\t&o.HealthAddr,\n\t\t\"health-addr\",\n\t\t\":9440\",\n\t\t\"The address the health endpoint binds to.\",\n\t)\n}", "func (s *APIEnablementOptions) 
// AddFlags registers the flags of both nested option groups on fs.
// T aggregates two sub-configurations (RequirementLevels and FeatureStates);
// each one registers its own flags on the shared FlagSet.
func (t *T) AddFlags(fs *flag.FlagSet) {
	t.RequirementLevels.AddFlags(fs)
	t.FeatureStates.AddFlags(fs)
}
localVarHTTPMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFormFileName, localVarFileName, localVarFileBytes)\n\tif err != nil {\n\t\treturn localVarReturnValue, nil, err\n\t}\n\n\tlocalVarHTTPResponse, err := a.client.callAPI(r)\n\tif err != nil || localVarHTTPResponse == nil {\n\t\treturn localVarReturnValue, localVarHTTPResponse, err\n\t}\n\n\tlocalVarBody, err := _ioutil.ReadAll(localVarHTTPResponse.Body)\n\tlocalVarHTTPResponse.Body.Close()\n\tif err != nil {\n\t\treturn localVarReturnValue, localVarHTTPResponse, err\n\t}\n\n\tif localVarHTTPResponse.StatusCode >= 300 {\n\t\tnewErr := GenericOpenAPIError{\n\t\t\tbody: localVarBody,\n\t\t\terror: localVarHTTPResponse.Status,\n\t\t}\n\t\tif localVarHTTPResponse.StatusCode == 401 {\n\t\t\tvar v Error\n\t\t\terr = a.client.decode(&v, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t\t\t}\n\t\t\tnewErr.model = v\n\t\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t\t}\n\t\tif localVarHTTPResponse.StatusCode == 403 {\n\t\t\tvar v Error\n\t\t\terr = a.client.decode(&v, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t\t\t}\n\t\t\tnewErr.model = v\n\t\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t\t}\n\t\tif localVarHTTPResponse.StatusCode == 404 {\n\t\t\tvar v Error\n\t\t\terr = a.client.decode(&v, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t\t\t}\n\t\t\tnewErr.model = v\n\t\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t\t}\n\t\tif localVarHTTPResponse.StatusCode == 500 {\n\t\t\tvar v 
Error\n\t\t\terr = a.client.decode(&v, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t\t\t}\n\t\t\tnewErr.model = v\n\t\t}\n\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t}\n\n\terr = a.client.decode(&localVarReturnValue, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\tif err != nil {\n\t\tnewErr := GenericOpenAPIError{\n\t\t\tbody: localVarBody,\n\t\t\terror: err.Error(),\n\t\t}\n\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t}\n\n\treturn localVarReturnValue, localVarHTTPResponse, nil\n}", "func collectFeaturesInfo(gates []corev1alpha2.FeatureGate, features []corev1alpha2.Feature) map[string]*FeatureInfo {\n\tinfos := map[string]*FeatureInfo{}\n\n\tfor i := range features {\n\t\tpolicy := corev1alpha2.GetPolicyForStabilityLevel(features[i].Spec.Stability)\n\n\t\tinfos[features[i].Name] = &FeatureInfo{\n\t\t\tName: features[i].Name,\n\t\t\tDescription: features[i].Spec.Description,\n\t\t\tStability: features[i].Spec.Stability,\n\t\t\tActivated: features[i].Status.Activated,\n\t\t\tImmutable: policy.Immutable,\n\t\t\tDiscoverable: policy.Discoverable,\n\t\t\tFeatureGate: \"--\",\n\t\t}\n\t}\n\n\tfor i := range gates {\n\t\tfor _, featRef := range gates[i].Spec.Features {\n\t\t\tinfo, ok := infos[featRef.Name]\n\t\t\tif ok {\n\t\t\t\t// FeatureGate referenced Feature is in cluster.\n\t\t\t\tinfo.FeatureGate = gates[i].Name\n\t\t\t}\n\n\t\t\tif !ok {\n\t\t\t\t// FeatureGate referenced Feature is not in cluster. 
Since the Discoverable policy\n\t\t\t\t// cannot be known until the Feature shows up in cluster, set it to true for now.\n\t\t\t\tinfos[featRef.Name] = &FeatureInfo{\n\t\t\t\t\tName: featRef.Name,\n\t\t\t\t\tDiscoverable: true,\n\t\t\t\t\tFeatureGate: gates[i].Name,\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\treturn infos\n}", "func WithFeatureFlags() metadata.MD {\n\treturn metadata.Pairs(\"bigtable-features\", featureFlags)\n}", "func ParseFeaturesFromEnv() error {\n\treturn ParseFeatures(viper.GetString(FeatureGateFlag))\n}", "func NewFromFlags() (*Framework, error) {\n\tusr, err := user.Current()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tviper.SetDefault(kubeconfigFlag, filepath.Join(usr.HomeDir, \".kube\", \"config\"))\n\tviper.SetDefault(gsimageFlag, \"us-docker.pkg.dev/agones-images/examples/simple-game-server:0.17\")\n\tviper.SetDefault(pullSecretFlag, \"\")\n\tviper.SetDefault(stressTestLevelFlag, 0)\n\tviper.SetDefault(perfOutputDirFlag, \"\")\n\tviper.SetDefault(versionFlag, \"\")\n\tviper.SetDefault(runtime.FeatureGateFlag, \"\")\n\tviper.SetDefault(namespaceFlag, \"\")\n\tviper.SetDefault(cloudProductFlag, \"generic\")\n\n\tpflag.String(kubeconfigFlag, viper.GetString(kubeconfigFlag), \"kube config path, e.g. 
$HOME/.kube/config\")\n\tpflag.String(gsimageFlag, viper.GetString(gsimageFlag), \"gameserver image to use for those tests\")\n\tpflag.String(pullSecretFlag, viper.GetString(pullSecretFlag), \"optional secret to be used for pulling the gameserver and/or Agones SDK sidecar images\")\n\tpflag.Int(stressTestLevelFlag, viper.GetInt(stressTestLevelFlag), \"enable stress test at given level 0-100\")\n\tpflag.String(perfOutputDirFlag, viper.GetString(perfOutputDirFlag), \"write performance statistics to the specified directory\")\n\tpflag.String(versionFlag, viper.GetString(versionFlag), \"agones controller version to be tested, consists of release version plus a short hash of the latest commit\")\n\tpflag.String(namespaceFlag, viper.GetString(namespaceFlag), \"namespace is used to isolate test runs to their own namespaces\")\n\tpflag.String(cloudProductFlag, viper.GetString(cloudProductFlag), \"cloud product of cluster references by kubeconfig; defaults to 'generic'; options are 'generic', 'gke-autopilot'\")\n\truntime.FeaturesBindFlags()\n\tpflag.Parse()\n\n\tviper.SetEnvKeyReplacer(strings.NewReplacer(\"-\", \"_\"))\n\truntime.Must(viper.BindEnv(kubeconfigFlag))\n\truntime.Must(viper.BindEnv(gsimageFlag))\n\truntime.Must(viper.BindEnv(pullSecretFlag))\n\truntime.Must(viper.BindEnv(stressTestLevelFlag))\n\truntime.Must(viper.BindEnv(perfOutputDirFlag))\n\truntime.Must(viper.BindEnv(versionFlag))\n\truntime.Must(viper.BindEnv(namespaceFlag))\n\truntime.Must(viper.BindEnv(cloudProductFlag))\n\truntime.Must(viper.BindPFlags(pflag.CommandLine))\n\truntime.Must(runtime.FeaturesBindEnv())\n\truntime.Must(runtime.ParseFeaturesFromEnv())\n\n\tframework, err := newFramework(viper.GetString(kubeconfigFlag), 0, 0)\n\tif err != nil {\n\t\treturn framework, err\n\t}\n\tframework.GameServerImage = viper.GetString(gsimageFlag)\n\tframework.PullSecret = viper.GetString(pullSecretFlag)\n\tframework.StressTestLevel = viper.GetInt(stressTestLevelFlag)\n\tframework.PerfOutputDir = 
// AddFlags registers the deprecated insecure-serving flags on fs.
// A nil receiver means insecure serving is disabled entirely, so nothing
// is registered. Both flags are marked deprecated but then explicitly
// un-hidden (MarkDeprecated hides a flag by default) so they still show
// in --help while warning users.
func (s *DeprecatedInsecureServingOptions) AddFlags(fs *pflag.FlagSet) {
	if s == nil {
		return
	}

	fs.IPVar(&s.BindAddress, "insecure-bind-address", s.BindAddress, ""+
		"The IP address on which to serve the --insecure-port (set to 0.0.0.0 or :: for listening on all interfaces and IP address families).")
	// Though this flag is deprecated, we discovered security concerns over how to do health checks without it e.g. #43784
	// NOTE(review): the error returned by MarkDeprecated is ignored here — it only
	// fails when the flag name is unknown or the message is empty, both impossible
	// for the registration just above.
	fs.MarkDeprecated("insecure-bind-address", "This flag will be removed in a future version.")
	fs.Lookup("insecure-bind-address").Hidden = false

	fs.IntVar(&s.BindPort, "insecure-port", s.BindPort, ""+
		"The port on which to serve unsecured, unauthenticated access.")
	// Though this flag is deprecated, we discovered security concerns over how to do health checks without it e.g. #43784
	fs.MarkDeprecated("insecure-port", "This flag will be removed in a future version.")
	fs.Lookup("insecure-port").Hidden = false
}
range c.SchemaKeys {\n\t\t\tc.configFlags[key] = c.flags.String(key, \"\", fmt.Sprintf(\"override %s from config; see reflow config -help\", key))\n\t\t}\n\t}\n\treturn c.flags\n}", "func (s *ClusterOperatorServerRunOptions) AddFlags(fs *pflag.FlagSet) {\n\t// Add the generic flags.\n\ts.GenericServerRunOptions.AddUniversalFlags(fs)\n\ts.Etcd.AddFlags(fs)\n\ts.SecureServing.AddFlags(fs)\n\ts.SecureServing.AddDeprecatedFlags(fs)\n\t//s.InsecureServing.AddFlags(fs)\n\t//s.InsecureServing.AddDeprecatedFlags(fs)\n\ts.Audit.AddFlags(fs)\n\ts.Features.AddFlags(fs)\n\ts.Authentication.AddFlags(fs)\n\ts.Authorization.AddFlags(fs)\n\ts.Admission.AddFlags(fs)\n\n\t// Note: the weird \"\"+ in below lines seems to be the only way to get gofmt to\n\t// arrange these text blocks sensibly. Grrr.\n\n\tfs.BoolVar(&s.EnableLogsHandler, \"enable-logs-handler\", s.EnableLogsHandler,\n\t\t\"If true, install a /logs handler for the apiserver logs.\")\n\n\tfs.IntVar(&s.MasterCount, \"apiserver-count\", s.MasterCount,\n\t\t\"The number of apiservers running in the cluster, must be a positive number.\")\n\n\tfs.BoolVar(&s.DisableAuth, \"disable-auth\", false,\n\t\t\"Disable authentication and authorization for testing purposes\")\n}", "func FeatureFlag(flag string) string {\n\tsess := session.Must(session.NewSession(&aws.Config{\n\t\tRegion: aws.String(\"us-west-2\"),\n\t}))\n\n\tssmsvc := ssm.New(sess, aws.NewConfig().WithRegion(\"us-west-2\"))\n\tkeyname := flag\n\tdecryption := false\n\tparam, err := ssmsvc.GetParameter(&ssm.GetParameterInput{\n\t\tName: &keyname,\n\t\tWithDecryption: &decryption,\n\t})\n\n\tif err != nil {\n\t\t//Later, it may be worth syncing configs in another region if failed to\n\t\t//pick up in us-west-2\n\t\tlog.Print(err)\n\t\treturn \"Error\"\n\t}\n\n\tvalue := *param.Parameter.Value\n\treturn value\n}", "func ComputeFeatureStates(featureGateSpec configv1alpha1.FeatureGateSpec, features []configv1alpha1.Feature) (activated, deactivated, unavailable []string) 
{\n\t// Collect features to be activated/deactivated in the spec.\n\ttoActivate := sets.String{}\n\ttoDeactivate := sets.String{}\n\tfor _, f := range featureGateSpec.Features {\n\t\tif f.Activate {\n\t\t\ttoActivate.Insert(f.Name)\n\t\t} else {\n\t\t\ttoDeactivate.Insert(f.Name)\n\t\t}\n\t}\n\n\t// discovered is set a set of available features that are discoverable.\n\tdiscovered := sets.String{}\n\t// discoveredDefaultActivated is a set of available features that are discoverable and activated by default.\n\tdiscoveredDefaultActivated := sets.String{}\n\t// discoveredDefaultDeactivated is a set of available features that are discoverable and deactivated by default.\n\tdiscoveredDefaultDeactivated := sets.String{}\n\tfor i := range features {\n\t\tfeature := features[i]\n\t\tif !feature.Spec.Discoverable {\n\t\t\tcontinue\n\t\t}\n\t\tdiscovered.Insert(feature.Name)\n\t\tif feature.Spec.Activated {\n\t\t\tdiscoveredDefaultActivated.Insert(feature.Name)\n\t\t} else {\n\t\t\tdiscoveredDefaultDeactivated.Insert(feature.Name)\n\t\t}\n\t}\n\n\t// activate is all the features that the spec intends to be activated and features that are default activated.\n\tactivate := discoveredDefaultActivated.Union(toActivate)\n\t// activationCandidates are features that are discovered, but are explicitly set *not* to be activated in this feature gate.\n\t// Only these features can be activated regardless of what the intent in the spec is.\n\tactivationCandidates := discovered.Difference(toDeactivate)\n\t// Intersection gives us the actual activated features.\n\tactivated = activationCandidates.Intersection(activate).List()\n\n\t// deactivate is all the features that the spec intends to be deactivated and features that are default deactivated.\n\tdeactivate := discoveredDefaultDeactivated.Union(toDeactivate)\n\t// deactivationCandidates are features that are discovered, but are explicitly set *not* to be deactivated in this feature gate.\n\t// Only these features can be deactivated 
regardless of what the intent in the spec is.\n\tdeactivationCandidates := discovered.Difference(toActivate)\n\t// Intersection gives us the actual deactivated features.\n\tdeactivated = deactivationCandidates.Intersection(deactivate).List()\n\n\t// Set of all features specified in the current spec.\n\tallFeaturesInSpec := toActivate.Union(toDeactivate)\n\t// Set difference with all the discovered features gives unavailable features.\n\tunavailable = allFeaturesInSpec.Difference(discovered).List()\n\n\treturn activated, deactivated, unavailable\n}", "func (c *client) WriteFeatureFlags(featureFlags map[string]string) error {\n\tfeatureFlagsList := make(map[string]string)\n\tfor feature, value := range featureFlags {\n\t\tfeatureFlagsList[strings.TrimSpace(feature)] = value\n\t}\n\tjsonString, err := json.Marshal(featureFlagsList)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"failed to parse the feature flags as valid json\")\n\t}\n\tfile, err := os.Create(c.featureFlagConfigPath)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"failed to create feature flag file\")\n\t}\n\tdefer file.Close()\n\t_, err = file.WriteString(string(jsonString))\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"failed to write to feature flag file\")\n\t}\n\treturn nil\n}", "func (f *Factory) SetFlags() {\n}", "func InitFlags() *FactoryOptions {\n\ttesting.Init()\n\t_, err := types.NewAttachedGinkgoFlagSet(flag.CommandLine, types.GinkgoFlags{}, nil, types.GinkgoFlagSections{}, types.GinkgoFlagSection{})\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\ttestOptions := &FactoryOptions{}\n\ttestOptions.BindFlags(flag.CommandLine)\n\tflag.Parse()\n\n\treturn testOptions\n}", "func FeaturesBindFlags() {\n\tviper.SetDefault(FeatureGateFlag, \"\")\n\tpflag.String(FeatureGateFlag, viper.GetString(FeatureGateFlag), \"Flag to pass in the url query list of feature flags to enable or disable\")\n}", "func addFlags(s *server.Server, fs *pflag.FlagSet) {\n\tfs.StringVar(&s.APIServer, \"api-server\", 
s.APIServer, \"Endpoint for the api server\")\n\tfs.StringVar(&s.APIToken, \"api-token\", s.APIToken, \"Token to authenticate with the api server\")\n\tfs.StringVar(&s.AppPort, \"app-port\", s.AppPort, \"Kube2iam server http port\")\n\tfs.StringVar(&s.MetricsPort, \"metrics-port\", s.MetricsPort, \"Metrics server http port (default: same as kube2iam server port)\")\n\tfs.StringVar(&s.BaseRoleARN, \"base-role-arn\", s.BaseRoleARN, \"Base role ARN\")\n\tfs.BoolVar(&s.Debug, \"debug\", s.Debug, \"Enable debug features\")\n\tfs.StringVar(&s.DefaultIAMRole, \"default-role\", s.DefaultIAMRole, \"Fallback role to use when annotation is not set\")\n\tfs.StringVar(&s.IAMRoleKey, \"iam-role-key\", s.IAMRoleKey, \"Pod annotation key used to retrieve the IAM role\")\n\tfs.StringVar(&s.IAMExternalID, \"iam-external-id\", s.IAMExternalID, \"Pod annotation key used to retrieve the IAM ExternalId\")\n\tfs.DurationVar(&s.IAMRoleSessionTTL, \"iam-role-session-ttl\", s.IAMRoleSessionTTL, \"TTL for the assume role session\")\n\tfs.BoolVar(&s.Insecure, \"insecure\", false, \"Kubernetes server should be accessed without verifying the TLS. 
Testing only\")\n\tfs.StringVar(&s.MetadataAddress, \"metadata-addr\", s.MetadataAddress, \"Address for the ec2 metadata\")\n\tfs.BoolVar(&s.AddIPTablesRule, \"iptables\", false, \"Add iptables rule (also requires --host-ip)\")\n\tfs.BoolVar(&s.AutoDiscoverBaseArn, \"auto-discover-base-arn\", false, \"Queries EC2 Metadata to determine the base ARN\")\n\tfs.BoolVar(&s.AutoDiscoverDefaultRole, \"auto-discover-default-role\", false, \"Queries EC2 Metadata to determine the default Iam Role and base ARN, cannot be used with --default-role, overwrites any previous setting for --base-role-arn\")\n\tfs.StringVar(&s.HostInterface, \"host-interface\", \"docker0\", \"Host interface for proxying AWS metadata\")\n\tfs.BoolVar(&s.NamespaceRestriction, \"namespace-restrictions\", false, \"Enable namespace restrictions\")\n\tfs.StringVar(&s.NamespaceRestrictionFormat, \"namespace-restriction-format\", s.NamespaceRestrictionFormat, \"Namespace Restriction Format (glob/regexp)\")\n\tfs.StringVar(&s.NamespaceKey, \"namespace-key\", s.NamespaceKey, \"Namespace annotation key used to retrieve the IAM roles allowed (value in annotation should be json array)\")\n\tfs.DurationVar(&s.CacheResyncPeriod, \"cache-resync-period\", s.CacheResyncPeriod, \"Kubernetes caches resync period\")\n\tfs.BoolVar(&s.ResolveDupIPs, \"resolve-duplicate-cache-ips\", false, \"Queries the k8s api server to find the source of truth when the pod cache contains multiple pods with the same IP\")\n\tfs.StringVar(&s.HostIP, \"host-ip\", s.HostIP, \"IP address of host\")\n\tfs.StringVar(&s.NodeName, \"node\", s.NodeName, \"Name of the node where kube2iam is running\")\n\tfs.DurationVar(&s.BackoffMaxInterval, \"backoff-max-interval\", s.BackoffMaxInterval, \"Max interval for backoff when querying for role.\")\n\tfs.DurationVar(&s.BackoffMaxElapsedTime, \"backoff-max-elapsed-time\", s.BackoffMaxElapsedTime, \"Max elapsed time for backoff when querying for role.\")\n\tfs.StringVar(&s.LogFormat, \"log-format\", 
s.LogFormat, \"Log format (text/json)\")\n\tfs.StringVar(&s.LogLevel, \"log-level\", s.LogLevel, \"Log level\")\n\tfs.BoolVar(&s.UseRegionalStsEndpoint, \"use-regional-sts-endpoint\", false, \"use the regional sts endpoint if AWS_REGION is set\")\n\tfs.BoolVar(&s.Verbose, \"verbose\", false, \"Verbose\")\n\tfs.BoolVar(&s.Version, \"version\", false, \"Print the version and exits\")\n}", "func (ss *SousServer) AddFlags(fs *flag.FlagSet) {\n\tfs.StringVar(&ss.flags.laddr, `listen`, `:80`, \"The address to listen on, like '127.0.0.1:https'\")\n\tfs.StringVar(&ss.flags.gdmRepo, \"gdm-repo\", \"\", \"Git repo containing the GDM (cloned into config.SourceLocation)\")\n}", "func (v *VersionCommand) addFlags() {\n\t// TODO: add flags here\n}", "func (s *ProxyServerConfig) AddFlags(fs *pflag.FlagSet) {\n\tfs.Var(componentconfig.IPVar{&s.BindAddress}, \"bind-address\", \"The IP address for the proxy server to serve on (set to 0.0.0.0 for all interfaces)\")\n\tfs.StringVar(&s.Master, \"master\", s.Master, \"The address of the Kubernetes API server (overrides any value in kubeconfig)\")\n\tfs.IntVar(&s.HealthzPort, \"healthz-port\", s.HealthzPort, \"The port to bind the health check server. Use 0 to disable.\")\n\tfs.Var(componentconfig.IPVar{&s.HealthzBindAddress}, \"healthz-bind-address\", \"The IP address for the health check server to serve on, defaulting to 127.0.0.1 (set to 0.0.0.0 for all interfaces)\")\n\tfs.IntVar(s.OOMScoreAdj, \"oom-score-adj\", util.IntPtrDerefOr(s.OOMScoreAdj, qos.KubeProxyOOMScoreAdj), \"The oom-score-adj value for kube-proxy process. 
Values must be within the range [-1000, 1000]\")\n\tfs.StringVar(&s.ResourceContainer, \"resource-container\", s.ResourceContainer, \"Absolute name of the resource-only container to create and run the Kube-proxy in (Default: /kube-proxy).\")\n\tfs.MarkDeprecated(\"resource-container\", \"This feature will be removed in a later release.\")\n\tfs.StringVar(&s.Kubeconfig, \"kubeconfig\", s.Kubeconfig, \"Path to kubeconfig file with authorization information (the master location is set by the master flag).\")\n\tfs.Var(componentconfig.PortRangeVar{&s.PortRange}, \"proxy-port-range\", \"Range of host ports (beginPort-endPort, inclusive) that may be consumed in order to proxy service traffic. If unspecified (0-0) then ports will be randomly chosen.\")\n\tfs.StringVar(&s.HostnameOverride, \"hostname-override\", s.HostnameOverride, \"If non-empty, will use this string as identification instead of the actual hostname.\")\n\tfs.Var(&s.Mode, \"proxy-mode\", \"Which proxy mode to use: 'userspace' (older) or 'iptables' (faster). If blank, look at the Node object on the Kubernetes API and respect the '\"+ExperimentalProxyModeAnnotation+\"' annotation if provided. Otherwise use the best-available proxy (currently iptables). If the iptables proxy is selected, regardless of how, but the system's kernel or iptables versions are insufficient, this always falls back to the userspace proxy.\")\n\tfs.DurationVar(&s.IPTablesSyncPeriod.Duration, \"iptables-sync-period\", s.IPTablesSyncPeriod.Duration, \"How often iptables rules are refreshed (e.g. '5s', '1m', '2h22m'). Must be greater than 0.\")\n\tfs.DurationVar(&s.ConfigSyncPeriod, \"config-sync-period\", s.ConfigSyncPeriod, \"How often configuration from the apiserver is refreshed. 
Must be greater than 0.\")\n\tfs.BoolVar(&s.MasqueradeAll, \"masquerade-all\", false, \"If using the pure iptables proxy, SNAT everything\")\n\tfs.BoolVar(&s.CleanupAndExit, \"cleanup-iptables\", false, \"If true cleanup iptables rules and exit.\")\n\tfs.Float32Var(&s.KubeAPIQPS, \"kube-api-qps\", s.KubeAPIQPS, \"QPS to use while talking with kubernetes apiserver\")\n\tfs.IntVar(&s.KubeAPIBurst, \"kube-api-burst\", s.KubeAPIBurst, \"Burst to use while talking with kubernetes apiserver\")\n\tfs.DurationVar(&s.UDPIdleTimeout.Duration, \"udp-timeout\", s.UDPIdleTimeout.Duration, \"How long an idle UDP connection will be kept open (e.g. '250ms', '2s'). Must be greater than 0. Only applicable for proxy-mode=userspace\")\n\tfs.IntVar(&s.ConntrackMax, \"conntrack-max\", s.ConntrackMax, \"Maximum number of NAT connections to track (0 to leave as-is)\")\n\tfs.DurationVar(&s.ConntrackTCPEstablishedTimeout.Duration, \"conntrack-tcp-timeout-established\", s.ConntrackTCPEstablishedTimeout.Duration, \"Idle timeout for established TCP connections (0 to leave as-is)\")\n}", "func (m *UserExperienceAnalyticsDeviceStartupHistory) SetIsFeatureUpdate(value *bool)() {\n err := m.GetBackingStore().Set(\"isFeatureUpdate\", value)\n if err != nil {\n panic(err)\n }\n}", "func AddFlags(flags *flag.FlagSet) {\n\tflags.Int(collectorQueueSize, DefaultQueueSize, \"The queue size of the collector\")\n\tflags.Int(collectorNumWorkers, DefaultNumWorkers, \"The number of workers pulling items from the queue\")\n\tflags.Int(collectorHTTPPort, 0, collectorHTTPPortWarning+\" see --\"+CollectorHTTPHostPort)\n\tflags.Int(collectorGRPCPort, 0, collectorGRPCPortWarning+\" see --\"+CollectorGRPCHostPort)\n\tflags.Int(collectorZipkinHTTPPort, 0, collectorZipkinHTTPPortWarning+\" see --\"+CollectorZipkinHTTPHostPort)\n\tflags.Uint(collectorDynQueueSizeMemory, 0, \"(experimental) The max memory size in MiB to use for the dynamic queue.\")\n\tflags.String(collectorTags, \"\", \"One or more tags to be added to 
the Process tags of all spans passing through this collector. Ex: key1=value1,key2=${envVar:defaultValue}\")\n\tflags.String(collectorZipkinAllowedOrigins, \"*\", \"Comma separated list of allowed origins for the Zipkin collector service, default accepts all\")\n\tflags.String(collectorZipkinAllowedHeaders, \"content-type\", \"Comma separated list of allowed headers for the Zipkin collector service, default content-type\")\n\tAddOTELJaegerFlags(flags)\n\tAddOTELZipkinFlags(flags)\n}", "func processFlags() *flags {\n\tflags := flags{}\n\n\tflag.StringVar(&flags.configFile, \"configfile\", \"\", \"Config File to process\")\n\tflag.StringVar(&flags.consulAddress, \"consuladdr\", \"\", \"Consul Address\")\n\tflag.StringVar(&flags.consulDatacenter, \"consuldc\", \"global\", \"Consul Datacentre\")\n\tflag.StringVar(&flags.consulPrefix, \"consulprefix\", \"\", \"Consul Prefix\")\n\tflag.StringVar(&flags.consulScheme, \"consulscheme\", \"http\", \"Consul Scheme\")\n\tflag.StringVar(&flags.consulToken, \"consultoken\", \"\", \"Consul Token\")\n\tflag.BoolVar(&flags.displayVer, \"version\", false, \"Display version and exit\")\n\tflag.Parse()\n\n\tif flags.displayVer == true {\n\t\tfmt.Printf(\"Build Type: %s\\n\", buildinfo.BuildType)\n\t\tfmt.Printf(\"Build TimeStamp: %s\\n\", buildinfo.BuildStamp)\n\t\tfmt.Printf(\"Build Revision: %s\\n\", buildinfo.BuildRevision)\n\t\tos.Exit(0)\n\t}\n\n\treturn &flags\n}", "func configureFlags(api *operations.EsiAPI) {\n\t// api.CommandLineOptionsGroups = []swag.CommandLineOptionsGroup{ ... 
}\n}", "func (o *ClientOptions) AddFlags(fs *flag.FlagSet) {\n\tfs.StringVar(&o.LogID, \"cloud-logging-logs-id\", o.LogID,\n\t\t\"For cloud logging, the log stream ID.\")\n\tfs.StringVar(&o.ServiceName, \"cloud-logging-service\", o.ServiceName,\n\t\t\"For cloud logging, the service name.\")\n\tfs.StringVar(&o.ProjectID, \"cloud-logging-project-name\", o.ProjectID,\n\t\t\"For cloud logging, the project name.\")\n\tfs.StringVar(&o.ResourceType, \"cloud-logging-resource-type\", o.ResourceType,\n\t\t\"For cloud logging, the instance name.\")\n\tfs.StringVar(&o.ResourceID, \"cloud-logging-resource-id\", o.ResourceID,\n\t\t\"For cloud logging, the instance ID.\")\n\tfs.StringVar(&o.Region, \"cloud-logging-region\", o.Region,\n\t\t\"For cloud logging, the region.\")\n\tfs.StringVar(&o.UserID, \"cloud-logging-user\", o.UserID,\n\t\t\"For cloud logging, the user ID.\")\n\tfs.StringVar(&o.Zone, \"cloud-logging-zone\", o.Zone,\n\t\t\"For cloud logging, the zone.\")\n}", "func (s *StatsGraph) SetFlags() {\n\tif !(s.ZoomToken == \"\") {\n\t\ts.Flags.Set(0)\n\t}\n}", "func (d *Driver) GetCreateFlags() []mcnflag.Flag {\n\treturn []mcnflag.Flag{\n\t\tmcnflag.IntFlag{\n\t\t\tEnvVar: \"VSPHERE_CPU_COUNT\",\n\t\t\tName: \"vmwarevsphere-cpu-count\",\n\t\t\tUsage: \"vSphere CPU number for docker VM\",\n\t\t\tValue: defaultCpus,\n\t\t},\n\t\tmcnflag.IntFlag{\n\t\t\tEnvVar: \"VSPHERE_MEMORY_SIZE\",\n\t\t\tName: \"vmwarevsphere-memory-size\",\n\t\t\tUsage: \"vSphere size of memory for docker VM (in MB)\",\n\t\t\tValue: defaultMemory,\n\t\t},\n\t\tmcnflag.IntFlag{\n\t\t\tEnvVar: \"VSPHERE_DISK_SIZE\",\n\t\t\tName: \"vmwarevsphere-disk-size\",\n\t\t\tUsage: \"vSphere size of disk for docker VM (in MB)\",\n\t\t\tValue: defaultDiskSize,\n\t\t},\n\t\tmcnflag.StringFlag{\n\t\t\tEnvVar: \"VSPHERE_BOOT2DOCKER_URL\",\n\t\t\tName: \"vmwarevsphere-boot2docker-url\",\n\t\t\tUsage: \"vSphere URL for boot2docker image\",\n\t\t},\n\t\tmcnflag.StringFlag{\n\t\t\tEnvVar: \"VSPHERE_VCENTER\",\n\t\t\tName: 
\"vmwarevsphere-vcenter\",\n\t\t\tUsage: \"vSphere IP/hostname for vCenter\",\n\t\t},\n\t\tmcnflag.IntFlag{\n\t\t\tEnvVar: \"VSPHERE_VCENTER_PORT\",\n\t\t\tName: \"vmwarevsphere-vcenter-port\",\n\t\t\tUsage: \"vSphere Port for vCenter\",\n\t\t\tValue: defaultSDKPort,\n\t\t},\n\t\tmcnflag.StringFlag{\n\t\t\tEnvVar: \"VSPHERE_USERNAME\",\n\t\t\tName: \"vmwarevsphere-username\",\n\t\t\tUsage: \"vSphere username\",\n\t\t},\n\t\tmcnflag.StringFlag{\n\t\t\tEnvVar: \"VSPHERE_PASSWORD\",\n\t\t\tName: \"vmwarevsphere-password\",\n\t\t\tUsage: \"vSphere password\",\n\t\t},\n\t\tmcnflag.StringSliceFlag{\n\t\t\tEnvVar: \"VSPHERE_NETWORK\",\n\t\t\tName: \"vmwarevsphere-network\",\n\t\t\tUsage: \"vSphere network where the docker VM will be attached\",\n\t\t},\n\t\tmcnflag.StringFlag{\n\t\t\tEnvVar: \"VSPHERE_DATASTORE\",\n\t\t\tName: \"vmwarevsphere-datastore\",\n\t\t\tUsage: \"vSphere datastore for docker VM\",\n\t\t},\n\t\tmcnflag.StringFlag{\n\t\t\tEnvVar: \"VSPHERE_DATACENTER\",\n\t\t\tName: \"vmwarevsphere-datacenter\",\n\t\t\tUsage: \"vSphere datacenter for docker VM\",\n\t\t},\n\t\tmcnflag.StringFlag{\n\t\t\tEnvVar: \"VSPHERE_POOL\",\n\t\t\tName: \"vmwarevsphere-pool\",\n\t\t\tUsage: \"vSphere resource pool for docker VM\",\n\t\t},\n\t\tmcnflag.StringFlag{\n\t\t\tEnvVar: \"VSPHERE_HOSTSYSTEM\",\n\t\t\tName: \"vmwarevsphere-hostsystem\",\n\t\t\tUsage: \"vSphere compute resource where the docker VM will be instantiated. 
This can be omitted if using a cluster with DRS.\",\n\t\t},\n\t\tmcnflag.StringSliceFlag{\n\t\t\tEnvVar: \"VSPHERE_CFGPARAM\",\n\t\t\tName: \"vmwarevsphere-cfgparam\",\n\t\t\tUsage: \"vSphere vm configuration parameters (used for guestinfo)\",\n\t\t},\n\t\tmcnflag.StringFlag{\n\t\t\tEnvVar: \"VSPHERE_CLOUDINIT\",\n\t\t\tName: \"vmwarevsphere-cloudinit\",\n\t\t\tUsage: \"vSphere cloud-init file or url to set in the guestinfo\",\n\t\t},\n\t}\n}", "func (o *Options) AddFlags(fs *pflag.FlagSet) {\n\tfs.StringVar(&o.HelperImage, \"helper-image\", defaultHelperImage,\n\t\t\"The image that instrumentate mysql.\")\n\n\tfs.StringVar(&o.MetricsExporterImage, \"metrics-exporter-image\", defaultExporterImage,\n\t\t\"The image for mysql metrics exporter.\")\n\tfs.StringVar(&o.ImagePullSecretName, \"image-pull-secret\", \"\",\n\t\t\"The secret name for used as pull secret.\")\n\n\tfs.VarP(newPullPolicyValue(defaultImagePullPolicy, &o.ImagePullPolicy),\n\t\t\"image-pull-policy\", \"\", \"Set image pull policy.\")\n\n\tfs.StringVar(&o.OrchestratorURI, \"orchestrator-uri\", \"\",\n\t\t\"The orchestrator uri\")\n\tfs.StringVar(&o.OrchestratorTopologyPassword, \"orchestrator-topology-password\", defaultOrchestratorTopologyUser,\n\t\t\"The orchestrator topology password. Can also be set as ORC_TOPOLOGY_PASSWORD environment variable.\")\n\tfs.StringVar(&o.OrchestratorTopologyUser, \"orchestrator-topology-user\", defaultOrchestratorTopologyPassword,\n\t\t\"The orchestrator topology user. 
Can also be set as ORC_TOPOLOGY_USER environment variable.\")\n\tfs.DurationVar(&o.JobCompleteSuccessGraceTime, \"job-grace-time\", defaultJobGraceTime,\n\t\t\"The time in hours how jobs after completion are keept.\")\n\n\tfs.StringVar(&o.HTTPServeAddr, \"http-serve-addr\", defaultHTTPServerAddr,\n\t\t\"The address for http server.\")\n\n\tfs.StringVar(&o.LeaderElectionNamespace, \"leader-election-namespace\", defaultLeaderElectionNamespace,\n\t\t\"The leader election namespace.\")\n\tfs.StringVar(&o.LeaderElectionID, \"leader-election-id\", defaultLeaderElectionID,\n\t\t\"The leader election id.\")\n}", "func FeatureOverride(dda *DatadogAgentSpec, dso *DatadogAgentSpec) {\n\tif dda.Features.NetworkMonitoring != nil && apiutils.BoolValue(dda.Features.NetworkMonitoring.Enabled) {\n\t\t// If the Network Monitoring Feature is enabled, enable the System Probe.\n\t\tif !apiutils.BoolValue(dda.Agent.Enabled) {\n\t\t\tif dda.Agent.SystemProbe == nil {\n\t\t\t\tdda.Agent.SystemProbe = DefaultDatadogAgentSpecAgentSystemProbe(&dda.Agent)\n\t\t\t}\n\t\t\tdda.Agent.SystemProbe.Enabled = apiutils.NewBoolPointer(true)\n\t\t\tdso.Agent.SystemProbe = DefaultDatadogAgentSpecAgentSystemProbe(&dda.Agent)\n\t\t\tdso.Agent.SystemProbe.Enabled = apiutils.NewBoolPointer(true)\n\t\t}\n\t}\n\tif dda.Features.NetworkMonitoring != nil && apiutils.BoolValue(dda.Features.NetworkMonitoring.Enabled) ||\n\t\tdda.Features.OrchestratorExplorer != nil && apiutils.BoolValue(dda.Features.OrchestratorExplorer.Enabled) {\n\t\t// If the Network Monitoring or the Orchestrator Explorer Feature is enabled, enable the Process Agent.\n\t\tif !apiutils.BoolValue(dda.Agent.Enabled) {\n\t\t\tif dda.Agent.Process == nil {\n\t\t\t\tdda.Agent.Process = DefaultDatadogAgentSpecAgentProcess(&dda.Agent)\n\t\t\t}\n\t\t\tdda.Agent.Process.Enabled = apiutils.NewBoolPointer(true)\n\t\t\tdso.Agent.Process = DefaultDatadogAgentSpecAgentProcess(&dda.Agent)\n\t\t\tdso.Agent.Process.Enabled = 
apiutils.NewBoolPointer(true)\n\t\t}\n\t}\n}", "func CheckFeatureFlag(v *viper.Viper) error {\n\treturn nil\n}", "func AddFlags(flags *flag.FlagSet) {\n\tflags.Int(collectorQueueSize, app.DefaultQueueSize, \"The queue size of the collector\")\n\tflags.Int(collectorNumWorkers, app.DefaultNumWorkers, \"The number of workers pulling items from the queue\")\n\tflags.Duration(collectorWriteCacheTTL, time.Hour*12, \"The duration to wait before rewriting an existing service or operation name\")\n\tflags.Int(collectorPort, 14267, \"The tchannel port for the collector service\")\n\tflags.Int(collectorHTTPPort, 14268, \"The http port for the collector service\")\n\tflags.Int(collectorZipkinHTTPort, 0, \"The http port for the Zipkin collector service e.g. 9411\")\n\tflags.Int(collectorHealthCheckHTTPPort, 14269, \"The http port for the health check service\")\n\tflags.Bool(collectorAuthSpan, false, \"Defines if incoming spans should be authenticated\")\n\tflags.String(collectorSpanAuthTagKey, app.DefaultSpanAuthTagKey, \"The name of the tag's key associated with password / api token\")\n\tflags.Int(collectorAuthManagerCacheSize, 1000, \"The size of the authentication manager cache\")\n\tflags.Duration(collectorAuthManagerCacheTTL, time.Second * 3600, \"The TTL of the auth manager cache items\")\n}", "func (s *EtcdOptions) AddFlags(fs *pflag.FlagSet) {\n\tif s == nil {\n\t\treturn\n\t}\n\n\tfs.StringSliceVar(&s.EtcdServersOverrides, \"etcd-servers-overrides\", s.EtcdServersOverrides, \"\"+\n\t\t\"Per-resource etcd servers overrides, comma separated. The individual override \"+\n\t\t\"format: group/resource#servers, where servers are URLs, semicolon separated. \"+\n\t\t\"Note that this applies only to resources compiled into this server binary. \")\n\n\tfs.StringVar(&s.DefaultStorageMediaType, \"storage-media-type\", s.DefaultStorageMediaType, \"\"+\n\t\t\"The media type to use to store objects in storage. 
\"+\n\t\t\"Some resources or storage backends may only support a specific media type and will ignore this setting.\")\n\tfs.IntVar(&s.DeleteCollectionWorkers, \"delete-collection-workers\", s.DeleteCollectionWorkers,\n\t\t\"Number of workers spawned for DeleteCollection call. These are used to speed up namespace cleanup.\")\n\n\tfs.BoolVar(&s.EnableGarbageCollection, \"enable-garbage-collector\", s.EnableGarbageCollection, \"\"+\n\t\t\"Enables the generic garbage collector. MUST be synced with the corresponding flag \"+\n\t\t\"of the kube-controller-manager.\")\n\n\tfs.BoolVar(&s.EnableWatchCache, \"watch-cache\", s.EnableWatchCache,\n\t\t\"Enable watch caching in the apiserver\")\n\n\tfs.IntVar(&s.DefaultWatchCacheSize, \"default-watch-cache-size\", s.DefaultWatchCacheSize,\n\t\t\"Default watch cache size. If zero, watch cache will be disabled for resources that do not have a default watch size set.\")\n\n\tfs.StringSliceVar(&s.WatchCacheSizes, \"watch-cache-sizes\", s.WatchCacheSizes, \"\"+\n\t\t\"Watch cache size settings for some resources (pods, nodes, etc.), comma separated. \"+\n\t\t\"The individual setting format: resource[.group]#size, where resource is lowercase plural (no version), \"+\n\t\t\"group is omitted for resources of apiVersion v1 (the legacy core API) and included for others, \"+\n\t\t\"and size is a number. It takes effect when watch-cache is enabled. \"+\n\t\t\"Some resources (replicationcontrollers, endpoints, nodes, pods, services, apiservices.apiregistration.k8s.io) \"+\n\t\t\"have system defaults set by heuristics, others default to default-watch-cache-size\")\n\n\tfs.StringVar(&s.StorageConfig.Type, \"storage-backend\", s.StorageConfig.Type,\n\t\t\"The storage backend for persistence. 
Options: 'etcd3' (default).\")\n\n\tfs.StringSliceVar(&s.StorageConfig.Transport.ServerList, \"etcd-servers\", s.StorageConfig.Transport.ServerList,\n\t\t\"List of etcd servers to connect with (scheme://ip:port), comma separated.\")\n\n\tfs.StringVar(&s.StorageConfig.Prefix, \"etcd-prefix\", s.StorageConfig.Prefix,\n\t\t\"The prefix to prepend to all resource paths in etcd.\")\n\n\tfs.StringVar(&s.StorageConfig.Transport.KeyFile, \"etcd-keyfile\", s.StorageConfig.Transport.KeyFile,\n\t\t\"SSL key file used to secure etcd communication.\")\n\n\tfs.StringVar(&s.StorageConfig.Transport.CertFile, \"etcd-certfile\", s.StorageConfig.Transport.CertFile,\n\t\t\"SSL certification file used to secure etcd communication.\")\n\n\tfs.StringVar(&s.StorageConfig.Transport.TrustedCAFile, \"etcd-cafile\", s.StorageConfig.Transport.TrustedCAFile,\n\t\t\"SSL Certificate Authority file used to secure etcd communication.\")\n\n\tfs.StringVar(&s.EncryptionProviderConfigFilepath, \"encryption-provider-config\", s.EncryptionProviderConfigFilepath,\n\t\t\"The file containing configuration for encryption providers to be used for storing secrets in etcd\")\n\n\tfs.DurationVar(&s.StorageConfig.CompactionInterval, \"etcd-compaction-interval\", s.StorageConfig.CompactionInterval,\n\t\t\"The interval of compaction requests. If 0, the compaction request from apiserver is disabled.\")\n\n\tfs.DurationVar(&s.StorageConfig.CountMetricPollPeriod, \"etcd-count-metric-poll-period\", s.StorageConfig.CountMetricPollPeriod, \"\"+\n\t\t\"Frequency of polling etcd for number of resources per type. 0 disables the metric collection.\")\n\n\tfs.DurationVar(&s.StorageConfig.DBMetricPollInterval, \"etcd-db-metric-poll-interval\", s.StorageConfig.DBMetricPollInterval,\n\t\t\"The interval of requests to poll etcd and update metric. 
0 disables the metric collection\")\n\n\tfs.DurationVar(&s.StorageConfig.HealthcheckTimeout, \"etcd-healthcheck-timeout\", s.StorageConfig.HealthcheckTimeout,\n\t\t\"The timeout to use when checking etcd health.\")\n\n\tfs.Int64Var(&s.StorageConfig.LeaseManagerConfig.ReuseDurationSeconds, \"lease-reuse-duration-seconds\", s.StorageConfig.LeaseManagerConfig.ReuseDurationSeconds,\n\t\t\"The time in seconds that each lease is reused. A lower value could avoid large number of objects reusing the same lease. Notice that a too small value may cause performance problems at storage layer.\")\n}", "func AddFlags(flags *flag.FlagSet) {\n\tflags.String(reporterType, string(GRPC), fmt.Sprintf(\"Reporter type to use e.g. %s\", string(GRPC)))\n\tif !setupcontext.IsAllInOne() {\n\t\tflags.String(agentTags, \"\", \"One or more tags to be added to the Process tags of all spans passing through this agent. Ex: key1=value1,key2=${envVar:defaultValue}\")\n\t}\n}", "func (fft FeatureFlagToggles) SetRouteServices(toggle bool) {\n\tfft[routeServicesFlag] = toggle\n}", "func (s *cpuSource) Discover() error {\n\ts.features = nfdv1alpha1.NewFeatures()\n\n\t// Detect CPUID\n\ts.features.Flags[CpuidFeature] = nfdv1alpha1.NewFlagFeatures(getCpuidFlags()...)\n\n\t// Detect CPU model\n\ts.features.Attributes[Cpumodel] = nfdv1alpha1.NewAttributeFeatures(getCPUModel())\n\n\t// Detect cstate configuration\n\tcstate, err := detectCstate()\n\tif err != nil {\n\t\tklog.ErrorS(err, \"failed to detect cstate\")\n\t} else {\n\t\ts.features.Attributes[CstateFeature] = nfdv1alpha1.NewAttributeFeatures(cstate)\n\t}\n\n\t// Detect pstate features\n\tpstate, err := detectPstate()\n\tif err != nil {\n\t\tklog.ErrorS(err, \"failed to detect pstate\")\n\t}\n\ts.features.Attributes[PstateFeature] = nfdv1alpha1.NewAttributeFeatures(pstate)\n\n\t// Detect RDT features\n\ts.features.Attributes[RdtFeature] = nfdv1alpha1.NewAttributeFeatures(discoverRDT())\n\n\t// Detect available guest protection(SGX,TDX,SEV) 
features\n\ts.features.Attributes[SecurityFeature] = nfdv1alpha1.NewAttributeFeatures(discoverSecurity())\n\n\t// Detect SGX features\n\t//\n\t// DEPRECATED in v0.12: will be removed in the future\n\tif val, ok := s.features.Attributes[SecurityFeature].Elements[\"sgx.enabled\"]; ok {\n\t\ts.features.Attributes[SgxFeature] = nfdv1alpha1.NewAttributeFeatures(map[string]string{\"enabled\": val})\n\t}\n\n\t// Detect Secure Execution features\n\t//\n\t// DEPRECATED in v0.12: will be removed in the future\n\tif val, ok := s.features.Attributes[SecurityFeature].Elements[\"se.enabled\"]; ok {\n\t\ts.features.Attributes[SeFeature] = nfdv1alpha1.NewAttributeFeatures(map[string]string{\"enabled\": val})\n\t}\n\n\t// Detect SST features\n\ts.features.Attributes[SstFeature] = nfdv1alpha1.NewAttributeFeatures(discoverSST())\n\n\t// Detect hyper-threading\n\ts.features.Attributes[TopologyFeature] = nfdv1alpha1.NewAttributeFeatures(discoverTopology())\n\n\t// Detect Coprocessor features\n\ts.features.Attributes[CoprocessorFeature] = nfdv1alpha1.NewAttributeFeatures(discoverCoprocessor())\n\n\tklog.V(3).InfoS(\"discovered features\", \"featureSource\", s.Name(), \"features\", utils.DelayedDumper(s.features))\n\n\treturn nil\n}", "func featureInfoList(ctx context.Context, cl *featuregateclient.FeatureGateClient, featuregate string) ([]FeatureInfo, error) {\n\tclusterFeatures, err := cl.GetFeatureList(ctx)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tgateList, err := cl.GetFeatureGateList(ctx)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tfeatureInfos := collectFeaturesInfo(gateList.Items, clusterFeatures.Items)\n\n\tsetShowInList(featureInfos, includeExperimental, featuregate)\n\n\tfilteredList := featuresFilteredByFlags(featureInfos, activated, deactivated)\n\treturn filteredList, nil\n}", "func configureFlags(api *operations.CalculatorAPI) {\n\t// api.CommandLineOptionsGroups = []swag.CommandLineOptionsGroup{ ... 
}\n}", "func configureFlags(api *operations.ConfigServerAPI) {\n\t// api.CommandLineOptionsGroups = []swag.CommandLineOptionsGroup{ ... }\n}", "func (f *EnvFlags) Feature() string {\n\treturn f.feature\n}", "func configureFlags(api *operations.SwaggertestAPI) {\n\t// api.CommandLineOptionsGroups = []swag.CommandLineOptionsGroup{ ... }\n}", "func (sc *ServerConn) Features(ctx context.Context) (*ServerFeatures, error) {\n\tvar feats ServerFeatures\n\terr := sc.Request(ctx, \"server.features\", nil, &feats)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &feats, nil\n}", "func (sd *SousNewDeploy) AddFlags(fs *flag.FlagSet) {\n\tMustAddFlags(fs, &sd.DeployFilterFlags, NewDeployFilterFlagsHelp)\n\n\tfs.BoolVar(&sd.force, \"force\", false,\n\t\t\"force deploy no matter if GDM already is at the correct version\")\n\tfs.BoolVar(&sd.waitStable, \"wait-stable\", true,\n\t\t\"wait for the deploy to complete before returning (otherwise, use --wait-stable=false)\")\n\tfs.StringVar(&sd.dryrunOption, \"dry-run\", \"none\",\n\t\t\"prevent rectify from actually changing things - \"+\n\t\t\t\"values are none,scheduler,registry,both\")\n}", "func featuresFilteredByFlags(infos map[string]*FeatureInfo, activated, deactivated bool) []FeatureInfo {\n\tvar filteredList []FeatureInfo\n\tfor _, v := range infos {\n\t\tif activated && v.Activated && v.ShowInList {\n\t\t\tfilteredList = append(filteredList, *v)\n\t\t}\n\n\t\tif deactivated && !v.Activated && v.ShowInList {\n\t\t\tfilteredList = append(filteredList, *v)\n\t\t}\n\n\t\t// No flags were provided, so only filter out features that shouldn't be listed.\n\t\tif !activated && !deactivated && v.ShowInList {\n\t\t\tfilteredList = append(filteredList, *v)\n\t\t}\n\t}\n\treturn filteredList\n}", "func ServerFlags(cfg *config.Config) []cli.Flag {\n\treturn []cli.Flag{\n\t\t&cli.StringFlag{\n\t\t\tName: \"web.address\",\n\t\t\tValue: \"0.0.0.0:9000\",\n\t\t\tUsage: \"Address to bind the metrics server\",\n\t\t\tEnvVars: 
[]string{\"PROMETHEUS_SCW_WEB_ADDRESS\"},\n\t\t\tDestination: &cfg.Server.Addr,\n\t\t},\n\t\t&cli.StringFlag{\n\t\t\tName: \"web.path\",\n\t\t\tValue: \"/metrics\",\n\t\t\tUsage: \"Path to bind the metrics server\",\n\t\t\tEnvVars: []string{\"PROMETHEUS_SCW_WEB_PATH\"},\n\t\t\tDestination: &cfg.Server.Path,\n\t\t},\n\t\t&cli.StringFlag{\n\t\t\tName: \"web.config\",\n\t\t\tValue: \"\",\n\t\t\tUsage: \"Path to web-config file\",\n\t\t\tEnvVars: []string{\"PROMETHEUS_SCW_WEB_CONFIG\"},\n\t\t\tDestination: &cfg.Server.Web,\n\t\t},\n\t\t&cli.StringFlag{\n\t\t\tName: \"output.engine\",\n\t\t\tValue: \"file\",\n\t\t\tUsage: \"Enabled engine like file or http\",\n\t\t\tEnvVars: []string{\"PROMETHEUS_SCW_OUTPUT_ENGINE\"},\n\t\t\tDestination: &cfg.Target.Engine,\n\t\t},\n\t\t&cli.StringFlag{\n\t\t\tName: \"output.file\",\n\t\t\tValue: \"/etc/prometheus/scw.json\",\n\t\t\tUsage: \"Path to write the file_sd config\",\n\t\t\tEnvVars: []string{\"PROMETHEUS_SCW_OUTPUT_FILE\"},\n\t\t\tDestination: &cfg.Target.File,\n\t\t},\n\t\t&cli.IntFlag{\n\t\t\tName: \"output.refresh\",\n\t\t\tValue: 30,\n\t\t\tUsage: \"Discovery refresh interval in seconds\",\n\t\t\tEnvVars: []string{\"PROMETHEUS_SCW_OUTPUT_REFRESH\"},\n\t\t\tDestination: &cfg.Target.Refresh,\n\t\t},\n\t\t&cli.BoolFlag{\n\t\t\tName: \"scw.check_instance\",\n\t\t\tValue: true,\n\t\t\tUsage: \"Enable instance gathering\",\n\t\t\tEnvVars: []string{\"PROMETHEUS_SCW_CHECK_INSTANCE\"},\n\t\t\tDestination: &cfg.Target.CheckInstance,\n\t\t},\n\t\t&cli.BoolFlag{\n\t\t\tName: \"scw.check_baremetal\",\n\t\t\tValue: true,\n\t\t\tUsage: \"Enable baremetal gathering\",\n\t\t\tEnvVars: []string{\"PROMETHEUS_SCW_CHECK_BAREMETAL\"},\n\t\t\tDestination: &cfg.Target.CheckBaremetal,\n\t\t},\n\t\t&cli.StringFlag{\n\t\t\tName: \"scw.access_key\",\n\t\t\tValue: \"\",\n\t\t\tUsage: \"Access key for the Scaleway API\",\n\t\t\tEnvVars: []string{\"PROMETHEUS_SCW_ACCESS_KEY\"},\n\t\t},\n\t\t&cli.StringFlag{\n\t\t\tName: \"scw.secret_key\",\n\t\t\tValue: 
\"\",\n\t\t\tUsage: \"Secret key for the Scaleway API\",\n\t\t\tEnvVars: []string{\"PROMETHEUS_SCW_SECRET_KEY\"},\n\t\t},\n\t\t&cli.StringFlag{\n\t\t\tName: \"scw.org\",\n\t\t\tValue: \"\",\n\t\t\tUsage: \"Organization for the Scaleway API\",\n\t\t\tEnvVars: []string{\"PROMETHEUS_SCW_ORG\"},\n\t\t},\n\t\t&cli.StringFlag{\n\t\t\tName: \"scw.zone\",\n\t\t\tValue: \"\",\n\t\t\tUsage: \"Zone for the Scaleway API\",\n\t\t\tEnvVars: []string{\"PROMETHEUS_SCW_ZONE\"},\n\t\t},\n\t\t&cli.StringFlag{\n\t\t\tName: \"scw.config\",\n\t\t\tValue: \"\",\n\t\t\tUsage: \"Path to Scaleway configuration file\",\n\t\t\tEnvVars: []string{\"PROMETHEUS_SCW_CONFIG\"},\n\t\t},\n\t\t&cli.StringSliceFlag{\n\t\t\tName: \"scw.instance_zone\",\n\t\t\tValue: cli.NewStringSlice(\"fr-par-1\", \"nl-ams-1\"),\n\t\t\tUsage: \"List of available zones for instance API\",\n\t\t\tEnvVars: []string{\"PROMETHEUS_SCW_INSTANCE_ZONES\"},\n\t\t\tHidden: true,\n\t\t},\n\t\t&cli.StringSliceFlag{\n\t\t\tName: \"scw.baremetal_zone\",\n\t\t\tValue: cli.NewStringSlice(\"fr-par-2\"),\n\t\t\tUsage: \"List of available zones for baremetal API\",\n\t\t\tEnvVars: []string{\"PROMETHEUS_SCW_BAREMETAL_ZONES\"},\n\t\t\tHidden: true,\n\t\t},\n\t}\n}", "func (s *ServerOption) AddFlags(fs *flag.FlagSet) {\n\tfs.BoolVar(&s.JsonLogFormat, \"json-log-format\", true, \"Set true to use json style log format. Set false to use plaintext style log format\")\n\tfs.StringVar(&s.AreaConfigPath, \"area-config-path\", \"https://raw.githubusercontent.com/kubeflow/community/master/labels-owners.yaml\", \"Path to the YAML file mapping area labels to owners.\")\n\tfs.IntVar(&s.Port, \"port\", 8080, \"The port to use for an http server.\")\n}", "func processCommandLineFlags(s *suite.Suite) {\n\tgetopt.HelpColumn = 35\n\tgetopt.DisplayWidth = 120\n\tgetopt.SetParameters(\"\")\n\tgetopt.Parse()\n\n\t// Lets check to see if the version command line flag was given. 
If it is\n\t// lets print out the version information and exit.\n\tif *bOptVer {\n\t\tprintOutputHeader()\n\t\tos.Exit(0)\n\t}\n\n\t// Lets check to see if the help command line flag was given. If it is lets\n\t// print out the help information and exit.\n\tif *bOptHelp {\n\t\tprintOutputHeader()\n\t\tgetopt.Usage()\n\t\tos.Exit(0)\n\t}\n\n\t// ------------------------------------------------------------\n\t// Map command line parameters to struct values\n\t// ------------------------------------------------------------\n\ts.Verbose = *bOptVerbose\n\ts.Debug = *bOptDebug\n\n\ts.Settings.URL = *sOptURL\n\ts.Settings.Proxy = *sOptProxy\n\ts.Settings.Discovery = *sOptDiscovery\n\ts.Settings.APIRoot = *sOptAPIRoot\n\ts.Settings.Username = *sOptUsername\n\ts.Settings.Password = *sOptPassword\n\n\ts.CollectionIDs.ReadOnly = *sOptReadOnly\n\ts.CollectionIDs.WriteOnly = *sOptWriteOnly\n\ts.CollectionIDs.ReadWrite = *sOptReadWrite\n}", "func (s *StorageSerializationOptions) AddFlags(fs *pflag.FlagSet) {\n\t// Note: the weird \"\"+ in below lines seems to be the only way to get gofmt to\n\t// arrange these text blocks sensibly. Grrr.\n\n\tdeprecatedStorageVersion := \"\"\n\tfs.StringVar(&deprecatedStorageVersion, \"storage-version\", deprecatedStorageVersion,\n\t\t\"DEPRECATED: the version to store the legacy v1 resources with. Defaults to server preferred.\")\n\tfs.MarkDeprecated(\"storage-version\", \"--storage-version is deprecated and will be removed when the v1 API \"+\n\t\t\"is retired. Setting this has no effect. See --storage-versions instead.\")\n\n\tfs.StringVar(&s.StorageVersions, \"storage-versions\", s.StorageVersions, \"\"+\n\t\t\"The per-group version to store resources in. \"+\n\t\t\"Specified in the format \\\"group1/version1,group2/version2,...\\\". \"+\n\t\t\"In the case where objects are moved from one group to the other, \"+\n\t\t\"you may specify the format \\\"group1=group2/v1beta1,group3/v1beta1,...\\\". 
\"+\n\t\t\"You only need to pass the groups you wish to change from the defaults. \"+\n\t\t\"It defaults to a list of preferred versions of all registered groups, \"+\n\t\t\"which is derived from the KUBE_API_VERSIONS environment variable.\")\n\n}", "func (s *Server) ConfigureFlags() {\n\tif s.api != nil {\n\t\tconfigureFlags(s.api)\n\t}\n}", "func (cfg *Config) AddFlags(fs *flag.FlagSet) {\n\tfs.StringVar(&cfg.GatewayIP, \"gateway.ip\", cfg.GatewayIP, \"ScaleIO Gateway IP\")\n\tfs.StringVar(&cfg.Username, \"gateway.username\", cfg.Username, \"ScaleIO Gateway Username\")\n\tfs.StringVar(&cfg.Password, \"gateway.password\", cfg.Password, \"ScaleIO Gateway Password\")\n\tfs.StringVar(&cfg.Version, \"gateway.version\", cfg.Version, \"ScaleIO Gateway Version\")\n\tfs.StringVar(&cfg.SdsList, \"gateway.sds\", cfg.SdsList, \"ScaleIO SDS List\")\n}", "func configureFlags(api *operations.ControlAsistenciaAPI) {\n\t// api.CommandLineOptionsGroups = []swag.CommandLineOptionsGroup{ ... }\n}", "func (a *AdminApiService) CreateFeatureFlag(ctx _context.Context, featureFlag FeatureFlag) (*_nethttp.Response, error) {\n\tvar (\n\t\tlocalVarHTTPMethod = _nethttp.MethodPost\n\t\tlocalVarPostBody interface{}\n\t\tlocalVarFormFileName string\n\t\tlocalVarFileName string\n\t\tlocalVarFileBytes []byte\n\t)\n\n\t// create path and map variables\n\tlocalVarPath := a.client.cfg.BasePath + \"/admin/feature-flag\"\n\tlocalVarHeaderParams := make(map[string]string)\n\tlocalVarQueryParams := _neturl.Values{}\n\tlocalVarFormParams := _neturl.Values{}\n\n\t// to determine the Content-Type header\n\tlocalVarHTTPContentTypes := []string{\"application/json\"}\n\n\t// set Content-Type header\n\tlocalVarHTTPContentType := selectHeaderContentType(localVarHTTPContentTypes)\n\tif localVarHTTPContentType != \"\" {\n\t\tlocalVarHeaderParams[\"Content-Type\"] = localVarHTTPContentType\n\t}\n\n\t// to determine the Accept header\n\tlocalVarHTTPHeaderAccepts := []string{\"application/json\"}\n\n\t// set Accept 
header\n\tlocalVarHTTPHeaderAccept := selectHeaderAccept(localVarHTTPHeaderAccepts)\n\tif localVarHTTPHeaderAccept != \"\" {\n\t\tlocalVarHeaderParams[\"Accept\"] = localVarHTTPHeaderAccept\n\t}\n\t// body params\n\tlocalVarPostBody = &featureFlag\n\tr, err := a.client.prepareRequest(ctx, localVarPath, localVarHTTPMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFormFileName, localVarFileName, localVarFileBytes)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tlocalVarHTTPResponse, err := a.client.callAPI(r)\n\tif err != nil || localVarHTTPResponse == nil {\n\t\treturn localVarHTTPResponse, err\n\t}\n\n\tlocalVarBody, err := _ioutil.ReadAll(localVarHTTPResponse.Body)\n\tlocalVarHTTPResponse.Body.Close()\n\tif err != nil {\n\t\treturn localVarHTTPResponse, err\n\t}\n\n\tif localVarHTTPResponse.StatusCode >= 300 {\n\t\tnewErr := GenericOpenAPIError{\n\t\t\tbody: localVarBody,\n\t\t\terror: localVarHTTPResponse.Status,\n\t\t}\n\t\tif localVarHTTPResponse.StatusCode == 400 {\n\t\t\tvar v Error\n\t\t\terr = a.client.decode(&v, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn localVarHTTPResponse, newErr\n\t\t\t}\n\t\t\tnewErr.model = v\n\t\t\treturn localVarHTTPResponse, newErr\n\t\t}\n\t\tif localVarHTTPResponse.StatusCode == 401 {\n\t\t\tvar v Error\n\t\t\terr = a.client.decode(&v, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn localVarHTTPResponse, newErr\n\t\t\t}\n\t\t\tnewErr.model = v\n\t\t\treturn localVarHTTPResponse, newErr\n\t\t}\n\t\tif localVarHTTPResponse.StatusCode == 403 {\n\t\t\tvar v Error\n\t\t\terr = a.client.decode(&v, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn localVarHTTPResponse, newErr\n\t\t\t}\n\t\t\tnewErr.model = 
v\n\t\t\treturn localVarHTTPResponse, newErr\n\t\t}\n\t\tif localVarHTTPResponse.StatusCode == 409 {\n\t\t\tvar v Error\n\t\t\terr = a.client.decode(&v, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn localVarHTTPResponse, newErr\n\t\t\t}\n\t\t\tnewErr.model = v\n\t\t\treturn localVarHTTPResponse, newErr\n\t\t}\n\t\tif localVarHTTPResponse.StatusCode == 500 {\n\t\t\tvar v Error\n\t\t\terr = a.client.decode(&v, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn localVarHTTPResponse, newErr\n\t\t\t}\n\t\t\tnewErr.model = v\n\t\t}\n\t\treturn localVarHTTPResponse, newErr\n\t}\n\n\treturn localVarHTTPResponse, nil\n}", "func (s *MesosTurboService) AddFlags(fs *pflag.FlagSet) {\n\tfs.StringVar(&s.MesosMasterConfig, \"mesosconfig\", s.MesosMasterConfig, \"Path to the mesos config file.\")\n\tfs.StringVar(&s.TurboCommConfig, \"turboconfig\", s.TurboCommConfig, \"Path to the turbo config flag.\")\n\n\tfs.StringVar(&s.Master, \"mesostype\", s.Master, \"Mesos Master Type 'Apache Mesos'|'Mesosphere DCOS'\")\n\tfs.StringVar(&s.MasterIPPort, \"masteripport\", s.MasterIPPort, \"Comma separated list of IP:port of each Mesos Master in the cluster\")\n\tfs.StringVar(&s.MasterUsername, \"masteruser\", s.MasterUsername, \"User for the Mesos Master\")\n\tfs.StringVar(&s.MasterPassword, \"masterpwd\", s.MasterPassword, \"Password for the Mesos Master\")\n\n\tfs.StringVar(&s.TurboServerUrl, \"turboserverurl\", s.TurboServerUrl, \"Url for Turbo Server\")\n\tfs.StringVar(&s.TurboServerVersion, \"turboserverversion\", s.TurboServerVersion, \"Version for Turbo Server\")\n\tfs.StringVar(&s.OpsManagerUsername, \"opsmanagerusername\", s.OpsManagerUsername, \"Username for Ops Manager\")\n\tfs.StringVar(&s.OpsManagerPassword, \"opsmanagerpassword\", s.OpsManagerPassword, \"Password for Ops Manager\")\n}", "func (b 
*AdapterBase) Flags() *pflag.FlagSet {\n\tb.initFlagSet()\n\tb.InstallFlags()\n\n\treturn b.FlagSet\n}", "func (o *Options) AddFlags(fs *pflag.FlagSet) {\n\tif o == nil {\n\t\treturn\n\t}\n\tfs.StringVar(&o.ShowHiddenMetricsForVersion, \"show-hidden-metrics-for-version\", o.ShowHiddenMetricsForVersion,\n\t\t\"The previous version for which you want to show hidden metrics. \"+\n\t\t\t\"Only the previous minor version is meaningful, other values will not be allowed. \"+\n\t\t\t\"The format is <major>.<minor>, e.g.: '1.16'. \"+\n\t\t\t\"The purpose of this format is make sure you have the opportunity to notice if the next release hides additional metrics, \"+\n\t\t\t\"rather than being surprised when they are permanently removed in the release after that.\")\n\tfs.StringSliceVar(&o.DisabledMetrics,\n\t\t\"disabled-metrics\",\n\t\to.DisabledMetrics,\n\t\t\"This flag provides an escape hatch for misbehaving metrics. \"+\n\t\t\t\"You must provide the fully qualified metric name in order to disable it. \"+\n\t\t\t\"Disclaimer: disabling metrics is higher in precedence than showing hidden metrics.\")\n\tfs.StringToStringVar(&o.AllowListMapping, \"allow-metric-labels\", o.AllowListMapping,\n\t\t\"The map from metric-label to value allow-list of this label. The key's format is <MetricName>,<LabelName>. \"+\n\t\t\t\"The value's format is <allowed_value>,<allowed_value>...\"+\n\t\t\t\"e.g. metric1,label1='v1,v2,v3', metric1,label2='v1,v2,v3' metric2,label1='v1,v2,v3'.\")\n}", "func (fft FeatureFlagToggles) RouteServices() *FeatureFlag {\n\treturn &FeatureFlag{\n\t\tName: routeServicesFlag,\n\t\tDefault: false,\n\t\tisEnabled: func(ff *FeatureFlag) bool {\n\t\t\tif setValue, ok := fft[routeServicesFlag]; ok {\n\t\t\t\treturn setValue\n\t\t\t}\n\t\t\treturn ff.Default\n\t\t},\n\t}\n}", "func configureFlags(api *operations.JiliAPI) {\n\t// api.CommandLineOptionsGroups = []swag.CommandLineOptionsGroup{ ... 
}\n}", "func (c *cmdCreate) AddFlags(fs *flag.FlagSet) {\n\tfs.StringVar(&(c.fileName), \"f\", \"\", \"gateway app file\")\n\tfs.StringVar(&(c.pingport), \"pingport\", \"\", \"ping port\")\n}", "func (o *SAControllerOptions) AddFlags(fs *pflag.FlagSet) {\n\tif o == nil {\n\t\treturn\n\t}\n\n\tfs.StringVar(&o.ServiceAccountKeyFile, \"service-account-private-key-file\", o.ServiceAccountKeyFile, \"Filename containing a PEM-encoded private RSA or ECDSA key used to sign service account tokens.\")\n\tfs.Int32Var(&o.ConcurrentSATokenSyncs, \"concurrent-serviceaccount-token-syncs\", o.ConcurrentSATokenSyncs, \"The number of service account token objects that are allowed to sync concurrently. Larger number = more responsive token generation, but more CPU (and network) load\")\n\tfs.StringVar(&o.RootCAFile, \"root-ca-file\", o.RootCAFile, \"If set, this root certificate authority will be included in service account's token secret. This must be a valid PEM-encoded CA bundle.\")\n}", "func (a *AssembliesApiService) UpdateFeature(ctx _context.Context, did string, wid string, eid string, fid string, localVarOptionals *UpdateFeatureOpts) (BtFeatureDefinitionResponse1617, *_nethttp.Response, error) {\n\tvar (\n\t\tlocalVarHTTPMethod = _nethttp.MethodPost\n\t\tlocalVarPostBody interface{}\n\t\tlocalVarFormFileName string\n\t\tlocalVarFileName string\n\t\tlocalVarFileBytes []byte\n\t\tlocalVarReturnValue BtFeatureDefinitionResponse1617\n\t)\n\n\t// create path and map variables\n\tlocalVarPath := a.client.cfg.BasePath + \"/api/assemblies/d/{did}/w/{wid}/e/{eid}/features/featureid/{fid}\"\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"did\"+\"}\", _neturl.QueryEscape(parameterToString(did, \"\")) , -1)\n\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"wid\"+\"}\", _neturl.QueryEscape(parameterToString(wid, \"\")) , -1)\n\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"eid\"+\"}\", _neturl.QueryEscape(parameterToString(eid, \"\")) , -1)\n\n\tlocalVarPath = 
strings.Replace(localVarPath, \"{\"+\"fid\"+\"}\", _neturl.QueryEscape(parameterToString(fid, \"\")) , -1)\n\n\tlocalVarHeaderParams := make(map[string]string)\n\tlocalVarQueryParams := _neturl.Values{}\n\tlocalVarFormParams := _neturl.Values{}\n\n\t// to determine the Content-Type header\n\tlocalVarHTTPContentTypes := []string{\"application/json;charset=UTF-8; qs=0.09\"}\n\n\t// set Content-Type header\n\tlocalVarHTTPContentType := selectHeaderContentType(localVarHTTPContentTypes)\n\tif localVarHTTPContentType != \"\" {\n\t\tlocalVarHeaderParams[\"Content-Type\"] = localVarHTTPContentType\n\t}\n\n\t// to determine the Accept header\n\tlocalVarHTTPHeaderAccepts := []string{\"application/vnd.onshape.v1+json;charset=UTF-8;qs=0.1\", \"application/json;charset=UTF-8; qs=0.09\"}\n\n\t// set Accept header\n\tlocalVarHTTPHeaderAccept := selectHeaderAccept(localVarHTTPHeaderAccepts)\n\tif localVarHTTPHeaderAccept != \"\" {\n\t\tlocalVarHeaderParams[\"Accept\"] = localVarHTTPHeaderAccept\n\t}\n\t// body params\n\tif localVarOptionals != nil && localVarOptionals.Body.IsSet() {\n\t\tlocalVarPostBody = localVarOptionals.Body.Value()\n\t}\n\n\tr, err := a.client.prepareRequest(ctx, localVarPath, localVarHTTPMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFormFileName, localVarFileName, localVarFileBytes)\n\tif err != nil {\n\t\treturn localVarReturnValue, nil, err\n\t}\n\n\tlocalVarHTTPResponse, err := a.client.callAPI(r)\n\tif err != nil || localVarHTTPResponse == nil {\n\t\treturn localVarReturnValue, localVarHTTPResponse, err\n\t}\n\n\tlocalVarBody, err := _ioutil.ReadAll(localVarHTTPResponse.Body)\n\tlocalVarHTTPResponse.Body.Close()\n\tif err != nil {\n\t\treturn localVarReturnValue, localVarHTTPResponse, err\n\t}\n\n\tif localVarHTTPResponse.StatusCode >= 300 {\n\t\tnewErr := GenericOpenAPIError{\n\t\t\tbody: localVarBody,\n\t\t\terror: localVarHTTPResponse.Status,\n\t\t}\n\t\t\tvar v 
BtFeatureDefinitionResponse1617\n\t\t\terr = a.client.decode(&v, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t\t\t}\n\t\t\tnewErr.model = v\n\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t}\n\n\terr = a.client.decode(&localVarReturnValue, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\tif err != nil {\n\t\tnewErr := GenericOpenAPIError{\n\t\t\tbody: localVarBody,\n\t\t\terror: err.Error(),\n\t\t}\n\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t}\n\n\treturn localVarReturnValue, localVarHTTPResponse, nil\n}", "func (a *AdminApiService) DeleteFeatureFlag(ctx _context.Context, id string) (*_nethttp.Response, error) {\n\tvar (\n\t\tlocalVarHTTPMethod = _nethttp.MethodDelete\n\t\tlocalVarPostBody interface{}\n\t\tlocalVarFormFileName string\n\t\tlocalVarFileName string\n\t\tlocalVarFileBytes []byte\n\t)\n\n\t// create path and map variables\n\tlocalVarPath := a.client.cfg.BasePath + \"/admin/feature-flag/{id}\"\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"id\"+\"}\", _neturl.QueryEscape(parameterToString(id, \"\")) , -1)\n\n\tlocalVarHeaderParams := make(map[string]string)\n\tlocalVarQueryParams := _neturl.Values{}\n\tlocalVarFormParams := _neturl.Values{}\n\n\t// to determine the Content-Type header\n\tlocalVarHTTPContentTypes := []string{}\n\n\t// set Content-Type header\n\tlocalVarHTTPContentType := selectHeaderContentType(localVarHTTPContentTypes)\n\tif localVarHTTPContentType != \"\" {\n\t\tlocalVarHeaderParams[\"Content-Type\"] = localVarHTTPContentType\n\t}\n\n\t// to determine the Accept header\n\tlocalVarHTTPHeaderAccepts := []string{\"application/json\"}\n\n\t// set Accept header\n\tlocalVarHTTPHeaderAccept := selectHeaderAccept(localVarHTTPHeaderAccepts)\n\tif localVarHTTPHeaderAccept != \"\" {\n\t\tlocalVarHeaderParams[\"Accept\"] = 
localVarHTTPHeaderAccept\n\t}\n\tr, err := a.client.prepareRequest(ctx, localVarPath, localVarHTTPMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFormFileName, localVarFileName, localVarFileBytes)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tlocalVarHTTPResponse, err := a.client.callAPI(r)\n\tif err != nil || localVarHTTPResponse == nil {\n\t\treturn localVarHTTPResponse, err\n\t}\n\n\tlocalVarBody, err := _ioutil.ReadAll(localVarHTTPResponse.Body)\n\tlocalVarHTTPResponse.Body.Close()\n\tif err != nil {\n\t\treturn localVarHTTPResponse, err\n\t}\n\n\tif localVarHTTPResponse.StatusCode >= 300 {\n\t\tnewErr := GenericOpenAPIError{\n\t\t\tbody: localVarBody,\n\t\t\terror: localVarHTTPResponse.Status,\n\t\t}\n\t\tif localVarHTTPResponse.StatusCode == 401 {\n\t\t\tvar v Error\n\t\t\terr = a.client.decode(&v, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn localVarHTTPResponse, newErr\n\t\t\t}\n\t\t\tnewErr.model = v\n\t\t\treturn localVarHTTPResponse, newErr\n\t\t}\n\t\tif localVarHTTPResponse.StatusCode == 403 {\n\t\t\tvar v Error\n\t\t\terr = a.client.decode(&v, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn localVarHTTPResponse, newErr\n\t\t\t}\n\t\t\tnewErr.model = v\n\t\t\treturn localVarHTTPResponse, newErr\n\t\t}\n\t\tif localVarHTTPResponse.StatusCode == 404 {\n\t\t\tvar v Error\n\t\t\terr = a.client.decode(&v, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn localVarHTTPResponse, newErr\n\t\t\t}\n\t\t\tnewErr.model = v\n\t\t\treturn localVarHTTPResponse, newErr\n\t\t}\n\t\tif localVarHTTPResponse.StatusCode == 500 {\n\t\t\tvar v Error\n\t\t\terr = a.client.decode(&v, localVarBody, 
localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn localVarHTTPResponse, newErr\n\t\t\t}\n\t\t\tnewErr.model = v\n\t\t}\n\t\treturn localVarHTTPResponse, newErr\n\t}\n\n\treturn localVarHTTPResponse, nil\n}", "func SetFeatureGates(flags map[string][]string, featureGates featuregate.MutableFeatureGate) ([]string, error) {\n\tfeatureGatesMap := map[string]bool{}\n\tfeatureGateParser := flag.NewMapStringBool(&featureGatesMap)\n\tfor _, val := range flags[\"feature-gates\"] {\n\t\tif err := featureGateParser.Set(val); err != nil {\n\t\t\treturn []string{}, err\n\t\t}\n\t}\n\n\treturn setFeatureGates(featureGatesMap, featureGates)\n}", "func AddFlags(flags *flag.FlagSet) {\n\tflags.String(\n\t\thttpServerHostPort,\n\t\tdefaultHTTPServerHostPort,\n\t\t\"host:port of the http server (e.g. for /sampling point and /baggageRestrictions endpoint)\")\n\n\tfor _, p := range defaultProcessors {\n\t\tprefix := fmt.Sprintf(processorPrefixFmt, p.model, p.protocol)\n\t\tflags.Int(prefix+suffixWorkers, defaultServerWorkers, \"how many workers the processor should run\")\n\t\tflags.Int(prefix+suffixServerQueueSize, defaultQueueSize, \"length of the queue for the UDP server\")\n\t\tflags.Int(prefix+suffixServerMaxPacketSize, defaultMaxPacketSize, \"max packet size for the UDP server\")\n\t\tflags.Int(prefix+suffixServerSocketBufferSize, 0, \"socket buffer size for UDP packets in bytes\")\n\t\tflags.String(prefix+suffixServerHostPort, \":\"+strconv.Itoa(p.port), \"host:port for the UDP server\")\n\t}\n}", "func NewSvcFlag(p *SvcFlagParam) ServiceFlags {\n\tvar flags ServiceFlags\n\n\tswitch p.SvcType {\n\tcase SVCTypeExternalIPs:\n\t\tflags |= serviceFlagExternalIPs\n\tcase SVCTypeNodePort:\n\t\tflags |= serviceFlagNodePort\n\tcase SVCTypeLoadBalancer:\n\t\tflags |= serviceFlagLoadBalancer\n\tcase SVCTypeHostPort:\n\t\tflags |= serviceFlagHostPort\n\t\tif p.LoopbackHostport {\n\t\t\tflags |= 
serviceFlagLoopback\n\t\t}\n\tcase SVCTypeLocalRedirect:\n\t\tflags |= serviceFlagLocalRedirect\n\t}\n\n\tswitch p.SvcNatPolicy {\n\tcase SVCNatPolicyNat46:\n\t\tfallthrough\n\tcase SVCNatPolicyNat64:\n\t\tflags |= serviceFlagNat46x64\n\t}\n\n\tif p.SvcExtLocal {\n\t\tflags |= serviceFlagExtLocalScope\n\t}\n\tif p.SvcIntLocal {\n\t\tflags |= serviceFlagIntLocalScope\n\t}\n\tif p.SessionAffinity {\n\t\tflags |= serviceFlagSessionAffinity\n\t}\n\tif p.IsRoutable {\n\t\tflags |= serviceFlagRoutable\n\t}\n\tif p.CheckSourceRange {\n\t\tflags |= serviceFlagSourceRange\n\t}\n\tif p.L7LoadBalancer {\n\t\tflags |= serviceFlagL7LoadBalancer\n\t}\n\tif p.SvcExtLocal != p.SvcIntLocal && p.SvcType != SVCTypeClusterIP {\n\t\tflags |= serviceFlagTwoScopes\n\t}\n\n\treturn flags\n}", "func (sdr SDR) HandleFlags() (err error) {\n\t// Catch any errors panicked while visiting flags.\n\tdefer func() {\n\t\tif r := recover(); r != nil {\n\t\t\terr = r.(error)\n\t\t}\n\t}()\n\n\tflag.CommandLine.Visit(func(f *flag.Flag) {\n\t\tvar err error\n\t\tswitch f.Name {\n\t\tcase \"centerfreq\":\n\t\t\terr = sdr.SetCenterFreq(uint32(sdr.Flags.CenterFreq))\n\t\tcase \"samplerate\":\n\t\t\terr = sdr.SetSampleRate(uint32(sdr.Flags.SampleRate))\n\t\tcase \"tunergainmode\":\n\t\t\terr = sdr.SetGainMode(sdr.Flags.TunerGainMode)\n\t\tcase \"tunergain\":\n\t\t\terr = sdr.SetGain(uint32(sdr.Flags.TunerGain * 10.0))\n\t\tcase \"freqcorrection\":\n\t\t\terr = sdr.SetFreqCorrection(uint32(sdr.Flags.FreqCorrection))\n\t\tcase \"testmode\":\n\t\t\terr = sdr.SetTestMode(sdr.Flags.TestMode)\n\t\tcase \"agcmode\":\n\t\t\terr = sdr.SetAGCMode(sdr.Flags.AgcMode)\n\t\tcase \"directsampling\":\n\t\t\terr = sdr.SetDirectSampling(sdr.Flags.DirectSampling)\n\t\tcase \"offsettuning\":\n\t\t\terr = sdr.SetOffsetTuning(sdr.Flags.OffsetTuning)\n\t\tcase \"rtlxtalfreq\":\n\t\t\terr = sdr.SetRTLXtalFreq(uint32(sdr.Flags.RtlXtalFreq))\n\t\tcase \"tunerxtalfreq\":\n\t\t\terr = 
sdr.SetTunerXtalFreq(uint32(sdr.Flags.TunerXtalFreq))\n\t\tcase \"gainbyindex\":\n\t\t\terr = sdr.SetGainByIndex(uint32(sdr.Flags.GainByIndex))\n\t\t}\n\n\t\t// If we encounter an error, panic to catch in parent scope.\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t})\n\n\treturn\n}", "func (s *LifecyclerRPCServer) Flags(args interface{}, resp *PluginFlags) (err error) {\n\t*resp, err = s.Plugin.Flags()\n\treturn err\n}", "func configureFlags(api *operations.LolchestWinAPI) {\n\t// api.CommandLineOptionsGroups = []swag.CommandLineOptionsGroup{ ... }\n}", "func (o *ResourcepoolPoolMember) SetFeatures(v []string) {\n\to.Features = v\n}", "func configureFlags(api *operations.ReservoirAPI) {\n\t// api.CommandLineOptionsGroups = []swag.CommandLineOptionsGroup{ ... }\n}", "func configureFlags(api *operations.KubernikusAPI) {\n\t// api.CommandLineOptionsGroups = []swag.CommandLineOptionsGroup{ ... }\n}", "func (o *SetupOptions) AddFlags(flagSet *pflag.FlagSet) {\n\t// Add flags for generic options\n\tflagSet.StringVarP(&o.LogLevel, \"log-level\", \"l\", \"INFO\", \"log print level\")\n\tflagSet.StringVarP(&o.ConfigPath, \"config\", \"c\", \"./cmd/client/conf/config-dev.toml\", \"init client by given config\")\n}", "func registerFlags(td *OsmTestData) {\n\tflag.BoolVar(&td.CleanupTest, \"cleanupTest\", true, \"Cleanup test resources when done\")\n\tflag.BoolVar(&td.WaitForCleanup, \"waitForCleanup\", true, \"Wait for effective deletion of resources\")\n\tflag.BoolVar(&td.IgnoreRestarts, \"ignoreRestarts\", false, \"When true, will not make tests fail if restarts of control plane processes are observed\")\n\n\tflag.StringVar(&td.TestDirBase, \"testDirBase\", testFolderBase, \"Test directory base. 
Test directory name will be created inside.\")\n\n\tflag.StringVar((*string)(&td.InstType), \"installType\", string(SelfInstall), \"Type of install/deployment for OSM\")\n\tflag.StringVar((*string)(&td.CollectLogs), \"collectLogs\", string(CollectLogsIfErrorOnly), \"Defines if/when to collect logs.\")\n\n\tflag.StringVar(&td.ClusterName, \"kindClusterName\", \"osm-e2e\", \"Name of the Kind cluster to be created\")\n\n\tflag.BoolVar(&td.CleanupKindCluster, \"cleanupKindCluster\", true, \"Cleanup kind cluster upon exit\")\n\tflag.BoolVar(&td.CleanupKindClusterBetweenTests, \"cleanupKindClusterBetweenTests\", false, \"Cleanup kind cluster between tests\")\n\tflag.StringVar(&td.ClusterVersion, \"kindClusterVersion\", \"\", \"Kind cluster version, ex. v.1.20.2\")\n\n\tflag.StringVar(&td.CtrRegistryServer, \"ctrRegistry\", os.Getenv(\"CTR_REGISTRY\"), \"Container registry\")\n\tflag.StringVar(&td.CtrRegistryUser, \"ctrRegistryUser\", os.Getenv(\"CTR_REGISTRY_USER\"), \"Container registry\")\n\tflag.StringVar(&td.CtrRegistryPassword, \"ctrRegistrySecret\", os.Getenv(\"CTR_REGISTRY_PASSWORD\"), \"Container registry secret\")\n\n\tflag.StringVar(&td.OsmImageTag, \"osmImageTag\", utils.GetEnv(\"CTR_TAG\", defaultImageTag), \"OSM image tag\")\n\tflag.StringVar(&td.OsmNamespace, \"OsmNamespace\", utils.GetEnv(\"K8S_NAMESPACE\", defaultOsmNamespace), \"OSM Namespace\")\n\tflag.StringVar(&td.OsmMeshConfigName, \"OsmMeshConfig\", defaultMeshConfigName, \"OSM MeshConfig name\")\n\n\tflag.BoolVar(&td.EnableNsMetricTag, \"EnableMetricsTag\", true, \"Enable tagging Namespaces for metrics collection\")\n\tflag.BoolVar(&td.DeployOnOpenShift, \"deployOnOpenShift\", false, \"Configure tests to run on OpenShift\")\n\tflag.BoolVar(&td.DeployOnWindowsWorkers, \"deployOnWindowsWorkers\", false, \"Configure tests to run on Windows workers\")\n\tflag.BoolVar(&td.RetryAppPodCreation, \"retryAppPodCreation\", true, \"Retry app pod creation on error\")\n\tflag.BoolVar(&td.EnableSPIFFE, 
\"enableSPIFFE\", false, \"Globally Enables SPIFFE IDs when running tests\")\n}", "func (client BaseClient) CreateFeature(ctx context.Context, body *Feature) (result Feature, err error) {\n\tif err := validation.Validate([]validation.Validation{\n\t\t{TargetValue: body,\n\t\t\tConstraints: []validation.Constraint{{Target: \"body\", Name: validation.Null, Rule: false,\n\t\t\t\tChain: []validation.Constraint{{Target: \"body.Name\", Name: validation.Null, Rule: true, Chain: nil},\n\t\t\t\t\t{Target: \"body.Version\", Name: validation.Null, Rule: true,\n\t\t\t\t\t\tChain: []validation.Constraint{{Target: \"body.Version\", Name: validation.Pattern, Rule: `^v?((\\d+)\\.(\\d+)\\.(\\d+))(?:-([\\dA-Za-z\\-]+(?:\\.[\\dA-Za-z\\-]+)*))?(?:\\+([\\dA-Za-z\\-]+(?:\\.[\\dA-Za-z\\-]+)*))?$`, Chain: nil}}},\n\t\t\t\t\t{Target: \"body.Path\", Name: validation.Null, Rule: false,\n\t\t\t\t\t\tChain: []validation.Constraint{{Target: \"body.Path\", Name: validation.Pattern, Rule: `^nrn:beacon:(?<tenant>[^:]+:(?<type>sys|exp|ftr|fin):(?<feature>[^:]+)?:(?<version>[^:]+)?:(?<instance>[^:]*)?:(?<system>[^:]*)?:(?<name>[^:]*)?)$`, Chain: nil}}},\n\t\t\t\t}}}}}); err != nil {\n\t\treturn result, validation.NewError(\"beacon.BaseClient\", \"CreateFeature\", err.Error())\n\t}\n\n\treq, err := client.CreateFeaturePreparer(ctx, body)\n\tif err != nil {\n\t\terr = autorest.NewErrorWithError(err, \"beacon.BaseClient\", \"CreateFeature\", nil, \"Failure preparing request\")\n\t\treturn\n\t}\n\n\tresp, err := client.CreateFeatureSender(req)\n\tif err != nil {\n\t\tresult.Response = autorest.Response{Response: resp}\n\t\terr = autorest.NewErrorWithError(err, \"beacon.BaseClient\", \"CreateFeature\", resp, \"Failure sending request\")\n\t\treturn\n\t}\n\n\tresult, err = client.CreateFeatureResponder(resp)\n\tif err != nil {\n\t\terr = autorest.NewErrorWithError(err, \"beacon.BaseClient\", \"CreateFeature\", resp, \"Failure responding to request\")\n\t}\n\n\treturn\n}", "func (a *AssembliesApiService) 
AddFeature(ctx _context.Context, did string, wvm string, wvmid string, eid string, localVarOptionals *AddFeatureOpts) (BtFeatureDefinitionResponse1617, *_nethttp.Response, error) {\n\tvar (\n\t\tlocalVarHTTPMethod = _nethttp.MethodPost\n\t\tlocalVarPostBody interface{}\n\t\tlocalVarFormFileName string\n\t\tlocalVarFileName string\n\t\tlocalVarFileBytes []byte\n\t\tlocalVarReturnValue BtFeatureDefinitionResponse1617\n\t)\n\n\t// create path and map variables\n\tlocalVarPath := a.client.cfg.BasePath + \"/api/assemblies/d/{did}/{wvm}/{wvmid}/e/{eid}/features\"\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"did\"+\"}\", _neturl.QueryEscape(parameterToString(did, \"\")) , -1)\n\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"wvm\"+\"}\", _neturl.QueryEscape(parameterToString(wvm, \"\")) , -1)\n\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"wvmid\"+\"}\", _neturl.QueryEscape(parameterToString(wvmid, \"\")) , -1)\n\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"eid\"+\"}\", _neturl.QueryEscape(parameterToString(eid, \"\")) , -1)\n\n\tlocalVarHeaderParams := make(map[string]string)\n\tlocalVarQueryParams := _neturl.Values{}\n\tlocalVarFormParams := _neturl.Values{}\n\n\t// to determine the Content-Type header\n\tlocalVarHTTPContentTypes := []string{\"application/json;charset=UTF-8; qs=0.09\"}\n\n\t// set Content-Type header\n\tlocalVarHTTPContentType := selectHeaderContentType(localVarHTTPContentTypes)\n\tif localVarHTTPContentType != \"\" {\n\t\tlocalVarHeaderParams[\"Content-Type\"] = localVarHTTPContentType\n\t}\n\n\t// to determine the Accept header\n\tlocalVarHTTPHeaderAccepts := []string{\"application/vnd.onshape.v1+json;charset=UTF-8;qs=0.1\", \"application/json;charset=UTF-8; qs=0.09\"}\n\n\t// set Accept header\n\tlocalVarHTTPHeaderAccept := selectHeaderAccept(localVarHTTPHeaderAccepts)\n\tif localVarHTTPHeaderAccept != \"\" {\n\t\tlocalVarHeaderParams[\"Accept\"] = localVarHTTPHeaderAccept\n\t}\n\t// body params\n\tif 
localVarOptionals != nil && localVarOptionals.Body.IsSet() {\n\t\tlocalVarPostBody = localVarOptionals.Body.Value()\n\t}\n\n\tr, err := a.client.prepareRequest(ctx, localVarPath, localVarHTTPMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFormFileName, localVarFileName, localVarFileBytes)\n\tif err != nil {\n\t\treturn localVarReturnValue, nil, err\n\t}\n\n\tlocalVarHTTPResponse, err := a.client.callAPI(r)\n\tif err != nil || localVarHTTPResponse == nil {\n\t\treturn localVarReturnValue, localVarHTTPResponse, err\n\t}\n\n\tlocalVarBody, err := _ioutil.ReadAll(localVarHTTPResponse.Body)\n\tlocalVarHTTPResponse.Body.Close()\n\tif err != nil {\n\t\treturn localVarReturnValue, localVarHTTPResponse, err\n\t}\n\n\tif localVarHTTPResponse.StatusCode >= 300 {\n\t\tnewErr := GenericOpenAPIError{\n\t\t\tbody: localVarBody,\n\t\t\terror: localVarHTTPResponse.Status,\n\t\t}\n\t\t\tvar v BtFeatureDefinitionResponse1617\n\t\t\terr = a.client.decode(&v, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t\t\t}\n\t\t\tnewErr.model = v\n\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t}\n\n\terr = a.client.decode(&localVarReturnValue, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\tif err != nil {\n\t\tnewErr := GenericOpenAPIError{\n\t\t\tbody: localVarBody,\n\t\t\terror: err.Error(),\n\t\t}\n\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t}\n\n\treturn localVarReturnValue, localVarHTTPResponse, nil\n}", "func (ctx *Ctx) ParseFlags() {\n\tflag.Var(ctx.BugZilla.Origin, \"bugzilla-origin\", \"Bugzilla origin url\")\n\tflag.Var(ctx.BugZilla.EsIndex, \"bugzilla-es-index\", \"Bugzilla es index base name\")\n\tflag.Var(ctx.BugZilla.FromDate, \"bugzilla-from-date\", \"Optional, date to start syncing from\")\n\tflag.Var(ctx.BugZilla.Project, 
\"bugzilla-project\", \"Slug name of a project e.g. yocto\")\n\tflag.Var(ctx.BugZilla.DoFetch, \"bugzilla-do-fetch\", \"To decide whether will fetch raw data or not\")\n\tflag.Var(ctx.BugZilla.DoEnrich, \"bugzilla-do-enrich\", \"To decide whether will do enrich raw data or not.\")\n\tflag.Var(ctx.BugZilla.FetchSize, \"bugzilla-fetch-size\", \"Total number of fetched items per request.\")\n\tflag.Var(ctx.BugZilla.EnrichSize, \"bugzilla-enrich-size\", \"Total number of enriched items per request.\")\n\tflag.Var(ctx.PiperMail.ProjectSlug, \"bugzilla-slug\", \"Bugzilla project slug\")\n\n\tflag.Var(ctx.PiperMail.Origin, \"pipermail-origin\", \"Pipermail origin url\")\n\tflag.Var(ctx.PiperMail.ProjectSlug, \"pipermail-slug\", \"Pipermail project slug\")\n\tflag.Var(ctx.PiperMail.GroupName, \"pipermail-groupname\", \"Pipermail group name\")\n\tflag.Var(ctx.PiperMail.EsIndex, \"pipermail-es-index\", \"Pipermail es index base name\")\n\tflag.Var(ctx.PiperMail.FromDate, \"pipermail-from-date\", \"Optional, date to start syncing from\")\n\tflag.Var(ctx.PiperMail.Project, \"pipermail-project\", \"Slug name of a project e.g. 
yocto\")\n\tflag.Var(ctx.PiperMail.DoFetch, \"pipermail-do-fetch\", \"To decide whether will fetch raw data or not\")\n\tflag.Var(ctx.PiperMail.DoEnrich, \"pipermail-do-enrich\", \"To decide whether will do enrich raw data or not.\")\n\tflag.Var(ctx.PiperMail.FetchSize, \"pipermail-fetch-size\", \"Total number of fetched items per request.\")\n\tflag.Var(ctx.PiperMail.EnrichSize, \"pipermail-enrich-size\", \"Total number of enriched items per request.\")\n\n\tflag.Var(ctx.GoogleGroups.ProjectSlug, \"googlegroups-slug\", \"GoogleGroups project slug\")\n\tflag.Var(ctx.GoogleGroups.GroupName, \"googlegroups-groupname\", \"GoogleGroups email address\")\n\tflag.Var(ctx.GoogleGroups.EsIndex, \"googlegroups-es-index\", \"GoogleGroups es index base name\")\n\tflag.Var(ctx.GoogleGroups.FromDate, \"googlegroups-from-date\", \"Optional, date to start syncing from\")\n\tflag.Var(ctx.GoogleGroups.Project, \"googlegroups-project\", \"Slug name of a project e.g. yocto\")\n\tflag.Var(ctx.GoogleGroups.DoFetch, \"googlegroups-do-fetch\", \"To decide whether will fetch raw data or not\")\n\tflag.Var(ctx.GoogleGroups.DoEnrich, \"googlegroups-do-enrich\", \"To decide whether will do enrich raw data or not.\")\n\tflag.Var(ctx.GoogleGroups.FetchSize, \"googlegroups-fetch-size\", \"Total number of fetched items per request.\")\n\tflag.Var(ctx.GoogleGroups.EnrichSize, \"googlegroups-enrich-size\", \"Total number of enriched items per request.\")\n\n\tflag.Parse()\n}", "func FeatureVector(b *Board, version int) (f []float32) {\n\tif version > AllFeaturesDim {\n\t\tlog.Panicf(\"Requested %d features, but only know about %d\", version, AllFeaturesDim)\n\t}\n\tf = make([]float32, AllFeaturesDim)\n\tfor ii := range AllFeatures {\n\t\tfeatDef := &AllFeatures[ii]\n\t\tif featDef.Version <= version {\n\t\t\tfeatDef.Setter(b, featDef, f)\n\t\t}\n\t}\n\n\tif version != AllFeaturesDim {\n\t\t// Filter only features for given version.\n\t\tnewF := make([]float32, 0, version)\n\t\tfor ii := range 
AllFeatures {\n\t\t\tfeatDef := &AllFeatures[ii]\n\t\t\tif featDef.Version <= version {\n\t\t\t\tnewF = append(newF, f[featDef.VecIndex:featDef.VecIndex+featDef.Dim]...)\n\t\t\t}\n\t\t}\n\t\tf = newF\n\t}\n\n\treturn\n}", "func configureFlags(api *operations.OpenMockAPI) {\n\t// api.CommandLineOptionsGroups = []swag.CommandLineOptionsGroup{ ... }\n}", "func configureFlags(api *operations.MonocularAPI) {\n\t// api.CommandLineOptionsGroups = []swag.CommandLineOptionsGroup{ ... }\n}", "func setupFlags(params, paramsJSON string) *pflag.FlagSet {\n\tflagSet := pflag.NewFlagSet(\"TestGetParamsFromFlags\", pflag.PanicOnError)\n\tregisterParamsFlags(flagSet)\n\t// mirror actual usage by using Parse rather than Set\n\tcmdline := []string{\"apply\"}\n\tif params != \"\" {\n\t\tcmdline = append(cmdline, \"--params\", params)\n\t}\n\tif paramsJSON != \"\" {\n\t\tcmdline = append(cmdline, \"--paramsJSON\", paramsJSON)\n\t}\n\n\tif err := flagSet.Parse(append(cmdline, \"samples/test.hcl\")); err != nil {\n\t\tpanic(err)\n\t}\n\n\treturn flagSet\n}", "func (h *HyperCommand) Flags() *pflag.FlagSet {\n\treturn h.root.Flags()\n}", "func configureFlags(api *operations.OpenPitrixAPI) {\n\t// api.CommandLineOptionsGroups = []swag.CommandLineOptionsGroup{ ... }\n}", "func (VS *Server) features(c *gin.Context) {\n\trender(c, gin.H{}, \"presentation-features.html\")\n}", "func (client *LDClient) AllFlagsState(context ldcontext.Context, options ...flagstate.Option) flagstate.AllFlags {\n\tvalid := true\n\tif client.IsOffline() {\n\t\tclient.loggers.Warn(\"Called AllFlagsState in offline mode. Returning empty state\")\n\t\tvalid = false\n\t} else if !client.Initialized() {\n\t\tif client.store.IsInitialized() {\n\t\t\tclient.loggers.Warn(\"Called AllFlagsState before client initialization; using last known values from data store\")\n\t\t} else {\n\t\t\tclient.loggers.Warn(\"Called AllFlagsState before client initialization. 
Data store not available; returning empty state\") //nolint:lll\n\t\t\tvalid = false\n\t\t}\n\t}\n\n\tif !valid {\n\t\treturn flagstate.AllFlags{}\n\t}\n\n\titems, err := client.store.GetAll(datakinds.Features)\n\tif err != nil {\n\t\tclient.loggers.Warn(\"Unable to fetch flags from data store. Returning empty state. Error: \" + err.Error())\n\t\treturn flagstate.AllFlags{}\n\t}\n\n\tclientSideOnly := false\n\tfor _, o := range options {\n\t\tif o == flagstate.OptionClientSideOnly() {\n\t\t\tclientSideOnly = true\n\t\t\tbreak\n\t\t}\n\t}\n\n\tstate := flagstate.NewAllFlagsBuilder(options...)\n\tfor _, item := range items {\n\t\tif item.Item.Item != nil {\n\t\t\tif flag, ok := item.Item.Item.(*ldmodel.FeatureFlag); ok {\n\t\t\t\tif clientSideOnly && !flag.ClientSideAvailability.UsingEnvironmentID {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\n\t\t\t\tresult := client.evaluator.Evaluate(flag, context, nil)\n\n\t\t\t\tstate.AddFlag(\n\t\t\t\t\titem.Key,\n\t\t\t\t\tflagstate.FlagState{\n\t\t\t\t\t\tValue: result.Detail.Value,\n\t\t\t\t\t\tVariation: result.Detail.VariationIndex,\n\t\t\t\t\t\tReason: result.Detail.Reason,\n\t\t\t\t\t\tVersion: flag.Version,\n\t\t\t\t\t\tTrackEvents: flag.TrackEvents || result.IsExperiment,\n\t\t\t\t\t\tTrackReason: result.IsExperiment,\n\t\t\t\t\t\tDebugEventsUntilDate: flag.DebugEventsUntilDate,\n\t\t\t\t\t},\n\t\t\t\t)\n\t\t\t}\n\t\t}\n\t}\n\n\treturn state.Build()\n}" ]
[ "0.6283442", "0.58581585", "0.5759627", "0.56996477", "0.56361234", "0.5633944", "0.5601203", "0.5553357", "0.5548972", "0.55474377", "0.53957814", "0.53191704", "0.5276696", "0.5251556", "0.52426904", "0.5235017", "0.5213624", "0.5191827", "0.5179796", "0.5133154", "0.5120499", "0.51138586", "0.5092476", "0.5061599", "0.50592816", "0.50440687", "0.50232065", "0.49985152", "0.49919987", "0.49903297", "0.49874517", "0.49853578", "0.49526414", "0.49375442", "0.49143115", "0.48979914", "0.48936018", "0.48913407", "0.48873863", "0.48645976", "0.48630604", "0.4857825", "0.48540926", "0.4851144", "0.48507842", "0.4837795", "0.48188666", "0.48136187", "0.47989306", "0.47966653", "0.47813874", "0.4773839", "0.47676682", "0.4765815", "0.4745114", "0.4743934", "0.47432616", "0.47102305", "0.4708159", "0.47001722", "0.46999368", "0.4689274", "0.46850637", "0.46823114", "0.46820906", "0.46818152", "0.46815935", "0.46803293", "0.46718532", "0.4666791", "0.46652535", "0.46509233", "0.4648145", "0.46424124", "0.4641459", "0.4640468", "0.46400046", "0.46387824", "0.46294883", "0.46260738", "0.46257395", "0.4611324", "0.46098238", "0.4602865", "0.45977983", "0.45973852", "0.45812213", "0.45743674", "0.45725963", "0.45717955", "0.45549417", "0.45528668", "0.45518783", "0.45470873", "0.45296225", "0.452792", "0.4524855", "0.45227766", "0.45217437", "0.45112896" ]
0.78591985
0
ZeroLengthSectionAsEOF sets whether to allow the CARv1 decoder to treat a zerolength section as the end of the input CAR file. For example, this can be useful to allow "null padding" after a CARv1 without knowing where the padding begins.
ZeroLengthSectionAsEOF определяет, разрешать ли декодеру CARv1 рассматривать раздел нулевой длины как конец входного файла CAR. Например, это может быть полезно для разрешения "нулевого заполнения" после CARv1 без знания того, где начинается заполнение.
func ZeroLengthSectionAsEOF(enable bool) Option { return func(o *Options) { o.ZeroLengthSectionAsEOF = enable } }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func TestReadEmptyAtEOF(t *testing.T) {\n\tb := new(Builder)\n\tslice := make([]byte, 0)\n\tn, err := b.Read(slice)\n\tif err != nil {\n\t\tt.Errorf(\"read error: %v\", err)\n\t}\n\tif n != 0 {\n\t\tt.Errorf(\"wrong count; got %d want 0\", n)\n\t}\n}", "func IsEOF(c rune, n int) bool {\n\treturn n == 0\n}", "func (*testObject) MaxHeaderLength() uint16 {\n\treturn 0\n}", "func IsZeroFilled(b []byte) bool {\n\thdr := (*reflect.SliceHeader)((unsafe.Pointer)(&b))\n\tdata := unsafe.Pointer(hdr.Data)\n\tlength := hdr.Len\n\tif length == 0 {\n\t\treturn true\n\t}\n\n\tif uintptr(data)&0x07 != 0 {\n\t\t// the data is not aligned, fallback to a simple way\n\t\treturn isZeroFilledSimple(b)\n\t}\n\n\tdataEnd := uintptr(data) + uintptr(length)\n\tdataWordsEnd := uintptr(dataEnd) & ^uintptr(0x07)\n\t// example:\n\t//\n\t// 012345678901234567\n\t// wwwwwwwwWWWWWWWWtt : w -- word 0; W -- word 1; t -- tail\n\t// ^\n\t// |\n\t// +-- dataWordsEnd\n\tfor ; uintptr(data) < dataWordsEnd; data = unsafe.Pointer(uintptr(data) + 8) {\n\t\tif *(*uint64)(data) != 0 {\n\t\t\treturn false\n\t\t}\n\t}\n\tfor ; uintptr(data) < dataEnd; data = unsafe.Pointer(uintptr(data) + 1) {\n\t\tif *(*uint8)(data) != 0 {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}", "func TestZeroLength(t *testing.T) {\n\tkey1, err := NewFixedLengthKeyFromReader(os.Stdin, 0)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tdefer key1.Wipe()\n\tif key1.data != nil {\n\t\tt.Error(\"Fixed length key from reader contained data\")\n\t}\n\n\tkey2, err := NewKeyFromReader(bytes.NewReader(nil))\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tdefer key2.Wipe()\n\tif key2.data != nil {\n\t\tt.Error(\"Key from empty reader contained data\")\n\t}\n}", "func (d *Decoder) ZeroEmpty(z bool) {\n\td.zeroEmpty = z\n}", "func forceEOF(yylex interface{}) {\n\tyylex.(*Tokenizer).ForceEOF = true\n}", "func forceEOF(yylex interface{}) {\n\tyylex.(*Tokenizer).ForceEOF = true\n}", "func forceEOF(yylex interface{}) {\n\tyylex.(*Tokenizer).ForceEOF 
= true\n}", "func forceEOF(yylex interface{}) {\n\tyylex.(*Tokenizer).ForceEOF = true\n}", "func forceEOF(yylex interface{}) {\n\tyylex.(*Tokenizer).ForceEOF = true\n}", "func TestIgnoreTruncatedPacketEOF(t *testing.T) {\n\toutputFile, err := ioutil.TempFile(\"\", \"joincap_output_\")\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\toutputFile.Close()\n\tdefer os.Remove(outputFile.Name())\n\n\terr = joincap([]string{\"joincap\",\n\t\t\"-v\", \"-w\", outputFile.Name(),\n\t\t\"test_pcaps/unexpected_eof_on_second_packet.pcap\"})\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\ttestIsOrdered(t, outputFile.Name())\n\n\tif packetCount(t, outputFile.Name()) != 1 {\n\t\tt.Fatal(\"error counting\")\n\t}\n}", "func (f *Feature) EndZero() uint64 {\n\treturn f.StartZero()\n}", "func (ps *Parser) EOF() bool {\n\treturn ps.Offset >= len(ps.Runes)\n}", "func ReadSection0(reader io.Reader) (section0 Section0, err error) {\n\tsection0.Indicator = 255\n\terr = binary.Read(reader, binary.BigEndian, &section0)\n\tif err != nil {\n\t\treturn section0, err\n\t}\n\n\tif section0.Indicator == Grib {\n\t\tif section0.Edition != SupportedGribEdition {\n\t\t\treturn section0, fmt.Errorf(\"Unsupported grib edition %d\", section0.Edition)\n\t\t}\n\t} else {\n\t\treturn section0, fmt.Errorf(\"Unsupported grib indicator %d\", section0.Indicator)\n\t}\n\n\treturn\n\n}", "func (e *EDNS0_PADDING) Option() uint16 { return EDNS0PADDING }", "func (suite *RunePartTestSuite) TestReadToZeroLengthBuffer() {\n\tpart := runePart{runeVal: 'a'}\n\tbuff := make([]byte, 0, 0)\n\tcount, err := part.Read(buff)\n\tsuite.Nil(err)\n\tsuite.Equal(0, count)\n\tsuite.Equal(\"\", string(buff))\n}", "func AtEnd() OffsetOpt {\n\treturn offsetOpt{func(o *Offset) { o.request = -1 }}\n}", "func ZeroHeader() Header {\n\treturn Header{}\n}", "func eof(err error) bool { return err == io.EOF }", "func (h Header) IsOneway() bool {\n\treturn h[2]&0x20 == 0x20\n}", "func fmtChunkWithExtraOfZeroLen(t *testing.T) io.Reader {\n\tsrc := 
&bytes.Buffer{}\n\ttest.ReadFrom(t, src, Uint32(IDfmt)) // ( 0) 4 - Chunk ID\n\ttest.WriteUint32LE(t, src, 16+2) // ( 4) 4 - Chunk size\n\ttest.WriteUint16LE(t, src, CompPCM) // ( 6) 2 - CompCode\n\ttest.WriteUint16LE(t, src, 1) // ( 8) 2 - ChannelCnt\n\ttest.WriteUint32LE(t, src, 44100) // (10) 4 - SampleRate\n\ttest.WriteUint32LE(t, src, 88200) // (14) 4 - AvgByteRate\n\ttest.WriteUint16LE(t, src, 2) // (18) 2 - BlockAlign\n\ttest.WriteUint16LE(t, src, 16) // (20) 2 - BitsPerSample\n\ttest.WriteUint16LE(t, src, 0) // (22) 2 - ExtraBytes\n\t// Total length: 8+16+2+0=26\n\treturn src\n}", "func (d *Document) IsZero() bool {\n\treturn d == nil || (d.Version == \"\" && len(d.Markups) == 0 &&\n\t\tlen(d.Atoms) == 0 && len(d.Cards) == 0 && len(d.Sections) == 0)\n}", "func (c *Conn) parseEOFPacket(b []byte) bool {\n\tvar off int\n\n\toff++ // [fe] the EOF header (= _PACKET_EOF)\n\t// TODO: reset warning count\n\tc.warnings += binary.LittleEndian.Uint16(b[off : off+2])\n\toff += 2\n\tc.statusFlags = binary.LittleEndian.Uint16(b[off : off+2])\n\n\treturn c.reportWarnings()\n}", "func (dc *FixedLenByteArrayDictConverter) FillZero(out interface{}) {\n\to := out.([]parquet.FixedLenByteArray)\n\to[0] = dc.zeroVal\n\tfor i := 1; i < len(o); i *= 2 {\n\t\tcopy(o[i:], o[:i])\n\t}\n}", "func (e LogEntry) IsEOF() bool {\n\treturn e.LineNo == -2\n}", "func (z *Stream) End() {\n\tC.lzma_end(z.C())\n}", "func (w *Writer) WriteZeros(len int) error {\n\tzeros := make([]byte, len)\n\t_, err := w.out.Write(zeros)\n\treturn err\n}", "func TestEOF(t *testing.T) {\n\tc, s := setUp(t)\n\t// Since we're not using tearDown() here, manually call Finish()\n\tdefer s.ctrl.Finish()\n\n\t// Set up a handler to detect whether disconnected handlers are called\n\tdcon := callCheck(t)\n\tc.Handle(DISCONNECTED, dcon)\n\n\t// Simulate EOF from server\n\ts.nc.Close()\n\n\t// Verify that disconnected handler was called\n\tdcon.assertWasCalled(\"Conn did not call disconnected handlers.\")\n\n\t// Verify 
that the connection no longer thinks it's connected\n\tif c.Connected() {\n\t\tt.Errorf(\"Conn still thinks it's connected to the server.\")\n\t}\n}", "func TestMultiReaderFinalEOF(t *testing.T) {\n\tr := MultiReader(bytes.NewReader(nil), byteAndEOFReader('a'))\n\tbuf := make([]byte, 2)\n\tn, err := r.Read(buf)\n\tif n != 1 || err != EOF {\n\t\tt.Errorf(\"got %v, %v; want 1, EOF\", n, err)\n\t}\n}", "func (h *Header) SetOneway(oneway bool) {\n\tif oneway {\n\t\th[2] = h[2] | 0x20\n\t} else {\n\t\th[2] = h[2] &^ 0x20\n\t}\n}", "func TestZeroLengthTagError(t *testing.T) {\n\tname := filepath.Join(*dataDir, \"corrupt/infinite_loop_exif.jpg\")\n\tf, err := os.Open(name)\n\tif err != nil {\n\t\tt.Fatalf(\"%v\\n\", err)\n\t}\n\tdefer f.Close()\n\n\t_, err = Decode(f)\n\tif err == nil {\n\t\tt.Fatal(\"no error on bad exif data\")\n\t}\n\tif !strings.Contains(err.Error(), \"exif: decode failed (tiff: recursive IFD)\") {\n\t\tt.Fatal(\"wrong error:\", err.Error())\n\t}\n}", "func TestExactReadCloserExpectEOF(t *testing.T) {\n\tbuf := bytes.NewBuffer(make([]byte, 10))\n\trc := NewExactReadCloser(&readerNilCloser{buf}, 1)\n\tif _, err := rc.Read(make([]byte, 10)); err != ErrExpectEOF {\n\t\tt.Fatalf(\"expected %v, got %v\", ErrExpectEOF, err)\n\t}\n}", "func (dc *ByteArrayDictConverter) FillZero(out interface{}) {\n\to := out.([]parquet.ByteArray)\n\to[0] = dc.zeroVal\n\tfor i := 1; i < len(o); i *= 2 {\n\t\tcopy(o[i:], o[:i])\n\t}\n}", "func (p *Buffer) EOF() bool {\n\treturn ulen(p.buf) == p.index\n}", "func (self *bipbuf_t) IsEmpty() bool {\n\treturn self.a_start >= self.a_end\n}", "func (b ByteSlice) IsZero() bool {\n\treturn !b.Valid || len(b.ByteSlice) == 0\n}", "func (ctx *Context) outzero(size uintptr) unsafe.Pointer {\n\tstart := ctx.off + int(headerOutSize)\n\tif size > 0 {\n\t\tbuf := ctx.buf[start : start+int(size)]\n\t\tfor i := range buf {\n\t\t\tbuf[i] = 0\n\t\t}\n\t}\n\treturn unsafe.Pointer(&ctx.buf[start])\n}", "func treatEOFErrorsAsNil(err error) error 
{\n\tif err == nil {\n\t\treturn nil\n\t}\n\tif errors.Is(err, io.EOF) {\n\t\treturn nil\n\t}\n\tvar te TTransportException\n\tif errors.As(err, &te) && te.TypeId() == END_OF_FILE {\n\t\treturn nil\n\t}\n\treturn err\n}", "func (mes *MarkerEncodingScheme) EndOfStream() Marker { return mes.endOfStream }", "func (r *chanReader) eof() {\n\tif !r.dataClosed {\n\t\tr.dataClosed = true\n\t\tclose(r.data)\n\t}\n}", "func TestConnReadNonzeroAndEOF(t *testing.T) {\n\t// This test is racy: it assumes that after a write to a\n\t// localhost TCP connection, the peer TCP connection can\n\t// immediately read it. Because it's racy, we skip this test\n\t// in short mode, and then retry it several times with an\n\t// increasing sleep in between our final write (via srv.Close\n\t// below) and the following read.\n\tif testing.Short() {\n\t\tt.Skip(\"skipping in short mode\")\n\t}\n\tvar err error\n\tfor delay := time.Millisecond; delay <= 64*time.Millisecond; delay *= 2 {\n\t\tif err = testConnReadNonzeroAndEOF(t, delay); err == nil {\n\t\t\treturn\n\t\t}\n\t}\n\tt.Error(err)\n}", "func TestConnReadNonzeroAndEOF(t *testing.T) {\n\t// This test is racy: it assumes that after a write to a\n\t// localhost TCP connection, the peer TCP connection can\n\t// immediately read it. 
Because it's racy, we skip this test\n\t// in short mode, and then retry it several times with an\n\t// increasing sleep in between our final write (via srv.Close\n\t// below) and the following read.\n\tif testing.Short() {\n\t\tt.Skip(\"skipping in short mode\")\n\t}\n\tvar err error\n\tfor delay := time.Millisecond; delay <= 64*time.Millisecond; delay *= 2 {\n\t\tif err = testConnReadNonzeroAndEOF(t, delay); err == nil {\n\t\t\treturn\n\t\t}\n\t}\n\tt.Error(err)\n}", "func isEmptyOrEnd(line string) bool {\n\treturn len(line) == 0 || strings.HasPrefix(line, \"!\")\n}", "func IsZeroFilled(b []byte) bool {\n\treturn isZeroFilledSimple(b)\n}", "func EnsureEmpty(r io.Reader, stage string) error {\n\tbuf := bytesPool.Get().(*[]byte)\n\tdefer bytesPool.Put(buf)\n\n\tn, err := r.Read(*buf)\n\tif n > 0 {\n\t\treturn fmt.Errorf(\"found unexpected bytes after %s, found (upto 128 bytes): %x\", stage, (*buf)[:n])\n\t}\n\tif err == io.EOF {\n\t\treturn nil\n\t}\n\treturn err\n}", "func TestInvalidLength(t *testing.T) {\n\tkey, err := NewFixedLengthKeyFromReader(ConstReader(1), -1)\n\tif err == nil {\n\t\tkey.Wipe()\n\t\tt.Error(\"Negative lengths should cause failure\")\n\t}\n}", "func (me TClipFillRuleType) IsNonzero() bool { return me.String() == \"nonzero\" }", "func (b Bytes) IsEmpty() bool { return len(b) == 0 }", "func NewEmptyAcraBlock(length int) AcraBlock {\n\tb := make([]byte, length)\n\tcopy(b[:len(tagBegin)], tagBegin)\n\treturn b\n}", "func (e EndElement) isZero() bool {\n\treturn len(e.Name.Local) == 0\n}", "func (UTF8Decoder) FullRune(p []byte) bool { return utf8.FullRune(p) }", "func isEOF(tk Token) bool {\n\treturn tk.GetName() == EOF\n}", "func (b *buffer) isEmpty() bool {\n\tif b == nil {\n\t\treturn true\n\t}\n\tif len(b.buf)-b.offset <= 0 {\n\t\treturn true\n\t}\n\treturn false\n}", "func commitEOF() error {\n\treturn cliutil.ActionError(clitypes.CommitEOF)\n}", "func (h HexBytes) IsEmpty() bool { return len(h) == 0 }", "func 
headerWithNoFileMetaInformationGroupLength() (*headerData, error) {\n\theaderData := new(headerData)\n\n\telements := []*Element{\n\t\tmustNewElement(tag.MediaStorageSOPClassUID, []string{\"SecondaryCapture\"}),\n\t\tmustNewElement(tag.MediaStorageSOPInstanceUID, []string{\"1.3.6.1.4.1.35190.4.1.20210608.607733549593\"}),\n\t\tmustNewElement(tag.TransferSyntaxUID, []string{\"=RLELossless\"}),\n\t\tmustNewElement(tag.ImplementationClassUID, []string{\"1.6.6.1.4.1.9590.100.1.0.100.4.0\"}),\n\t\tmustNewElement(tag.SOPInstanceUID, []string{\"1.3.6.1.4.1.35190.4.1.20210608.607733549593\"}),\n\t}\n\tdata, err := writeElements(elements)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Construct valid DICOM header preamble.\n\tmagicWord := []byte(\"DICM\")\n\tpreamble := make([]byte, 128)\n\tpreamble = append(preamble, magicWord...)\n\theaderBytes := append(preamble, data...)\n\theaderData.HeaderBytes = bytes.NewBuffer(headerBytes)\n\theaderData.Elements = elements[0 : len(elements)-1]\n\treturn headerData, nil\n}", "func (pe *PEFile) calculateHeaderEnd(offset uint32) {\n\tvar rawDataPointers []uint32\n\tfor _, section := range pe.Sections {\n\t\tprd := section.Data.PointerToRawData\n\t\tif prd > uint32(0x0) {\n\t\t\trawDataPointers = append(rawDataPointers, pe.adjustFileAlignment(prd))\n\t\t}\n\t}\n\tminSectionOffset := uint32(0x0)\n\tif len(rawDataPointers) > 0 {\n\t\tminSectionOffset = rawDataPointers[0]\n\t\tfor _, pointer := range rawDataPointers {\n\t\t\tif pointer < minSectionOffset {\n\t\t\t\tminSectionOffset = pointer\n\t\t\t}\n\t\t}\n\t}\n\tif minSectionOffset == 0 || minSectionOffset < offset {\n\t\tpe.headerEnd = offset\n\t} else {\n\t\tpe.headerEnd = minSectionOffset\n\t}\n}", "func IsEOF(err error) bool {\n\terr = errs.Cause(err)\n\tif err == io.EOF {\n\t\treturn true\n\t}\n\tif ok, err := libCause(err); ok {\n\t\treturn IsEOF(err)\n\t}\n\treturn false\n}", "func (f genHelperDecoder) DecReadArrayEnd() { f.d.arrayEnd() }", "func (r *Reader) NextSection() 
error {\n\tbeginOffset, err := r.fl.Seek(int64(r.nextOffset), io.SeekStart)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tvals := make([]byte, 16)\n\tbytesRead, err := r.fl.Read(vals)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// end marker\n\tif bytesRead == 8 && bytes.Equal(vals[:8], []byte{0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff}) {\n\t\treturn io.EOF\n\t}\n\n\tsectionSize := binary.LittleEndian.Uint64(vals[:8])\n\trowCount := binary.LittleEndian.Uint64(vals[8:16])\n\n\tstr, err := readZeroTerminatedString(r.fl)\n\tif err != nil {\n\t\tif err == io.EOF {\n\t\t\treturn fmt.Errorf(\"EOF while reading string section (partial: %s)\", str)\n\t\t}\n\t\treturn err\n\t}\n\n\tr.nextOffset = uint64(beginOffset) + sectionSize + 8 // well well, sectionSize includes the rowCount I guess?\n\n\tr.CurrentSection = &Section{\n\t\tName: SectionName(strings.TrimRight(str, string([]byte{0x00}))),\n\t\tOffset: uint64(beginOffset),\n\t\tSize: sectionSize,\n\t\tRowCount: rowCount,\n\t\tBufferSize: sectionSize - uint64(len(str)) - 1 /* str-pad 0x00 byte */ - 8,\n\t\tBuffer: r.fl,\n\t}\n\treturn nil\n}", "func TestDecodeHeader(t *testing.T) {\n\tdata := []byte{\n\t\t// header\n\t\t0x00, 0x00, 0x00, 0x0B, 0x27, 0x00, 0x02, 0x00, 0x00, 0x00, 0x23,\n\n\t\t// data\n\t\t0x00, 0x00, 0x00, 0x36, 0x00, 0x00, 0x00, 0x2C, 0x00, 0x00, 0x00, 0x04,\n\t\t0x00, 0x00, 0x00, 0x0B, 0x00, 0x08, 0x03, 0xFF, 0xFD, 0xFF, 0x02, 0xFE,\n\t\t0xFE, 0xFE, 0x04, 0xEE, 0xED, 0x87, 0xFB, 0xCB, 0x2B, 0xFF, 0xAC,\n\t}\n\n\tr := reader.New(data)\n\td := &document{}\n\th, err := NewHeader(d, r, 0, OSequential)\n\trequire.NoError(t, err)\n\n\tassert.Equal(t, int64(11), h.HeaderLength)\n\tassert.Equal(t, uint64(11), h.SegmentDataStartOffset)\n\n\ts, err := h.subInputReader()\n\trequire.NoError(t, err)\n\n\tb, err := s.ReadByte()\n\trequire.NoError(t, err)\n\tassert.Equal(t, byte(0x00), b)\n\n\tthree := make([]byte, 3)\n\tread, err := s.Read(three)\n\trequire.NoError(t, err)\n\n\tassert.Equal(t, 3, 
read)\n\tassert.Equal(t, byte(0x36), three[2])\n}", "func TestEmptyBlockMarshall(t *testing.T) {\n\tconst scheme = TestNetScheme\n\n\tb1 := Block{}\n\tbts, err := b1.MarshalBinary(scheme)\n\trequire.NoError(t, err)\n\n\tb2 := Block{}\n\terr = b2.UnmarshalBinary(bts, scheme)\n\trequire.Error(t, err)\n}", "func (p *Parser) jumpIfZero() {\n\tp.emitByte(OP_JUMPZ)\n\tp.primary()\n}", "func (m *ModifyBearerResponse) SetLength() {\n\tm.Header.Length = uint16(m.MarshalLen() - 4)\n}", "func emptyCf() []byte {\n\n\trc := &ringcf.Ring{\n\t\tVersion: ringcf.VERSION,\n\t\tParts: []ringcf.Part{\n\t\t\t{Shard: []uint32{0}},\n\t\t},\n\t}\n\n\tres, _ := ringcf.ToBytes(rc)\n\tres = append([]byte(\"# config not found\\n# autogenerated example\\n\"), res...)\n\treturn res\n}", "func (word ControlWord) IsLongOffset() bool {\n\treturn word.Count() == 0\n}", "func hasPESOptionalHeader(streamID uint8) bool {\n\treturn streamID != StreamIDPaddingStream && streamID != StreamIDPrivateStream2\n}", "func IsEndOfStream(msg []byte) bool {\n\treturn bytes.Equal(msg, EndStreamHeader)\n}", "func (options *Options) isIncludableZero() bool {\n\tb, ok := options.HashProp(\"includeZero\").(bool)\n\tif ok && b {\n\t\tnb, ok := options.Param(0).(int)\n\t\tif ok && nb == 0 {\n\t\t\treturn true\n\t\t}\n\t}\n\n\treturn false\n}", "func (suite *IntPartTestSuite) TestReadToZeroLengthBuffer() {\n\tpart, _ := newIntPartFromString(\"9\")\n\tbuff := make([]byte, 0, 0)\n\tcount, _ := part.Read(buff)\n\tsuite.Equal(0, count)\n}", "func indexNullTerminator(b []byte) int {\n\tif len(b) < 2 {\n\t\treturn -1\n\t}\n\n\tfor i := 0; i < len(b); i += 2 {\n\t\tif b[i] == 0 && b[i+1] == 0 {\n\t\t\treturn i\n\t\t}\n\t}\n\n\treturn -1\n}", "func (r *Reader) Len() int {\n\tif r.file_v0 != nil {\n\t\treturn r.file_v0.Len()\n\t}\n\treturn int(r.header.num)\n}", "func (lex *Lexer) IsEOF() bool {\n\treturn lex.Token == scanner.TEOF\n}", "func isZero(buffer []byte) bool {\n\tfor i := range buffer {\n\t\tif buffer[i] != 0 
{\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}", "func (o *OptionalString) IsZero() bool {\n\treturn len(*o) == 0\n}", "func (*endpoint) MaxHeaderLength() uint16 {\n\treturn header.EthernetMinimumSize\n}", "func TestZeros(t *testing.T) {\n\tvect, err := os.Open(\"randvect.txt\")\n\tif err != nil {\n\t\tt.Error(\"could not find text vector file\")\n\t}\n\tdefer vect.Close()\n\tscanner := bufio.NewScanner(vect)\n\tscanner.Scan()\n\n\tvar rng ISAAC\n\trng.randInit(true)\n\n\tvar buf bytes.Buffer\n\tfor i := 0; i < 2; i++ {\n\t\trng.isaac()\n\t\tfor j := 0; j < 256; j++ {\n\t\t\tbuf.WriteString(fmt.Sprintf(\"%.8x\", rng.randrsl[j]))\n\t\t\tif (j & 7) == 7 {\n\t\t\t\tvar output = buf.String()\n\t\t\t\tif scanner.Text() == output {\n\t\t\t\t\tscanner.Scan()\n\t\t\t\t\tbuf.Reset()\n\t\t\t\t} else {\n\t\t\t\t\tfmt.Printf(\"o: \" + output + \"\\n\" + \"v: \" + scanner.Text() + \"\\n\")\n\t\t\t\t\tt.Fail()\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n}", "func (bc ByteCount) IsZero() bool {\n\treturn bc == 0\n}", "func (h *Headers) IsEmpty() bool {\n\tif h.public == true {\n\t\treturn false\n\t} else if h.private == true {\n\t\treturn false\n\t} else if h.maxAge.Valid {\n\t\treturn false\n\t} else if h.sharedMaxAge.Valid {\n\t\treturn false\n\t} else if h.noCache == true {\n\t\treturn false\n\t} else if h.noStore == true {\n\t\treturn false\n\t} else if h.noTransform == true {\n\t\treturn false\n\t} else if h.mustRevalidate == true {\n\t\treturn false\n\t} else if h.proxyRevalidate == true {\n\t\treturn false\n\t}\n\treturn true\n}", "func EndOfObject(b *bytes.Buffer) (int, error) {\n\treturn b.Write([]byte{0x00, 0x00, 0x09})\n}", "func (e *Encoder) avoidFlush() bool {\n\tswitch {\n\tcase e.tokens.last.length() == 0:\n\t\t// Never flush after ObjectStart or ArrayStart since we don't know yet\n\t\t// if the object or array will end up being empty.\n\t\treturn true\n\tcase e.tokens.last.needObjectValue():\n\t\t// Never flush before the object value since we don't 
know yet\n\t\t// if the object value will end up being empty.\n\t\treturn true\n\tcase e.tokens.last.needObjectName() && len(e.buf) >= 2:\n\t\t// Never flush after the object value if it does turn out to be empty.\n\t\tswitch string(e.buf[len(e.buf)-2:]) {\n\t\tcase `ll`, `\"\"`, `{}`, `[]`: // last two bytes of every empty value\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func (t *FileType) IsEmpty() bool {\n\treturn len(t.FileTypes) < 1 &&\n\t\tt.MinLength == 0 &&\n\t\tt.MaxLength == 2147483647\n}", "func parsePESOptionalHeader(i []byte, offset *int) (h *PESOptionalHeader, dataStart int) {\n\t// Init\n\th = &PESOptionalHeader{}\n\n\t// Marker bits\n\th.MarkerBits = uint8(i[*offset]) >> 6\n\n\t// Scrambling control\n\th.ScramblingControl = uint8(i[*offset]) >> 4 & 0x3\n\n\t// Priority\n\th.Priority = uint8(i[*offset])&0x8 > 0\n\n\t// Data alignment indicator\n\th.DataAlignmentIndicator = uint8(i[*offset])&0x4 > 0\n\n\t// Copyrighted\n\th.IsCopyrighted = uint(i[*offset])&0x2 > 0\n\n\t// Original or copy\n\th.IsOriginal = uint8(i[*offset])&0x1 > 0\n\t*offset += 1\n\n\t// PTS DST indicator\n\th.PTSDTSIndicator = uint8(i[*offset]) >> 6 & 0x3\n\n\t// Flags\n\th.HasESCR = uint8(i[*offset])&0x20 > 0\n\th.HasESRate = uint8(i[*offset])&0x10 > 0\n\th.HasDSMTrickMode = uint8(i[*offset])&0x8 > 0\n\th.HasAdditionalCopyInfo = uint8(i[*offset])&0x4 > 0\n\th.HasCRC = uint8(i[*offset])&0x2 > 0\n\th.HasExtension = uint8(i[*offset])&0x1 > 0\n\t*offset += 1\n\n\t// Header length\n\th.HeaderLength = uint8(i[*offset])\n\t*offset += 1\n\n\t// Data start\n\tdataStart = *offset + int(h.HeaderLength)\n\n\t// PTS/DTS\n\tif h.PTSDTSIndicator == PTSDTSIndicatorOnlyPTS {\n\t\th.PTS = parsePTSOrDTS(i[*offset:])\n\t\t*offset += 5\n\t} else if h.PTSDTSIndicator == PTSDTSIndicatorBothPresent {\n\t\th.PTS = parsePTSOrDTS(i[*offset:])\n\t\t*offset += 5\n\t\th.DTS = parsePTSOrDTS(i[*offset:])\n\t\t*offset += 5\n\t}\n\n\t// ESCR\n\tif h.HasESCR {\n\t\th.ESCR = 
parseESCR(i[*offset:])\n\t\t*offset += 6\n\t}\n\n\t// ES rate\n\tif h.HasESRate {\n\t\th.ESRate = uint32(i[*offset])&0x7f<<15 | uint32(i[*offset+1])<<7 | uint32(i[*offset+2])>>1\n\t\t*offset += 3\n\t}\n\n\t// Trick mode\n\tif h.HasDSMTrickMode {\n\t\th.DSMTrickMode = parseDSMTrickMode(i[*offset])\n\t\t*offset += 1\n\t}\n\n\t// Additional copy info\n\tif h.HasAdditionalCopyInfo {\n\t\th.AdditionalCopyInfo = i[*offset] & 0x7f\n\t\t*offset += 1\n\t}\n\n\t// CRC\n\tif h.HasCRC {\n\t\th.CRC = uint16(i[*offset])>>8 | uint16(i[*offset+1])\n\t\t*offset += 2\n\t}\n\n\t// Extension\n\tif h.HasExtension {\n\t\t// Flags\n\t\th.HasPrivateData = i[*offset]&0x80 > 0\n\t\th.HasPackHeaderField = i[*offset]&0x40 > 0\n\t\th.HasProgramPacketSequenceCounter = i[*offset]&0x20 > 0\n\t\th.HasPSTDBuffer = i[*offset]&0x10 > 0\n\t\th.HasExtension2 = i[*offset]&0x1 > 0\n\t\t*offset += 1\n\n\t\t// Private data\n\t\tif h.HasPrivateData {\n\t\t\th.PrivateData = i[*offset : *offset+16]\n\t\t\t*offset += 16\n\t\t}\n\n\t\t// Pack field length\n\t\tif h.HasPackHeaderField {\n\t\t\th.PackField = uint8(i[*offset])\n\t\t\t*offset += 1\n\t\t}\n\n\t\t// Program packet sequence counter\n\t\tif h.HasProgramPacketSequenceCounter {\n\t\t\th.PacketSequenceCounter = uint8(i[*offset]) & 0x7f\n\t\t\th.MPEG1OrMPEG2ID = uint8(i[*offset+1]) >> 6 & 0x1\n\t\t\th.OriginalStuffingLength = uint8(i[*offset+1]) & 0x3f\n\t\t\t*offset += 2\n\t\t}\n\n\t\t// P-STD buffer\n\t\tif h.HasPSTDBuffer {\n\t\t\th.PSTDBufferScale = i[*offset] >> 5 & 0x1\n\t\t\th.PSTDBufferSize = uint16(i[*offset])&0x1f<<8 | uint16(i[*offset+1])\n\t\t\t*offset += 2\n\t\t}\n\n\t\t// Extension 2\n\t\tif h.HasExtension2 {\n\t\t\t// Length\n\t\t\th.Extension2Length = uint8(i[*offset]) & 0x7f\n\t\t\t*offset += 2\n\n\t\t\t// Data\n\t\t\th.Extension2Data = i[*offset : *offset+int(h.Extension2Length)]\n\t\t\t*offset += int(h.Extension2Length)\n\t\t}\n\t}\n\treturn\n}", "func ZeroTruncate(t BlockType, buf []byte) []byte {\n\tif t.depth() > 0 {\n\t\t// ignore 
slop at end of block\n\t\ti := (len(buf) / ScoreSize) * ScoreSize\n\t\tzero := ZeroScore()\n\t\tzeroBytes := zero.Bytes()\n\t\tfor i >= ScoreSize {\n\t\t\tif bytes.Equal(buf[i-ScoreSize:i], zeroBytes) {\n\t\t\t\tbreak\n\t\t\t}\n\t\t\ti -= ScoreSize\n\t\t}\n\t\treturn buf[:i]\n\t} else if t == RootType {\n\t\tif len(buf) < RootSize {\n\t\t\treturn buf\n\t\t}\n\t\treturn buf[:RootSize]\n\t} else {\n\t\tvar i int\n\t\tfor i = len(buf); i > 0; i-- {\n\t\t\tif buf[i-1] != 0 {\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\treturn buf[:i]\n\t}\n}", "func (s CommitmentLengthObject) IsEmpty() bool {\n\treturn s.commitmentLength == nil\n}", "func TestEOFOrLengthEncodedIntFuzz(t *testing.T) {\n\tfor i := 0; i < 100; i++ {\n\t\tbytes := make([]byte, rand.Intn(16)+1)\n\t\t_, err := crypto_rand.Read(bytes)\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"error doing rand.Read\")\n\t\t}\n\t\tbytes[0] = 0xfe\n\n\t\t_, _, isInt := readLenEncInt(bytes, 0)\n\t\tisEOF := isEOFPacket(bytes)\n\t\tif (isInt && isEOF) || (!isInt && !isEOF) {\n\t\t\tt.Fatalf(\"0xfe bytestring is EOF xor Int. 
Bytes %v\", bytes)\n\t\t}\n\t}\n}", "func (x *Secp256k1N) IsZero() bool {\n\tvar z Secp256k1N\n\tz.Set(x)\n\tz.Normalize()\n\treturn (z.limbs[0] | z.limbs[1] | z.limbs[2] | z.limbs[3] | z.limbs[4]) == 0\n}", "func (bio *BinaryIO) Zero(off int64, count int) {\n\tbuf := makeBuf(count)\n\tfor count > 0 {\n\t\tbuf = truncBuf(buf, count)\n\t\tbio.WriteAt(off, buf)\n\t\tcount -= len(buf)\n\t\toff += int64(len(buf))\n\t}\n}", "func HasZeroWidthCharacters(s string) bool {\n\treturn strings.ContainsRune(s, ZWSP) ||\n\t\tstrings.ContainsRune(s, ZWNBSP) ||\n\t\tstrings.ContainsRune(s, ZWJ) ||\n\t\tstrings.ContainsRune(s, ZWNJ)\n}", "func TestHB11ZeroHeader(t *testing.T) {\n\tn, _ := openConn(t)\n\tconn_headers := check11(TEST_HEADERS)\n\tc, _ := Connect(n, conn_headers.Add(\"heart-beat\", \"0,0\"))\n\tif c.protocol == SPL_10 {\n\t\t_ = closeConn(t, n)\n\t\treturn\n\t}\n\tif c.hbd != nil {\n\t\tt.Errorf(\"Expected no heartbeats for 1.1, zero header\")\n\t}\n\t_ = c.Disconnect(empty_headers)\n\t_ = closeConn(t, n)\n}", "func (dc *Int96DictConverter) FillZero(out interface{}) {\n\to := out.([]parquet.Int96)\n\to[0] = dc.zeroVal\n\tfor i := 1; i < len(o); i *= 2 {\n\t\tcopy(o[i:], o[:i])\n\t}\n}", "func (p *Parameter) IsDefaultHeaderEncoding() bool {\n\tif p.Explode == nil && (p.Style == \"\" || p.Style == \"simple\") {\n\t\treturn true\n\t}\n\tif p.Explode != nil && !*p.Explode && (p.Style == \"\" || p.Style == \"simple\") {\n\t\treturn true\n\t}\n\treturn false\n}", "func (o ParserConfigOutput) AllowNullHeader() pulumi.BoolPtrOutput {\n\treturn o.ApplyT(func(v ParserConfig) *bool { return v.AllowNullHeader }).(pulumi.BoolPtrOutput)\n}", "func (n NoOp) OutputLength() int {\n\treturn 0\n}", "func (o ParserConfigResponseOutput) AllowNullHeader() pulumi.BoolOutput {\n\treturn o.ApplyT(func(v ParserConfigResponse) bool { return v.AllowNullHeader }).(pulumi.BoolOutput)\n}", "func ScanNullTerminatedEntries(data []byte, atEOF bool) (advance int, token []byte, err error) {\n\tif i := 
bytes.IndexByte(data, 0); i >= 0 {\n\t\t// Valid record found.\n\t\treturn i + 1, data[0:i], nil\n\t} else if atEOF && len(data) != 0 {\n\t\t// Data at the end of the file without a null terminator.\n\t\treturn 0, nil, errors.New(\"Expected null byte terminator\")\n\t} else {\n\t\t// Request more data.\n\t\treturn 0, nil, nil\n\t}\n}", "func (d *MyDecimal) IsZero() bool {\n\tisZero := true\n\tfor _, val := range d.wordBuf {\n\t\tif val != 0 {\n\t\t\tisZero = false\n\t\t\tbreak\n\t\t}\n\t}\n\treturn isZero\n}", "func parseSectionV2(data []byte) ([]byte, []packetV2, error) {\n\tprevFieldType := fieldType(-1)\n\tvar packets []packetV2\n\tfor {\n\t\tif len(data) == 0 {\n\t\t\treturn nil, nil, fmt.Errorf(\"section extends past end of buffer\")\n\t\t}\n\t\trest, p, err := parsePacketV2(data)\n\t\tif err != nil {\n\t\t\treturn nil, nil, err\n\t\t}\n\t\tif p.fieldType == fieldEOS {\n\t\t\treturn rest, packets, nil\n\t\t}\n\t\tif p.fieldType <= prevFieldType {\n\t\t\treturn nil, nil, fmt.Errorf(\"fields out of order\")\n\t\t}\n\t\tpackets = append(packets, p)\n\t\tprevFieldType = p.fieldType\n\t\tdata = rest\n\t}\n}", "func (a ACME) IsZero() bool {\n\treturn !a.Enable &&\n\t\ta.Endpoint == \"\" &&\n\t\ta.Dir == \"\" &&\n\t\ta.Email == \"\" &&\n\t\tlen(a.Hosts) == 0\n}" ]
[ "0.52038777", "0.4886099", "0.46940255", "0.45598406", "0.45515433", "0.44806105", "0.44333175", "0.44333175", "0.44333175", "0.44333175", "0.44333175", "0.4400239", "0.43830788", "0.43444872", "0.43374717", "0.43305448", "0.43090913", "0.42828125", "0.42710194", "0.4262915", "0.4258667", "0.42167443", "0.4212383", "0.41943243", "0.41935387", "0.41891184", "0.41706467", "0.41652796", "0.4159023", "0.41405106", "0.41369393", "0.41142532", "0.40852728", "0.4079865", "0.40797725", "0.4079137", "0.40747815", "0.40716282", "0.4052887", "0.40360418", "0.40203223", "0.40081647", "0.40081605", "0.39913192", "0.3982908", "0.39751372", "0.39555615", "0.39538753", "0.3949698", "0.39153326", "0.3911693", "0.3908169", "0.38899034", "0.38893932", "0.3873781", "0.38735417", "0.38670933", "0.38438997", "0.3841547", "0.38360387", "0.38334435", "0.38330272", "0.38322303", "0.38279572", "0.38278058", "0.38267866", "0.382449", "0.38227883", "0.38136756", "0.38132867", "0.38099095", "0.38086256", "0.37995127", "0.37992713", "0.37944978", "0.37887704", "0.37846586", "0.37728453", "0.37655768", "0.37521088", "0.3748824", "0.37387827", "0.37338495", "0.3733493", "0.3731067", "0.37243837", "0.37214983", "0.3719686", "0.37153205", "0.3710973", "0.37103164", "0.37053216", "0.37016818", "0.3697489", "0.36878216", "0.36808568", "0.3676672", "0.3671102", "0.36588928", "0.36528853" ]
0.81493616
0
UseDataPadding sets the padding to be added between CARv2 header and its data payload on Finalize.
UseDataPadding устанавливает отступ, который добавляется между заголовком CARv2 и его данными в Finalize.
func UseDataPadding(p uint64) Option { return func(o *Options) { o.DataPadding = p } }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func padData(rawData []byte) []byte {\n\tneedPadding := aes.BlockSize - ((len(rawData) + 2) % aes.BlockSize)\n\n\tvar dataBuf bytes.Buffer\n\tdataBuf.Grow(2 + len(rawData) + (aes.BlockSize % (len(rawData) + 2)))\n\n\tdataBuf.Write([]byte(\"|\"))\n\tdataBuf.Write(rawData)\n\tdataBuf.Write([]byte(\"|\"))\n\n\tfor i := 0; i < needPadding; i++ {\n\t\tdataBuf.Write([]byte(\" \"))\n\t}\n\n\treturn dataBuf.Bytes()\n}", "func (d *DataPacket) SetData(data []byte) {\n\tif len(data) > 512 {\n\t\tdata = data[0:512]\n\t}\n\t//make the length a multiply of 2\n\tif len(data)%2 != 0 { //add a 0 to make the length sufficient\n\t\tdata = append(data, 0)\n\t}\n\td.setFAL(uint16(126 + len(data)))\n\td.replace(126, data)\n}", "func WithPaddingAllowed() ParserOption {\n\treturn func(p *Parser) {\n\t\tp.decodePaddingAllowed = true\n\t}\n}", "func UseIndexPadding(p uint64) Option {\n\treturn func(o *Options) {\n\t\to.IndexPadding = p\n\t}\n}", "func (ctc *CustomTransactionContext) SetData(data []byte) {\n\tctc.data = data\n}", "func PKCS(data []byte, mode string) (padded_data []byte) {\r\n\tvar pad_num int\r\n\r\n\tif mode == \"add\" {\r\n\t\trem := len(data) % userlib.AESBlockSizeBytes\r\n\t\tpad_num = userlib.AESBlockSizeBytes - rem //number to pad by\r\n\t\t//pad := make([]byte, pad_num) //pad array we are appending later\r\n\t\tpadded_data = data[:]\r\n\t\tfor i := 0; i < pad_num; i++ {\r\n\t\t\t//pad = append(pad, byte(pad_num))\r\n\t\t\tpadded_data = append(padded_data, byte(pad_num))\r\n\t\t}\r\n\r\n\t\t//userlib.DebugMsg(\"%d\", padded_data)\r\n\t} else { //remove padding\r\n\t\t//last byte is amount of padding there is\r\n\t\t//ex: d = [1022] means 2 bytes of padding so return d[:2] which is [10]\r\n\r\n\t\tnum := len(data) - 1\r\n\t\tpad_num = len(data) - int(data[num]) //piazza: convert to byte > hex string > int?\r\n\t\tpadded_data = data[:pad_num]\r\n\t}\r\n\r\n\treturn padded_data\r\n}", "func (p *IPv4) SetData(data []byte) {\n\tp.data = data\n}", "func (socket *Socket) 
SetFinalData(data string) {\n\tsocket.Lock()\n\tdefer socket.Unlock()\n\tsocket.finalData = data\n}", "func (p *Patch) SetPadding(value mat.AABB) {\n\tp.Padding = value\n\tp.SetRegion(p.Region)\n}", "func (e *EDNS0_PADDING) Option() uint16 { return EDNS0PADDING }", "func (t DNSOverTCP) RequiresPadding() bool {\n\treturn t.requiresPadding\n}", "func (p *Packet) SetData(data []byte) {\n\tp.Data = data\n}", "func padFile(data []byte) (padData []byte) {\n\tif len(data) % userlib.AESBlockSize != 0{\n\t\t//padding\n\t\tif len(data) < userlib.AESBlockSize {\n\t\t\tpad := userlib.AESBlockSize - len(data)\n\t\t\tfor i := 0; i < pad; i++ {\n\t\t\t\tdata = append(data, byte(pad))\n\t\t\t}\n\t\t} else {\n\t\t\ttemp := userlib.AESBlockSize\n\t\t\tfor temp < len(data){\n\t\t\t\ttemp += userlib.AESBlockSize\n\t\t\t}\n\t\t\tpad := temp - len(data)\n\t\t\tfor i := 0; i < pad; i++ {\n\t\t\t\tdata = append(data, byte(pad))\n\t\t\t}\n\t\t}\n\t} else {\n\t\tpad := 0\n\t\tfor i := 0; i < userlib.AESBlockSize; i++ {\n\t\t\tdata = append(data, byte(pad))\n\t\t}\n\t}\n\treturn data\n}", "func (key Key) SetPadding(padding C.DWORD) error {\n\tif C.CryptSetKeyParam(key.hKey, C.KP_PADDING, C.LPBYTE(unsafe.Pointer(&padding)), 0) == 0 {\n\t\treturn getErr(\"Error setting padding for key\")\n\t}\n\treturn nil\n}", "func EncodeBytesWithPadding(data []byte, targetLength int) []byte {\n\tvar buf bytes.Buffer\n\n\tfor i := 0; i < targetLength-len(data); i++ {\n\t\tbuf.WriteByte(0)\n\t}\n\n\tbuf.Write(data)\n\treturn buf.Bytes()\n}", "func (enc Encoding) WithPadding(padding rune) *Encoding {\n\tswitch {\n\tcase padding < NoPadding || padding == '\\r' || padding == '\\n' || padding > 0xff:\n\t\tpanic(\"invalid padding\")\n\tcase padding != NoPadding && enc.decodeMap[byte(padding)] != invalidIndex:\n\t\tpanic(\"padding contained in alphabet\")\n\t}\n\tenc.padChar = padding\n\treturn &enc\n}", "func (o *SecretBagWritable) SetData(v map[string]string) {\n\to.Data = v\n}", "func (t *DNSOverTCPTransport) 
RequiresPadding() bool {\n\treturn t.requiresPadding\n}", "func (o *SecretBagPatchable) SetData(v map[string]string) {\n\to.Data = v\n}", "func (o *SwiftObject) SetData(size int64) (io.Writer, error) {\n\treturn o.newFile(\"data\", size)\n}", "func (znp *Znp) UtilDataReq(securityUse uint8) (rsp *StatusResponse, err error) {\n\treq := &UtilDataReq{SecurityUse: securityUse}\n\terr = znp.ProcessRequest(unp.C_SREQ, unp.S_UTIL, 0x11, req, &rsp)\n\treturn\n}", "func DecryptUseCBC(cipherText, key []byte, iv []byte) ([]byte, error) {\n\tblockKey, err := aes.NewCipher(key)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tblockSize := blockKey.BlockSize()\n\tif len(cipherText)%blockSize != 0 {\n\t\treturn nil, errors.New(\"cipher text is not an integral multiple of the block size\")\n\t}\n\tdecryptTool := cipher.NewCBCDecrypter(blockKey, iv)\n\t// CryptBlocks can work in-place if the two arguments are the same.\n\tdecryptTool.CryptBlocks(cipherText, cipherText)\n\treturn PKCS5UnPadding(cipherText), nil\n}", "func (k *Item) SetData(b []byte) {\n\tif b != nil {\n\t\tk.attr[DataKey] = b\n\t} else {\n\t\tdelete(k.attr, DataKey)\n\t}\n}", "func WithData(value string) OptFn {\n\treturn func(o *Opt) {\n\t\to.data = value\n\t}\n}", "func (_Withdrawable *WithdrawableSession) IsSigDataUsed(arg0 [32]byte) (bool, error) {\n\treturn _Withdrawable.Contract.IsSigDataUsed(&_Withdrawable.CallOpts, arg0)\n}", "func setupPadding() {\n\n\tpaddingMap[0] = \"10101010101010101010101010101010\"\n\tpaddingMap[1] = \"0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f\"\n\tpaddingMap[2] = \"0e0e0e0e0e0e0e0e0e0e0e0e0e0e\"\n\tpaddingMap[3] = \"0d0d0d0d0d0d0d0d0d0d0d0d0d\"\n\tpaddingMap[4] = \"0c0c0c0c0c0c0c0c0c0c0c0c\"\n\tpaddingMap[5] = \"0b0b0b0b0b0b0b0b0b0b0b\"\n\tpaddingMap[6] = \"0a0a0a0a0a0a0a0a0a0a\"\n\tpaddingMap[7] = \"090909090909090909\"\n\tpaddingMap[8] = \"0808080808080808\"\n\tpaddingMap[9] = \"07070707070707\"\n\tpaddingMap[10] = \"060606060606\"\n\tpaddingMap[11] = \"0505050505\"\n\tpaddingMap[12] = 
\"04040404\"\n\tpaddingMap[13] = \"030303\"\n\tpaddingMap[14] = \"0202\"\n\tpaddingMap[15] = \"01\"\n}", "func (e Des3CbcSha1Kd) DecryptData(key, data []byte) ([]byte, error) {\n\treturn rfc3961.DES3DecryptData(key, data, e)\n}", "func (g *GroupedAVP) Padding() int {\n\treturn 0\n}", "func UseExternalData(flag bool) {\n\tuseExternalData = flag\n}", "func (o *PostHAProxyConfigurationParams) SetData(data string) {\n\to.Data = data\n}", "func (dc *dataContainer) SetForceData(key, value interface{}) {\n\tdc.data[key] = value\n}", "func (e Aes128CtsHmacSha256128) DecryptData(key, data []byte) ([]byte, error) {\n\treturn rfc8009.DecryptData(key, data, e)\n}", "func (c *digisparkI2cConnection) WriteBlockData(reg uint8, data []byte) error {\n\tc.mtx.Lock()\n\tdefer c.mtx.Unlock()\n\n\tif len(data) > 32 {\n\t\tdata = data[:32]\n\t}\n\n\tbuf := make([]byte, len(data)+1)\n\tcopy(buf[1:], data)\n\tbuf[0] = reg\n\treturn c.writeAndCheckCount(buf, true)\n}", "func WithDataBus(conn stan.Conn) Option {\n\treturn func(a *App) {\n\t\ta.stanConn = conn\n\t}\n}", "func (_Withdrawable *WithdrawableCallerSession) IsSigDataUsed(arg0 [32]byte) (bool, error) {\n\treturn _Withdrawable.Contract.IsSigDataUsed(&_Withdrawable.CallOpts, arg0)\n}", "func (d PrinterCallbacks) OnDataUsage(dloadKiB, uploadKiB float64) {\n\td.Logger.Infof(\"experiment: recv %s, sent %s\",\n\t\thumanizex.SI(dloadKiB*1024, \"byte\"),\n\t\thumanizex.SI(uploadKiB*1024, \"byte\"),\n\t)\n}", "func WithCreateData(val map[string][]byte) CreateOption {\n\treturn func(cfg *createConfig) {\n\t\tcfg.Data = val\n\t}\n}", "func (o *PrivilegedBagData) SetData(v map[string]string) {\n\to.Data = v\n}", "func pad(data []byte, blockSize int, padder Padder) []byte {\n \tdataLen := len(data)\n\tpadLen := blockSize - (dataLen % blockSize)\n\tpadding := padder(padLen)\n\treturn append(data, padding...)\n}", "func (_Vault *VaultSession) SigDataUsed(arg0 [32]byte) (bool, error) {\n\treturn _Vault.Contract.SigDataUsed(&_Vault.CallOpts, 
arg0)\n}", "func EncodeStringWithPadding(data string, targetLength int) []byte {\n\tvar buf bytes.Buffer\n\n\tif len(data) < targetLength {\n\t\tfor i := 0; i < targetLength-len(data); i++ {\n\t\t\tbuf.WriteByte(0)\n\t\t}\n\t}\n\n\tbuf.Write([]byte(data))\n\treturn buf.Bytes()\n}", "func SetUserDataLayer(ud UserData) {\n\tuserData = ud\n}", "func PKCS7Padding(plainUnpaddedData []byte, blockSize int) []byte {\n\tpaddingSize := blockSize - len(plainUnpaddedData)%blockSize\n\tpadData := bytes.Repeat([]byte{byte(paddingSize)}, paddingSize)\n\treturn append(plainUnpaddedData, padData...)\n}", "func PKCS7Padding(plainUnpaddedData []byte, blockSize int) []byte {\n\tpaddingSize := blockSize - len(plainUnpaddedData)%blockSize\n\tpadData := bytes.Repeat([]byte{byte(paddingSize)}, paddingSize)\n\treturn append(plainUnpaddedData, padData...)\n}", "func (g *generator) InitData(kt *kit.Kit) error {\n\theader := http.Header{}\n\theader.Set(constant.UserKey, constant.BKUserForTestPrefix+\"gen-data\")\n\theader.Set(constant.RidKey, kt.Rid)\n\theader.Set(constant.AppCodeKey, \"test\")\n\theader.Add(\"Cookie\", \"bk_token=\"+constant.BKTokenForTest)\n\n\tg.data = make(AppReleaseMeta, 0)\n\n\t//if err := g.initApp1(kt.Ctx, header); err != nil {\n\t//\treturn err\n\t//}\n\n\tif err := g.initApp2(kt.Ctx, header); err != nil {\n\t\treturn err\n\t}\n\n\t//if err := g.initApp3(kt.Ctx, header); err != nil {\n\t//\treturn err\n\t//}\n\n\treturn nil\n}", "func ExpectKeyUsageUsageDataEncipherment(csr *certificatesv1.CertificateSigningRequest, _ crypto.Signer) error {\n\tcert, err := pki.DecodeX509CertificateBytes(csr.Status.Certificate)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// taking the key usage here and use a binary OR to flip all non\n\t// KeyUsageDataEncipherment bits to 0 so if KeyUsageDataEncipherment the\n\t// value will be exactly x509.KeyUsageDataEncipherment\n\tusage := cert.KeyUsage\n\tusage &= x509.KeyUsageDataEncipherment\n\tif usage != x509.KeyUsageDataEncipherment 
{\n\t\treturn fmt.Errorf(\"Expected certificate to have KeyUsageDataEncipherment %#b, but got %v %#b\", x509.KeyUsageDataEncipherment, usage, usage)\n\t}\n\n\treturn nil\n}", "func (c *Context) WriteData(data interface{}) (err error) {\n\tvar bytes []byte\n\tif bytes, err = c.Serialize(data); err == nil {\n\t\t_, err = c.Write(bytes)\n\t}\n\treturn\n}", "func (decryptor *PgDecryptor) SetDataProcessor(processor base.DataProcessor) {\n\tdecryptor.dataProcessor = processor\n}", "func (b AcraBlock) SetDataEncryptionType(t DataEncryptionBackendType) error {\n\tdataEncryptionType, err := t.MarshalBinary()\n\tif err != nil {\n\t\treturn err\n\t}\n\tcopy(b[DataEncryptionTypePosition:DataEncryptionTypePosition+DataEncryptionTypeSize], dataEncryptionType[:DataEncryptionTypeSize])\n\treturn nil\n}", "func (o *V2TcpConfiguration) SetData(v string) {\n\to.Data = &v\n}", "func (d *weakChecksum) addData(p ...byte) {\n\ts1, s2 := d.digest&0xffff, d.digest>>16\n\tfor len(p) > 0 {\n\t\tvar q []byte\n\t\tif len(p) > nmax {\n\t\t\tp, q = p[:nmax], p[nmax:]\n\t\t}\n\t\tfor _, x := range p {\n\t\t\ts1 += uint32(x)\n\t\t\ts2 += s1\n\t\t}\n\t\ts1 %= mod\n\t\ts2 %= mod\n\t\tp = q\n\t}\n\n\td.digest = (s2<<16 | s1)\n}", "func MergeDataKeyOptions(opts ...*DataKeyOptions) *DataKeyOptions {\n\tdko := DataKey()\n\tfor _, opt := range opts {\n\t\tif opt == nil {\n\t\t\tcontinue\n\t\t}\n\n\t\tif opt.MasterKey != nil {\n\t\t\tdko.MasterKey = opt.MasterKey\n\t\t}\n\t\tif opt.KeyAltNames != nil {\n\t\t\tdko.KeyAltNames = opt.KeyAltNames\n\t\t}\n\t}\n\n\treturn dko\n}", "func (client *Client) DescribeVodTranscodeDataWithOptions(request *DescribeVodTranscodeDataRequest, runtime *util.RuntimeOptions) (_result *DescribeVodTranscodeDataResponse, _err error) {\n\t_err = util.ValidateModel(request)\n\tif _err != nil {\n\t\treturn _result, _err\n\t}\n\tquery := map[string]interface{}{}\n\tif !tea.BoolValue(util.IsUnset(request.EndTime)) {\n\t\tquery[\"EndTime\"] = request.EndTime\n\t}\n\n\tif 
!tea.BoolValue(util.IsUnset(request.Interval)) {\n\t\tquery[\"Interval\"] = request.Interval\n\t}\n\n\tif !tea.BoolValue(util.IsUnset(request.OwnerId)) {\n\t\tquery[\"OwnerId\"] = request.OwnerId\n\t}\n\n\tif !tea.BoolValue(util.IsUnset(request.Region)) {\n\t\tquery[\"Region\"] = request.Region\n\t}\n\n\tif !tea.BoolValue(util.IsUnset(request.Specification)) {\n\t\tquery[\"Specification\"] = request.Specification\n\t}\n\n\tif !tea.BoolValue(util.IsUnset(request.StartTime)) {\n\t\tquery[\"StartTime\"] = request.StartTime\n\t}\n\n\tif !tea.BoolValue(util.IsUnset(request.Storage)) {\n\t\tquery[\"Storage\"] = request.Storage\n\t}\n\n\treq := &openapi.OpenApiRequest{\n\t\tQuery: openapiutil.Query(query),\n\t}\n\tparams := &openapi.Params{\n\t\tAction: tea.String(\"DescribeVodTranscodeData\"),\n\t\tVersion: tea.String(\"2017-03-21\"),\n\t\tProtocol: tea.String(\"HTTPS\"),\n\t\tPathname: tea.String(\"/\"),\n\t\tMethod: tea.String(\"POST\"),\n\t\tAuthType: tea.String(\"AK\"),\n\t\tStyle: tea.String(\"RPC\"),\n\t\tReqBodyType: tea.String(\"formData\"),\n\t\tBodyType: tea.String(\"json\"),\n\t}\n\t_result = &DescribeVodTranscodeDataResponse{}\n\t_body, _err := client.CallApi(params, req, runtime)\n\tif _err != nil {\n\t\treturn _result, _err\n\t}\n\t_err = tea.Convert(_body, &_result)\n\treturn _result, _err\n}", "func (_Vault *VaultCallerSession) SigDataUsed(arg0 [32]byte) (bool, error) {\n\treturn _Vault.Contract.SigDataUsed(&_Vault.CallOpts, arg0)\n}", "func pkcs7Padding(ciphertext []byte, blockSize int) []byte {\n\t// The bytes need to padding.\n\tpadding := blockSize - len(ciphertext)%blockSize\n\tpadtext := bytes.Repeat([]byte{byte(padding)}, padding)\n\treturn append(ciphertext, padtext...)\n}", "func (o *V2TcpConfiguration) HasData() bool {\n\tif o != nil && o.Data != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func (d *DriverDMA) SetupData(mode sdcard.DataMode, buf []uint64, nbytes int) {\n\tif nbytes == 0 {\n\t\tpanicNoData()\n\t}\n\tif len(buf)*8 < nbytes 
{\n\t\tpanicShortBuf()\n\t}\n\tif uint(d.err)|uint(d.dmaErr) != 0 {\n\t\treturn\n\t}\n\td.dtc = DTEna | UseDMA | DataCtrl(mode)\n\tdmacfg := dma.PFC | dma.IncM\n\tif d.dtc&Recv == 0 {\n\t\tdmacfg |= dma.MTP\n\t}\n\tif nbytes&15 == 0 {\n\t\tdmacfg |= dma.FT4 | dma.PB4 | dma.MB4\n\t} else {\n\t\tdmacfg |= dma.FT2\n\t}\n\tch := d.dma\n\tch.Disable()\n\tch.Clear(dma.EvAll, dma.ErrAll)\n\tch.Setup(dmacfg)\n\tch.SetAddrP(unsafe.Pointer(&d.p.raw.FIFO))\n\tch.SetAddrM(unsafe.Pointer(&buf[0]))\n\tch.SetWordSize(4, 4)\n\t//ch.SetLen(len(buf) * 2) // Does STM32F1 require this? Use nbytes?\n\tch.Enable()\n\td.p.SetDataLen(nbytes)\n}", "func (o *CartaoProduto) SetDataNil() {\n\to.Data.Set(nil)\n}", "func (m *Msg) SetData(b []byte) {\n\tm.data = b\n}", "func (_Vault *VaultCaller) SigDataUsed(opts *bind.CallOpts, arg0 [32]byte) (bool, error) {\n\tvar (\n\t\tret0 = new(bool)\n\t)\n\tout := ret0\n\terr := _Vault.contract.Call(opts, out, \"sigDataUsed\", arg0)\n\treturn *ret0, err\n}", "func PKCS7Padding(ciphertext []byte, blockSize int) []byte {\n\tpadding := blockSize - len(ciphertext)%blockSize\n\tpadtext := bytes.Repeat([]byte{byte(padding)}, padding)\n\treturn append(ciphertext, padtext...)\n}", "func PKCS7Padding(ciphertext []byte, blockSize int) []byte {\n\tpadding := blockSize - len(ciphertext)%blockSize\n\tpadtext := bytes.Repeat([]byte{byte(padding)}, padding)\n\treturn append(ciphertext, padtext...)\n}", "func TestNoDataObfuscate(t *testing.T) {\n\tif *fstest.RemoteName != \"\" {\n\t\tt.Skip(\"Skipping as -remote set\")\n\t}\n\tif runtime.GOOS == \"darwin\" {\n\t\tt.Skip(\"Skipping on macOS as obfuscating control characters makes filenames macOS can't cope with\")\n\t}\n\ttempdir := filepath.Join(os.TempDir(), \"rclone-crypt-test-obfuscate\")\n\tname := \"TestCrypt4\"\n\tfstests.Run(t, &fstests.Opt{\n\t\tRemoteName: name + \":\",\n\t\tNilObject: (*crypt.Object)(nil),\n\t\tExtraConfig: []fstests.ExtraConfigItem{\n\t\t\t{Name: name, Key: \"type\", Value: 
\"crypt\"},\n\t\t\t{Name: name, Key: \"remote\", Value: tempdir},\n\t\t\t{Name: name, Key: \"password\", Value: obscure.MustObscure(\"potato2\")},\n\t\t\t{Name: name, Key: \"filename_encryption\", Value: \"obfuscate\"},\n\t\t\t{Name: name, Key: \"no_data_encryption\", Value: \"true\"},\n\t\t},\n\t\tSkipBadWindowsCharacters: true,\n\t\tUnimplementableFsMethods: []string{\"OpenWriterAt\", \"OpenChunkWriter\"},\n\t\tUnimplementableObjectMethods: []string{\"MimeType\"},\n\t\tQuickTestOK: true,\n\t})\n}", "func blockPadding(offset int64) (n int64) {\n\treturn -offset & (blockSize - 1)\n}", "func (_Withdrawable *WithdrawableCaller) IsSigDataUsed(opts *bind.CallOpts, arg0 [32]byte) (bool, error) {\n\tvar (\n\t\tret0 = new(bool)\n\t)\n\tout := ret0\n\terr := _Withdrawable.contract.Call(opts, out, \"isSigDataUsed\", arg0)\n\treturn *ret0, err\n}", "func (o *SessionDataUpdateParams) WithTimeout(timeout time.Duration) *SessionDataUpdateParams {\n\to.SetTimeout(timeout)\n\treturn o\n}", "func Data(data []byte, format string) (cleartext []byte, err error) {\n\t// Initialize a Sops JSON store\n\tvar store sops.Store\n\tswitch format {\n\tcase \"json\":\n\t\tstore = &sopsjson.Store{}\n\tcase \"yaml\":\n\t\tstore = &sopsyaml.Store{}\n\tdefault:\n\t\tstore = &sopsjson.BinaryStore{}\n\t}\n\t// Load SOPS file and access the data key\n\ttree, err := store.LoadEncryptedFile(data)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tkey, err := tree.Metadata.GetDataKey()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Decrypt the tree\n\tcipher := aes.NewCipher()\n\tmac, err := tree.Decrypt(key, cipher)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Compute the hash of the cleartext tree and compare it with\n\t// the one that was stored in the document. 
If they match,\n\t// integrity was preserved\n\toriginalMac, err := cipher.Decrypt(\n\t\ttree.Metadata.MessageAuthenticationCode,\n\t\tkey,\n\t\ttree.Metadata.LastModified.Format(time.RFC3339),\n\t)\n\tif originalMac != mac {\n\t\treturn nil, fmt.Errorf(\"Failed to verify data integrity. expected mac %q, got %q\", originalMac, mac)\n\t}\n\n\treturn store.EmitPlainFile(tree.Branch)\n}", "func ccData(val string, size int32, commit bool) *btspb.ReadRowsResponse_CellChunk {\n\treturn cc(nilStr, nilStr, nilStr, 0, val, size, commit, []string{})\n}", "func getPadding(packetLen int) int {\n\tif packetLen%4 == 0 {\n\t\treturn 0\n\t}\n\treturn 4 - (packetLen % 4)\n}", "func (o *StorageHyperFlexStorageContainer) HasDataBlockSize() bool {\n\tif o != nil && o.DataBlockSize != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func prepareData(data []byte) ([]byte, error) {\n\n\t// convert nil data to empty data\n\tif data == nil {\n\t\tdata = []byte{}\n\t}\n\n\t// check data length\n\tif len(data) > MaxDataLength {\n\t\treturn []byte{}, errors.New(\"too much data for one image\")\n\t}\n\n\t//-------------------------------------------\n\n\t// extend data slice for header\n\tret := make([]byte, header+len(data))\n\tcopy(ret[header:], data)\n\n\t// add HEADER: version\n\tret[0] = 0x01 // add 1 byte\n\n\t// add HEADER: data length\n\tbinary.BigEndian.PutUint32(ret[1:5], uint32(len(data))) // add 4 byte\n\n\t// add HEADER: checksum\n\tsum := md5.Sum(data)\n\tcopy(ret[5:21], sum[:]) // add 16 byte\n\n\t//-------------------------------------------\n\n\t// final size check\n\tif len(ret) > dimension*dimension {\n\t\treturn []byte{}, errors.New(\"final size check fail\")\n\t}\n\n\t// success: return data with header\n\treturn ret, nil\n}", "func (o *IntegrationsManualHTTPSCreateParams) SetData(data IntegrationsManualHTTPSCreateBody) {\n\to.Data = data\n}", "func (o *CartaoProduto) HasData() bool {\n\tif o != nil && o.Data.IsSet() {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func 
(_Vault *VaultCallerSession) IsSigDataUsed(hash [32]byte) (bool, error) {\n\treturn _Vault.Contract.IsSigDataUsed(&_Vault.CallOpts, hash)\n}", "func (_Vault *VaultSession) IsSigDataUsed(hash [32]byte) (bool, error) {\n\treturn _Vault.Contract.IsSigDataUsed(&_Vault.CallOpts, hash)\n}", "func (decryptor *PgDecryptor) ReadData(symmetricKey, zoneID []byte, reader io.Reader) ([]byte, error) {\n\t/* due to using two decryptors can be case when one decryptor match 2 bytes\n\tfrom TagBegin then didn't match anymore but another decryptor matched at\n\tthis time and was successfully used for decryption, we need return 2 bytes\n\tmatched and buffered by first decryptor and decrypted data from the second\n\n\tfor example case of matching begin tag:\n\tBEGIN_TA - failed decryptor1\n\t00BEGIN_TAG - successful decryptor2\n\tin this case first decryptor1 matched not full begin_tag and failed on 'G' but\n\tat this time was matched decryptor2 and successfully matched next bytes and decrypted data\n\tso we need return diff of two matches 'BE' and decrypted data\n\t*/\n\n\t// add zone_id to log if it used\n\tlogger := log.NewEntry(decryptor.logger.Logger)\n\tif decryptor.GetMatchedZoneID() != nil {\n\t\tlogger = decryptor.logger.WithField(\"zone_id\", string(decryptor.GetMatchedZoneID()))\n\t\t// use temporary logger in matched decryptor\n\t\tdecryptor.binaryDecryptor.SetLogger(logger)\n\t\t// reset to default logger without zone_id\n\t\tdefer decryptor.binaryDecryptor.SetLogger(decryptor.logger)\n\t}\n\n\t// take length of fully matched tag begin (each decryptor match tag begin with different length)\n\tcorrectMatchBeginTagLength := len(decryptor.binaryDecryptor.GetMatched())\n\t// take diff count of matched between two decryptors\n\tfalseBufferedBeginTagLength := decryptor.matchIndex - correctMatchBeginTagLength\n\tif falseBufferedBeginTagLength > 0 {\n\t\tlogger.Debugf(\"Return with false matched %v bytes\", falseBufferedBeginTagLength)\n\t\tdecrypted, err := 
decryptor.binaryDecryptor.ReadData(symmetricKey, zoneID, reader)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tlogger.Debugln(\"Decrypted AcraStruct\")\n\t\treturn append(decryptor.matchBuffer[:falseBufferedBeginTagLength], decrypted...), nil\n\t}\n\n\tdecrypted, err := decryptor.binaryDecryptor.ReadData(symmetricKey, zoneID, reader)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tlogger.Debugln(\"Decrypted AcraStruct\")\n\treturn decrypted, nil\n}", "func DecryptUseCBCWithDefaultProtocol(cipherText, key []byte) ([]byte, error) {\n\tif len(cipherText) < 16 {\n\t\treturn nil, errors.New(\"decrypt excepted iv parameter\")\n\t}\n\tplainText, err := DecryptUseCBC(cipherText[16:], key, cipherText[:16])\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn plainText, nil\n}", "func NewDnsViewparamDataData() *DnsViewparamDataData {\n\tthis := DnsViewparamDataData{}\n\treturn &this\n}", "func MaxDataBytes(maxBytes int64, keyType crypto.KeyType, evidenceBytes int64, valsCount int) int64 {\n\tmaxDataBytes := maxBytes -\n\t\tMaxOverheadForBlock -\n\t\tMaxHeaderBytes -\n\t\tMaxCoreChainLockSize -\n\t\tMaxCommitOverheadBytes -\n\t\tevidenceBytes\n\n\tif maxDataBytes < 0 {\n\t\tpanic(fmt.Sprintf(\n\t\t\t\"Negative MaxDataBytes. 
Block.MaxBytes=%d is too small to accommodate header&lastCommit&evidence=%d\",\n\t\t\tmaxBytes,\n\t\t\t-(maxDataBytes - maxBytes),\n\t\t))\n\t}\n\n\treturn maxDataBytes\n}", "func DataServiceDatasetV2DataTransferProtocol(value string) DataServiceDatasetV2Attr {\n\treturn func(m optionalAttr) {\n\t\tm[\"data_transfer_protocol\"] = value\n\t}\n}", "func PKCS7Padding(text string, length int) string {\n\tpaddingLength := length - (len(text) % length)\n\n\tbs := make([]byte, 1)\n\tbinary.PutUvarint(bs, uint64(paddingLength))\n\n\tpadding := bytes.Repeat(bs, paddingLength)\n\n\treturn text + string(padding)\n}", "func (o *StorageHyperFlexStorageContainer) SetDataBlockSize(v int64) {\n\to.DataBlockSize = &v\n}", "func (ic *Context) InitNonceData() {\n\tif tx, ok := ic.Container.(*transaction.Transaction); ok {\n\t\tcopy(ic.NonceData[:], tx.Hash().BytesBE())\n\t}\n\tif ic.Block != nil {\n\t\tnonce := ic.Block.Nonce\n\t\tnonce ^= binary.LittleEndian.Uint64(ic.NonceData[:])\n\t\tbinary.LittleEndian.PutUint64(ic.NonceData[:], nonce)\n\t}\n}", "func (self *BytecodeReader) SkipPadding() {\n\tfor self.pc%4 != 0 {\n\t\tself.ReadUint8()\n\t}\n}", "func (ce *ClientEncryption) CreateDataKey(ctx context.Context, kmsProvider string,\n\topts ...*options.DataKeyOptions) (primitive.Binary, error) {\n\n\t// translate opts to mcopts.DataKeyOptions\n\tdko := options.MergeDataKeyOptions(opts...)\n\tco := mcopts.DataKey().SetKeyAltNames(dko.KeyAltNames)\n\tif dko.MasterKey != nil {\n\t\tkeyDoc, err := marshal(\n\t\t\tdko.MasterKey,\n\t\t\tce.keyVaultClient.bsonOpts,\n\t\t\tce.keyVaultClient.registry)\n\t\tif err != nil {\n\t\t\treturn primitive.Binary{}, err\n\t\t}\n\t\tco.SetMasterKey(keyDoc)\n\t}\n\tif dko.KeyMaterial != nil {\n\t\tco.SetKeyMaterial(dko.KeyMaterial)\n\t}\n\n\t// create data key document\n\tdataKeyDoc, err := ce.crypt.CreateDataKey(ctx, kmsProvider, co)\n\tif err != nil {\n\t\treturn primitive.Binary{}, err\n\t}\n\n\t// insert key into key vault\n\t_, err = 
ce.keyVaultColl.InsertOne(ctx, dataKeyDoc)\n\tif err != nil {\n\t\treturn primitive.Binary{}, err\n\t}\n\n\tsubtype, data := bson.Raw(dataKeyDoc).Lookup(\"_id\").Binary()\n\treturn primitive.Binary{Subtype: subtype, Data: data}, nil\n}", "func PKCSSPadding(ciphertext []byte, blockSize int) []byte {\n\tpadding := blockSize - len(ciphertext)%blockSize\n\tpadtext := bytes.Repeat([]byte{byte(padding)}, padding)\n\treturn append(ciphertext, padtext...)\n}", "func SetColumnPadding(padding int) {\n\tif padding < 0 {\n\t\tcolumnPadding = 2 // default value\n\t} else {\n\t\tcolumnPadding = padding\n\t}\n}", "func (b AcraBlock) setEncryptedData(data []byte) error {\n\tif len(b) < EncryptedDataEncryptionKeyPosition {\n\t\treturn ErrInvalidAcraBlock\n\t}\n\tkeySize := b.EncryptedDataEncryptionKeyLength()\n\tif len(b) < EncryptedDataEncryptionKeyPosition+keySize {\n\t\treturn ErrInvalidAcraBlock\n\t}\n\tif n := copy(b[EncryptedDataEncryptionKeyPosition+keySize:], data); n != len(data) {\n\t\treturn ErrInvalidAcraBlock\n\t}\n\treturn nil\n}", "func BufferData(target uint32, size int, data unsafe.Pointer, usage uint32) {\n\tsyscall.Syscall6(gpBufferData, 4, uintptr(target), uintptr(size), uintptr(data), uintptr(usage), 0, 0)\n}", "func (o *Wireless) SetData(v map[string]string) {\n\to.Data = &v\n}", "func (o *PrivilegedTextDataAllOf) SetData(v string) {\n\to.Data = &v\n}", "func NewDataEncryptor(keyStore keystore.DataEncryptorKeyStore) (*DataEncryptor, error) {\n\treturn &DataEncryptor{keyStore: keyStore, needSkipEncryptionFunc: encryptor.EmptyCheckFunction}, nil\n}", "func Conv3DBackpropFilterV2DataFormat(value string) Conv3DBackpropFilterV2Attr {\n\treturn func(m optionalAttr) {\n\t\tm[\"data_format\"] = value\n\t}\n}", "func (a *Attributes) RemoveDataAttribute(name string) bool {\n\tsuffix, _ := ToDataAttr(name)\n\tname = \"data-\" + suffix\n\treturn a.RemoveAttribute(name)\n}", "func (d *DV4Mini) WriteTXBufferData(data []byte) {\n\tvar packetSize int = 34 // full 36\n\t// var 
counter int = 0\n\t// var crcValue byte\n\n\t// []byte{0x04, data}\n\tcmd := []byte{ADFWRITE}\n\n\tfor i := 0; i < len(data); i += packetSize {\n\t\ttime.Sleep(time.Millisecond * 30)\n\n\t\tbatch := data[i:min(i+packetSize, len(data))]\n\n\t\t// if (counter % 2) == 0 {\n\t\t// \tbatch = append([]byte{0x91}, batch...)\n\t\t// } else {\n\t\t// \tbatch = append([]byte{0x23}, batch...)\n\t\t// }\n\n\t\t// counter++\n\t\tlog.Printf(\"[>>>] \\n%s\", hex.Dump(batch))\n\n\t\tfullPacket := cmd\n\t\tfullPacket = append(fullPacket, batch...)\n\n\t\td.sendCmd(fullPacket)\n\t}\n\n\t// d.FlushTXBuffer()\n\td.Port.Flush()\n}", "func (_TestClient *TestClientTransactorSession) SetOptEpochData(epoch *big.Int, fullSizeIn128Resultion *big.Int, branchDepth *big.Int, merkleNodes []*big.Int, start *big.Int, numElems *big.Int) (*types.Transaction, error) {\n\treturn _TestClient.Contract.SetOptEpochData(&_TestClient.TransactOpts, epoch, fullSizeIn128Resultion, branchDepth, merkleNodes, start, numElems)\n}", "func (_Vault *VaultCaller) IsSigDataUsed(opts *bind.CallOpts, hash [32]byte) (bool, error) {\n\tvar (\n\t\tret0 = new(bool)\n\t)\n\tout := ret0\n\terr := _Vault.contract.Call(opts, out, \"isSigDataUsed\", hash)\n\treturn *ret0, err\n}", "func (s *State) SetSpecData(data []byte) {\n\ts.specData = data\n}", "func (_ERC725 *ERC725TransactorSession) SetData(_key [32]byte, _value []byte) (*types.Transaction, error) {\n\treturn _ERC725.Contract.SetData(&_ERC725.TransactOpts, _key, _value)\n}", "func (o LicenseOutput) Datacenter() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v *License) pulumi.StringPtrOutput { return v.Datacenter }).(pulumi.StringPtrOutput)\n}" ]
[ "0.5258283", "0.4555469", "0.4480234", "0.44573864", "0.43533745", "0.4337491", "0.4326558", "0.42585558", "0.42437828", "0.42047027", "0.41544282", "0.41253066", "0.41225743", "0.41110697", "0.40878302", "0.40734228", "0.4069261", "0.40326965", "0.40245518", "0.40231052", "0.4017667", "0.4004762", "0.40028828", "0.3981506", "0.39749408", "0.39524114", "0.39291194", "0.39017254", "0.3901331", "0.39003903", "0.38983864", "0.38898408", "0.3888365", "0.38866386", "0.38855574", "0.38787165", "0.38774544", "0.38761184", "0.38349083", "0.38249984", "0.38229233", "0.381717", "0.3800436", "0.3800436", "0.37910527", "0.37802708", "0.37755", "0.37729806", "0.37717575", "0.37642992", "0.3755665", "0.37515277", "0.3742783", "0.3741775", "0.37301615", "0.37272277", "0.37262666", "0.37131295", "0.37040007", "0.3703508", "0.3694991", "0.3694991", "0.36933163", "0.36739767", "0.36727476", "0.36682856", "0.36660922", "0.3662528", "0.36623806", "0.3659459", "0.36552295", "0.36548847", "0.3653241", "0.3645022", "0.36347246", "0.36323908", "0.36302263", "0.36103266", "0.36076522", "0.36073667", "0.3605999", "0.3605248", "0.35974205", "0.35925213", "0.35916698", "0.35916293", "0.35876635", "0.35860714", "0.35770276", "0.35668093", "0.35622153", "0.35488746", "0.35482797", "0.3547052", "0.3544767", "0.3544515", "0.35394973", "0.3536466", "0.3536421", "0.35343453" ]
0.7320334
0
UseIndexPadding sets the padding between data payload and its index on Finalize.
UseIndexPadding устанавливает отступ между данными и их индексом на Finalize.
func UseIndexPadding(p uint64) Option { return func(o *Options) { o.IndexPadding = p } }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (s *BasePlSqlParserListener) ExitUsing_index_clause(ctx *Using_index_clauseContext) {}", "func UseDataPadding(p uint64) Option {\n\treturn func(o *Options) {\n\t\to.DataPadding = p\n\t}\n}", "func (vm *vrfManager) releaseIndex(vrf *VRF) {\n\tvm.byIndex[int(vrf.index)] = nil\n}", "func (dict *Dictionary) DropIndex() {\n\tdict.shortIndex = nil\n\tdict.longIndex = nil\n}", "func IndexFixer(index int, listSize int) int {\n\tindex = index - 1\n\n\tif index <= 0 {\n\t\tindex = 0\n\t} else if index > listSize-1 {\n\t\tindex = listSize - 1\n\t}\n\n\treturn index\n}", "func indexTruncateInTxn(\n\tctx context.Context,\n\ttxn *kv.Txn,\n\texecCfg *ExecutorConfig,\n\tevalCtx *tree.EvalContext,\n\ttableDesc catalog.TableDescriptor,\n\tidx *descpb.IndexDescriptor,\n\ttraceKV bool,\n) error {\n\talloc := &rowenc.DatumAlloc{}\n\tvar sp roachpb.Span\n\tfor done := false; !done; done = sp.Key == nil {\n\t\trd := row.MakeDeleter(execCfg.Codec, tableDesc, nil /* requestedCols */)\n\t\ttd := tableDeleter{rd: rd, alloc: alloc}\n\t\tif err := td.init(ctx, txn, evalCtx); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tvar err error\n\t\tsp, err = td.deleteIndex(\n\t\t\tctx, idx, sp, indexTruncateChunkSize, traceKV,\n\t\t)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\t// Remove index zone configs.\n\treturn RemoveIndexZoneConfigs(ctx, txn, execCfg, tableDesc, []descpb.IndexDescriptor{*idx})\n}", "func (i *Index) Write(off uint32, pos uint64) error {\n\tif uint64(len(i.mmap)) < i.size+entWidth {\n\t\treturn lib.Wrap(io.EOF, \"Not enough space to append index data\")\n\t}\n\n\tenc.PutUint32(i.mmap[i.size:i.size+offWidth], off)\n\tenc.PutUint64(i.mmap[i.size+offWidth:i.size+entWidth], pos)\n\n\ti.size += entWidth\n\n\treturn nil\n}", "func IndexWrite(x *suffixarray.Index, w io.Writer) error", "func clearIndex(\n\tctx context.Context,\n\texecCfg *sql.ExecutorConfig,\n\ttableDesc catalog.TableDescriptor,\n\tindex descpb.IndexDescriptor,\n) error {\n\tlog.Infof(ctx, \"clearing index %d 
from table %d\", index.ID, tableDesc.GetID())\n\tif index.IsInterleaved() {\n\t\treturn errors.Errorf(\"unexpected interleaved index %d\", index.ID)\n\t}\n\n\tsp := tableDesc.IndexSpan(execCfg.Codec, index.ID)\n\tstart, err := keys.Addr(sp.Key)\n\tif err != nil {\n\t\treturn errors.Errorf(\"failed to addr index start: %v\", err)\n\t}\n\tend, err := keys.Addr(sp.EndKey)\n\tif err != nil {\n\t\treturn errors.Errorf(\"failed to addr index end: %v\", err)\n\t}\n\trSpan := roachpb.RSpan{Key: start, EndKey: end}\n\treturn clearSpanData(ctx, execCfg.DB, execCfg.DistSender, rSpan)\n}", "func (mc *MockContiv) SetPodAppNsIndex(pod podmodel.ID, nsIndex uint32) {\n\tmc.podAppNs[pod] = nsIndex\n}", "func (self *SinglePad) SetIndexA(member int) {\n self.Object.Set(\"index\", member)\n}", "func (s *BaseCymbolListener) ExitIndex(ctx *IndexContext) {}", "func WriteIndex(index common.Index) error {\n\tbytes, err := json.Marshal(index)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = ioutil.WriteFile(indexCachePath, bytes, 0600)\n\treturn err\n}", "func (rb *RB) CommitIndex_ignore_gap(index int) error {\n\tif index < 0 {\n\t\tmsg := fmt.Sprintf(\"index:%v < 0\", index)\n\t\treturn errors.New(msg)\n\t}\n\tidx := rb.Arrayindex(index)\n\trb.mu[idx].Lock()\n\tdefer rb.mu[idx].Unlock()\n\t// Index messed up\n\tif rb.idx[idx] != index {\n\t\tmsg := fmt.Sprintf(\"commit index:%v != stored index %v\", index, rb.idx[idx])\n\t\treturn errors.New(msg)\n\t}\n\trb.commit[idx] = true\n\treturn nil\n}", "func (s *BasevhdlListener) ExitIndex_specification(ctx *Index_specificationContext) {}", "func dataframeResetIndex(_ *starlark.Thread, b *starlark.Builtin, args starlark.Tuple, kwargs []starlark.Tuple) (starlark.Value, error) {\n\tif err := starlark.UnpackArgs(\"reset_index\", args, kwargs); err != nil {\n\t\treturn nil, err\n\t}\n\tself := b.Receiver().(*DataFrame)\n\n\tif self.index == nil {\n\t\treturn self, nil\n\t}\n\n\tnewColumns := append([]string{\"index\"}, 
self.columns.texts...)\n\tnewBody := make([]Series, 0, self.numCols())\n\n\tnewBody = append(newBody, Series{which: typeObj, valObjs: self.index.texts})\n\tfor _, col := range self.body {\n\t\tnewBody = append(newBody, col)\n\t}\n\n\treturn &DataFrame{\n\t\tcolumns: NewIndex(newColumns, \"\"),\n\t\tbody: newBody,\n\t}, nil\n}", "func (_e *MockDataCoord_Expecter) DropIndex(ctx interface{}, req interface{}) *MockDataCoord_DropIndex_Call {\n\treturn &MockDataCoord_DropIndex_Call{Call: _e.mock.On(\"DropIndex\", ctx, req)}\n}", "func (w *worker) cleanupPhysicalTableIndex(t table.PhysicalTable, reorgInfo *reorgInfo) error {\n\tlogutil.BgLogger().Info(\"start to clean up index\", zap.String(\"category\", \"ddl\"), zap.String(\"job\", reorgInfo.Job.String()), zap.String(\"reorgInfo\", reorgInfo.String()))\n\treturn w.writePhysicalTableRecord(w.sessPool, t, typeCleanUpIndexWorker, reorgInfo)\n}", "func (du *DatumUpdate) SetNillableIndex(i *int) *DatumUpdate {\n\tif i != nil {\n\t\tdu.SetIndex(*i)\n\t}\n\treturn du\n}", "func (wou *WorkOrderUpdate) ClearIndex() *WorkOrderUpdate {\n\twou.index = nil\n\twou.clearindex = true\n\treturn wou\n}", "func (s *BasePlSqlParserListener) ExitAlter_index(ctx *Alter_indexContext) {}", "func (m *hasher) maskIndex(index []byte, depth int) []byte {\n\tif got, want := len(index), m.Size(); got != want {\n\t\tpanic(fmt.Sprintf(\"index len: %d, want %d\", got, want))\n\t}\n\tif got, want := depth, m.BitLen(); got < 0 || got > want {\n\t\tpanic(fmt.Sprintf(\"depth: %d, want <= %d && > 0\", got, want))\n\t}\n\n\t// Create an empty index Size() bytes long.\n\tret := make([]byte, m.Size())\n\tif depth > 0 {\n\t\t// Copy the first depthBytes.\n\t\tdepthBytes := (depth + 7) >> 3\n\t\tcopy(ret, index[:depthBytes])\n\t\t// Mask off unwanted bits in the last byte.\n\t\tret[depthBytes-1] = ret[depthBytes-1] & leftmask[depth%8]\n\t}\n\treturn ret\n}", "func (s *BasePlSqlParserListener) ExitDrop_index(ctx *Drop_indexContext) {}", "func (wou 
*WorkOrderUpdate) SetNillableIndex(i *int) *WorkOrderUpdate {\n\tif i != nil {\n\t\twou.SetIndex(*i)\n\t}\n\treturn wou\n}", "func (o *KeyValueOrdered) RemoveIndex(idx int) (cell KeyValueCapsule) {\n\tcell = o.s[idx]\n\tdelete(o.m, o.s[idx].K)\n\to.shift(idx+1, len(o.s), -1)\n\to.s = append(o.s[:idx], o.s[idx+1:]...)\n\treturn\n}", "func (x *Index) Write(w io.Writer) error", "func UseIndex(designDocument, name string) Parameter {\n\treturn func(pa Parameterizable) {\n\t\tif name == \"\" {\n\t\t\tpa.SetParameter(\"use_index\", designDocument)\n\t\t} else {\n\t\t\tpa.SetParameter(\"use_index\", []string{designDocument, name})\n\t\t}\n\t}\n}", "func (s *BasePlSqlParserListener) ExitDrop_index_partition(ctx *Drop_index_partitionContext) {}", "func UseIndexCodec(c multicodec.Code) Option {\n\treturn func(o *Options) {\n\t\to.IndexCodec = c\n\t}\n}", "func (o *Object) SetIdx(idx uint32, val interface{}) error {\n\treturn set(o, \"\", idx, val)\n}", "func (duo *DatumUpdateOne) SetNillableIndex(i *int) *DatumUpdateOne {\n\tif i != nil {\n\t\tduo.SetIndex(*i)\n\t}\n\treturn duo\n}", "func (mgr *Manager) ClosePIndex(pindex *PIndex) error {\n\treturn syncWorkReq(mgr.janitorCh, JANITOR_CLOSE_PINDEX,\n\t\t\"api-ClosePIndex\", pindex)\n}", "func (s *BasePlSqlParserListener) ExitIndex_properties(ctx *Index_propertiesContext) {}", "func (s *BasePlSqlParserListener) ExitModify_index_default_attrs(ctx *Modify_index_default_attrsContext) {\n}", "func TestAfterIndexWrapAroundCorrectIndex(t *testing.T) {\n\tremoveTestFiles()\n\tstartID := 999989\n\tendID := 1000000\n\n\t//Create some old files to clean up\n\tfor i := startID; i < endID; i = i + 1 {\n\t\tcreateTestFile(i, t)\n\t}\n\n\tfor i := 2; i < 10; i = i + 1 {\n\t\tcreateTestFile(i, t)\n\t}\n\n\tstorage := getNewStorageManager()\n\tlog.Println(storage.FileList())\n\n\tif storage.Index() != 10 {\n\t\tt.Errorf(\"Filename index (%d) is incorrect\", storage.Index())\n\t}\n\n\tfileList := storage.FileList()\n\n\tassert.NotNil(t, 
fileList, \"FileList should not be nil\")\n\tassert.NotEmpty(t, fileList, \"FileList should not be empty\")\n\n\tx := 0\n\tfor i := startID; i < endID; i = i + 1 {\n\t\tassert.Equal(t, fileList[x], storage.WorkDir()+C.SLASH+T_PREFIX+fmt.Sprintf(FILENAME_FORMAT, i)+T_SUFFIX)\n\t\tx = x + 1\n\t}\n}", "func (wouo *WorkOrderUpdateOne) SetNillableIndex(i *int) *WorkOrderUpdateOne {\n\tif i != nil {\n\t\twouo.SetIndex(*i)\n\t}\n\treturn wouo\n}", "func (x *Index) Write(w io.Writer) error {}", "func (i *index) Write(off uint32, pos uint64) error {\n\tif uint64(len(i.mmap)) < i.size+entWidth {\n\t\treturn io.EOF\n\t}\n\n\tenc.PutUint32(i.mmap[i.size:i.size+offWidth], off)\n\tenc.PutUint64(i.mmap[i.size+offWidth:i.size+entWidth], pos)\n\ti.size += uint64(entWidth)\n\treturn nil\n}", "func indexEnc() {\n\tfor i := 0; i < indexSize; i++ {\n\t\tindexItemEnc(testData[i], i)\n\t}\n}", "func DeleteIndex(a interface{}, index int) interface{} {\n\tswitch a.(type) {\n\tcase []int:\n\t\treturn DeleteIndexInt(a.([]int), index)\n\tdefault:\n\t\tpanic(\"not support type\")\n\t}\n}", "func (pw *PixelWand) SetIndex(index *IndexPacket) {\n\tC.PixelSetIndex(pw.pw, C.IndexPacket(*index))\n\truntime.KeepAlive(pw)\n}", "func (g *Index) ResetIndex(c *Client, tree Treeish) error {\n\tnewEntries, err := ExpandGitTreeIntoIndexes(c, tree, true, false)\n\tif err != nil {\n\t\treturn err\n\t}\n\tg.NumberIndexEntries = uint32(len(newEntries))\n\tg.Objects = newEntries\n\treturn nil\n}", "func (this *KeyspaceTerm) SetIndexJoinNest() {\n\tthis.property |= TERM_INDEX_JOIN_NEST\n}", "func (c *Chip8) SetIndex() {\n\tc.index = c.inst & 0x0FFF\n}", "func (i ImageIndexer) DeleteFromIndex(request DeleteFromIndexRequest) error {\n\tbuildDir, outDockerfile, cleanup, err := buildContext(request.Generate, request.OutDockerfile)\n\tdefer cleanup()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tdatabasePath, err := i.ExtractDatabase(buildDir, request.FromIndex, request.CaFile, request.SkipTLSVerify, 
request.PlainHTTP)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// Run opm registry delete on the database\n\tdeleteFromRegistryReq := registry.DeleteFromRegistryRequest{\n\t\tPackages: request.Operators,\n\t\tInputDatabase: databasePath,\n\t\tPermissive: request.Permissive,\n\t}\n\n\t// Delete the bundles from the registry\n\terr = i.RegistryDeleter.DeleteFromRegistry(deleteFromRegistryReq)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// generate the dockerfile\n\tdockerfile := i.DockerfileGenerator.GenerateIndexDockerfile(request.BinarySourceImage, databasePath)\n\terr = write(dockerfile, outDockerfile, i.Logger)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif request.Generate {\n\t\treturn nil\n\t}\n\n\t// build the dockerfile\n\terr = build(outDockerfile, request.Tag, i.CommandRunner, i.Logger)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}", "func (va *VertexArray) SetIndexData(data []uint32) {\n\t// Index Buffer Object\n\tgl.GenBuffers(1, &va.ibo) // generates the buffer (or multiple)\n\tgl.BindBuffer(gl.ELEMENT_ARRAY_BUFFER, va.ibo) // tells OpenGL what kind of buffer this is\n\n\t// BufferData assigns data to the buffer.\n\tgl.BufferData(gl.ELEMENT_ARRAY_BUFFER, len(data)*4, gl.Ptr(data), gl.STATIC_DRAW)\n\n\tva.vertices = len(data)\n}", "func (wouo *WorkOrderUpdateOne) ClearIndex() *WorkOrderUpdateOne {\n\twouo.index = nil\n\twouo.clearindex = true\n\treturn wouo\n}", "func (ll *LevelLedger) SetClassIndex(ref *record.Reference, idx *index.ClassLifeline) error {\n\tk := prefixkey(scopeIDLifeline, ref.Key())\n\tencoded, err := index.EncodeClassLifeline(idx)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn ll.ldb.Put(k, encoded, nil)\n}", "func (c *index) Create(sctx sessionctx.Context, rm kv.RetrieverMutator, indexedValues []types.Datum, h int64, opts ...table.CreateIdxOptFunc) (int64, error) {\n\tvar opt table.CreateIdxOpt\n\tfor _, fn := range opts {\n\t\tfn(&opt)\n\t}\n\tss := opt.AssertionProto\n\twriteBufs := 
sctx.GetSessionVars().GetWriteStmtBufs()\n\tskipCheck := sctx.GetSessionVars().LightningMode || sctx.GetSessionVars().StmtCtx.BatchCheck\n\tkey, distinct, err := c.GenIndexKey(sctx.GetSessionVars().StmtCtx, indexedValues, h, writeBufs.IndexKeyBuf)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\n\tctx := opt.Ctx\n\tif opt.Untouched {\n\t\ttxn, err1 := sctx.Txn(true)\n\t\tif err1 != nil {\n\t\t\treturn 0, err1\n\t\t}\n\t\t// If the index kv was untouched(unchanged), and the key/value already exists in mem-buffer,\n\t\t// should not overwrite the key with un-commit flag.\n\t\t// So if the key exists, just do nothing and return.\n\t\t_, err = txn.GetMemBuffer().Get(ctx, key)\n\t\tif err == nil {\n\t\t\treturn 0, nil\n\t\t}\n\t}\n\n\t// save the key buffer to reuse.\n\twriteBufs.IndexKeyBuf = key\n\tif !distinct {\n\t\t// non-unique index doesn't need store value, write a '0' to reduce space\n\t\tvalue := []byte{'0'}\n\t\tif opt.Untouched {\n\t\t\tvalue[0] = kv.UnCommitIndexKVFlag\n\t\t}\n\t\terr = rm.Set(key, value)\n\t\tif ss != nil {\n\t\t\tss.SetAssertion(key, kv.None)\n\t\t}\n\t\treturn 0, err\n\t}\n\n\tif skipCheck {\n\t\tvalue := EncodeHandle(h)\n\t\tif opt.Untouched {\n\t\t\tvalue = append(value, kv.UnCommitIndexKVFlag)\n\t\t}\n\t\terr = rm.Set(key, value)\n\t\tif ss != nil {\n\t\t\tss.SetAssertion(key, kv.None)\n\t\t}\n\t\treturn 0, err\n\t}\n\n\tif ctx != nil {\n\t\tif span := opentracing.SpanFromContext(ctx); span != nil && span.Tracer() != nil {\n\t\t\tspan1 := span.Tracer().StartSpan(\"index.Create\", opentracing.ChildOf(span.Context()))\n\t\t\tdefer span1.Finish()\n\t\t\tctx = opentracing.ContextWithSpan(ctx, span1)\n\t\t}\n\t} else {\n\t\tctx = context.TODO()\n\t}\n\n\tvar value []byte\n\tvalue, err = rm.Get(ctx, key)\n\t// If (opt.Untouched && err == nil) is true, means the key is exists and exists in TiKV, not in txn mem-buffer,\n\t// then should also write the untouched index key/value to mem-buffer to make sure the data\n\t// is consistent with the 
index in txn mem-buffer.\n\tif kv.IsErrNotFound(err) || (opt.Untouched && err == nil) {\n\t\tv := EncodeHandle(h)\n\t\tif opt.Untouched {\n\t\t\tv = append(v, kv.UnCommitIndexKVFlag)\n\t\t}\n\t\terr = rm.Set(key, v)\n\t\tif ss != nil {\n\t\t\tss.SetAssertion(key, kv.NotExist)\n\t\t}\n\t\treturn 0, err\n\t}\n\n\thandle, err := DecodeHandle(value)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\treturn handle, kv.ErrKeyExists\n}", "func (s *BasePlSqlParserListener) ExitCreate_index(ctx *Create_indexContext) {}", "func (k Keeper) RemoveCdpOwnerIndex(ctx sdk.Context, cdp types.CDP) {\n\tstore := prefix.NewStore(ctx.KVStore(k.key), types.CdpIDKeyPrefix)\n\tcdpIDs, found := k.GetCdpIdsByOwner(ctx, cdp.Owner)\n\tif !found {\n\t\treturn\n\t}\n\tupdatedCdpIds := []uint64{}\n\tfor _, id := range cdpIDs {\n\t\tif id != cdp.ID {\n\t\t\tupdatedCdpIds = append(updatedCdpIds, id)\n\t\t}\n\t}\n\tif len(updatedCdpIds) == 0 {\n\t\tstore.Delete(cdp.Owner)\n\t}\n\tstore.Set(cdp.Owner, k.cdc.MustMarshalBinaryLengthPrefixed(updatedCdpIds))\n\n}", "func (s *BasePlSqlParserListener) ExitCluster_index_clause(ctx *Cluster_index_clauseContext) {}", "func FixMaxEntryIndex(rdb *Store, profile *pb.Profile) error {\n\tuuid1, err := uuid.FromString(profile.Uuid)\n\tif err != nil {\n\t\treturn err\n\t}\n\t// MAX Delimiter Key\n\tkey := MaxUUIDFlakeKey(TableEntryIndex, uuid1)\n\treturn rdb.Put(key.Bytes(), []byte(\"0000\"))\n}", "func UseIndex() *ishell.Cmd {\n\n\treturn &ishell.Cmd{\n\t\tName: \"use\",\n\t\tHelp: \"Select index to use for subsequent document operations\",\n\t\tFunc: func(c *ishell.Context) {\n\t\t\tif context == nil {\n\t\t\t\terrorMsg(c, errNotConnected)\n\t\t\t} else {\n\t\t\t\tdefer restorePrompt(c)\n\t\t\t\tif len(c.Args) < 1 {\n\t\t\t\t\tif context.ActiveIndex != \"\" {\n\t\t\t\t\t\tcprintlist(c, \"Using index \", cy(context.ActiveIndex))\n\t\t\t\t\t} else {\n\t\t\t\t\t\tcprintln(c, \"No index is in use\")\n\t\t\t\t\t}\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tif c.Args[0] == \"--\" 
{\n\t\t\t\t\tif context.ActiveIndex != \"\" {\n\t\t\t\t\t\tcprintlist(c, \"Index \", cy(context.ActiveIndex), \" is no longer in use\")\n\t\t\t\t\t\tcontext.ActiveIndex = \"\"\n\t\t\t\t\t} else {\n\t\t\t\t\t\tcprintln(c, \"No index is in use\")\n\t\t\t\t\t}\n\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\ts, err := context.ResolveAndValidateIndex(c.Args[0])\n\t\t\t\tif err != nil {\n\t\t\t\t\terrorMsg(c, err.Error())\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tcontext.ActiveIndex = s\n\t\t\t\tif s != c.Args[0] {\n\t\t\t\t\tcprintlist(c, \"For alias \", cyb(c.Args[0]), \" selected index \", cy(s))\n\t\t\t\t} else {\n\t\t\t\t\tcprintlist(c, \"Selected index \", cy(s))\n\t\t\t\t}\n\t\t\t}\n\t\t},\n\t}\n\n}", "func (g Index) WriteIndex(file io.Writer) error {\n\tsort.Sort(ByPath(g.Objects))\n\ts := sha1.New()\n\tw := io.MultiWriter(file, s)\n\tbinary.Write(w, binary.BigEndian, g.fixedGitIndex)\n\tfor _, entry := range g.Objects {\n\t\tbinary.Write(w, binary.BigEndian, entry.FixedIndexEntry)\n\t\tbinary.Write(w, binary.BigEndian, []byte(entry.PathName))\n\t\tpadding := 8 - ((82 + len(entry.PathName) + 4) % 8)\n\t\tp := make([]byte, padding)\n\t\tbinary.Write(w, binary.BigEndian, p)\n\t}\n\tbinary.Write(w, binary.BigEndian, s.Sum(nil))\n\treturn nil\n}", "func (s *BasevhdlListener) ExitIndex_constraint(ctx *Index_constraintContext) {}", "func (a *AliasRemoveAction) Index(index ...string) *AliasRemoveAction {\n\ta.index = append(a.index, index...)\n\treturn a\n}", "func (s *BaseMySqlParserListener) ExitIndexHint(ctx *IndexHintContext) {}", "func (gen *AddressGenerator) SetIndex(i uint) *AddressGenerator {\n\tgen.state = addressState(i)\n\treturn gen\n}", "func (pal *CGBPalette) updateIndex(value byte) {\n\tpal.index = value & 0x3F\n\tpal.inc = bits.Test(value, 7)\n}", "func (s *BasePlSqlParserListener) ExitIndex_attributes(ctx *Index_attributesContext) {}", "func (rb *ShardsRecordBuilder) IndexingIndexTotal(indexingindextotal string) *ShardsRecordBuilder {\n\trb.v.IndexingIndexTotal = 
&indexingindextotal\n\treturn rb\n}", "func processIndex(length, index int) int {\n\tif index >= 0 {\n\t\tif index >= length {\n\t\t\treturn -1\n\t\t}\n\t\treturn index\n\t}\n\tindex = length + index\n\tif index < 0 || index >= length {\n\t\treturn -1\n\t}\n\treturn index\n}", "func (s *BaseDMLListener) ExitIndexType(ctx *IndexTypeContext) {}", "func (du *DatumUpdate) SetIndex(i int) *DatumUpdate {\n\tdu.mutation.ResetIndex()\n\tdu.mutation.SetIndex(i)\n\treturn du\n}", "func DecodeIndex(buf []byte) ([]byte, int64, int) {\n\tn := decodeInt(buf[0:4])\n\tif n+10 > len(buf) {\n\t\treturn nil, -1, 0\n\t}\n\tkey := buf[4 : n+4]\n\toff := decodeInt48(buf[n+4 : n+10])\n\treturn key, off, n + 10\n}", "func (b *FlushingBatch) Index(id string, data any) error {\n\tif err := b.batch.Index(id, data); err != nil {\n\t\treturn err\n\t}\n\treturn b.flushIfFull()\n}", "func (ci *createIndex) ApplyFilters() error {\n\treturn nil\n}", "func (s *StashList) RemoveStashAtIdx(ctx context.Context, vw types.ValueWriter, idx int) (hash.Hash, error) {\n\tamCount, err := s.am.Count()\n\tif err != nil {\n\t\treturn hash.Hash{}, err\n\t}\n\tif amCount <= idx {\n\t\treturn hash.Hash{}, fmt.Errorf(\"fatal: log for 'stash' only has %v entries\", amCount)\n\t}\n\n\tstash, err := getNthStash(ctx, s.am, amCount, idx)\n\tif err != nil {\n\t\treturn hash.Hash{}, err\n\t}\n\n\tame := s.am.Editor()\n\terr = ame.Delete(ctx, strconv.Itoa(stash.key))\n\tif err != nil {\n\t\treturn hash.Hash{}, err\n\t}\n\n\ts.am, err = ame.Flush(ctx)\n\tif err != nil {\n\t\treturn hash.Hash{}, err\n\t}\n\treturn s.updateStashListMap(ctx, vw)\n}", "func TestEnsureSkipListIndex(t *testing.T) {\n\tc := createClient(t, nil)\n\tdb := ensureDatabase(nil, c, \"index_test\", nil, t)\n\n\ttestOptions := []*driver.EnsureSkipListIndexOptions{\n\t\tnil,\n\t\t{Unique: true, Sparse: false, NoDeduplicate: true},\n\t\t{Unique: true, Sparse: true, NoDeduplicate: true},\n\t\t{Unique: false, Sparse: false, NoDeduplicate: 
false},\n\t\t{Unique: false, Sparse: true, NoDeduplicate: false},\n\t}\n\n\tfor i, options := range testOptions {\n\t\tcol := ensureCollection(nil, db, fmt.Sprintf(\"skiplist_index_test_%d\", i), nil, t)\n\n\t\tidx, created, err := col.EnsureSkipListIndex(nil, []string{\"name\", \"title\"}, options)\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"Failed to create new index: %s\", describe(err))\n\t\t}\n\t\tif !created {\n\t\t\tt.Error(\"Expected created to be true, got false\")\n\t\t}\n\t\tif idxType := idx.Type(); idxType != driver.SkipListIndex {\n\t\t\tt.Errorf(\"Expected SkipListIndex, found `%s`\", idxType)\n\t\t}\n\t\tif options != nil && idx.Unique() != options.Unique {\n\t\t\tt.Errorf(\"Expected Unique to be %t, found `%t`\", options.Unique, idx.Unique())\n\t\t}\n\t\tif options != nil && idx.Sparse() != options.Sparse {\n\t\t\tt.Errorf(\"Expected Sparse to be %t, found `%t`\", options.Sparse, idx.Sparse())\n\t\t}\n\t\tif options != nil && !idx.Deduplicate() != options.NoDeduplicate {\n\t\t\tt.Errorf(\"Expected NoDeduplicate to be %t, found `%t`\", options.NoDeduplicate, idx.Deduplicate())\n\t\t}\n\n\t\t// Index must exists now\n\t\tif found, err := col.IndexExists(nil, idx.Name()); err != nil {\n\t\t\tt.Fatalf(\"Failed to check index '%s' exists: %s\", idx.Name(), describe(err))\n\t\t} else if !found {\n\t\t\tt.Errorf(\"Index '%s' does not exist, expected it to exist\", idx.Name())\n\t\t}\n\n\t\t// Ensure again, created must be false now\n\t\t_, created, err = col.EnsureSkipListIndex(nil, []string{\"name\", \"title\"}, options)\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"Failed to re-create index: %s\", describe(err))\n\t\t}\n\t\tif created {\n\t\t\tt.Error(\"Expected created to be false, got true\")\n\t\t}\n\n\t\t// Remove index\n\t\tif err := idx.Remove(nil); err != nil {\n\t\t\tt.Fatalf(\"Failed to remove index '%s': %s\", idx.Name(), describe(err))\n\t\t}\n\n\t\t// Index must not exists now\n\t\tif found, err := col.IndexExists(nil, idx.Name()); err != nil 
{\n\t\t\tt.Fatalf(\"Failed to check index '%s' exists: %s\", idx.Name(), describe(err))\n\t\t} else if found {\n\t\t\tt.Errorf(\"Index '%s' does exist, expected it not to exist\", idx.Name())\n\t\t}\n\t}\n}", "func (p *Buffer) saveIndex(ptr unsafe.Pointer, idx uint) {\n\tif p.array_indexes == nil {\n\t\t// the 1st time we need to allocate\n\t\tp.array_indexes = make(map[unsafe.Pointer]uint)\n\t}\n\tp.array_indexes[ptr] = idx\n}", "func (s *BasePlSqlParserListener) ExitAlter_index_partitioning(ctx *Alter_index_partitioningContext) {\n}", "func poolSetIndex(a interface{}, i int) {\n\ta.(*freeClientPoolEntry).index = i\n}", "func (s *LDBStore) CleanGCIndex() error {\n\ts.lock.Lock()\n\tdefer s.lock.Unlock()\n\n\tbatch := leveldb.Batch{}\n\n\tvar okEntryCount uint64\n\tvar totalEntryCount uint64\n\n\t// throw out all gc indices, we will rebuild from cleaned index\n\tit := s.db.NewIterator()\n\tit.Seek([]byte{keyGCIdx})\n\tvar gcDeletes int\n\tfor it.Valid() {\n\t\trowType, _ := parseIdxKey(it.Key())\n\t\tif rowType != keyGCIdx {\n\t\t\tbreak\n\t\t}\n\t\tbatch.Delete(it.Key())\n\t\tgcDeletes++\n\t\tit.Next()\n\t}\n\tlog.Debug(\"gc\", \"deletes\", gcDeletes)\n\tif err := s.db.Write(&batch); err != nil {\n\t\treturn err\n\t}\n\tbatch.Reset()\n\n\tit.Release()\n\n\t// corrected po index pointer values\n\tvar poPtrs [256]uint64\n\n\t// set to true if chunk count not on 4096 iteration boundary\n\tvar doneIterating bool\n\n\t// last key index in previous iteration\n\tlastIdxKey := []byte{keyIndex}\n\n\t// counter for debug output\n\tvar cleanBatchCount int\n\n\t// go through all key index entries\n\tfor !doneIterating {\n\t\tcleanBatchCount++\n\t\tvar idxs []dpaDBIndex\n\t\tvar chunkHashes [][]byte\n\t\tvar pos []uint8\n\t\tit := s.db.NewIterator()\n\n\t\tit.Seek(lastIdxKey)\n\n\t\t// 4096 is just a nice number, don't look for any hidden meaning here...\n\t\tvar i int\n\t\tfor i = 0; i < 4096; i++ {\n\n\t\t\t// this really shouldn't happen unless database is empty\n\t\t\t// 
but let's keep it to be safe\n\t\t\tif !it.Valid() {\n\t\t\t\tdoneIterating = true\n\t\t\t\tbreak\n\t\t\t}\n\n\t\t\t// if it's not keyindex anymore we're done iterating\n\t\t\trowType, chunkHash := parseIdxKey(it.Key())\n\t\t\tif rowType != keyIndex {\n\t\t\t\tdoneIterating = true\n\t\t\t\tbreak\n\t\t\t}\n\n\t\t\t// decode the retrieved index\n\t\t\tvar idx dpaDBIndex\n\t\t\terr := decodeIndex(it.Value(), &idx)\n\t\t\tif err != nil {\n\t\t\t\treturn fmt.Errorf(\"corrupt index: %v\", err)\n\t\t\t}\n\t\t\tpo := s.po(chunkHash)\n\t\t\tlastIdxKey = it.Key()\n\n\t\t\t// if we don't find the data key, remove the entry\n\t\t\t// if we find it, add to the array of new gc indices to create\n\t\t\tdataKey := getDataKey(idx.Idx, po)\n\t\t\t_, err = s.db.Get(dataKey)\n\t\t\tif err != nil {\n\t\t\t\tlog.Warn(\"deleting inconsistent index (missing data)\", \"key\", chunkHash)\n\t\t\t\tbatch.Delete(it.Key())\n\t\t\t} else {\n\t\t\t\tidxs = append(idxs, idx)\n\t\t\t\tchunkHashes = append(chunkHashes, chunkHash)\n\t\t\t\tpos = append(pos, po)\n\t\t\t\tokEntryCount++\n\t\t\t\tif idx.Idx > poPtrs[po] {\n\t\t\t\t\tpoPtrs[po] = idx.Idx\n\t\t\t\t}\n\t\t\t}\n\t\t\ttotalEntryCount++\n\t\t\tit.Next()\n\t\t}\n\t\tit.Release()\n\n\t\t// flush the key index corrections\n\t\terr := s.db.Write(&batch)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tbatch.Reset()\n\n\t\t// add correct gc indices\n\t\tfor i, okIdx := range idxs {\n\t\t\tgcIdxKey := getGCIdxKey(&okIdx)\n\t\t\tgcIdxData := getGCIdxValue(&okIdx, pos[i], chunkHashes[i])\n\t\t\tbatch.Put(gcIdxKey, gcIdxData)\n\t\t\tlog.Trace(\"clean ok\", \"key\", chunkHashes[i], \"gcKey\", gcIdxKey, \"gcData\", gcIdxData)\n\t\t}\n\n\t\t// flush them\n\t\terr = s.db.Write(&batch)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tbatch.Reset()\n\n\t\tlog.Debug(\"clean gc index pass\", \"batch\", cleanBatchCount, \"checked\", i, \"kept\", len(idxs))\n\t}\n\n\tlog.Debug(\"gc cleanup entries\", \"ok\", okEntryCount, \"total\", totalEntryCount, 
\"batchlen\", batch.Len())\n\n\t// lastly add updated entry count\n\tvar entryCount [8]byte\n\tbinary.BigEndian.PutUint64(entryCount[:], okEntryCount)\n\tbatch.Put(keyEntryCnt, entryCount[:])\n\n\t// and add the new po index pointers\n\tvar poKey [2]byte\n\tpoKey[0] = keyDistanceCnt\n\tfor i, poPtr := range poPtrs {\n\t\tpoKey[1] = uint8(i)\n\t\tif poPtr == 0 {\n\t\t\tbatch.Delete(poKey[:])\n\t\t} else {\n\t\t\tvar idxCount [8]byte\n\t\t\tbinary.BigEndian.PutUint64(idxCount[:], poPtr)\n\t\t\tbatch.Put(poKey[:], idxCount[:])\n\t\t}\n\t}\n\n\t// if you made it this far your harddisk has survived. Congratulations\n\treturn s.db.Write(&batch)\n}", "func (o *IssueRemoveLabelParams) WithIndex(index int64) *IssueRemoveLabelParams {\n\to.SetIndex(index)\n\treturn o\n}", "func (s *store) afterIndex(index uint64) <-chan struct{} {\n\ts.mu.RLock()\n\tdefer s.mu.RUnlock()\n\n\tif index < s.data.Data.Index {\n\t\t// Client needs update so return a closed channel.\n\t\tch := make(chan struct{})\n\t\tclose(ch)\n\t\treturn ch\n\t}\n\n\treturn s.dataChanged\n}", "func ConvertToIndexUsage(defn *common.IndexDefn, localMeta *LocalIndexMetadata) (*IndexUsage, error) {\n\n\t// find the topology metadata\n\ttopology := findTopologyByBucket(localMeta.IndexTopologies, defn.Bucket)\n\tif topology == nil {\n\t\tlogging.Errorf(\"Planner::getIndexLayout: Fail to find index topology for bucket %v.\", defn.Bucket)\n\t\treturn nil, nil\n\t}\n\n\t// find the index instance from topology metadata\n\tinst := topology.GetIndexInstByDefn(defn.DefnId)\n\tif inst == nil {\n\t\tlogging.Errorf(\"Planner::getIndexLayout: Fail to find index instance for definition %v.\", defn.DefnId)\n\t\treturn nil, nil\n\t}\n\n\t// Check the index state. 
Only handle index that is active or being built.\n\t// For index that is in the process of being deleted, planner expects the resource\n\t// will eventually be freed, so it won't included in planning.\n\tstate, _ := topology.GetStatusByDefn(defn.DefnId)\n\tif state != common.INDEX_STATE_CREATED &&\n\t\tstate != common.INDEX_STATE_DELETED &&\n\t\tstate != common.INDEX_STATE_ERROR &&\n\t\tstate != common.INDEX_STATE_NIL {\n\n\t\t// create an index usage object\n\t\tindex := newIndexUsage(defn.DefnId, common.IndexInstId(inst.InstId), defn.Name, defn.Bucket)\n\n\t\t// index is pinned to a node\n\t\tif len(defn.Nodes) != 0 {\n\t\t\tindex.Hosts = defn.Nodes\n\t\t}\n\n\t\t// update sizing\n\t\tindex.IsPrimary = defn.IsPrimary\n\t\tindex.IsMOI = (defn.Using == common.IndexType(common.MemoryOptimized) || defn.Using == common.IndexType(common.MemDB))\n\t\tindex.NoUsage = defn.Deferred && state == common.INDEX_STATE_READY\n\n\t\t// Is the index being deleted by user? Thsi will read the delete token from metakv. 
If untable read from metakv,\n\t\t// pendingDelete is false (cannot assert index is to-be-delete).\n\t\tpendingDelete, err := client.DeleteCommandTokenExist(defn.DefnId)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tindex.pendingDelete = pendingDelete\n\n\t\t// update internal info\n\t\tindex.Instance = &common.IndexInst{\n\t\t\tInstId: common.IndexInstId(inst.InstId),\n\t\t\tDefn: *defn,\n\t\t\tState: common.IndexState(inst.State),\n\t\t\tStream: common.StreamId(inst.StreamId),\n\t\t\tError: inst.Error,\n\t\t\tReplicaId: int(inst.ReplicaId),\n\t\t\tVersion: int(inst.Version),\n\t\t\tRState: common.RebalanceState(inst.RState),\n\t\t}\n\n\t\tlogging.Debugf(\"Create Index usage %v %v %v %v\", index.Name, index.Bucket, index.Instance.InstId, index.Instance.ReplicaId)\n\n\t\treturn index, nil\n\t}\n\n\treturn nil, nil\n}", "func (p Permutator) Index() int {\n\t<- p.idle\n\tj := p.index - 1\n\tp.idle <- true\n\treturn j\n}", "func (o *IssueRemoveLabelParams) SetIndex(index int64) {\n\to.Index = index\n}", "func (s *storageMgr) updateIndexSnapMapForIndex(idxInstId common.IndexInstId, idxInst common.IndexInst,\n\tpartnMap PartitionInstMap, streamId common.StreamId, keyspaceId string) {\n\n\tpartitionIDs, _ := idxInst.Pc.GetAllPartitionIds()\n\tlogging.Infof(\"StorageMgr::updateIndexSnapMapForIndex IndexInst %v Partitions %v\",\n\t\tidxInstId, partitionIDs)\n\n\tneedRestart := false\n\t//if keyspace and stream have been provided\n\tif keyspaceId != \"\" && streamId != common.ALL_STREAMS {\n\t\t//skip the index if either keyspaceId or stream don't match\n\t\tif idxInst.Defn.KeyspaceId(idxInst.Stream) != keyspaceId || idxInst.Stream != streamId {\n\t\t\treturn\n\t\t}\n\t\t//skip deleted indexes\n\t\tif idxInst.State == common.INDEX_STATE_DELETED {\n\t\t\treturn\n\t\t}\n\t}\n\n\tindexSnapMap := s.indexSnapMap.Clone()\n\tsnapC := indexSnapMap[idxInstId]\n\tif snapC != nil {\n\t\tsnapC.Lock()\n\t\tDestroyIndexSnapshot(snapC.snap)\n\t\tdelete(indexSnapMap, 
idxInstId)\n\t\ts.indexSnapMap.Set(indexSnapMap)\n\t\tsnapC.Unlock()\n\t\ts.notifySnapshotDeletion(idxInstId)\n\t}\n\n\tvar tsVbuuid *common.TsVbuuid\n\tvar err error\n\tpartnSnapMap := make(PartnSnapMap)\n\n\tfor _, partnInst := range partnMap {\n\t\tpartnSnapMap, tsVbuuid, err = s.openSnapshot(idxInstId, partnInst, partnSnapMap)\n\t\tif err != nil {\n\t\t\tif err == errStorageCorrupted {\n\t\t\t\tneedRestart = true\n\t\t\t} else {\n\t\t\t\tpanic(\"Unable to open snapshot -\" + err.Error())\n\t\t\t}\n\t\t}\n\n\t\tif partnSnapMap == nil {\n\t\t\tbreak\n\t\t}\n\n\t\t//if OSO snapshot, rollback all partitions to 0\n\t\tif tsVbuuid != nil && tsVbuuid.GetSnapType() == common.DISK_SNAP_OSO {\n\t\t\tfor _, partnInst := range partnMap {\n\t\t\t\tpartnId := partnInst.Defn.GetPartitionId()\n\t\t\t\tsc := partnInst.Sc\n\n\t\t\t\tfor _, slice := range sc.GetAllSlices() {\n\t\t\t\t\t_, err := s.rollbackToSnapshot(idxInstId, partnId,\n\t\t\t\t\t\tslice, nil, false)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\tpanic(\"Unable to rollback to 0 - \" + err.Error())\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t\tpartnSnapMap = nil\n\t\t\tbreak\n\t\t}\n\t}\n\n\tbucket, _, _ := SplitKeyspaceId(keyspaceId)\n\tif len(partnSnapMap) != 0 {\n\t\tis := &indexSnapshot{\n\t\t\tinstId: idxInstId,\n\t\t\tts: tsVbuuid,\n\t\t\tpartns: partnSnapMap,\n\t\t}\n\t\tindexSnapMap = s.indexSnapMap.Clone()\n\t\tif snapC == nil {\n\t\t\tsnapC = &IndexSnapshotContainer{snap: is}\n\t\t} else {\n\t\t\tsnapC.Lock()\n\t\t\tsnapC.snap = is\n\t\t\tsnapC.Unlock()\n\t\t}\n\n\t\tindexSnapMap[idxInstId] = snapC\n\t\ts.indexSnapMap.Set(indexSnapMap)\n\t\ts.notifySnapshotCreation(is)\n\t} else {\n\t\tlogging.Infof(\"StorageMgr::updateIndexSnapMapForIndex IndexInst %v Adding Nil Snapshot.\",\n\t\t\tidxInstId)\n\t\ts.addNilSnapshot(idxInstId, bucket)\n\t}\n\n\tif needRestart {\n\t\tos.Exit(1)\n\t}\n}", "func (s *BasePlSqlParserListener) ExitTable_index_clause(ctx *Table_index_clauseContext) {}", "func ClearIndex(c float32) 
{\n\tsyscall.Syscall(gpClearIndex, 1, uintptr(math.Float32bits(c)), 0, 0)\n}", "func (d *Dao) ZRemIdx(c context.Context, category int, id int64) (err error) {\n\tvar (\n\t\tconn = d.redis.Get(c)\n\t\tkey = keyZone(category)\n\t)\n\tif _, err = conn.Do(\"ZREM\", key, id); err != nil {\n\t\tlog.Error(\"conn.Send(ZADD %s - %v) error(%v)\", key, id, err)\n\t}\n\tconn.Close()\n\treturn\n}", "func completeDroppedIndex(\n\tctx context.Context,\n\texecCfg *sql.ExecutorConfig,\n\ttable catalog.TableDescriptor,\n\tindexID descpb.IndexID,\n\tprogress *jobspb.SchemaChangeGCProgress,\n) error {\n\tif err := updateDescriptorGCMutations(ctx, execCfg, table.GetID(), indexID); err != nil {\n\t\treturn errors.Wrapf(err, \"updating GC mutations\")\n\t}\n\n\tmarkIndexGCed(ctx, indexID, progress)\n\n\treturn nil\n}", "func RemoveAtIndex(data interface{}, index int) (interface{}, error) {\n\t// Get concrete value of data\n\tvalue := reflect.ValueOf(data)\n\n\t// Get the type of value\n\tvalueType := value.Type()\n\n\tif valueType.Kind() != reflect.Array && valueType.Kind() != reflect.Slice {\n\t\terr := errors.New(\"Data parameter is not an array or slice\")\n\t\treturn nil, err\n\t}\n\n\tif index >= value.Len() {\n\t\terr := errors.New(\"Index is greater than data length\")\n\t\treturn nil, err\n\t}\n\n\t// Create slice from value\n\tresultSlice := reflect.AppendSlice(value.Slice(0, index), value.Slice(index+1, value.Len()))\n\n\treturn resultSlice.Interface(), nil\n}", "func (m *MockDriver) UseIndexPlaceholders() bool {\n\treturn false\n}", "func (i ImageIndexer) DeprecateFromIndex(request DeprecateFromIndexRequest) error {\n\tbuildDir, outDockerfile, cleanup, err := buildContext(request.Generate, request.OutDockerfile)\n\tdefer cleanup()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tdatabasePath, err := i.ExtractDatabase(buildDir, request.FromIndex, request.CaFile, request.SkipTLSVerify, request.PlainHTTP)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tdeprecateFromRegistryReq := 
registry.DeprecateFromRegistryRequest{\n\t\tBundles: request.Bundles,\n\t\tInputDatabase: databasePath,\n\t\tPermissive: request.Permissive,\n\t\tAllowPackageRemoval: request.AllowPackageRemoval,\n\t}\n\n\t// Deprecate the bundles from the registry\n\terr = i.RegistryDeprecator.DeprecateFromRegistry(deprecateFromRegistryReq)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// generate the dockerfile\n\tdockerfile := i.DockerfileGenerator.GenerateIndexDockerfile(request.BinarySourceImage, databasePath)\n\terr = write(dockerfile, outDockerfile, i.Logger)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif request.Generate {\n\t\treturn nil\n\t}\n\n\t// build the dockerfile with requested tooling\n\terr = build(outDockerfile, request.Tag, i.CommandRunner, i.Logger)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}", "func (v Value) SetIndex(i int, x interface{}) {\n\tpanic(message)\n}", "func (rb *ShardsRecordBuilder) IndexingIndexFailed(indexingindexfailed string) *ShardsRecordBuilder {\n\trb.v.IndexingIndexFailed = &indexingindexfailed\n\treturn rb\n}", "func (w *Writer) writeIndex() (int64, error) {\n\tw.written = true\n\n\tbuf := new(bytes.Buffer)\n\tst := sst.NewWriter(buf)\n\n\tw.spaceIds.Sort()\n\n\t// For each defined space, we index the space's\n\t// byte offset in the file and the length in bytes\n\t// of all data in the space.\n\tfor _, spaceId := range w.spaceIds {\n\t\tb := new(bytes.Buffer)\n\n\t\tbinary.WriteInt64(b, w.spaceOffsets[spaceId])\n\t\tbinary.WriteInt64(b, w.spaceLengths[spaceId])\n\n\t\tif err := st.Set([]byte(spaceId), b.Bytes()); err != nil {\n\t\t\treturn 0, err\n\t\t}\n\t}\n\n\tif err := st.Close(); err != nil {\n\t\treturn 0, err\n\t}\n\n\treturn buf.WriteTo(w.file)\n}", "func (w *Writer) Close() (err error) {\n\tdefer func() {\n\t\tif w.closer == nil {\n\t\t\treturn\n\t\t}\n\t\terr1 := w.closer.Close()\n\t\tif err == nil {\n\t\t\terr = err1\n\t\t}\n\t\tw.closer = nil\n\t}()\n\tif w.err != nil {\n\t\treturn w.err\n\t}\n\n\t// Finish 
the last data block, or force an empty data block if there\n\t// aren't any data blocks at all.\n\tif w.nEntries > 0 || len(w.indexEntries) == 0 {\n\t\tbh, err := w.finishBlock()\n\t\tif err != nil {\n\t\t\tw.err = err\n\t\t\treturn w.err\n\t\t}\n\t\tw.pendingBH = bh\n\t\tw.flushPendingBH(nil)\n\t}\n\n\t// Write the (empty) metaindex block.\n\tmetaindexBlockHandle, err := w.finishBlock()\n\tif err != nil {\n\t\tw.err = err\n\t\treturn w.err\n\t}\n\n\t// Write the index block.\n\t// writer.append uses w.tmp[:3*binary.MaxVarintLen64].\n\ti0, tmp := 0, w.tmp[3*binary.MaxVarintLen64:5*binary.MaxVarintLen64]\n\tfor _, ie := range w.indexEntries {\n\t\tn := encodeBlockHandle(tmp, ie.bh)\n\t\ti1 := i0 + ie.keyLen\n\t\tw.append(w.indexKeys[i0:i1], tmp[:n], true)\n\t\ti0 = i1\n\t}\n\tindexBlockHandle, err := w.finishBlock()\n\tif err != nil {\n\t\tw.err = err\n\t\treturn w.err\n\t}\n\n\t// Write the table footer.\n\tfooter := w.tmp[:footerLen]\n\tfor i := range footer {\n\t\tfooter[i] = 0\n\t}\n\tn := encodeBlockHandle(footer, metaindexBlockHandle)\n\tencodeBlockHandle(footer[n:], indexBlockHandle)\n\tcopy(footer[footerLen-len(magic):], magic)\n\tif _, err := w.writer.Write(footer); err != nil {\n\t\tw.err = err\n\t\treturn w.err\n\t}\n\n\t// Flush the buffer.\n\tif w.bufWriter != nil {\n\t\tif err := w.bufWriter.Flush(); err != nil {\n\t\t\tw.err = err\n\t\t\treturn err\n\t\t}\n\t}\n\n\t// Make any future calls to Set or Close return an error.\n\tw.err = errors.New(\"leveldb/table: writer is closed\")\n\treturn nil\n}", "func blockPadding(offset int64) (n int64) {\n\treturn -offset & (blockSize - 1)\n}", "func (m *MockDBStorage) DeleteIndex(arg0 string, arg1, arg2 common.Resource, arg3 string) (sql.Result, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"DeleteIndex\", arg0, arg1, arg2, arg3)\n\tret0, _ := ret[0].(sql.Result)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func (s *BasePlSqlParserListener) EnterUsing_index_clause(ctx 
*Using_index_clauseContext) {}", "func (app *fetchRegistryBuilder) WithIndex(index IntPointer) FetchRegistryBuilder {\n\tapp.index = index\n\treturn app\n}", "func (t *BenchmarkerChaincode) updateIndex(stub shim.ChaincodeStubInterface, key, indexName string, indexValueSpace [][]string) error {\n\tif indexName == \"\" {\n\t\treturn nil\n\t}\n\n\tvar indexValues []string\n\tfor _, validValues := range indexValueSpace {\n\t\tchoice := rand.Intn(len(validValues))\n\t\tindexValues = append(indexValues, validValues[choice])\n\t}\n\n\tindexKey, err := stub.CreateCompositeKey(indexName+\"~id\", append(indexValues, key))\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tvalue := []byte{0x00}\n\tif err := stub.PutState(indexKey, value); err != nil {\n\t\treturn err\n\t}\n\tfmt.Printf(\"Set composite key '%s' to '%s' for key '%s'\\n\", indexKey, value, key)\n\n\treturn nil\n}", "func (e *Engine) setIndex(index int64) {\n\te.Index = index\n\te.Name = naming.Name(index)\n}", "func ListDiffOutIdx(value tf.DataType) ListDiffAttr {\n\treturn func(m optionalAttr) {\n\t\tm[\"out_idx\"] = value\n\t}\n}", "func (e *EDNS0_PADDING) Option() uint16 { return EDNS0PADDING }", "func (duo *DatumUpdateOne) SetIndex(i int) *DatumUpdateOne {\n\tduo.mutation.ResetIndex()\n\tduo.mutation.SetIndex(i)\n\treturn duo\n}" ]
[ "0.496226", "0.49470598", "0.49359372", "0.4929836", "0.48813668", "0.48754716", "0.48505393", "0.47770718", "0.47507846", "0.4717412", "0.47086638", "0.47060275", "0.46133706", "0.45682114", "0.45536166", "0.45449305", "0.45405245", "0.45287335", "0.45117015", "0.45112503", "0.4510972", "0.44861117", "0.4467708", "0.44529444", "0.44493714", "0.44267884", "0.44216266", "0.44153744", "0.44027048", "0.43976918", "0.43774116", "0.43747947", "0.43645355", "0.4357643", "0.43573222", "0.43363515", "0.43358374", "0.43344334", "0.43200192", "0.4319228", "0.43187538", "0.43155512", "0.43152407", "0.42927882", "0.42908022", "0.42855412", "0.42820108", "0.42651019", "0.42601702", "0.42455602", "0.42396376", "0.423732", "0.42330626", "0.42318133", "0.42210773", "0.42208946", "0.4210823", "0.41934806", "0.41857693", "0.41814056", "0.4176728", "0.4170431", "0.41697422", "0.41654098", "0.41579354", "0.4157296", "0.41569877", "0.415525", "0.41478556", "0.41431764", "0.41425395", "0.41392374", "0.4136004", "0.41348842", "0.4128971", "0.41243795", "0.41213477", "0.41203418", "0.4119939", "0.4100516", "0.40961242", "0.4095788", "0.4088729", "0.40872386", "0.4082269", "0.4080401", "0.4079942", "0.40757367", "0.40735227", "0.40682673", "0.4065863", "0.4059869", "0.4059548", "0.40586948", "0.40577215", "0.40572336", "0.40524673", "0.40519613", "0.40494913", "0.4046687" ]
0.7548068
0
UseIndexCodec sets the codec used for index generation.
UseIndexCodec задаёт кодировку, используемую для генерации индекса.
func UseIndexCodec(c multicodec.Code) Option { return func(o *Options) { o.IndexCodec = c } }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func UseIndex(designDocument, name string) Parameter {\n\treturn func(pa Parameterizable) {\n\t\tif name == \"\" {\n\t\t\tpa.SetParameter(\"use_index\", designDocument)\n\t\t} else {\n\t\t\tpa.SetParameter(\"use_index\", []string{designDocument, name})\n\t\t}\n\t}\n}", "func (o *BlockBasedTableOptions) SetIndexType(value IndexType) {\n\tC.rocksdb_block_based_options_set_index_type(o.c, C.int(value))\n}", "func NewIndexDriver(root string) sql.IndexDriver {\n\treturn NewDriver(root, pilosa.DefaultClient())\n}", "func UseIndex() *ishell.Cmd {\n\n\treturn &ishell.Cmd{\n\t\tName: \"use\",\n\t\tHelp: \"Select index to use for subsequent document operations\",\n\t\tFunc: func(c *ishell.Context) {\n\t\t\tif context == nil {\n\t\t\t\terrorMsg(c, errNotConnected)\n\t\t\t} else {\n\t\t\t\tdefer restorePrompt(c)\n\t\t\t\tif len(c.Args) < 1 {\n\t\t\t\t\tif context.ActiveIndex != \"\" {\n\t\t\t\t\t\tcprintlist(c, \"Using index \", cy(context.ActiveIndex))\n\t\t\t\t\t} else {\n\t\t\t\t\t\tcprintln(c, \"No index is in use\")\n\t\t\t\t\t}\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tif c.Args[0] == \"--\" {\n\t\t\t\t\tif context.ActiveIndex != \"\" {\n\t\t\t\t\t\tcprintlist(c, \"Index \", cy(context.ActiveIndex), \" is no longer in use\")\n\t\t\t\t\t\tcontext.ActiveIndex = \"\"\n\t\t\t\t\t} else {\n\t\t\t\t\t\tcprintln(c, \"No index is in use\")\n\t\t\t\t\t}\n\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\ts, err := context.ResolveAndValidateIndex(c.Args[0])\n\t\t\t\tif err != nil {\n\t\t\t\t\terrorMsg(c, err.Error())\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tcontext.ActiveIndex = s\n\t\t\t\tif s != c.Args[0] {\n\t\t\t\t\tcprintlist(c, \"For alias \", cyb(c.Args[0]), \" selected index \", cy(s))\n\t\t\t\t} else {\n\t\t\t\t\tcprintlist(c, \"Selected index \", cy(s))\n\t\t\t\t}\n\t\t\t}\n\t\t},\n\t}\n\n}", "func WithoutIndex() Option {\n\treturn func(o *Options) {\n\t\to.IndexCodec = index.CarIndexNone\n\t}\n}", "func (s *Store) SetCodec(codec types.Codec) {\n\ts.codec = codec\n}", "func (idx 
*IndexMap) SetIndexType(indtype string) *IndexMap {\n\tidx.IndexType = indtype\n\treturn idx\n}", "func (c *Chip8) SetIndex() {\n\tc.index = c.inst & 0x0FFF\n}", "func WithIndexCtx(ctx context.Context, indexCtx IndexCtx) context.Context {\n\treturn context.WithValue(ctx, indexCtxKey{}, indexCtx)\n}", "func (o *NearestUsingGET1Params) SetIndexType(indexType *string) {\n\to.IndexType = indexType\n}", "func (u UserConfig) IndexType() string {\n\treturn \"hnsw\"\n}", "func (self *FileBaseDataStore) SetIndex(\n\tconfig_obj *api_proto.Config,\n\tindex_urn string,\n\tentity string,\n\tkeywords []string) error {\n\n\tfor _, keyword := range keywords {\n\t\tsubject := path.Join(index_urn, strings.ToLower(keyword), entity)\n\t\terr := writeContentToFile(config_obj, subject, []byte{})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}", "func setIndex(resp http.ResponseWriter, index uint64) {\n\t// If we ever return X-Consul-Index of 0 blocking clients will go into a busy\n\t// loop and hammer us since ?index=0 will never block. 
It's always safe to\n\t// return index=1 since the very first Raft write is always an internal one\n\t// writing the raft config for the cluster so no user-facing blocking query\n\t// will ever legitimately have an X-Consul-Index of 1.\n\tif index == 0 {\n\t\tindex = 1\n\t}\n\tresp.Header().Set(\"X-Consul-Index\", strconv.FormatUint(index, 10))\n}", "func (g *GenOpts) BlobIndex() (string, error) {\n\tbp, err := g.blobIndexPrefix()\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tjk, err := g.jsonKey()\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\ts := bp + jk\n\treturn s, nil\n}", "func EncodingIndexer(encoding string) Indexer {\n\treturn func(r *http.Request) interface{} {\n\t\tp := r.Method\n\t\tif strings.Contains(r.Header.Get(header.AcceptEncoding), encoding) {\n\t\t\tp += \":\" + encoding\n\t\t}\n\t\tp += \":\" + path.Clean(r.URL.Path)\n\t\treturn p\n\t}\n}", "func (m *metricEventDimensions) SetIndex(val *int32) {\n\tm.indexField = val\n}", "func WriteIndex(index common.Index) error {\n\tbytes, err := json.Marshal(index)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = ioutil.WriteFile(indexCachePath, bytes, 0600)\n\treturn err\n}", "func WithIndexBy(val IndexBy) Option {\n\treturn func(o *Options) {\n\t\to.IndexBy = val\n\t}\n}", "func (e *Engine) setIndex(index int64) {\n\te.Index = index\n\te.Name = naming.Name(index)\n}", "func (d *dbBasePostgres) GenerateSpecifyIndex(tableName string, useIndex int, indexes []string) string {\n\tDebugLog.Println(\"[WARN] Not support any specifying index action, so that action is ignored\")\n\treturn ``\n}", "func NewIndexClient(name string, cfg Config, schemaCfg config.SchemaConfig, limits StoreLimits, cm ClientMetrics, ownsTenantFn downloads.IndexGatewayOwnsTenant, registerer prometheus.Registerer) (index.Client, error) {\n\tswitch name {\n\tcase config.StorageTypeInMemory:\n\t\tstore := testutils.NewMockStorage()\n\t\treturn store, nil\n\tcase config.StorageTypeAWS, config.StorageTypeAWSDynamo:\n\t\tif 
cfg.AWSStorageConfig.DynamoDB.URL == nil {\n\t\t\treturn nil, fmt.Errorf(\"Must set -dynamodb.url in aws mode\")\n\t\t}\n\t\tpath := strings.TrimPrefix(cfg.AWSStorageConfig.DynamoDB.URL.Path, \"/\")\n\t\tif len(path) > 0 {\n\t\t\tlevel.Warn(util_log.Logger).Log(\"msg\", \"ignoring DynamoDB URL path\", \"path\", path)\n\t\t}\n\t\treturn aws.NewDynamoDBIndexClient(cfg.AWSStorageConfig.DynamoDBConfig, schemaCfg, registerer)\n\tcase config.StorageTypeGCP:\n\t\treturn gcp.NewStorageClientV1(context.Background(), cfg.GCPStorageConfig, schemaCfg)\n\tcase config.StorageTypeGCPColumnKey, config.StorageTypeBigTable:\n\t\treturn gcp.NewStorageClientColumnKey(context.Background(), cfg.GCPStorageConfig, schemaCfg)\n\tcase config.StorageTypeBigTableHashed:\n\t\tcfg.GCPStorageConfig.DistributeKeys = true\n\t\treturn gcp.NewStorageClientColumnKey(context.Background(), cfg.GCPStorageConfig, schemaCfg)\n\tcase config.StorageTypeCassandra:\n\t\treturn cassandra.NewStorageClient(cfg.CassandraStorageConfig, schemaCfg, registerer)\n\tcase config.StorageTypeBoltDB:\n\t\treturn local.NewBoltDBIndexClient(cfg.BoltDBConfig)\n\tcase config.StorageTypeGrpc:\n\t\treturn grpc.NewStorageClient(cfg.GrpcConfig, schemaCfg)\n\tcase config.BoltDBShipperType:\n\t\tif boltDBIndexClientWithShipper != nil {\n\t\t\treturn boltDBIndexClientWithShipper, nil\n\t\t}\n\n\t\tif shouldUseIndexGatewayClient(cfg.BoltDBShipperConfig.Config) {\n\t\t\tgateway, err := gatewayclient.NewGatewayClient(cfg.BoltDBShipperConfig.IndexGatewayClientConfig, registerer, util_log.Logger)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\n\t\t\tboltDBIndexClientWithShipper = gateway\n\t\t\treturn gateway, nil\n\t\t}\n\n\t\tobjectClient, err := NewObjectClient(cfg.BoltDBShipperConfig.SharedStoreType, cfg, cm)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\ttableRanges := getIndexStoreTableRanges(config.BoltDBShipperType, schemaCfg.Configs)\n\n\t\tboltDBIndexClientWithShipper, err = 
shipper.NewShipper(cfg.BoltDBShipperConfig, objectClient, limits,\n\t\t\townsTenantFn, tableRanges, registerer)\n\n\t\treturn boltDBIndexClientWithShipper, err\n\tdefault:\n\t\treturn nil, fmt.Errorf(\"Unrecognized storage client %v, choose one of: %v, %v, %v, %v, %v, %v\", name, config.StorageTypeAWS, config.StorageTypeCassandra, config.StorageTypeInMemory, config.StorageTypeGCP, config.StorageTypeBigTable, config.StorageTypeBigTableHashed)\n\t}\n}", "func NewIndexResetter(s dbworkerstore.Store, interval time.Duration, metrics *metrics, observationContext *observation.Context) *dbworker.Resetter {\n\treturn dbworker.NewResetter(s, dbworker.ResetterOptions{\n\t\tName: \"precise_code_intel_index_worker_resetter\",\n\t\tInterval: interval,\n\t\tMetrics: dbworker.ResetterMetrics{\n\t\t\tRecordResets: metrics.numIndexResets,\n\t\t\tRecordResetFailures: metrics.numIndexResetFailures,\n\t\t\tErrors: metrics.numErrors,\n\t\t},\n\t})\n}", "func (pw *PixelWand) SetIndex(index *IndexPacket) {\n\tC.PixelSetIndex(pw.pw, C.IndexPacket(*index))\n\truntime.KeepAlive(pw)\n}", "func (wouo *WorkOrderUpdateOne) SetIndex(i int) *WorkOrderUpdateOne {\n\twouo.index = &i\n\twouo.addindex = nil\n\treturn wouo\n}", "func CompressIndex(ctx context.Context, dbo Database) error {\n\tdb := dbo.(*database)\n\tsql := db.getRawDB()\n\n\tconn, err := sql.Conn(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer conn.Close()\n\n\ttx, err := conn.BeginTx(ctx, nil)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer func() {\n\t\tif tx != nil {\n\t\t\ttx.Rollback()\n\t\t}\n\t}()\n\n\t_, err = tx.ExecContext(ctx, `update docs set txt=compress(txt) where not iscompressed(txt)`)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = tx.Commit()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\ttx = nil\n\treturn nil\n}", "func (wou *WorkOrderUpdate) SetIndex(i int) *WorkOrderUpdate {\n\twou.index = &i\n\twou.addindex = nil\n\treturn wou\n}", "func (defintion *IndexDefinition) SetIndexOn(value IndexType) (outDef 
*IndexDefinition) {\n\toutDef = defintion\n\toutDef.IndexOn = value.String()\n\treturn\n}", "func (d *dbBase) GenerateSpecifyIndex(tableName string, useIndex int, indexes []string) string {\n\tvar s []string\n\tQ := d.TableQuote()\n\tfor _, index := range indexes {\n\t\ttmp := fmt.Sprintf(`%s%s%s`, Q, index, Q)\n\t\ts = append(s, tmp)\n\t}\n\n\tvar useWay string\n\n\tswitch useIndex {\n\tcase hints.KeyUseIndex:\n\t\tuseWay = `USE`\n\tcase hints.KeyForceIndex:\n\t\tuseWay = `FORCE`\n\tcase hints.KeyIgnoreIndex:\n\t\tuseWay = `IGNORE`\n\tdefault:\n\t\tDebugLog.Println(\"[WARN] Not a valid specifying action, so that action is ignored\")\n\t\treturn ``\n\t}\n\n\treturn fmt.Sprintf(` %s INDEX(%s) `, useWay, strings.Join(s, `,`))\n}", "func NewIndexClient(name string, cfg Config, schemaCfg chunk.SchemaConfig, registerer prometheus.Registerer) (chunk.IndexClient, error) {\n\tif indexClientFactory, ok := customIndexStores[name]; ok {\n\t\tif indexClientFactory.indexClientFactoryFunc != nil {\n\t\t\treturn indexClientFactory.indexClientFactoryFunc()\n\t\t}\n\t}\n\n\tswitch name {\n\tcase StorageTypeInMemory:\n\t\tstore := chunk.NewMockStorage()\n\t\treturn store, nil\n\tcase StorageTypeAWS, StorageTypeAWSDynamo:\n\t\tif cfg.AWSStorageConfig.DynamoDB.URL == nil {\n\t\t\treturn nil, fmt.Errorf(\"Must set -dynamodb.url in aws mode\")\n\t\t}\n\t\tpath := strings.TrimPrefix(cfg.AWSStorageConfig.DynamoDB.URL.Path, \"/\")\n\t\tif len(path) > 0 {\n\t\t\tlevel.Warn(util_log.Logger).Log(\"msg\", \"ignoring DynamoDB URL path\", \"path\", path)\n\t\t}\n\t\treturn aws.NewDynamoDBIndexClient(cfg.AWSStorageConfig.DynamoDBConfig, schemaCfg, registerer)\n\tcase StorageTypeGCP:\n\t\treturn gcp.NewStorageClientV1(context.Background(), cfg.GCPStorageConfig, schemaCfg)\n\tcase StorageTypeGCPColumnKey, StorageTypeBigTable:\n\t\treturn gcp.NewStorageClientColumnKey(context.Background(), cfg.GCPStorageConfig, schemaCfg)\n\tcase StorageTypeBigTableHashed:\n\t\tcfg.GCPStorageConfig.DistributeKeys = 
true\n\t\treturn gcp.NewStorageClientColumnKey(context.Background(), cfg.GCPStorageConfig, schemaCfg)\n\tcase StorageTypeCassandra:\n\t\treturn cassandra.NewStorageClient(cfg.CassandraStorageConfig, schemaCfg, registerer)\n\tcase StorageTypeBoltDB:\n\t\treturn local.NewBoltDBIndexClient(cfg.BoltDBConfig)\n\tcase StorageTypeGrpc:\n\t\treturn grpc.NewStorageClient(cfg.GrpcConfig, schemaCfg)\n\tdefault:\n\t\treturn nil, fmt.Errorf(\"Unrecognized storage client %v, choose one of: %v, %v, %v, %v, %v, %v\", name, StorageTypeAWS, StorageTypeCassandra, StorageTypeInMemory, StorageTypeGCP, StorageTypeBigTable, StorageTypeBigTableHashed)\n\t}\n}", "func (dagOpts) StoreCodec(codec string) DagPutOption {\n\treturn func(opts *DagPutSettings) error {\n\t\topts.StoreCodec = codec\n\t\treturn nil\n\t}\n}", "func NewIndex(addr, name, typ string, md *index.Metadata) (*Index, error) {\n\n\tfmt.Println(\"Get a new index: \", addr, name)\n client := &http.Client{\n\t\tTransport: &http.Transport{\n\t\t\t//MaxIdleConnsPerHost: 200,\n\t\t\tMaxIdleConnsPerHost: 2000000,\n\t\t},\n\t\tTimeout: 2500000 * time.Millisecond,\n\t}\n\tconn, err := elastic.NewClient(elastic.SetURL(addr), elastic.SetHttpClient(client))\n\tif err != nil {\n fmt.Println(\"Get error here\");\n\t\treturn nil, err\n\t}\n\tret := &Index{\n\t\tconn: conn,\n\t\tmd: md,\n\t\tname: name,\n\t\ttyp: typ,\n\t}\n fmt.Println(\"get here ======\");\n\n\treturn ret, nil\n\n}", "func UseIndexPadding(p uint64) Option {\n\treturn func(o *Options) {\n\t\to.IndexPadding = p\n\t}\n}", "func NewAutoincrementIndex(o ...option.Option) index.Index {\n\topts := &option.Options{}\n\tfor _, opt := range o {\n\t\topt(opts)\n\t}\n\n\tu := &Autoincrement{\n\t\tindexBy: opts.IndexBy,\n\t\ttypeName: opts.TypeName,\n\t\tfilesDir: opts.FilesDir,\n\t\tbound: opts.Bound,\n\t\tindexBaseDir: path.Join(opts.DataDir, \"index.cs3\"),\n\t\tindexRootDir: path.Join(path.Join(opts.DataDir, \"index.cs3\"), strings.Join([]string{\"autoincrement\", opts.TypeName, 
opts.IndexBy}, \".\")),\n\t\tcs3conf: &Config{\n\t\t\tProviderAddr: opts.ProviderAddr,\n\t\t\tDataURL: opts.DataURL,\n\t\t\tDataPrefix: opts.DataPrefix,\n\t\t\tJWTSecret: opts.JWTSecret,\n\t\t\tServiceUser: opts.ServiceUser,\n\t\t},\n\t\tdataProvider: dataProviderClient{\n\t\t\tbaseURL: singleJoiningSlash(opts.DataURL, opts.DataPrefix),\n\t\t\tclient: http.Client{\n\t\t\t\tTransport: http.DefaultTransport,\n\t\t\t},\n\t\t},\n\t}\n\n\treturn u\n}", "func (db *DB) Index(ctx context.Context, i services.Consumable) error {\n\tvar (\n\t\terr error\n\t\tjob = db.stream.NewJob(\"index\")\n\t\tsess = db.db.NewSession(job)\n\t)\n\tjob.KeyValue(\"id\", i.ID())\n\tjob.KeyValue(\"chain_id\", i.ChainID())\n\n\tdefer func() {\n\t\tif err != nil {\n\t\t\tjob.CompleteKv(health.Error, health.Kvs{\"err\": err.Error()})\n\t\t\treturn\n\t\t}\n\t\tjob.Complete(health.Success)\n\t}()\n\n\t// Create db tx\n\tvar dbTx *dbr.Tx\n\tdbTx, err = sess.Begin()\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer dbTx.RollbackUnlessCommitted()\n\n\t// Ingest the tx and commit\n\terr = db.ingestTx(services.NewConsumerContext(ctx, job, dbTx, i.Timestamp()), i.Body())\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = dbTx.Commit()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}", "func (t Ticker) IsIndex() bool {\r\n\treturn strings.Contains(t.Exchange, \"INDEX\")\r\n}", "func ShowIndex(ctx context.Context, db QueryExecutor, schemaName string, table string) ([]*IndexInfo, error) {\n\t/*\n\t\tshow index example result:\n\t\tmysql> show index from test;\n\t\t+-------+------------+----------+--------------+-------------+-----------+-------------+----------+--------+------+------------+---------+---------------+\n\t\t| Table | Non_unique | Key_name | Seq_in_index | Column_name | Collation | Cardinality | Sub_part | Packed | Null | Index_type | Comment | Index_comment 
|\n\t\t+-------+------------+----------+--------------+-------------+-----------+-------------+----------+--------+------+------------+---------+---------------+\n\t\t| test | 0 | PRIMARY | 1 | id | A | 0 | NULL | NULL | | BTREE | | |\n\t\t| test | 0 | aid | 1 | aid | A | 0 | NULL | NULL | YES | BTREE | | |\n\t\t+-------+------------+----------+--------------+-------------+-----------+-------------+----------+--------+------+------------+---------+---------------+\n\t*/\n\tindices := make([]*IndexInfo, 0, 3)\n\tquery := fmt.Sprintf(\"SHOW INDEX FROM %s\", TableName(schemaName, table))\n\trows, err := db.QueryContext(ctx, query)\n\tif err != nil {\n\t\treturn nil, errors.Trace(err)\n\t}\n\tdefer rows.Close()\n\n\tfor rows.Next() {\n\t\tfields, err1 := ScanRow(rows)\n\t\tif err1 != nil {\n\t\t\treturn nil, errors.Trace(err1)\n\t\t}\n\t\tseqInIndex, err1 := strconv.Atoi(string(fields[\"Seq_in_index\"].Data))\n\t\tif err1 != nil {\n\t\t\treturn nil, errors.Trace(err1)\n\t\t}\n\t\tcardinality, err1 := strconv.Atoi(string(fields[\"Cardinality\"].Data))\n\t\tif err1 != nil {\n\t\t\treturn nil, errors.Trace(err1)\n\t\t}\n\t\tindex := &IndexInfo{\n\t\t\tTable: string(fields[\"Table\"].Data),\n\t\t\tNoneUnique: string(fields[\"Non_unique\"].Data) == \"1\",\n\t\t\tKeyName: string(fields[\"Key_name\"].Data),\n\t\t\tColumnName: string(fields[\"Column_name\"].Data),\n\t\t\tSeqInIndex: seqInIndex,\n\t\t\tCardinality: cardinality,\n\t\t}\n\t\tindices = append(indices, index)\n\t}\n\n\treturn indices, nil\n}", "func (i *Index) Encode() (string, error) {\n\tout, err := yaml.Marshal(i)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\treturn string(out), nil\n}", "func (o *EmsEventCollectionGetParams) SetIndex(index *int64) {\n\to.Index = index\n}", "func (mc *MockContiv) SetContainerIndex(ci *containeridx.ConfigIndex) {\n\tmc.containerIndex = ci\n}", "func WithCodec(codec eh.EventCodec) Option {\n\treturn func(b *EventBus) error {\n\t\tb.codec = codec\n\n\t\treturn nil\n\t}\n}", 
"func (i indexer) Index(ctx context.Context, req IndexQuery) (\n\tresp *IndexResult, err error) {\n\n\tlog.Info(\"index [%v] root [%v] len_dirs=%v len_files=%v\",\n\t\treq.Key, req.Root, len(req.Dirs), len(req.Files))\n\tstart := time.Now()\n\t// Setup the response\n\tresp = NewIndexResult()\n\tif err = req.Normalize(); err != nil {\n\t\tlog.Info(\"index [%v] error: %v\", req.Key, err)\n\t\tresp.Error = errs.NewStructError(err)\n\t\treturn\n\t}\n\n\t// create index shards\n\tvar nshards int\n\tif nshards = i.cfg.NumShards; nshards == 0 {\n\t\tnshards = 1\n\t}\n\tnshards = utils.MinInt(nshards, maxShards)\n\ti.shards = make([]index.IndexWriter, nshards)\n\ti.root = getRoot(i.cfg, &req)\n\n\tfor n := range i.shards {\n\t\tname := path.Join(i.root, shardName(req.Key, n))\n\t\tixw, err := getIndexWriter(ctx, name)\n\t\tif err != nil {\n\t\t\tresp.Error = errs.NewStructError(err)\n\t\t\treturn resp, nil\n\t\t}\n\t\ti.shards[n] = ixw\n\t}\n\n\tfs := getFileSystem(ctx, i.root)\n\trepo := newRepoFromQuery(&req, i.root)\n\trepo.SetMeta(i.cfg.RepoMeta, req.Meta)\n\tresp.Repo = repo\n\n\t// Add query Files and scan Dirs for files to index\n\tnames, err := i.scanner(fs, &req)\n\tch := make(chan int, nshards)\n\tchnames := make(chan string, 100)\n\tgo func() {\n\t\tfor _, name := range names {\n\t\t\tchnames <- name\n\t\t}\n\t\tclose(chnames)\n\t}()\n\treqch := make(chan par.RequestFunc, nshards)\n\tfor _, shard := range i.shards {\n\t\treqch <- indexShard(&i, &req, shard, fs, chnames, ch)\n\t}\n\tclose(reqch)\n\terr = par.Requests(reqch).WithConcurrency(nshards).DoWithContext(ctx)\n\tclose(ch)\n\n\t// Await results, each indicating the number of files scanned\n\tfor num := range ch {\n\t\trepo.NumFiles += num\n\t}\n\n\trepo.NumShards = len(i.shards)\n\t// Flush our index shard files\n\tfor _, shard := range i.shards {\n\t\tshard.Flush()\n\t\trepo.SizeIndex += ByteSize(shard.IndexBytes())\n\t\trepo.SizeData += ByteSize(shard.DataBytes())\n\t\tlog.Debug(\"index flush %v (data) 
%v (index)\",\n\t\t\trepo.SizeData, repo.SizeIndex)\n\t}\n\trepo.ElapsedIndexing = time.Since(start)\n\trepo.TimeUpdated = time.Now().UTC()\n\n\tvar msg string\n\tif err != nil {\n\t\trepo.State = ERROR\n\t\tresp.SetError(err)\n\t\tmsg = \"error: \" + resp.Error.Error()\n\t} else {\n\t\trepo.State = OK\n\t\tmsg = \"ok \" + fmt.Sprintf(\n\t\t\t\"(%v files, %v data, %v index)\",\n\t\t\trepo.NumFiles, repo.SizeData, repo.SizeIndex)\n\t}\n\tlog.Info(\"index [%v] %v [%v]\", req.Key, msg, repo.ElapsedIndexing)\n\treturn\n}", "func (s *BasevhdlListener) EnterIndex_specification(ctx *Index_specificationContext) {}", "func (idx *ManualIndex) Index() error {\n\tvar buf bytes.Buffer\n\n\tfor pkg := range idx.packages {\n\t\t_, err := fmt.Fprintf(&buf, \"\\x00%s\", pkg)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tidx.index = suffixarray.New(buf.Bytes())\n\treturn nil\n}", "func (engine *Engine) Index(docId uint64, data types.DocData,\n\tforceUpdate ...bool) {\n\n\tvar force bool\n\tif len(forceUpdate) > 0 {\n\t\tforce = forceUpdate[0]\n\t}\n\n\t// if engine.HasDoc(docId) {\n\t// \tengine.RemoveDoc(docId)\n\t// }\n\n\t// data.Tokens\n\tengine.internalIndexDoc(docId, data, force)\n\n\thash := murmur.Sum32(fmt.Sprintf(\"%d\", docId)) %\n\t\tuint32(engine.initOptions.StoreShards)\n\n\tif engine.initOptions.UseStore && docId != 0 {\n\t\tengine.storeIndexDocChans[hash] <- storeIndexDocReq{\n\t\t\tdocId: docId, data: data}\n\t}\n}", "func Codec(contentType string, c encoding.Codec) client.Option {\n\treturn func(o *client.Options) {\n\t\tcodecs := make(map[string]encoding.Codec)\n\t\tif o.Context == nil {\n\t\t\to.Context = context.Background()\n\t\t}\n\t\tif v := o.Context.Value(codecsKey{}); v != nil {\n\t\t\tcodecs = v.(map[string]encoding.Codec)\n\t\t}\n\t\tcodecs[contentType] = c\n\t\to.Context = context.WithValue(o.Context, codecsKey{}, codecs)\n\t}\n}", "func (s *ChartStreamServer) IndexHandler(c *gin.Context) {\n\tindex, err := s.chartProvider.GetIndexFile()\n\tif 
err != nil {\n\t\tc.AbortWithError(500, err)\n\t}\n\n\tc.YAML(200, index)\n}", "func NewIndex(f *os.File, c Config) (*Index, error) {\n\tidx := &Index{\n\t\tfile: f,\n\t}\n\n\tfi, err := os.Stat(f.Name())\n\tif err != nil {\n\t\treturn nil, lib.Wrap(err, \"Unable to get file stats\")\n\t}\n\n\tidx.size = uint64(fi.Size())\n\tif err = os.Truncate(\n\t\tf.Name(), int64(c.Segment.MaxIndexBytes),\n\t); err != nil {\n\t\treturn nil, lib.Wrap(err, \"Unable to truncate file\")\n\t}\n\n\tif idx.mmap, err = gommap.Map(\n\t\tidx.file.Fd(),\n\t\tgommap.PROT_READ|gommap.PROT_WRITE,\n\t\tgommap.MAP_SHARED,\n\t); err != nil {\n\t\treturn nil, lib.Wrap(err, \"Unable to create gommap map\")\n\t}\n\n\treturn idx, nil\n}", "func (b *mysql) Index(table *Table, index *Index) string {\n\tlog.Printf(\"create index:%+v\", index)\n\tvar obj = \"INDEX\"\n\tif index.Unique {\n\t\tobj = \"UNIQUE INDEX\"\n\t}\n\treturn fmt.Sprintf(\"CREATE %s %s ON %s (%s);\", obj, index.Name, table.Name, b.columns(nil, index.Fields, true, false, false))\n}", "func indexHandler(w http.ResponseWriter, req *http.Request) {\n\tlayout, err := template.ParseFile(PATH_PUBLIC + TEMPLATE_LAYOUT)\n\tif err != nil {\n\t\thttp.Error(w, ERROR_TEMPLATE_NOT_FOUND, http.StatusNotFound)\n\t\treturn\n\t}\n\tindex, err := template.ParseFile(PATH_PUBLIC + TEMPLATE_INDEX)\n\t//artical, err := template.ParseFile(PATH_PUBLIC + TEMPLATE_ARTICAL)\n\tif err != nil {\n\t\thttp.Error(w, ERROR_TEMPLATE_NOT_FOUND, http.StatusNotFound)\n\t\treturn\n\t}\n\tmapOutput := map[string]interface{}{\"Title\": \"炫酷的网站技术\" + TITLE, \"Keyword\": KEYWORD, \"Description\": DESCRIPTION, \"Base\": BASE_URL, \"Url\": BASE_URL, \"Carousel\": getAddition(PREFIX_INDEX), \"Script\": getAddition(PREFIX_SCRIPT), \"Items\": leveldb.GetRandomContents(20, &Filter{})}\n\tcontent := []byte(index.RenderInLayout(layout, mapOutput))\n\tw.Write(content)\n\tgo cacheFile(\"index\", content)\n}", "func (i *IndexDB) SetIndex(ctx context.Context, pn insolar.PulseNumber, 
bucket record.Index) error {\n\ti.lock.Lock()\n\tdefer i.lock.Unlock()\n\n\terr := i.setBucket(pn, bucket.ObjID, &bucket)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tstats.Record(ctx, statIndexesAddedCount.M(1))\n\n\tinslogger.FromContext(ctx).Debugf(\"[SetIndex] bucket for obj - %v was set successfully. Pulse: %d\", bucket.ObjID.DebugString(), pn)\n\n\treturn nil\n}", "func (api *API) GetIndex(w http.ResponseWriter, r *http.Request) {\n\n\tinfo := Info{Port: api.Session.Config.API.Port, Versions: Version}\n\td := Metadata{Info: info}\n\n\tres := CodeToResult[CodeOK]\n\tres.Data = d\n\tres.Message = \"Documentation available at https://github.com/netm4ul/netm4ul\"\n\tw.WriteHeader(res.HTTPCode)\n\tjson.NewEncoder(w).Encode(res)\n}", "func (i *Index) Encode() []byte {\n\tvar buf bytes.Buffer\n\t_ = gob.NewEncoder(&buf).Encode(i)\n\treturn buf.Bytes()\n}", "func indexEnc() {\n\tfor i := 0; i < indexSize; i++ {\n\t\tindexItemEnc(testData[i], i)\n\t}\n}", "func (a *TarArchiver) Index(fn func(k string) error) error {\n\treturn fn(slashpath.Join(a.keyPrefix, TarArchiverKey))\n}", "func MakeIndex() error {\n\n\treturn nil\n}", "func Codec(contentType string, c codec.Codec) Option {\n\treturn func(o *Options) {\n\t\to.Codecs[contentType] = c\n\t}\n}", "func (bA *CompactBitArray) SetIndex(i int, v bool) bool {\n\tif bA == nil {\n\t\treturn false\n\t}\n\n\tif i < 0 || i >= bA.Count() {\n\t\treturn false\n\t}\n\n\tif v {\n\t\tbA.Elems[i>>3] |= (1 << uint8(7-(i%8)))\n\t} else {\n\t\tbA.Elems[i>>3] &= ^(1 << uint8(7-(i%8)))\n\t}\n\n\treturn true\n}", "func (es *Connection) Index(\n\tindex, docType, id string,\n\tparams map[string]string,\n\tbody interface{},\n) (int, *QueryResult, error) {\n\tmethod := \"PUT\"\n\tif id == \"\" {\n\t\tmethod = \"POST\"\n\t}\n\treturn withQueryResult(es.apiCall(method, index, docType, id, \"\", params, body))\n}", "func CacheIndex() ReadOption {\n\treturn func(r *Reader) {\n\t\tr.cacheIndex = true\n\t}\n}", "func (gen *AddressGenerator) 
SetIndex(i uint) *AddressGenerator {\n\tgen.state = addressState(i)\n\treturn gen\n}", "func loadIndex(ctx context.Context, repo restic.Repository, id restic.ID) (*index.Index, error) {\n\tbuf, err := repo.LoadUnpacked(ctx, restic.IndexFile, id)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tidx, oldFormat, err := index.DecodeIndex(buf, id)\n\tif oldFormat {\n\t\tfmt.Fprintf(os.Stderr, \"index %v has old format\\n\", id.Str())\n\t}\n\treturn idx, err\n}", "func NewIndex(mapping IndexMapping, opts ...IndexOption) *Index {\n\tindex := &Index{\n\t\tIndexMapping: mapping,\n\t\tpopulateBatchSize: defaultPopulateBatchSize,\n\t}\n\n\tfor _, opt := range opts {\n\t\topt(index)\n\t}\n\n\treturn index\n}", "func (o *Output) WriteIndex(ctx context.Context, cluster string, timestamp time.Time, clusterSummary *api.ClusterSummary) error {\n\tbuffer, err := o.exporter.ExportIndex(ctx, clusterSummary)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfullpath := path.Join(o.path, \"index\", fmt.Sprintf(\"%s%s\", cluster, o.exporter.FileExtension()))\n\tinfo, err := os.Stat(fullpath)\n\n\tif os.IsNotExist(err) {\n\t\treturn writeBufferToPath(fullpath, buffer)\n\t} else if err != nil {\n\t\treturn err\n\t} else if info.IsDir() {\n\t\treturn fmt.Errorf(\"%q is an existing directory\", fullpath)\n\t} else {\n\t\tlog.Printf(\"%q is an existing index, overwriting...\", fullpath)\n\t\treturn writeBufferToPath(fullpath, buffer)\n\t}\n}", "func (c AppConfig) IndexGen() int {\n\tval, ok := c.ConfigVars[\"IndexGen\"]\n\tif !ok {\n\t\tval = \"0\"\n\t\tlog.Printf(\"config.IndexGen: no value found for IndexGen using %s\", val)\n\t}\n\tgen, err := strconv.Atoi(val)\n\tif err != nil {\n\t\tgen = 0\n\t\tlog.Printf(\"config.IndexGen: bad value %s found for IndexGen using %d\", val, gen)\n\t}\n\treturn gen\n}", "func (rc *Cache) PutIndex(key, name string) error {\n\tvar err error\n\tif _, err = rc.do(\"HSET\", key, name, \"1\"); err != nil {\n\t\treturn err\n\t}\n\treturn err\n}", "func 
New(indexRegistry *registry.IndexRegistry, options ...func(*Index)) (I *Index, err error) {\n\tI = &Index{\n\t\tindexRegistry: indexRegistry,\n\t}\n\n\tfor _, option := range options {\n\t\toption(I)\n\t}\n\n\treturn\n}", "func (as *API) Index(ctx context.Context, req *pbreq.Index) (*pbresp.Index, error) {\n\tswitch req.GetType() {\n\tcase \"ipld\":\n\t\tbreak\n\tdefault:\n\t\treturn nil, fmt.Errorf(\"invalid data type '%s'\", req.GetType())\n\t}\n\n\tvar name = req.GetIdentifier()\n\tvar reindex = req.GetReindex()\n\tmetaData, err := as.lens.Magnify(name, reindex)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to perform indexing for '%s': %s\",\n\t\t\tname, err.Error())\n\t}\n\n\tvar resp *lens.Object\n\tif !reindex {\n\t\tif resp, err = as.lens.Store(name, metaData); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t} else {\n\t\tb, err := as.lens.Get(name)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"failed to find ID for object '%s'\", name)\n\t\t}\n\t\tid, err := uuid.FromBytes(b)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"invalid uuid found for '%s' ('%s'): %s\",\n\t\t\t\tname, string(b), err.Error())\n\t\t}\n\t\tif resp, err = as.lens.Update(id, name, metaData); err != nil {\n\t\t\treturn nil, fmt.Errorf(\"failed to update object: %s\", err.Error())\n\t\t}\n\t}\n\n\treturn &pbresp.Index{\n\t\tId: resp.LensID.String(),\n\t\tKeywords: metaData.Summary,\n\t}, nil\n}", "func (app *fetchRegistryBuilder) WithIndex(index IntPointer) FetchRegistryBuilder {\n\tapp.index = index\n\treturn app\n}", "func (es *ElasticSearch) Index(esIndex string, esType string, body interface{}) {\n\t// Add a document to the index\n\t_, err := client.Index().\n\t\tIndex(esIndex).\n\t\tType(\"project\").\n\t\tBodyJson(body).\n\t\tRefresh(true).\n\t\tDo()\n\tif err != nil {\n\t\t// TODO: Handle error\n\t\tpanic(err)\n\t}\n}", "func (m *MonkeyWrench) ReadUsingIndex(table, index string, keys []spanner.KeySet, columns []string) ([]*spanner.Row, error) {\n\t// Default 
to all keys.\n\tvar spannerKeys = spanner.AllKeys()\n\n\t// If we have some specified keys, use those instead.\n\tif len(keys) > 0 {\n\t\tspannerKeys = spanner.KeySets(keys...)\n\t}\n\n\t// Execute the query.\n\titer := m.Client.Single().ReadUsingIndex(m.Context, table, index, spannerKeys, columns)\n\treturn getResultSlice(iter)\n}", "func ConvertToIndexUsage(defn *common.IndexDefn, localMeta *LocalIndexMetadata) (*IndexUsage, error) {\n\n\t// find the topology metadata\n\ttopology := findTopologyByBucket(localMeta.IndexTopologies, defn.Bucket)\n\tif topology == nil {\n\t\tlogging.Errorf(\"Planner::getIndexLayout: Fail to find index topology for bucket %v.\", defn.Bucket)\n\t\treturn nil, nil\n\t}\n\n\t// find the index instance from topology metadata\n\tinst := topology.GetIndexInstByDefn(defn.DefnId)\n\tif inst == nil {\n\t\tlogging.Errorf(\"Planner::getIndexLayout: Fail to find index instance for definition %v.\", defn.DefnId)\n\t\treturn nil, nil\n\t}\n\n\t// Check the index state. Only handle index that is active or being built.\n\t// For index that is in the process of being deleted, planner expects the resource\n\t// will eventually be freed, so it won't included in planning.\n\tstate, _ := topology.GetStatusByDefn(defn.DefnId)\n\tif state != common.INDEX_STATE_CREATED &&\n\t\tstate != common.INDEX_STATE_DELETED &&\n\t\tstate != common.INDEX_STATE_ERROR &&\n\t\tstate != common.INDEX_STATE_NIL {\n\n\t\t// create an index usage object\n\t\tindex := newIndexUsage(defn.DefnId, common.IndexInstId(inst.InstId), defn.Name, defn.Bucket)\n\n\t\t// index is pinned to a node\n\t\tif len(defn.Nodes) != 0 {\n\t\t\tindex.Hosts = defn.Nodes\n\t\t}\n\n\t\t// update sizing\n\t\tindex.IsPrimary = defn.IsPrimary\n\t\tindex.IsMOI = (defn.Using == common.IndexType(common.MemoryOptimized) || defn.Using == common.IndexType(common.MemDB))\n\t\tindex.NoUsage = defn.Deferred && state == common.INDEX_STATE_READY\n\n\t\t// Is the index being deleted by user? 
Thsi will read the delete token from metakv. If untable read from metakv,\n\t\t// pendingDelete is false (cannot assert index is to-be-delete).\n\t\tpendingDelete, err := client.DeleteCommandTokenExist(defn.DefnId)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tindex.pendingDelete = pendingDelete\n\n\t\t// update internal info\n\t\tindex.Instance = &common.IndexInst{\n\t\t\tInstId: common.IndexInstId(inst.InstId),\n\t\t\tDefn: *defn,\n\t\t\tState: common.IndexState(inst.State),\n\t\t\tStream: common.StreamId(inst.StreamId),\n\t\t\tError: inst.Error,\n\t\t\tReplicaId: int(inst.ReplicaId),\n\t\t\tVersion: int(inst.Version),\n\t\t\tRState: common.RebalanceState(inst.RState),\n\t\t}\n\n\t\tlogging.Debugf(\"Create Index usage %v %v %v %v\", index.Name, index.Bucket, index.Instance.InstId, index.Instance.ReplicaId)\n\n\t\treturn index, nil\n\t}\n\n\treturn nil, nil\n}", "func (o SecondaryIndexOutput) IndexType() pulumi.StringOutput {\n\treturn o.ApplyT(func(v *SecondaryIndex) pulumi.StringOutput { return v.IndexType }).(pulumi.StringOutput)\n}", "func (c ClientWrapper) Index() es.IndexService {\n\tr := elastic.NewBulkIndexRequest()\n\treturn WrapESIndexService(r, c.bulkService, c.esVersion)\n}", "func (o *LogQueryDefinition) SetIndex(v string) {\n\to.Index = &v\n}", "func (stqu *SurveyTemplateQuestionUpdate) SetIndex(i int) *SurveyTemplateQuestionUpdate {\n\tstqu.index = &i\n\tstqu.addindex = nil\n\treturn stqu\n}", "func (h *indexHandler) Index() gin.HandlerFunc {\n\treturn func(context *gin.Context) {\n\t\tvar requestFromJson indexRequest\n\n\t\tif err := context.ShouldBindJSON(&requestFromJson); nil != err {\n\t\t\th.errorDispatcher.Dispatch(context, err)\n\n\t\t\treturn\n\t\t}\n\n\t\tvar payload *index.Index = h.indexBuilder.Build(\n\t\t\trequestFromJson.BuilderContext,\n\t\t\trequestFromJson.Locale,\n\t\t)\n\n\t\tcontext.JSON(\n\t\t\thttp.StatusOK,\n\t\t\t&indexResponse{response.NewOkResponse(), *payload},\n\t\t)\n\t}\n}", "func (i ImageIndexer) 
ExportFromIndex(request ExportFromIndexRequest) error {\n\t// set a temp directory\n\tworkingDir, err := ioutil.TempDir(\"./\", tmpDirPrefix)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer os.RemoveAll(workingDir)\n\n\t// extract the index database to the file\n\tdatabaseFile, err := i.getDatabaseFile(workingDir, request.Index, request.CaFile, request.SkipTLSVerify, request.PlainHTTP)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tdb, err := sqlite.Open(databaseFile)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer db.Close()\n\n\tdbQuerier := sqlite.NewSQLLiteQuerierFromDb(db)\n\n\t// fetch all packages from the index image if packages is empty\n\tif len(request.Packages) == 0 {\n\t\trequest.Packages, err = dbQuerier.ListPackages(context.TODO())\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tbundles, err := getBundlesToExport(dbQuerier, request.Packages)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\ti.Logger.Infof(\"Preparing to pull bundles %+q\", bundles)\n\n\t// Creating downloadPath dir\n\tif err := os.MkdirAll(request.DownloadPath, 0777); err != nil {\n\t\treturn err\n\t}\n\n\tvar errs []error\n\tvar wg sync.WaitGroup\n\twg.Add(len(bundles))\n\tvar mu = &sync.Mutex{}\n\n\tsem := make(chan struct{}, concurrencyLimitForExport)\n\n\tfor bundleImage, bundleDir := range bundles {\n\t\tgo func(bundleImage string, bundleDir bundleDirPrefix) {\n\t\t\tdefer wg.Done()\n\n\t\t\tsem <- struct{}{}\n\t\t\tdefer func() {\n\t\t\t\t<-sem\n\t\t\t}()\n\n\t\t\t// generate a random folder name if bundle version is empty\n\t\t\tif bundleDir.bundleVersion == \"\" {\n\t\t\t\tbundleDir.bundleVersion = strconv.Itoa(rand.Intn(10000))\n\t\t\t}\n\t\t\texporter := bundle.NewExporterForBundle(bundleImage, filepath.Join(request.DownloadPath, bundleDir.pkgName, bundleDir.bundleVersion), request.ContainerTool)\n\t\t\tif err := exporter.Export(request.SkipTLSVerify, request.PlainHTTP); err != nil {\n\t\t\t\terr = fmt.Errorf(\"exporting bundle image:%s failed with %s\", bundleImage, 
err)\n\t\t\t\tmu.Lock()\n\t\t\t\terrs = append(errs, err)\n\t\t\t\tmu.Unlock()\n\t\t\t}\n\t\t}(bundleImage, bundleDir)\n\t}\n\t// Wait for all the go routines to finish export\n\twg.Wait()\n\n\tif errs != nil {\n\t\treturn utilerrors.NewAggregate(errs)\n\t}\n\n\tfor _, packageName := range request.Packages {\n\t\terr := generatePackageYaml(dbQuerier, packageName, filepath.Join(request.DownloadPath, packageName))\n\t\tif err != nil {\n\t\t\terrs = append(errs, err)\n\t\t}\n\t}\n\treturn utilerrors.NewAggregate(errs)\n}", "func (r *Search) Index(index string) *Search {\n\tr.paramSet |= indexMask\n\tr.index = index\n\n\treturn r\n}", "func (s *Storage) PutIndex(ctx context.Context, uri string, acl string, r io.Reader) error {\n\tif strings.HasPrefix(uri, \"index.yaml\") {\n\t\treturn errors.New(\"uri must not contain \\\"index.yaml\\\" suffix, it appends automatically\")\n\t}\n\turi += \"/index.yaml\"\n\n\tbucket, key, err := parseURI(uri)\n\tif err != nil {\n\t\treturn err\n\t}\n\t_, err = s3manager.NewUploader(s.session).UploadWithContext(\n\t\tctx,\n\t\t&s3manager.UploadInput{\n\t\t\tBucket: aws.String(bucket),\n\t\t\tKey: aws.String(key),\n\t\t\tACL: aws.String(acl),\n\t\t\tServerSideEncryption: getSSE(),\n\t\t\tBody: r,\n\t\t})\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"upload index to S3 bucket\")\n\t}\n\n\treturn nil\n}", "func NewIndex(addrs []string, pass string, temporary int, name string, md *index.Metadata) *Index {\n\n\tret := &Index{\n\n\t\thosts: addrs,\n\n\t\tmd: md,\n\t\tpassword: pass,\n\t\ttemporary: temporary,\n\n\t\tname: name,\n\n\t\tcommandPrefix: \"FT\",\n\t}\n\tif md != nil && md.Options != nil {\n\t\tif opts, ok := md.Options.(IndexingOptions); ok {\n\t\t\tif opts.Prefix != \"\" {\n\t\t\t\tret.commandPrefix = md.Options.(IndexingOptions).Prefix\n\t\t\t}\n\t\t}\n\t}\n\t//ret.pool.MaxActive = ret.pool.MaxIdle\n\n\treturn ret\n\n}", "func (wc *WriterBase) ShouldWriteIndex() bool {\n\treturn wc.currentResolution == wc.resolutions[0][0] || 
len(wc.resolutions) == 1\n}", "func NewIndex(kind IndexKind, table string) Index {\n\treturn &index{\n\t\tkind: kind,\n\t\ttable: table,\n\t}\n}", "func Index(w http.ResponseWriter, data *IndexData) {\n\trender(tpIndex, w, data)\n}", "func Index(w http.ResponseWriter, data *IndexData) {\n\trender(tpIndex, w, data)\n}", "func SaveIndex(target string, source QueryList, verbose bool) {\n\tlogm(\"INFO\", fmt.Sprintf(\"saving index to %s...\", target), verbose)\n\tfile, err := os.Create(target)\n\tcheckResult(err)\n\tdefer file.Close()\n\n\tgr := gzip.NewWriter(file)\n\tdefer gr.Close()\n\n\tencoder := gob.NewEncoder(gr)\n\n\terr = encoder.Encode(source.Names)\n\tcheckResult(err)\n\tlogm(\"INFO\", fmt.Sprintf(\"%v sequence names saved\", len(source.Names)), verbose)\n\n\terr = encoder.Encode(source.SeedSize)\n\tcheckResult(err)\n\n\terr = encoder.Encode(source.Cgst)\n\tcheckResult(err)\n\n\t// save the index, but go has a size limit\n\tindexSize := len(source.Index)\n\terr = encoder.Encode(indexSize)\n\tcheckResult(err)\n\tlogm(\"INFO\", fmt.Sprintf(\"%v queries to save...\", indexSize), verbose)\n\n\tcount := 0\n\tfor key, value := range source.Index {\n\t\terr = encoder.Encode(key)\n\t\tcheckResult(err)\n\t\terr = encoder.Encode(value)\n\t\tcheckResult(err)\n\t\tcount++\n\t\tif count%10000 == 0 {\n\t\t\tlogm(\"INFO\", fmt.Sprintf(\"processing: saved %v items\", count), false)\n\t\t}\n\t}\n\n\tlogm(\"INFO\", fmt.Sprintf(\"saving index to %s: done\", target), verbose)\n}", "func OpenIndex(collectionName, indexName string, fd *feed.API, ai *account.Info, user utils.Address, client blockstore.Client, logger logging.Logger) (*Index, error) {\n\tactualIndexName := collectionName + indexName\n\tmanifest := getRootManifestOfIndex(actualIndexName, fd, user, client) // this will load the entire Manifest for immutable indexes\n\tif manifest == nil {\n\t\treturn nil, ErrIndexNotPresent\n\t}\n\n\tidx := &Index{\n\t\tname: manifest.Name,\n\t\tmutable: 
manifest.Mutable,\n\t\tindexType: manifest.IdxType,\n\t\tpodFile: manifest.PodFile,\n\t\tuser: user,\n\t\taccountInfo: ai,\n\t\tfeed: fd,\n\t\tclient: client,\n\t\tcount: 0,\n\t\tmemDB: manifest,\n\t\tlogger: logger,\n\t}\n\treturn idx, nil\n}", "func (b *Blueprint) Index(columns []string, name string, algorithm string) *Blueprint {\n\treturn b.indexCommand(\"index\", columns, name, algorithm)\n}", "func Codec(codec *encoding.Codec) Opt {\n\treturn func(c *Client) Opt {\n\t\told := c.codec\n\t\tc.codec = codec\n\t\treturn Codec(old)\n\t}\n}", "func (rb *ShardsRecordBuilder) IndexingIndexTime(indexingindextime string) *ShardsRecordBuilder {\n\trb.v.IndexingIndexTime = &indexingindextime\n\treturn rb\n}", "func (c *rawConnection) Index(repo string, idx []FileInfo) {\n\tc.imut.Lock()\n\tvar msgType int\n\tif c.indexSent[repo] == nil {\n\t\t// This is the first time we send an index.\n\t\tmsgType = messageTypeIndex\n\n\t\tc.indexSent[repo] = make(map[string][2]int64)\n\t\tfor _, f := range idx {\n\t\t\tc.indexSent[repo][f.Name] = [2]int64{f.Modified, int64(f.Version)}\n\t\t}\n\t} else {\n\t\t// We have sent one full index. 
Only send updates now.\n\t\tmsgType = messageTypeIndexUpdate\n\t\tvar diff []FileInfo\n\t\tfor _, f := range idx {\n\t\t\tif vs, ok := c.indexSent[repo][f.Name]; !ok || f.Modified != vs[0] || int64(f.Version) != vs[1] {\n\t\t\t\tdiff = append(diff, f)\n\t\t\t\tc.indexSent[repo][f.Name] = [2]int64{f.Modified, int64(f.Version)}\n\t\t\t}\n\t\t}\n\t\tidx = diff\n\t}\n\tc.imut.Unlock()\n\n\tc.send(header{0, -1, msgType}, IndexMessage{repo, idx})\n}", "func (m *RecurrencePattern) SetIndex(value *WeekIndex)() {\n m.index = value\n}", "func (i *Index) Index(docs []index.Document, options interface{}) error {\n\n\tvar opts IndexingOptions\n\thasOpts := false\n\tif options != nil {\n\t\tif opts, hasOpts = options.(IndexingOptions); !hasOpts {\n\t\t\treturn errors.New(\"invalid indexing options\")\n\t\t}\n\t}\n\n\tconn := i.getConn()\n\tdefer conn.Close()\n\n\tn := 0\n\n\tfor _, doc := range docs {\n\t\targs := make(redis.Args, 0, len(i.md.Fields)*2+4)\n\t\targs = append(args, i.name, doc.Id, doc.Score)\n\t\t// apply options\n\t\tif hasOpts {\n\t\t\tif opts.NoSave {\n\t\t\t\targs = append(args, \"NOSAVE\")\n\t\t\t}\n\t\t\tif opts.Language != \"\" {\n\t\t\t\targs = append(args, \"LANGUAGE\", opts.Language)\n\t\t\t}\n\t\t}\n\n\t\targs = append(args, \"FIELDS\")\n\n\t\tfor k, f := range doc.Properties {\n\t\t\targs = append(args, k, f)\n\t\t}\n\n\t\tif err := conn.Send(i.commandPrefix+\".ADD\", args...); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tn++\n\t}\n\n\tif err := conn.Flush(); err != nil {\n\t\treturn err\n\t}\n\n\tfor n > 0 {\n\t\tif _, err := conn.Receive(); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tn--\n\t}\n\n\treturn nil\n}", "func (r *Redis) Index(i services.Consumable) error {\n\tvar (\n\t\tpipe = r.client.TxPipeline()\n\n\t\ttxByIDKey = redisIndexKeysTxByID(r.chainID.String(), i.ID())\n\t\ttxCountKey = redisIndexKeysTxCount(r.chainID.String())\n\t\trecentTxsKey = redisIndexKeysRecentTxs(r.chainID.String())\n\n\t\tctx, cancelFn = 
context.WithTimeout(context.Background(), redisTimeout)\n\t)\n\tdefer cancelFn()\n\n\tif err := pipe.Set(ctx, txByIDKey, i.Body(), 0).Err(); err != nil {\n\t\treturn err\n\t}\n\n\tif err := pipe.Incr(ctx, txCountKey).Err(); err != nil {\n\t\treturn err\n\t}\n\n\tif err := pipe.LPush(ctx, recentTxsKey, i.ID()).Err(); err != nil {\n\t\treturn err\n\t}\n\n\tif err := pipe.LTrim(ctx, recentTxsKey, 0, redisRecentTxsSize-1).Err(); err != nil {\n\t\treturn err\n\t}\n\n\t_, err := pipe.Exec(ctx)\n\treturn err\n}", "func (o *IssueRemoveLabelParams) SetIndex(index int64) {\n\to.Index = index\n}", "func generateIssueIndexMapping() (mapping.IndexMapping, error) {\n\tmapping := bleve.NewIndexMapping()\n\tdocMapping := bleve.NewDocumentMapping()\n\n\tnumericFieldMapping := bleve.NewNumericFieldMapping()\n\tnumericFieldMapping.IncludeInAll = false\n\tdocMapping.AddFieldMappingsAt(\"RepoID\", numericFieldMapping)\n\n\ttextFieldMapping := bleve.NewTextFieldMapping()\n\ttextFieldMapping.Store = false\n\ttextFieldMapping.IncludeInAll = false\n\tdocMapping.AddFieldMappingsAt(\"Title\", textFieldMapping)\n\tdocMapping.AddFieldMappingsAt(\"Content\", textFieldMapping)\n\tdocMapping.AddFieldMappingsAt(\"Comments\", textFieldMapping)\n\n\tif err := addUnicodeNormalizeTokenFilter(mapping); err != nil {\n\t\treturn nil, err\n\t} else if err = mapping.AddCustomAnalyzer(issueIndexerAnalyzer, map[string]interface{}{\n\t\t\"type\": custom.Name,\n\t\t\"char_filters\": []string{},\n\t\t\"tokenizer\": unicode.Name,\n\t\t\"token_filters\": []string{unicodeNormalizeName, camelcase.Name, lowercase.Name},\n\t}); err != nil {\n\t\treturn nil, err\n\t}\n\n\tmapping.DefaultAnalyzer = issueIndexerAnalyzer\n\tmapping.AddDocumentMapping(issueIndexerDocType, docMapping)\n\tmapping.AddDocumentMapping(\"_all\", bleve.NewDocumentDisabledMapping())\n\n\treturn mapping, nil\n}", "func (c *index) GenIndexKey(sc *stmtctx.StatementContext, indexedValues []types.Datum, h int64, buf []byte) (key []byte, distinct bool, 
err error) {\n\tif c.idxInfo.Unique {\n\t\t// See https://dev.mysql.com/doc/refman/5.7/en/create-index.html\n\t\t// A UNIQUE index creates a constraint such that all values in the index must be distinct.\n\t\t// An error occurs if you try to add a new row with a key value that matches an existing row.\n\t\t// For all engines, a UNIQUE index permits multiple NULL values for columns that can contain NULL.\n\t\tdistinct = true\n\t\tfor _, cv := range indexedValues {\n\t\t\tif cv.IsNull() {\n\t\t\t\tdistinct = false\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t}\n\n\t// For string columns, indexes can be created using only the leading part of column values,\n\t// using col_name(length) syntax to specify an index prefix length.\n\tindexedValues = TruncateIndexValuesIfNeeded(c.tblInfo, c.idxInfo, indexedValues)\n\tkey = c.getIndexKeyBuf(buf, len(c.prefix)+len(indexedValues)*9+9)\n\tkey = append(key, []byte(c.prefix)...)\n\tkey, err = codec.EncodeKey(sc, key, indexedValues...)\n\tif !distinct && err == nil {\n\t\tkey, err = codec.EncodeKey(sc, key, types.NewDatum(h))\n\t}\n\tif err != nil {\n\t\treturn nil, false, err\n\t}\n\treturn\n}", "func (s *Server) RegisterCodec(codec Codec, contentType string) {\n s.codecs[strings.ToLower(contentType)] = codec\n}", "func Isindex(attrs []htmlgo.Attribute, children ...HTML) HTML {\n\treturn &htmlgo.Tree{Tag: \"isindex\", Attributes: attrs, Children: children}\n}", "func (_e *MockDataCoord_Expecter) DescribeIndex(ctx interface{}, req interface{}) *MockDataCoord_DescribeIndex_Call {\n\treturn &MockDataCoord_DescribeIndex_Call{Call: _e.mock.On(\"DescribeIndex\", ctx, req)}\n}", "func (s ConsoleIndexStore) StoreIndex(name string, idx Index) error {\n\t_, err := idx.WriteTo(os.Stdout)\n\treturn err\n}" ]
[ "0.5502938", "0.52192473", "0.51957196", "0.51697326", "0.5116415", "0.5042837", "0.50319487", "0.48964846", "0.48657367", "0.48536038", "0.48389342", "0.4827276", "0.48076987", "0.4801819", "0.47965235", "0.4753826", "0.47460607", "0.4742304", "0.47193858", "0.47038877", "0.4692363", "0.46868733", "0.46837276", "0.46707043", "0.46681562", "0.46651402", "0.46597168", "0.46380782", "0.4627767", "0.4624632", "0.46226254", "0.4613507", "0.46049985", "0.46007589", "0.4594684", "0.45830384", "0.4570369", "0.45666355", "0.4563887", "0.45582423", "0.4557208", "0.4527146", "0.45203593", "0.451343", "0.45121947", "0.45016536", "0.4500156", "0.44881213", "0.44687417", "0.4468659", "0.44579467", "0.44543144", "0.44469458", "0.4436678", "0.44208673", "0.4416574", "0.441252", "0.44060874", "0.4402094", "0.44005042", "0.43964043", "0.43947968", "0.43873066", "0.43831855", "0.43789312", "0.43743363", "0.43726158", "0.43500626", "0.43313614", "0.432883", "0.4323144", "0.43074837", "0.42871547", "0.42828578", "0.42799073", "0.42763367", "0.42760184", "0.42684796", "0.42634934", "0.4257159", "0.4249881", "0.42488283", "0.42481467", "0.42481467", "0.42472848", "0.4247067", "0.42452997", "0.42407218", "0.42377353", "0.422243", "0.42178258", "0.42147046", "0.4211853", "0.42115742", "0.42098293", "0.4208507", "0.4207388", "0.42053095", "0.42048842", "0.42034593" ]
0.8574102
0
WithoutIndex flags that no index should be included in generation.
Флаг WithoutIndex указывает на то, что индекс не должен включаться в генерацию.
func WithoutIndex() Option { return func(o *Options) { o.IndexCodec = index.CarIndexNone } }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func IndexOptionsNone() IndexOptions {\n\tresult := IndexOptions{}\n\n\treturn result\n}", "func (dict *Dictionary) DropIndex() {\n\tdict.shortIndex = nil\n\tdict.longIndex = nil\n}", "func (_m *DirectRepositoryWriter) DisableIndexRefresh() {\n\t_m.Called()\n}", "func (r *Search) AllowNoIndices(allownoindices bool) *Search {\n\tr.values.Set(\"allow_no_indices\", strconv.FormatBool(allownoindices))\n\n\treturn r\n}", "func (wou *WorkOrderUpdate) ClearIndex() *WorkOrderUpdate {\n\twou.index = nil\n\twou.clearindex = true\n\treturn wou\n}", "func (wouo *WorkOrderUpdateOne) ClearIndex() *WorkOrderUpdateOne {\n\twouo.index = nil\n\twouo.clearindex = true\n\treturn wouo\n}", "func WithoutPosition() OptionFunc {\n\treturn func(opt *Options) {\n\t\topt.ShowFlag = Fnopos\n\t}\n}", "func (s *FieldStatsService) AllowNoIndices(allowNoIndices bool) *FieldStatsService {\n\ts.allowNoIndices = &allowNoIndices\n\treturn s\n}", "func WithoutTimestamp() Option {\n\treturn func(l LoggerOpts) LoggerOpts {\n\t\tl.IncludeTime = false\n\t\treturn l\n\t}\n}", "func (o *DatasetEvent) UnsetSourceMapIndex() {\n\to.SourceMapIndex.Unset()\n}", "func (g *GeneratedFile) Unskip() {\n\tg.skip = false\n}", "func WithoutLocation() Option {\n\treturn func(l LoggerOpts) LoggerOpts {\n\t\tl.IncludeLocation = false\n\t\treturn l\n\t}\n}", "func (index *spdIndex) Clear() {\n\tindex.mapping.Clear()\n}", "func WithNoVersion() Option {\n\treturn func(a *App) {\n\t\ta.noVersion = true\n\t}\n}", "func TestEngine_WriteIndex_NoPoints(t *testing.T) {\n\te := OpenDefaultEngine()\n\tdefer e.Close()\n\tif err := e.WriteIndex(map[string][][]byte{\"cpu\": nil}, nil, nil); err != nil {\n\t\tt.Fatal(err)\n\t}\n}", "func NotIt() {\n\toutput.EmitLn(\"EOR #-1,D0\")\n}", "func DisableVertexAttribArray(index uint32) {\n C.glowDisableVertexAttribArray(gpDisableVertexAttribArray, (C.GLuint)(index))\n}", "func (s *IndicesSyncedFlushService) AllowNoIndices(allowNoIndices bool) *IndicesSyncedFlushService {\n\ts.allowNoIndices 
= &allowNoIndices\n\treturn s\n}", "func (b CreateIndexBuilder) IfNotExists() CreateIndexBuilder {\n\treturn builder.Set(b, \"IfNotExists\", true).(CreateIndexBuilder)\n}", "func (c RawConfiguration) WithoutNodes(ids ...uint32) NodeListOption {\n\trmIDs := make(map[uint32]bool)\n\tfor _, id := range ids {\n\t\trmIDs[id] = true\n\t}\n\tkeepIDs := make([]uint32, 0, len(c))\n\tfor _, cNode := range c {\n\t\tif !rmIDs[cNode.id] {\n\t\t\tkeepIDs = append(keepIDs, cNode.id)\n\t\t}\n\t}\n\treturn &nodeIDs{nodeIDs: keepIDs}\n}", "func NoDebugFlagOnForcedSampling(noDebugFlagOnForcedSampling bool) Option {\n\treturn func(c *Options) {\n\t\tc.noDebugFlagOnForcedSampling = noDebugFlagOnForcedSampling\n\t}\n}", "func IndexNotIn(vs ...int) predicate.Step {\n\tv := make([]interface{}, len(vs))\n\tfor i := range v {\n\t\tv[i] = vs[i]\n\t}\n\treturn predicate.Step(func(s *sql.Selector) {\n\t\t// if not arguments were provided, append the FALSE constants,\n\t\t// since we can't apply \"IN ()\". This will make this predicate falsy.\n\t\tif len(v) == 0 {\n\t\t\ts.Where(sql.False())\n\t\t\treturn\n\t\t}\n\t\ts.Where(sql.NotIn(s.C(FieldIndex), v...))\n\t})\n}", "func GenerateNotFoundIndex(datatypeName string) string {\n\tswitch datatypeName {\n\tcase field.TypeString:\n\t\treturn \"strconv.Itoa(100000)\"\n\tcase field.TypeUint, field.TypeInt:\n\t\treturn \"100000\"\n\tcase field.TypeBool:\n\t\treturn valueFalse\n\tcase field.TypeCustom:\n\t\treturn valueNull\n\tdefault:\n\t\tpanic(fmt.Sprintf(\"unknown type %s\", datatypeName))\n\t}\n}", "func GetIndicesWithoutIgnored() (parsedIndices []types.Index, err string) {\n\tvar indices, getErr = GetIndices()\n\tcleanIndices := make([]types.Index, len(parsedIndices))\n\tfor _, indexed := range indices {\n\t\tvar ignorable bool\n\t\tfor _, ignored := range singleton.GetConfig().Parser.Ignorelist {\n\t\t\tif ignored != \"\" {\n\t\t\t\tr, _ := regexp.Compile(ignored)\n\n\t\t\t\tif r.MatchString(indexed.Name) {\n\n\t\t\t\t\tignorable = 
true\n\t\t\t\t\tif singleton.GetVerbose() {\n\t\t\t\t\t\tlog.Println(\"Index name: \" + indexed.Name + \" matches the regex: \" + ignored)\n\t\t\t\t\t}\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\tif !ignorable {\n\t\t\tcleanIndices = append(cleanIndices, indexed)\n\t\t}\n\t\tignorable = false\n\t}\n\treturn cleanIndices, getErr\n}", "func (list BuildpackV2List) WithoutDisabled() BuildpackV2List {\n\tvar out BuildpackV2List\n\n\tfor _, buildpack := range list {\n\t\tif !buildpack.Disabled {\n\t\t\tout = append(out, buildpack)\n\t\t}\n\t}\n\n\treturn out\n}", "func (n Noop) Index() int {\n\treturn 0\n}", "func (m *MockDriver) UseIndexPlaceholders() bool {\n\treturn false\n}", "func (ibt *IndexBehaviorTest) TestCheckIndexNoIndexNeeded(c *C) {\n\tctx := context.Background()\n\tdsClient, err := datastore.NewClient(ctx, TestProject)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tnoIndex := func(q *datastore.Query) {\n\t\tt := dsClient.Run(ctx, q)\n\t\tvar x testKind\n\t\t_, err := t.Next(&x)\n\t\tif err != iterator.Done {\n\t\t\tc.Assert(err, IsNil)\n\t\t}\n\t}\n\n\t// Kindless queries using only ancestor and key filters\n\t// (these are scary though, so we don't use them)\n\tnoIndex(datastore.NewQuery(\"\").Filter(\"__key__ >\", testKey).Ancestor(testKey))\n\n\t// Queries using only ancestor and equality filters\n\tnoIndex(datastore.NewQuery(\"testKindNoIndex\"))\n\tnoIndex(datastore.NewQuery(\"testKindNoIndex\").Filter(\"singleField =\", 0))\n\tnoIndex(datastore.NewQuery(\"testKindNoIndex\").Filter(\"singleField =\", 0).Ancestor(testKey))\n\tnoIndex(datastore.NewQuery(\"testKindNoIndex\").Filter(\"singleField =\", 0).Filter(\"otherField =\", 1).Ancestor(testKey))\n\n\t// Queries using only inequality filters (which are limited to a single property)\n\tnoIndex(datastore.NewQuery(\"testKindNoIndex\").Filter(\"singleField >\", 0))\n\t// Even if there's two on the same field.\n\tnoIndex(datastore.NewQuery(\"testKindNoIndex\").Filter(\"singleField >\", 
0).Filter(\"singleField <\", 200))\n\n\t// Queries using only ancestor filters, equality filters on properties, and inequality filters on keys\n\tnoIndex(datastore.NewQuery(\"testKindNoIndex\").Filter(\"singleField =\", 0).Filter(\"__key__ >\", testKey).Ancestor(testKey))\n\n\t// Queries with no filters and only one sort order on a property, either ascending or descending\n\t// (unless descending key)\n\tnoIndex(datastore.NewQuery(\"testKindNoIndex\").Order(\"singleField\"))\n\tnoIndex(datastore.NewQuery(\"testKindNoIndex\").Order(\"-singleField\"))\n\n\t// Also with a filter on the ordered property (undocumented)\n\t// (Ordering of query results is undefined when no sort order is specified)\n\tnoIndex(datastore.NewQuery(\"testKindNoIndex\").Filter(\"singleField >\", 0).Order(\"singleField\"))\n\tnoIndex(datastore.NewQuery(\"testKindNoIndex\").Filter(\"singleField >\", 0).Filter(\"singleField <\", 0).Order(\"-singleField\"))\n\n\t// If a query does not need an index, making it keys-only does not make you need one.\n\tnoIndex(datastore.NewQuery(\"testKindNoIndex\").KeysOnly())\n\tnoIndex(datastore.NewQuery(\"testKindNoIndex\").Filter(\"singleField >\", 0).KeysOnly())\n\n\t// Single project + Order\n\tnoIndex(datastore.NewQuery(\"testKindNoIndex\").Project(\"A\").Order(\"-A\"))\n}", "func TestEnsureSkipListIndex(t *testing.T) {\n\tc := createClient(t, nil)\n\tdb := ensureDatabase(nil, c, \"index_test\", nil, t)\n\n\ttestOptions := []*driver.EnsureSkipListIndexOptions{\n\t\tnil,\n\t\t{Unique: true, Sparse: false, NoDeduplicate: true},\n\t\t{Unique: true, Sparse: true, NoDeduplicate: true},\n\t\t{Unique: false, Sparse: false, NoDeduplicate: false},\n\t\t{Unique: false, Sparse: true, NoDeduplicate: false},\n\t}\n\n\tfor i, options := range testOptions {\n\t\tcol := ensureCollection(nil, db, fmt.Sprintf(\"skiplist_index_test_%d\", i), nil, t)\n\n\t\tidx, created, err := col.EnsureSkipListIndex(nil, []string{\"name\", \"title\"}, options)\n\t\tif err != nil 
{\n\t\t\tt.Fatalf(\"Failed to create new index: %s\", describe(err))\n\t\t}\n\t\tif !created {\n\t\t\tt.Error(\"Expected created to be true, got false\")\n\t\t}\n\t\tif idxType := idx.Type(); idxType != driver.SkipListIndex {\n\t\t\tt.Errorf(\"Expected SkipListIndex, found `%s`\", idxType)\n\t\t}\n\t\tif options != nil && idx.Unique() != options.Unique {\n\t\t\tt.Errorf(\"Expected Unique to be %t, found `%t`\", options.Unique, idx.Unique())\n\t\t}\n\t\tif options != nil && idx.Sparse() != options.Sparse {\n\t\t\tt.Errorf(\"Expected Sparse to be %t, found `%t`\", options.Sparse, idx.Sparse())\n\t\t}\n\t\tif options != nil && !idx.Deduplicate() != options.NoDeduplicate {\n\t\t\tt.Errorf(\"Expected NoDeduplicate to be %t, found `%t`\", options.NoDeduplicate, idx.Deduplicate())\n\t\t}\n\n\t\t// Index must exists now\n\t\tif found, err := col.IndexExists(nil, idx.Name()); err != nil {\n\t\t\tt.Fatalf(\"Failed to check index '%s' exists: %s\", idx.Name(), describe(err))\n\t\t} else if !found {\n\t\t\tt.Errorf(\"Index '%s' does not exist, expected it to exist\", idx.Name())\n\t\t}\n\n\t\t// Ensure again, created must be false now\n\t\t_, created, err = col.EnsureSkipListIndex(nil, []string{\"name\", \"title\"}, options)\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"Failed to re-create index: %s\", describe(err))\n\t\t}\n\t\tif created {\n\t\t\tt.Error(\"Expected created to be false, got true\")\n\t\t}\n\n\t\t// Remove index\n\t\tif err := idx.Remove(nil); err != nil {\n\t\t\tt.Fatalf(\"Failed to remove index '%s': %s\", idx.Name(), describe(err))\n\t\t}\n\n\t\t// Index must not exists now\n\t\tif found, err := col.IndexExists(nil, idx.Name()); err != nil {\n\t\t\tt.Fatalf(\"Failed to check index '%s' exists: %s\", idx.Name(), describe(err))\n\t\t} else if found {\n\t\t\tt.Errorf(\"Index '%s' does exist, expected it not to exist\", idx.Name())\n\t\t}\n\t}\n}", "func WithoutAll() Option {\n\treturn func(d *Decoder) {\n\t\td.proto = false\n\t\td.byteDec = false\n\t\td.hex = 
false\n\t\td.base64 = false\n\t}\n}", "func TestEngine_WriteIndex_NoKeys(t *testing.T) {\n\te := OpenDefaultEngine()\n\tdefer e.Close()\n\tif err := e.WriteIndex(nil, nil, nil); err != nil {\n\t\tt.Fatal(err)\n\t}\n}", "func WithoutFields(fields ...string) ReqOption {\n\treturn func(v url.Values) {\n\t\tv.Set(\"fields\", strings.Join(fields, \",\"))\n\t\tv.Set(\"include_fields\", \"false\")\n\t}\n}", "func (uh *UserHandler) IndexNot(w http.ResponseWriter, r *http.Request) {\n\tw.Write([]byte(\"Not Sucessfull!\"))\n}", "func (s *ValidateService) AllowNoIndices(allowNoIndices bool) *ValidateService {\n\ts.allowNoIndices = &allowNoIndices\n\treturn s\n}", "func ignorePredicate() predicate.Predicate {\n\treturn predicate.Funcs{\n\t\tUpdateFunc: func(e event.UpdateEvent) bool {\n\t\t\treturn e.ObjectOld.GetGeneration() != e.ObjectNew.GetGeneration()\n\t\t},\n\t}\n}", "func Not(p predicate.AllocationStrategy) predicate.AllocationStrategy {\n\treturn predicate.AllocationStrategy(func(s *sql.Selector) {\n\t\tp(s.Not())\n\t})\n}", "func DisableVertexAttribArray(index uint32) {\n\tgl.DisableVertexAttribArray(index)\n}", "func (s *IndicesClearCacheService) AllowNoIndices(allowNoIndices bool) *IndicesClearCacheService {\n\ts.allowNoIndices = &allowNoIndices\n\treturn s\n}", "func WithoutLogLocation() ServeOpt {\n\treturn serveConfigFunc(func(in *ServeConfig) error {\n\t\tin.disableLogLocation = true\n\t\treturn nil\n\t})\n}", "func generateNoOpBlock() {\n\tfmt.Println(\"Generating a NoOp Block...\")\n\tfmt.Println(\"Block chain size:\", len(blockChain), \"number transactions:\", len(transactions))\n\t// TODO this printstate() actually seemed to help performance... 
Maybe could use a tiny sleep here?\n\tprintState()\n\tif len(leafBlocks) > 1 {\n\t\tfmt.Println(\"We have a fork!!!!!!!!!!!!!!\")\n\t}\n\tnoOpBlock := Block{HashBlock: HashBlock{TxID: 0, NodeID: myNodeID, Nonce: 0}}\n\tnoOpBlock = setCorrectParentHashAndDepth(noOpBlock)\n\tfor isGenerateNoOps {\n\t\tsuccess, _ := generateBlock(&noOpBlock)\n\t\tif success {\n\t\t\treturn\n\t\t}\n\t}\n\t// received a call to commit or AddBlock which set isGenerateNoOps = false\n\treturn\n}", "func DisableVertexAttribArray(index uint32) {\n\tC.glowDisableVertexAttribArray(gpDisableVertexAttribArray, (C.GLuint)(index))\n}", "func DisableVertexAttribArray(index uint32) {\n\tC.glowDisableVertexAttribArray(gpDisableVertexAttribArray, (C.GLuint)(index))\n}", "func (b *ProxyBuilder) NoProxy(value string) *ProxyBuilder {\n\tb.noProxy = value\n\tb.bitmap_ |= 4\n\treturn b\n}", "func WithSingleSampleDisabled() Option {\n\treturn func(e *Engine) {\n\t\te.graph.singleSampleDisabled = true\n\t}\n}", "func (x EntIndex) IsUnique() bool { return (x.Flags & EntIndexUnique) != 0 }", "func Not(p predicate.OutcomeOverview) predicate.OutcomeOverview {\n\treturn predicate.OutcomeOverview(func(s *sql.Selector) {\n\t\tp(s.Not())\n\t})\n}", "func (gq *GoodsQuery) OnlyIDX(ctx context.Context) string {\n\tid, err := gq.OnlyID(ctx)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn id\n}", "func (c Capabilities) Without(feature string) Capabilities {\n\tc[feature] = false\n\treturn c\n}", "func (g *Generator) generateDDLsForAbsentIndex(currentIndex Index, currentTable Table, desiredTable Table) ([]string, error) {\n\tddls := []string{}\n\n\tif currentIndex.primary {\n\t\tvar primaryKeyColumn *Column\n\t\tfor _, column := range desiredTable.columns {\n\t\t\tif column.keyOption == ColumnKeyPrimary {\n\t\t\t\tprimaryKeyColumn = &column\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\n\t\t// If nil, it will be `DROP COLUMN`-ed. 
Ignore it.\n\t\tif primaryKeyColumn != nil && primaryKeyColumn.name != currentIndex.columns[0].column { // TODO: check length of currentIndex.columns\n\t\t\t// TODO: handle this. Rename primary key column...?\n\t\t\treturn ddls, fmt.Errorf(\n\t\t\t\t\"primary key column name of '%s' should be '%s' but currently '%s'. This is not handled yet.\",\n\t\t\t\tcurrentTable.name, primaryKeyColumn.name, currentIndex.columns[0].column,\n\t\t\t)\n\t\t}\n\t} else if currentIndex.unique {\n\t\tvar uniqueKeyColumn *Column\n\t\tfor _, column := range desiredTable.columns {\n\t\t\tif column.name == currentIndex.columns[0].column && column.keyOption.isUnique() {\n\t\t\t\tuniqueKeyColumn = &column\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\n\t\tif uniqueKeyColumn == nil {\n\t\t\t// No unique column. Drop unique key index.\n\t\t\tddls = append(ddls, g.generateDropIndex(currentTable.name, currentIndex.name))\n\t\t}\n\t} else {\n\t\tddls = append(ddls, g.generateDropIndex(currentTable.name, currentIndex.name))\n\t}\n\n\treturn ddls, nil\n}", "func DisableVertexArrayAttrib(vaobj uint32, index uint32) {\n\tC.glowDisableVertexArrayAttrib(gpDisableVertexArrayAttrib, (C.GLuint)(vaobj), (C.GLuint)(index))\n}", "func DisableVertexArrayAttrib(vaobj uint32, index uint32) {\n\tC.glowDisableVertexArrayAttrib(gpDisableVertexArrayAttrib, (C.GLuint)(vaobj), (C.GLuint)(index))\n}", "func (adminAPIOp) SkipVerification() bool { return true }", "func (gsuo *GameServerUpdateOne) ClearDisabledAt() *GameServerUpdateOne {\n\tgsuo.mutation.ClearDisabledAt()\n\treturn gsuo\n}", "func DisableVertexAttribArray(index Uint) {\n\tcindex, _ := (C.GLuint)(index), cgoAllocsUnknown\n\tC.glDisableVertexAttribArray(cindex)\n}", "func (b *Base) Omit(keys ...string) Serializer {\n\treturn b.OmitIf(alwaysTrue, keys...)\n}", "func WithoutHeaderIgnore(ks []string) Option {\n\treturn func(c *Config) {\n\t\tc.withoutHeaderIgnore = ks\n\t}\n}", "func (h History) AttachIndexIfNoExists() {\n\tif len(h) != 0 && h[0].Index.Present() 
{\n\t\treturn\n\t}\n\tfor i := range h {\n\t\th[i].Index = IntOptional{i}\n\t}\n}", "func (td TupleDesc) WithoutFixedAccess() TupleDesc {\n\treturn TupleDesc{Types: td.Types, cmp: td.cmp}\n}", "func NonMaxSuppression(scope *Scope, boxes tf.Output, scores tf.Output, max_output_size tf.Output, optional ...NonMaxSuppressionAttr) (selected_indices tf.Output) {\n\tif scope.Err() != nil {\n\t\treturn\n\t}\n\tattrs := map[string]interface{}{}\n\tfor _, a := range optional {\n\t\ta(attrs)\n\t}\n\topspec := tf.OpSpec{\n\t\tType: \"NonMaxSuppression\",\n\t\tInput: []tf.Input{\n\t\t\tboxes, scores, max_output_size,\n\t\t},\n\t\tAttrs: attrs,\n\t}\n\top := scope.AddOperation(opspec)\n\treturn op.Output(0)\n}", "func DisablesTestReadIndexTask_IgnoreHiddenPics(t *testing.T) {\n\tc := Container(t)\n\tdefer c.Close()\n\n\tu := c.CreateUser()\n\tu.User.Capability = append(u.User.Capability, schema.User_PIC_INDEX)\n\tu.Update()\n\n\tp1 := c.CreatePic()\n\tp3 := c.CreatePic()\n\t// A hard deletion\n\tp3.Pic.DeletionStatus = &schema.Pic_DeletionStatus{\n\t\tActualDeletedTs: schema.ToTs(time.Now()),\n\t}\n\tp3.Update()\n\n\ttask := ReadIndexPicsTask{\n\t\tDB: c.DB(),\n\t\tCtx: CtxFromUserID(context.Background(), u.User.UserId),\n\t}\n\n\tif err := task.Run(); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tif len(task.Pics) != 1 || !proto.Equal(p1.Pic, task.Pics[0]) {\n\t\tt.Fatalf(\"Unable to find %s in\\n %s\", p1, task.Pics)\n\t}\n}", "func clearIndex(\n\tctx context.Context,\n\texecCfg *sql.ExecutorConfig,\n\ttableDesc catalog.TableDescriptor,\n\tindex descpb.IndexDescriptor,\n) error {\n\tlog.Infof(ctx, \"clearing index %d from table %d\", index.ID, tableDesc.GetID())\n\tif index.IsInterleaved() {\n\t\treturn errors.Errorf(\"unexpected interleaved index %d\", index.ID)\n\t}\n\n\tsp := tableDesc.IndexSpan(execCfg.Codec, index.ID)\n\tstart, err := keys.Addr(sp.Key)\n\tif err != nil {\n\t\treturn errors.Errorf(\"failed to addr index start: %v\", err)\n\t}\n\tend, err := 
keys.Addr(sp.EndKey)\n\tif err != nil {\n\t\treturn errors.Errorf(\"failed to addr index end: %v\", err)\n\t}\n\trSpan := roachpb.RSpan{Key: start, EndKey: end}\n\treturn clearSpanData(ctx, execCfg.DB, execCfg.DistSender, rSpan)\n}", "func (h *Headers) NoTransform() *Headers {\n\th.noTransform = true\n\treturn h\n}", "func NoDecorator() Decorator { return func(c Context) Context { return c } }", "func (b *ShardBuilder) WithNoProgress() *ShardBuilder {\n\tb.shard.Progress = spanner.NullInt64{Valid: false}\n\treturn b\n}", "func NoopHook(index string) error {\n\treturn nil\n}", "func GenerateValidIndex(datatypeName string) string {\n\tswitch datatypeName {\n\tcase field.TypeString:\n\t\treturn \"strconv.Itoa(0)\"\n\tcase field.TypeUint, field.TypeInt:\n\t\treturn \"0\"\n\tcase field.TypeBool:\n\t\treturn valueFalse\n\tcase field.TypeCustom:\n\t\treturn valueNull\n\tdefault:\n\t\tpanic(fmt.Sprintf(\"unknown type %s\", datatypeName))\n\t}\n}", "func generateNoOpBlocks() {\n\tfor {\n\t\tif isGenerateNoOps && !isWorkingOnCommit {\n\t\t\tisWorkingOnNoOp = true\n\t\t\tgenerateNoOpBlock()\n\t\t\tisWorkingOnNoOp = false\n\t\t\ttime.Sleep(time.Millisecond * 100)\n\t\t} else {\n\t\t\ttime.Sleep(time.Second)\n\t\t}\n\t}\n}", "func (g *GeneratedFile) Skip() {\n\tg.skip = true\n}", "func (t *Tags) NoFinalize() {\n\tt.noFinalize = true\n\tfor _, tag := range t.values {\n\t\ttag.NoFinalize()\n\t}\n}", "func Without[T comparable](collection []T, exclude ...T) []T {\n\tresult := make([]T, 0, len(collection))\n\tfor _, e := range collection {\n\t\tif !Contains(exclude, e) {\n\t\t\tresult = append(result, e)\n\t\t}\n\t}\n\treturn result\n}", "func DeleteIndexWithoutOrder(a interface{}, index int) interface{} {\n\tswitch a.(type) {\n\tcase []int:\n\t\treturn DeleteIndexWithoutOrderInt(a.([]int), index)\n\tdefault:\n\t\tpanic(\"not support type\")\n\t}\n}", "func (w *Writer) ZeroUntil(index int64)", "func NoopFilter(index string, data []byte) ([]byte, error) {\n\treturn data, nil\n}", 
"func (h *Headers) NoStore() *Headers {\n\th.noStore = true\n\treturn h\n}", "func Exclude(attributes ...string) Options {\n\treturn exclude{attributes: attributes}\n}", "func Not(p predicate.MetaSchema) predicate.MetaSchema {\n\treturn predicate.MetaSchema(\n\t\tfunc(s *sql.Selector) {\n\t\t\tp(s.Not())\n\t\t},\n\t)\n}", "func NoOp(scope *Scope) (o *tf.Operation) {\n\tif scope.Err() != nil {\n\t\treturn\n\t}\n\topspec := tf.OpSpec{\n\t\tType: \"NoOp\",\n\t}\n\treturn scope.AddOperation(opspec)\n}", "func (d *dbBasePostgres) GenerateSpecifyIndex(tableName string, useIndex int, indexes []string) string {\n\tDebugLog.Println(\"[WARN] Not support any specifying index action, so that action is ignored\")\n\treturn ``\n}", "func DisableVertexAttribArray(index uint32) {\n\tsyscall.Syscall(gpDisableVertexAttribArray, 1, uintptr(index), 0, 0)\n}", "func DisableVertexArrayAttrib(vaobj uint32, index uint32) {\n\tsyscall.Syscall(gpDisableVertexArrayAttrib, 2, uintptr(vaobj), uintptr(index), 0)\n}", "func noHeader(predeterminedHeader *Header) headerOption {\n\tif predeterminedHeader == nil {\n\t\tpanic(\"nil predeterminedHeader\")\n\t}\n\treturn headerOption{noHeader: true, predeterminedHeader: predeterminedHeader}\n}", "func NoParallel() TestOptionsFunc {\n\treturn func(_ *testing.T, test *Test) { test.RunOptions.NoParallel = true }\n}", "func Not(p predicate.Bulk) predicate.Bulk {\n\treturn predicate.Bulk(func(s *sql.Selector) {\n\t\tp(s.Not())\n\t})\n}", "func DropIndexColumnFlag(tblInfo *model.TableInfo, indexInfo *model.IndexInfo) {\n\tif indexInfo.Primary {\n\t\tfor _, col := range indexInfo.Columns {\n\t\t\ttblInfo.Columns[col.Offset].DelFlag(mysql.PriKeyFlag)\n\t\t}\n\t} else if indexInfo.Unique && len(indexInfo.Columns) == 1 {\n\t\ttblInfo.Columns[indexInfo.Columns[0].Offset].DelFlag(mysql.UniqueKeyFlag)\n\t} else {\n\t\ttblInfo.Columns[indexInfo.Columns[0].Offset].DelFlag(mysql.MultipleKeyFlag)\n\t}\n\n\tcol := indexInfo.Columns[0]\n\t// other index may still cover 
this col\n\tfor _, index := range tblInfo.Indices {\n\t\tif index.Name.L == indexInfo.Name.L {\n\t\t\tcontinue\n\t\t}\n\n\t\tif index.Columns[0].Name.L != col.Name.L {\n\t\t\tcontinue\n\t\t}\n\n\t\tAddIndexColumnFlag(tblInfo, index)\n\t}\n}", "func NoTrace() zap.Option {\n\treturn zap.AddStacktrace(noTrace{})\n}", "func Not(d Dense) Dense {\n\tr := Dense{\n\t\tbits: make([]byte, 0, BytesFor(d.len)),\n\t\tlen: d.len,\n\t\tnegated: !d.negated,\n\t}\n\tfor i := range d.bits {\n\t\tr.bits = append(r.bits, ^d.bits[i])\n\t}\n\treturn r\n}", "func (HashValidationOption) NoCheck() HashValidationOption { return HashValidationOption(1) }", "func (m Map) Without(filter Coord) Map {\n\tfiltered := make(Map, 0)\n\n\tfor _, c := range m {\n\t\tif c != filter {\n\t\t\tfiltered = append(filtered, c)\n\t\t}\n\t}\n\n\treturn filtered\n}", "func GenMissingData(hasMissingData bool) Option {\n\treturn func(f *genBowOptions) { f.missingData = hasMissingData }\n}", "func WithoutProfilerEndpoints() Option {\n\treturn func(ctx context.Context, s *Server) error {\n\t\ts.noProfilerEndpoint = true\n\t\treturn nil\n\t}\n}", "func Exclude(fields []string) Option {\n\treturn func(op *PartialMutation) {\n\t\top.excludeFields = fields\n\t}\n}", "func (iob *IndexOptionsBuilder) Unique(unique bool) *IndexOptionsBuilder {\n\tiob.document = append(iob.document, bson.E{\"unique\", unique})\n\treturn iob\n}", "func WithDisable() *CallOption {\n\treturn WithMax(0)\n}", "func WithoutFileInfo() Option {\n\treturn func(o *options) {\n\t\to.withoutFileInfo = true\n\t}\n}", "func (d *Document) False() Node {\n\tid := uint(len(d.nodes))\n\tn := d.grow()\n\tn.reset(vBoolean|infRoot, strFalse, n.values[:0])\n\treturn d.Node(id)\n}", "func WithNoBuild(r *v1alpha1.Revision) {\n\tr.Status.PropagateBuildStatus(duckv1alpha1.KResourceStatus{\n\t\tConditions: []duckv1alpha1.Condition{{\n\t\t\tType: duckv1alpha1.ConditionSucceeded,\n\t\t\tStatus: corev1.ConditionTrue,\n\t\t\tReason: \"NoBuild\",\n\t\t}},\n\t})\n}", 
"func (t *Tag) NoFinalize() {\n\tt.noFinalize = true\n\tt.Name.NoFinalize()\n\tt.Value.NoFinalize()\n}", "func (gsu *GameServerUpdate) ClearDisabledAt() *GameServerUpdate {\n\tgsu.mutation.ClearDisabledAt()\n\treturn gsu\n}", "func (i *SGIndex) shouldIndexTombstones(useXattrs bool) bool {\n\treturn (i.flags&IdxFlagIndexTombstones != 0 && useXattrs)\n}", "func (s *sectionHeader) WithoutEnv() *sectionHeader {\n\ts.env = false\n\treturn s\n}" ]
[ "0.62528235", "0.6120058", "0.6103179", "0.5899328", "0.5636369", "0.56347483", "0.56101", "0.5556883", "0.54259133", "0.5418361", "0.54178435", "0.53887594", "0.5346785", "0.5267003", "0.52648", "0.52637637", "0.5242605", "0.5237273", "0.52297145", "0.5219901", "0.5217866", "0.5215443", "0.51945925", "0.51792794", "0.5176227", "0.5172019", "0.51641273", "0.5146433", "0.5145777", "0.51309407", "0.5123686", "0.5092554", "0.50898963", "0.50892127", "0.50815916", "0.5067341", "0.5066315", "0.5066008", "0.50632924", "0.50631475", "0.50372595", "0.50372595", "0.50094604", "0.50091827", "0.5001731", "0.49948606", "0.49848473", "0.49826127", "0.4972266", "0.49674815", "0.49674815", "0.49450326", "0.4942936", "0.49356323", "0.4930749", "0.4921363", "0.49126956", "0.4912399", "0.4911827", "0.4909832", "0.49091545", "0.49055356", "0.4899774", "0.48947293", "0.48906347", "0.48836088", "0.48827437", "0.48786896", "0.48778895", "0.48743352", "0.48720172", "0.48621243", "0.48569548", "0.4842319", "0.48255187", "0.48092124", "0.48062918", "0.4805452", "0.48043838", "0.47993225", "0.47985488", "0.47972873", "0.479307", "0.47862405", "0.47829956", "0.47778806", "0.47764432", "0.4774434", "0.47712976", "0.47702038", "0.47651252", "0.47602248", "0.47474357", "0.47454923", "0.47440064", "0.47373834", "0.47329834", "0.47303718", "0.47196382", "0.47185692" ]
0.7322742
0
StoreIdentityCIDs sets whether to persist sections that are referenced by CIDs with multihash.IDENTITY digest. When writing CAR files with this option, Characteristics.IsFullyIndexed will be set. By default, the blockstore interface will always return true for Has() called with identity CIDs, but when this option is turned on, it will defer to the index. When creating an index (or loading a CARv1 as a blockstore), when this option is on, identity CIDs will be included in the index. This option is disabled by default.
StoreIdentityCIDs определяет, следует ли сохранять разделы, которые ссылаются на CIDs с дайджестом multihash.IDENTITY. При записи файлов CAR с использованием этой опции будет установлено Characteristics.IsFullyIndexed. По умолчанию интерфейс blockstore всегда возвращает true для Has(), вызываемого с CIDs identity, но при включении этой опции будет опираться на индекс. При создании индекса (или загрузке CARv1 как blockstore), при включении этой опции CIDs identity будут включены в индекс. Эта опция по умолчанию отключена.
func StoreIdentityCIDs(b bool) Option { return func(o *Options) { o.StoreIdentityCIDs = b } }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (cosi *cosiAggregate) StoreIdentities(idents map[string]proto.Message) {\n\tfor k, v := range idents {\n\t\tpoint := suite.G2().Point()\n\t\terr := point.UnmarshalBinary(v.(*BdnIdentity).PublicKey)\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\n\t\tcosi.skipchain.identities[k] = point\n\t}\n}", "func StoreIdentityInFiles(i *security.Identity, keyFile string, crtFile string, csrFile string) error {\n\tvar err error\n\n\tif i.Key != nil {\n\t\tif err = CreatePEM(keyFile, i.Key); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tif i.Certificate != nil {\n\t\tif err = CreatePEM(crtFile, i.Certificate); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tif i.Request != nil {\n\t\tif err = CreatePEM(csrFile, i.Request); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}", "func (k Keeper) SetIdentityCount(ctx sdk.Context, count int64) {\n\tstore := prefix.NewStore(ctx.KVStore(k.storeKey), types.KeyPrefix(types.IdentityCountKey))\n\tbyteKey := types.KeyPrefix(types.IdentityCountKey)\n\tbz := []byte(strconv.FormatInt(count, 10))\n\tstore.Set(byteKey, bz)\n}", "func (m *cidsMap) Sync(vmis []*virtv1.VirtualMachineInstance) {\n\tm.mu.Lock()\n\tdefer m.mu.Unlock()\n\tfor _, vmi := range vmis {\n\t\tif vmi.Status.VSOCKCID == nil {\n\t\t\tcontinue\n\t\t}\n\t\tkey := controller.VirtualMachineInstanceKey(vmi)\n\t\tm.cids[key] = *vmi.Status.VSOCKCID\n\t\tm.reverse[*vmi.Status.VSOCKCID] = key\n\t}\n}", "func (m *User) SetIdentities(value []ObjectIdentityable)() {\n m.identities = value\n}", "func (ic *IdentityCache) StoreIdentity(identity Identity) error {\n\tcache := cacheData{\n\t\tIdentity: identity,\n\t}\n\n\treturn ic.writeCache(cache)\n}", "func (s *Secrets) IdentityStoreID() (string, error) {\n\treturn s.getSecret(\"SSOSyncIdentityStoreID\")\n}", "func SetIdentity(storageDir string, cid, nid uint64) (err error) {\n\tif cid == 0 {\n\t\treturn errors.New(\"raft: cid is zero\")\n\t}\n\tif nid == 0 {\n\t\treturn errors.New(\"raft: nid is zero\")\n\t}\n\td, err := 
os.Stat(storageDir)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif !d.IsDir() {\n\t\treturn fmt.Errorf(\"raft: %q is not a diretory\", storageDir)\n\t}\n\tif err := lockDir(storageDir); err != nil {\n\t\treturn err\n\t}\n\tdefer func() {\n\t\terr = unlockDir(storageDir)\n\t}()\n\tval, err := openValue(storageDir, \".id\")\n\tif err != nil {\n\t\treturn err\n\t}\n\tif cid == val.v1 && nid == val.v2 {\n\t\treturn nil\n\t}\n\tif val.v1 != 0 && val.v2 != 0 {\n\t\treturn ErrIdentityAlreadySet\n\t}\n\treturn val.set(cid, nid)\n}", "func AddIndependentPropertyGeneratorsForManagedClusterStorageProfileFileCSIDriver(gens map[string]gopter.Gen) {\n\tgens[\"Enabled\"] = gen.PtrOf(gen.Bool())\n}", "func (o GetKubernetesClusterIdentityOutput) IdentityIds() pulumi.StringArrayOutput {\n\treturn o.ApplyT(func(v GetKubernetesClusterIdentity) []string { return v.IdentityIds }).(pulumi.StringArrayOutput)\n}", "func (o *SparseClaims) Identity() elemental.Identity {\n\n\treturn ClaimsIdentity\n}", "func (o *VirtualizationBaseHostPciDeviceAllOf) SetIdentity(v string) {\n\to.Identity = &v\n}", "func StoreCAUniqueIDToCNMap(c context.Context, mapping map[int64]string) error {\n\tbuf := bytes.Buffer{}\n\tenc := gob.NewEncoder(&buf)\n\tif err := enc.Encode(mapping); err != nil {\n\t\treturn err\n\t}\n\t// Note that in practice 'mapping' is usually very small, so we are not\n\t// concerned about 1MB entity size limit.\n\treturn errors.WrapTransient(datastore.Get(c).Put(&CAUniqueIDToCNMap{\n\t\tGobEncodedMap: buf.Bytes(),\n\t}))\n}", "func (o *SnapmirrorCreateRequest) SetIdentityPreserve(newValue bool) *SnapmirrorCreateRequest {\n\to.IdentityPreservePtr = &newValue\n\treturn o\n}", "func (o KubernetesClusterIdentityOutput) IdentityIds() pulumi.StringArrayOutput {\n\treturn o.ApplyT(func(v KubernetesClusterIdentity) []string { return v.IdentityIds }).(pulumi.StringArrayOutput)\n}", "func NewAtomixStore(client primitive.Client) (Store, error) {\n\tconfigurations, err := 
_map.NewBuilder[configapi.ConfigurationID, *configapi.Configuration](client, \"configurations\").\n\t\tTag(\"onos-config\", \"configuration\").\n\t\tCodec(generic.Proto[*configapi.Configuration](&configapi.Configuration{})).\n\t\tGet(context.Background())\n\tif err != nil {\n\t\treturn nil, errors.FromAtomix(err)\n\t}\n\treturn &configurationStore{\n\t\tconfigurations: configurations,\n\t}, nil\n}", "func (cs *ClientStore) Set(id string, cli oauth2.ClientInfo) (err error) {\n\tcs.Lock()\n\tdefer cs.Unlock()\n\tcs.data[id] = cli\n\treturn\n}", "func (o *UserDisco) HasIdentity() bool {\n\tif o != nil && o.Identity != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func (mvccs *KVMVCCStore) Set(datas *types.StoreSet, hash []byte, sync bool) ([]byte, error) {\n\tif hash == nil {\n\t\thash = calcHash(datas)\n\t}\n\tkvlist, err := mvccs.mvcc.AddMVCC(datas.KV, hash, datas.StateHash, datas.Height)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tmvccs.saveKVSets(kvlist, sync)\n\treturn hash, nil\n}", "func (c *Component) ClaimIDs(ctx context.Context, ids cluster.EntityIdentifiers) error {\n\treturn c.cluster.ClaimIDs(ctx, ids)\n}", "func SetIntInStore(id, key string, value int) error {\n\treturn ecs.AddOrUpdateIntInMapComponent(id, \"store\", key, value)\n}", "func (o *VirtualizationBaseHostPciDeviceAllOf) HasIdentity() bool {\n\tif o != nil && o.Identity != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func ContactIdentityKey(id string) ([]byte, error) {\n\ts := textSecureStore\n\tidkeyfile := filepath.Join(s.identityDir, \"remote_\"+id)\n\tif !exists(idkeyfile) {\n\t\treturn nil, UnknownContactError{id}\n\t}\n\tb, err := s.readFile(idkeyfile)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn append([]byte{5}, b...), nil\n}", "func AddIndependentPropertyGeneratorsForManagedClusterIdentity(gens map[string]gopter.Gen) {\n\tgens[\"Type\"] = gen.PtrOf(gen.AlphaString())\n}", "func (o SparseClaimsList) Identity() elemental.Identity {\n\n\treturn 
ClaimsIdentity\n}", "func storeCandidates(candidateMap map[hash.Hash160]*state.Candidate, sm protocol.StateManager, blkHeight uint64) error {\n\tcandidateList, err := state.MapToCandidates(candidateMap)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"failed to convert candidate map to candidate list\")\n\t}\n\tsort.Sort(candidateList)\n\tcandidatesKey := ConstructKey(blkHeight)\n\treturn sm.PutState(candidatesKey, &candidateList)\n}", "func (c *ClusterClient) IdentityProviders() *IdentityProvidersClient {\n\treturn NewIdentityProvidersClient(\n\t\tc.transport,\n\t\tpath.Join(c.path, \"identity_providers\"),\n\t)\n}", "func NewSparseClaims() *SparseClaims {\n\treturn &SparseClaims{}\n}", "func (s *Store) PutBulk(suffixes []string, cid string) error {\n\toperations := make([]storage.Operation, len(suffixes))\n\n\tfor i, suffix := range suffixes {\n\t\top := storage.Operation{\n\t\t\tKey: suffix,\n\t\t\tValue: []byte(cid),\n\t\t}\n\n\t\toperations[i] = op\n\t}\n\n\terr := s.store.Batch(operations)\n\tif err != nil {\n\t\treturn orberrors.NewTransient(fmt.Errorf(\"failed to add cid[%s] to suffixes%s: %w\", cid, suffixes, err))\n\t}\n\n\tlogger.Debugf(\"updated latest anchor[%s] for suffixes: %s\", cid, suffixes)\n\n\treturn nil\n}", "func (o ServiceIdentityOutput) IdentityIds() pulumi.StringArrayOutput {\n\treturn o.ApplyT(func(v ServiceIdentity) []string { return v.IdentityIds }).(pulumi.StringArrayOutput)\n}", "func (bc *Baiducloud) HasClusterID() bool {\n\treturn true\n}", "func (c *IdentityConfig) loadIdentityConfigEntities() error {\n\tconfigEntity := identityConfigEntity{}\n\n\terr := c.backend.UnmarshalKey(\"client\", &configEntity.Client)\n\tlogger.Debugf(\"Client is: %+v\", configEntity.Client)\n\tif err != nil {\n\t\treturn errors.WithMessage(err, \"failed to parse 'client' config item to identityConfigEntity.Client type\")\n\t}\n\n\terr = c.backend.UnmarshalKey(\"organizations\", &configEntity.Organizations)\n\tlogger.Debugf(\"organizations are: %+v\", 
configEntity.Organizations)\n\tif err != nil {\n\t\treturn errors.WithMessage(err, \"failed to parse 'organizations' config item to identityConfigEntity.Organizations type\")\n\t}\n\n\terr = c.backend.UnmarshalKey(\"certificateAuthorities\", &configEntity.CertificateAuthorities)\n\tlogger.Debugf(\"certificateAuthorities are: %+v\", configEntity.CertificateAuthorities)\n\tif err != nil {\n\t\treturn errors.WithMessage(err, \"failed to parse 'certificateAuthorities' config item to identityConfigEntity.CertificateAuthorities type\")\n\t}\n\t// Populate ID from the lookup keys\n\tfor caID := range configEntity.CertificateAuthorities {\n\t\tca := configEntity.CertificateAuthorities[caID]\n\t\tca.ID = caID\n\t\tconfigEntity.CertificateAuthorities[caID] = ca\n\t}\n\n\t//compile CA matchers\n\terr = c.compileMatchers()\n\tif err != nil {\n\t\treturn errors.WithMessage(err, \"failed to compile certificate authority matchers\")\n\t}\n\n\terr = c.loadClientTLSConfig(&configEntity)\n\tif err != nil {\n\t\treturn errors.WithMessage(err, \"failed to load client TLSConfig \")\n\t}\n\n\terr = c.loadCATLSConfig(&configEntity)\n\tif err != nil {\n\t\treturn errors.WithMessage(err, \"failed to load CA TLSConfig \")\n\t}\n\n\terr = c.loadAllCAConfigs(&configEntity)\n\tif err != nil {\n\t\treturn errors.WithMessage(err, \"failed to load all CA configs \")\n\t}\n\n\terr = c.loadTLSCertPool(&configEntity)\n\tif err != nil {\n\t\treturn errors.WithMessage(err, \"failed to load TLS Cert Pool\")\n\t}\n\n\tc.caKeyStorePath = pathvar.Subst(c.backend.GetString(\"client.credentialStore.cryptoStore.path\"))\n\tc.credentialStorePath = pathvar.Subst(c.backend.GetString(\"client.credentialStore.path\"))\n\n\treturn nil\n}", "func (me TxsdComponentTransferFunctionAttributesType) IsIdentity() bool {\n\treturn me.String() == \"identity\"\n}", "func AddIndependentPropertyGeneratorsForManagedClusterStorageProfileDiskCSIDriver(gens map[string]gopter.Gen) {\n\tgens[\"Enabled\"] = 
gen.PtrOf(gen.Bool())\n}", "func AddIndependentPropertyGeneratorsForManagedClusterOIDCIssuerProfile(gens map[string]gopter.Gen) {\n\tgens[\"Enabled\"] = gen.PtrOf(gen.Bool())\n}", "func (e *basicEvent) SetIdentity(i string) {\n\te.Ident = i\n}", "func (cs *ClientStore) Set(id string, cli oauth2.Client) (err error) {\n\tcs.Lock()\n\tdefer cs.Unlock()\n\tcs.data[id] = cli\n\treturn\n}", "func (o GetServiceIdentityOutput) IdentityIds() pulumi.StringArrayOutput {\n\treturn o.ApplyT(func(v GetServiceIdentity) []string { return v.IdentityIds }).(pulumi.StringArrayOutput)\n}", "func idcList() []string {\n\tm, _ := idcRegion.Load().(map[string]string)\n\tidcList := make([]string, 0, len(m))\n\tfor dc, _ := range m {\n\t\tidcList = append(idcList, dc)\n\t}\n\treturn idcList\n}", "func (j *DSRocketchat) HasIdentities() bool {\n\treturn true\n}", "func (sID SemanticID) Is(identity string) bool {\n\tif sID.IsNil() {\n\t\treturn false\n\t}\n\n\treturn fmt.Sprintf(\"%s.%s\", sID.Namespace, sID.Collection) == identity\n}", "func (o AnalyzerIdentityOutput) IdentityIds() pulumi.StringArrayOutput {\n\treturn o.ApplyT(func(v AnalyzerIdentity) []string { return v.IdentityIds }).(pulumi.StringArrayOutput)\n}", "func (s *Cluster) SetIdentity(v *Identity) *Cluster {\n\ts.Identity = v\n\treturn s\n}", "func (ctx *Context) InitializeStores() error {\n\tvar err error\n\tctx.stores = make(map[string]*CoreStores)\n\tfor _, store := range ctx.config.Stores {\n\t\t// Initialize primary in memory store\n\t\tlog.Printf(\"Initializing Primary InMemoryStore %s\\n\", store.Name)\n\t\tnewstore := &CoreStores{}\n\t\tvar localerr error\n\t\tif newstore.primary, localerr = createStore(\"InMemory\", \"\"); localerr != nil {\n\t\t\terr = localerr\n\t\t}\n\t\t// Initialize backup store if defined\n\t\tif len(store.Backup) > 0 {\n\t\t\tlog.Printf(\"Initializing Backup Store %s of type %s, backup directory %s\\n\", store.Name, store.Backup, store.Backupdir)\n\t\t\tvar localerr error\n\t\t\tif 
newstore.backup, localerr = createStore(store.Backup, store.Backupdir); localerr != nil {\n\t\t\t\terr = localerr\n\t\t\t} else {\n\t\t\t\t// Once initialized we need to restore the primary store from backup store\n\t\t\t\tjsStore, serr := core.SerializeStore(newstore.backup)\n\t\t\t\tif serr != nil {\n\t\t\t\t\terr = serr\n\t\t\t\t} else {\n\t\t\t\t\tif dserr := core.DeSerializeStore(newstore.primary, jsStore); dserr != nil {\n\t\t\t\t\t\terr = dserr\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\tif len(store.AggregateURLs) > 0 {\n\t\t\tnewstore.shutdown = make(chan bool)\n\t\t\tgo ctx.SyncAggregateURLs(newstore, store.AggregateURLs, time.Duration(store.SyncIntervalSec)*time.Second)\n\t\t}\n\t\tctx.stores[store.Name] = newstore\n\t}\n\treturn err\n}", "func AddIndependentPropertyGeneratorsForManagedClusterSecurityProfileWorkloadIdentity(gens map[string]gopter.Gen) {\n\tgens[\"Enabled\"] = gen.PtrOf(gen.Bool())\n}", "func (engine *Engine) InitStore() {\n\tengine.storeIndexDocChans = make(\n\t\t[]chan storeIndexDocReq, engine.initOptions.StoreShards)\n\n\tfor shard := 0; shard < engine.initOptions.StoreShards; shard++ {\n\t\tengine.storeIndexDocChans[shard] = make(\n\t\t\tchan storeIndexDocReq)\n\t}\n\tengine.storeInitChan = make(\n\t\tchan bool, engine.initOptions.StoreShards)\n}", "func (b *ClusterBuilder) IdentityProviders(value *IdentityProviderListBuilder) *ClusterBuilder {\n\tb.identityProviders = value\n\tb.bitmap_ |= 8388608\n\treturn b\n}", "func (o *UserDisco) SetIdentity(v FullIdentity) {\n\to.Identity = &v\n}", "func (m *ServerContext) CAS() batch.CASClient {\n\treturn m.StoreClient\n}", "func Store(ctx context.Context, isClient bool, config *VFConfig) {\n\tmetadata.Map(ctx, isClient).Store(key{}, config)\n}", "func AddIndependentPropertyGeneratorsForManagedClusterStorageProfileBlobCSIDriver(gens map[string]gopter.Gen) {\n\tgens[\"Enabled\"] = gen.PtrOf(gen.Bool())\n}", "func (m *IosDeviceFeaturesConfiguration) 
SetIdentityCertificateForClientAuthentication(value IosCertificateProfileBaseable)() {\n err := m.GetBackingStore().Set(\"identityCertificateForClientAuthentication\", value)\n if err != nil {\n panic(err)\n }\n}", "func (az *Cloud) HasClusterID() bool {\n\treturn true\n}", "func (backend *ESClient) InitializeStore(ctx context.Context) {\n\tlogrus.Info(\"Initialize elastic with mappings\")\n\tif !backend.initialized {\n\t\tfor _, esMap := range mappings.AllMappings {\n\t\t\tbackend.CreateTemplate(ctx, esMap.Index, esMap.Mapping)\n\t\t\tif !esMap.Timeseries {\n\t\t\t\tbackend.createStoreIfNotExists(ctx, esMap.Index, esMap.Mapping)\n\t\t\t\tbackend.createStoreAliasIfNotExists(ctx, esMap.Alias, esMap.Index)\n\t\t\t}\n\t\t}\n\t}\n\tbackend.initialized = true\n}", "func (c *cbConfigStore) ConfigurationStore() clustering.ConfigurationStore {\n\treturn c\n}", "func (s *SplitStore) view(c cid.Cid, cb func([]byte) error) error {\n\tif isIdentiyCid(c) {\n\t\tdata, err := decodeIdentityCid(c)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\treturn cb(data)\n\t}\n\n\terr := s.hot.View(s.ctx, c, cb)\n\tif ipld.IsNotFound(err) {\n\t\treturn s.cold.View(s.ctx, c, cb)\n\t}\n\treturn err\n}", "func (c *CIDOffer) GetCIDs() []cid.ContentID {\n\treturn c.cids\n}", "func (cs *ClientStore) Set(info oauth2.ClientInfo) (err error) {\n\tcs.cHandler(cs.ccfg.ClientsCName, func(c *mongo.Collection) {\n\t\tentity := &client{\n\t\t\tID: info.GetID(),\n\t\t\tSecret: info.GetSecret(),\n\t\t\tDomain: info.GetDomain(),\n\t\t\tUserID: info.GetUserID(),\n\t\t}\n\n\t\tif _, cerr := c.InsertOne(context.TODO(), entity); cerr != nil {\n\t\t\terr = cerr\n\t\t\treturn\n\t\t}\n\t})\n\n\treturn\n}", "func (d *Dao) ArcMetas(c context.Context, aids []int64) (metas map[int64]*model.ArcCMS, err error) {\n\tmetas = make(map[int64]*model.ArcCMS)\n\trows, err := d.db.Query(c, fmt.Sprintf(_arcMetas, xstr.JoinInts(aids)))\n\tif err != nil {\n\t\tlog.Error(\"ArcMetas d.db.Query error(%v)\", 
err)\n\t\treturn\n\t}\n\tdefer rows.Close()\n\tfor rows.Next() {\n\t\tli := &model.ArcCMS{}\n\t\tif err = rows.Scan(&li.Title, &li.AID, &li.Content, &li.Cover, &li.TypeID, &li.Pubtime, &li.Videos, &li.Valid, &li.Deleted, &li.Result); err != nil {\n\t\t\tlog.Error(\"ArcMetas row.Scan error(%v)\", err)\n\t\t\treturn\n\t\t}\n\t\tmetas[li.AID] = li\n\t}\n\treturn\n}", "func (o SparseAPIChecksList) Identity() elemental.Identity {\n\n\treturn APICheckIdentity\n}", "func (o SparseOAUTHKeysList) Identity() elemental.Identity {\n\n\treturn OAUTHKeyIdentity\n}", "func (s *PollForDecisionTaskInput) SetIdentity(v string) *PollForDecisionTaskInput {\n\ts.Identity = &v\n\treturn s\n}", "func (o *SparseAPICheck) Identity() elemental.Identity {\n\n\treturn APICheckIdentity\n}", "func GetStoreIdentKey() []byte {\n\treturn storeIdentKey\n}", "func (o FluxConfigurationBlobStorageOutput) ManagedIdentity() FluxConfigurationBlobStorageManagedIdentityPtrOutput {\n\treturn o.ApplyT(func(v FluxConfigurationBlobStorage) *FluxConfigurationBlobStorageManagedIdentity {\n\t\treturn v.ManagedIdentity\n\t}).(FluxConfigurationBlobStorageManagedIdentityPtrOutput)\n}", "func (o SparseEnforcerReportsList) Identity() elemental.Identity {\n\n\treturn EnforcerReportIdentity\n}", "func (cloud *Cloud) HasClusterID() bool {\n\treturn false\n}", "func (m *ServicePrincipalRiskDetection) SetKeyIds(value []string)() {\n err := m.GetBackingStore().Set(\"keyIds\", value)\n if err != nil {\n panic(err)\n }\n}", "func (o SparseClaimsList) Append(objects ...elemental.Identifiable) elemental.Identifiables {\n\n\tout := append(SparseClaimsList{}, o...)\n\tfor _, obj := range objects {\n\t\tout = append(out, obj.(*SparseClaims))\n\t}\n\n\treturn out\n}", "func MakeIdentityFile(filePath string, key *Key, format IdentityFileFormat, certAuthorities []services.CertAuthority) (err error) {\n\tconst (\n\t\t// the files and the dir will be created with these permissions:\n\t\tfileMode = 0600\n\t\tdirMode = 0700\n\t)\n\n\tif 
filePath == \"\" {\n\t\treturn trace.BadParameter(\"identity location is not specified\")\n\t}\n\n\tvar output io.Writer = os.Stdout\n\tswitch format {\n\t// dump user identity into a single file:\n\tcase IdentityFormatFile:\n\t\tf, err := os.OpenFile(filePath, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, fileMode)\n\t\tif err != nil {\n\t\t\treturn trace.Wrap(err)\n\t\t}\n\t\toutput = f\n\t\tdefer f.Close()\n\n\t\t// write key:\n\t\tif _, err = output.Write(key.Priv); err != nil {\n\t\t\treturn trace.Wrap(err)\n\t\t}\n\t\t// append ssh cert:\n\t\tif _, err = output.Write(key.Cert); err != nil {\n\t\t\treturn trace.Wrap(err)\n\t\t}\n\t\t// append tls cert:\n\t\tif _, err = output.Write(key.TLSCert); err != nil {\n\t\t\treturn trace.Wrap(err)\n\t\t}\n\t\t// append trusted host certificate authorities\n\t\tfor _, ca := range certAuthorities {\n\t\t\t// append ssh ca certificates\n\t\t\tfor _, publicKey := range ca.GetCheckingKeys() {\n\t\t\t\tdata, err := sshutils.MarshalAuthorizedHostsFormat(ca.GetClusterName(), publicKey, nil)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn trace.Wrap(err)\n\t\t\t\t}\n\t\t\t\tif _, err = output.Write([]byte(data)); err != nil {\n\t\t\t\t\treturn trace.Wrap(err)\n\t\t\t\t}\n\t\t\t\tif _, err = output.Write([]byte(\"\\n\")); err != nil {\n\t\t\t\t\treturn trace.Wrap(err)\n\t\t\t\t}\n\t\t\t}\n\t\t\t// append tls ca certificates\n\t\t\tfor _, keyPair := range ca.GetTLSKeyPairs() {\n\t\t\t\tif _, err = output.Write(keyPair.Cert); err != nil {\n\t\t\t\t\treturn trace.Wrap(err)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t// dump user identity into separate files:\n\tcase IdentityFormatOpenSSH:\n\t\tkeyPath := filePath\n\t\tcertPath := keyPath + \"-cert.pub\"\n\n\t\terr = ioutil.WriteFile(certPath, key.Cert, fileMode)\n\t\tif err != nil {\n\t\t\treturn trace.Wrap(err)\n\t\t}\n\n\t\terr = ioutil.WriteFile(keyPath, key.Priv, fileMode)\n\t\tif err != nil {\n\t\t\treturn trace.Wrap(err)\n\t\t}\n\n\tcase IdentityFormatTLS:\n\t\tkeyPath := filePath + 
\".key\"\n\t\tcertPath := filePath + \".crt\"\n\t\tcasPath := filePath + \".cas\"\n\n\t\terr = ioutil.WriteFile(certPath, key.TLSCert, fileMode)\n\t\tif err != nil {\n\t\t\treturn trace.Wrap(err)\n\t\t}\n\n\t\terr = ioutil.WriteFile(keyPath, key.Priv, fileMode)\n\t\tif err != nil {\n\t\t\treturn trace.Wrap(err)\n\t\t}\n\t\tvar caCerts []byte\n\t\tfor _, ca := range certAuthorities {\n\t\t\tfor _, keyPair := range ca.GetTLSKeyPairs() {\n\t\t\t\tcaCerts = append(caCerts, keyPair.Cert...)\n\t\t\t}\n\t\t}\n\t\terr = ioutil.WriteFile(casPath, caCerts, fileMode)\n\t\tif err != nil {\n\t\t\treturn trace.Wrap(err)\n\t\t}\n\tdefault:\n\t\treturn trace.BadParameter(\"unsupported identity format: %q, use one of %q, %q, or %q\",\n\t\t\tformat, IdentityFormatFile, IdentityFormatOpenSSH, IdentityFormatTLS)\n\t}\n\treturn nil\n}", "func Ocis(cfg *config.Config) map[string]interface{} {\n\treturn map[string]interface{}{\n\t\t\"metadata_backend\": cfg.Drivers.OCIS.MetadataBackend,\n\t\t\"propagator\": cfg.Drivers.OCIS.Propagator,\n\t\t\"async_propagator_options\": map[string]interface{}{\n\t\t\t\"propagation_delay\": cfg.Drivers.OCIS.AsyncPropagatorOptions.PropagationDelay,\n\t\t},\n\t\t\"root\": cfg.Drivers.OCIS.Root,\n\t\t\"user_layout\": cfg.Drivers.OCIS.UserLayout,\n\t\t\"share_folder\": cfg.Drivers.OCIS.ShareFolder,\n\t\t\"personalspacealias_template\": cfg.Drivers.OCIS.PersonalSpaceAliasTemplate,\n\t\t\"generalspacealias_template\": cfg.Drivers.OCIS.GeneralSpaceAliasTemplate,\n\t\t\"treetime_accounting\": true,\n\t\t\"treesize_accounting\": true,\n\t\t\"permissionssvc\": cfg.Drivers.OCIS.PermissionsEndpoint,\n\t\t\"permissionssvc_tls_mode\": cfg.Commons.GRPCClientTLS.Mode,\n\t\t\"max_acquire_lock_cycles\": cfg.Drivers.OCIS.MaxAcquireLockCycles,\n\t\t\"lock_cycle_duration_factor\": cfg.Drivers.OCIS.LockCycleDurationFactor,\n\t\t\"max_concurrency\": cfg.Drivers.OCIS.MaxConcurrency,\n\t\t\"asyncfileuploads\": cfg.Drivers.OCIS.AsyncUploads,\n\t\t\"max_quota\": 
cfg.Drivers.OCIS.MaxQuota,\n\t\t\"statcache\": map[string]interface{}{\n\t\t\t\"cache_store\": cfg.StatCache.Store,\n\t\t\t\"cache_nodes\": cfg.StatCache.Nodes,\n\t\t\t\"cache_database\": cfg.StatCache.Database,\n\t\t\t\"cache_ttl\": cfg.StatCache.TTL / time.Second,\n\t\t\t\"cache_size\": cfg.StatCache.Size,\n\t\t},\n\t\t\"filemetadatacache\": map[string]interface{}{\n\t\t\t\"cache_store\": cfg.FilemetadataCache.Store,\n\t\t\t\"cache_nodes\": cfg.FilemetadataCache.Nodes,\n\t\t\t\"cache_database\": cfg.FilemetadataCache.Database,\n\t\t\t\"cache_ttl\": cfg.FilemetadataCache.TTL / time.Second,\n\t\t\t\"cache_size\": cfg.FilemetadataCache.Size,\n\t\t},\n\t\t\"idcache\": map[string]interface{}{\n\t\t\t\"cache_store\": cfg.IDCache.Store,\n\t\t\t\"cache_nodes\": cfg.IDCache.Nodes,\n\t\t\t\"cache_database\": cfg.IDCache.Database,\n\t\t\t\"cache_ttl\": cfg.IDCache.TTL / time.Second,\n\t\t\t\"cache_size\": cfg.IDCache.Size,\n\t\t},\n\t\t\"events\": map[string]interface{}{\n\t\t\t\"natsaddress\": cfg.Events.Addr,\n\t\t\t\"natsclusterid\": cfg.Events.ClusterID,\n\t\t\t\"tlsinsecure\": cfg.Events.TLSInsecure,\n\t\t\t\"tlsrootcacertificate\": cfg.Events.TLSRootCaCertPath,\n\t\t\t\"numconsumers\": cfg.Events.NumConsumers,\n\t\t},\n\t\t\"tokens\": map[string]interface{}{\n\t\t\t\"transfer_shared_secret\": cfg.Commons.TransferSecret,\n\t\t\t\"transfer_expires\": cfg.TransferExpires,\n\t\t\t\"download_endpoint\": cfg.DataServerURL,\n\t\t\t\"datagateway_endpoint\": cfg.DataGatewayURL,\n\t\t},\n\t}\n}", "func (m *OnAuthenticationMethodLoadStartExternalUsersSelfServiceSignUp) SetIdentityProviders(value []IdentityProviderBaseable)() {\n err := m.GetBackingStore().Set(\"identityProviders\", value)\n if err != nil {\n panic(err)\n }\n}", "func (c *Cluster) Store() error {\n\treturn c.PersistStore.Store(*c)\n}", "func IpfsClientBlockstore(ipfsMaddr string, onlineMode bool) func(helpers.MetricsCtx, fx.Lifecycle, dtypes.ClientImportMgr) (dtypes.ClientBlockstore, error) {\n\treturn func(mctx 
helpers.MetricsCtx, lc fx.Lifecycle, localStore dtypes.ClientImportMgr) (dtypes.ClientBlockstore, error) {\n\t\tvar err error\n\t\tvar ipfsbs blockstore.BasicBlockstore\n\t\tif ipfsMaddr != \"\" {\n\t\t\tvar ma multiaddr.Multiaddr\n\t\t\tma, err = multiaddr.NewMultiaddr(ipfsMaddr)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, xerrors.Errorf(\"parsing ipfs multiaddr: %w\", err)\n\t\t\t}\n\t\t\tipfsbs, err = blockstore.NewRemoteIPFSBlockstore(helpers.LifecycleCtx(mctx, lc), ma, onlineMode)\n\t\t} else {\n\t\t\tipfsbs, err = blockstore.NewLocalIPFSBlockstore(helpers.LifecycleCtx(mctx, lc), onlineMode)\n\t\t}\n\t\tif err != nil {\n\t\t\treturn nil, xerrors.Errorf(\"constructing ipfs blockstore: %w\", err)\n\t\t}\n\t\treturn blockstore.WrapIDStore(ipfsbs), nil\n\t}\n}", "func (o *SparseOAUTHKey) Identity() elemental.Identity {\n\n\treturn OAUTHKeyIdentity\n}", "func (s *Store) Set(entity workloadmeta.Entity) {\n\ts.mu.Lock()\n\tdefer s.mu.Unlock()\n\n\tentityID := entity.GetID()\n\n\tif _, ok := s.store[entityID.Kind]; !ok {\n\t\ts.store[entityID.Kind] = make(map[string]workloadmeta.Entity)\n\t}\n\n\ts.store[entityID.Kind][entityID.ID] = entity\n}", "func (o *SparseEnforcerReport) Identity() elemental.Identity {\n\n\treturn EnforcerReportIdentity\n}", "func (m *HeavySyncMock) StoreIndicesMinimockCounter() uint64 {\n\treturn atomic.LoadUint64(&m.StoreIndicesCounter)\n}", "func (m *User) GetIdentities()([]ObjectIdentityable) {\n return m.identities\n}", "func (mc *Chain) SaveClients(ctx context.Context, clients []*client.Client) error {\n\tvar err error\n\tclientKeys := make([]datastore.Key, len(clients))\n\tfor idx, c := range clients {\n\t\tclientKeys[idx] = c.GetKey()\n\t}\n\tclientEntityMetadata := datastore.GetEntityMetadata(\"client\")\n\tcEntities := datastore.AllocateEntities(len(clients), clientEntityMetadata)\n\tctx = memorystore.WithEntityConnection(common.GetRootContext(), clientEntityMetadata)\n\tdefer memorystore.Close(ctx)\n\terr = 
clientEntityMetadata.GetStore().MultiRead(ctx, clientEntityMetadata, clientKeys, cEntities)\n\tif err != nil {\n\t\treturn err\n\t}\n\tctx = datastore.WithAsyncChannel(ctx, client.ClientEntityChannel)\n\tfor idx, c := range clients {\n\t\tif !datastore.IsEmpty(cEntities[idx].GetKey()) {\n\t\t\tcontinue\n\t\t}\n\t\t_, cerr := client.PutClient(ctx, c)\n\t\tif cerr != nil {\n\t\t\terr = cerr\n\t\t}\n\t}\n\treturn err\n}", "func (j *DSGit) HasIdentities() bool {\n\treturn true\n}", "func (o KubernetesClusterIdentityPtrOutput) IdentityIds() pulumi.StringArrayOutput {\n\treturn o.ApplyT(func(v *KubernetesClusterIdentity) []string {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn v.IdentityIds\n\t}).(pulumi.StringArrayOutput)\n}", "func (c *Cluster) StartStore(storeID uint64) {\n\tc.Lock()\n\tdefer c.Unlock()\n\n\tif store := c.stores[storeID]; store != nil {\n\t\tstore.meta.State = metapb.StoreState_Up\n\t}\n}", "func (o AnalyzerIdentityPtrOutput) IdentityIds() pulumi.StringArrayOutput {\n\treturn o.ApplyT(func(v *AnalyzerIdentity) []string {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn v.IdentityIds\n\t}).(pulumi.StringArrayOutput)\n}", "func (o *SparseDependencyMap) Identity() elemental.Identity {\n\n\treturn DependencyMapIdentity\n}", "func ManagedClusterIdentityGenerator() gopter.Gen {\n\tif managedClusterIdentityGenerator != nil {\n\t\treturn managedClusterIdentityGenerator\n\t}\n\n\tgenerators := make(map[string]gopter.Gen)\n\tAddIndependentPropertyGeneratorsForManagedClusterIdentity(generators)\n\tmanagedClusterIdentityGenerator = gen.Struct(reflect.TypeOf(ManagedClusterIdentity{}), generators)\n\n\t// The above call to gen.Struct() captures the map, so create a new one\n\tgenerators = make(map[string]gopter.Gen)\n\tAddIndependentPropertyGeneratorsForManagedClusterIdentity(generators)\n\tAddRelatedPropertyGeneratorsForManagedClusterIdentity(generators)\n\tmanagedClusterIdentityGenerator = gen.Struct(reflect.TypeOf(ManagedClusterIdentity{}), 
generators)\n\n\treturn managedClusterIdentityGenerator\n}", "func WithIDs(ids IDs) ContextOption {\n\treturn func(c *ContextConfig) {\n\t\tc.IDs = ids\n\t}\n}", "func (this *cbCluster) ConfigurationStoreId() string {\n\treturn this.configStore.Id()\n}", "func NewStore(\n\tcfg Config,\n\tstoreCfg chunk.StoreConfig,\n\tschemaCfg chunk.SchemaConfig,\n\tlimits StoreLimits,\n\treg prometheus.Registerer,\n\tcacheGenNumLoader chunk.CacheGenNumLoader,\n\tlogger log.Logger,\n) (chunk.Store, error) {\n\tchunkMetrics := newChunkClientMetrics(reg)\n\n\tindexReadCache, err := cache.New(cfg.IndexQueriesCacheConfig, reg, logger)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\twriteDedupeCache, err := cache.New(storeCfg.WriteDedupeCacheConfig, reg, logger)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tchunkCacheCfg := storeCfg.ChunkCacheConfig\n\tchunkCacheCfg.Prefix = \"chunks\"\n\tchunksCache, err := cache.New(chunkCacheCfg, reg, logger)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Cache is shared by multiple stores, which means they will try and Stop\n\t// it more than once. Wrap in a StopOnce to prevent this.\n\tindexReadCache = cache.StopOnce(indexReadCache)\n\tchunksCache = cache.StopOnce(chunksCache)\n\twriteDedupeCache = cache.StopOnce(writeDedupeCache)\n\n\t// Lets wrap all caches except chunksCache with CacheGenMiddleware to facilitate cache invalidation using cache generation numbers.\n\t// chunksCache is not wrapped because chunks content can't be anyways modified without changing its ID so there is no use of\n\t// invalidating chunks cache. 
Also chunks can be fetched only by their ID found in index and we are anyways removing the index and invalidating index cache here.\n\tindexReadCache = cache.NewCacheGenNumMiddleware(indexReadCache)\n\twriteDedupeCache = cache.NewCacheGenNumMiddleware(writeDedupeCache)\n\n\terr = schemaCfg.Load()\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"error loading schema config\")\n\t}\n\tstores := chunk.NewCompositeStore(cacheGenNumLoader)\n\n\tfor _, s := range schemaCfg.Configs {\n\t\tindexClientReg := prometheus.WrapRegistererWith(\n\t\t\tprometheus.Labels{\"component\": \"index-store-\" + s.From.String()}, reg)\n\n\t\tindex, err := NewIndexClient(s.IndexType, cfg, schemaCfg, indexClientReg)\n\t\tif err != nil {\n\t\t\treturn nil, errors.Wrap(err, \"error creating index client\")\n\t\t}\n\t\tindex = newCachingIndexClient(index, indexReadCache, cfg.IndexCacheValidity, limits, logger)\n\n\t\tobjectStoreType := s.ObjectType\n\t\tif objectStoreType == \"\" {\n\t\t\tobjectStoreType = s.IndexType\n\t\t}\n\n\t\tchunkClientReg := prometheus.WrapRegistererWith(\n\t\t\tprometheus.Labels{\"component\": \"chunk-store-\" + s.From.String()}, reg)\n\n\t\tchunks, err := NewChunkClient(objectStoreType, cfg, schemaCfg, chunkClientReg)\n\t\tif err != nil {\n\t\t\treturn nil, errors.Wrap(err, \"error creating object client\")\n\t\t}\n\n\t\tchunks = newMetricsChunkClient(chunks, chunkMetrics)\n\n\t\terr = stores.AddPeriod(storeCfg, s, index, chunks, limits, chunksCache, writeDedupeCache)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\treturn stores, nil\n}", "func (us *ClusterStore) GetAll() ([]model.Cluster, error) {\n\tvar cs []model.Cluster\n\tif err := us.db.Preload(clause.Associations).Find(&cs).Error; err != nil {\n\t\tif errors.Is(err, gorm.ErrRecordNotFound) {\n\t\t\treturn nil, nil\n\t\t}\n\t\treturn nil, err\n\t}\n\treturn cs, nil\n}", "func (o ServiceIdentityPtrOutput) IdentityIds() pulumi.StringArrayOutput {\n\treturn o.ApplyT(func(v *ServiceIdentity) 
[]string {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn v.IdentityIds\n\t}).(pulumi.StringArrayOutput)\n}", "func (v *version) AWSCognitoIdentitySources() AWSCognitoIdentitySourceInformer {\n\treturn &aWSCognitoIdentitySourceInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions}\n}", "func (v *version) AWSCognitoIdentitySources() AWSCognitoIdentitySourceInformer {\n\treturn &aWSCognitoIdentitySourceInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions}\n}", "func (o SyncAuthorizationOutput) Identities() pulumi.StringArrayOutput {\n\treturn o.ApplyT(func(v *SyncAuthorization) pulumi.StringArrayOutput { return v.Identities }).(pulumi.StringArrayOutput)\n}", "func (m *MacOSEnterpriseWiFiConfiguration) SetIdentityCertificateForClientAuthentication(value MacOSCertificateProfileBaseable)() {\n err := m.GetBackingStore().Set(\"identityCertificateForClientAuthentication\", value)\n if err != nil {\n panic(err)\n }\n}", "func NewAtomixStore(client atomix.Client) (Store, error) {\n\tconfigurations, err := client.GetMap(context.Background(), \"onos-config-configurations\")\n\tif err != nil {\n\t\treturn nil, errors.FromAtomix(err)\n\t}\n\treturn &configurationStore{\n\t\tconfigurations: configurations,\n\t}, nil\n}", "func (o *SecurityProblem) HasCveIds() bool {\n\tif o != nil && o.CveIds != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func (s *Store) KVSSetCAS(idx uint64, entry *structs.DirEntry) (bool, error) {\n\ttx := s.db.Txn(true)\n\tdefer tx.Abort()\n\n\tset, err := s.kvsSetCASTxn(tx, idx, entry)\n\tif !set || err != nil {\n\t\treturn false, err\n\t}\n\n\ttx.Commit()\n\treturn true, nil\n}", "func (c *cloud) HasClusterID() bool {\n\treturn true\n}", "func TestIndex_SeriesIDSet(t *testing.T) {\n\tengine := MustOpenEngine()\n\tdefer engine.Close()\n\n\t// Add some series.\n\tengine.MustAddSeries(\"cpu\", map[string]string{\"host\": \"a\", \"region\": 
\"west\"})\n\tengine.MustAddSeries(\"cpu\", map[string]string{\"host\": \"b\", \"region\": \"west\"})\n\tengine.MustAddSeries(\"cpu\", map[string]string{\"host\": \"b\"})\n\tengine.MustAddSeries(\"gpu\", nil)\n\tengine.MustAddSeries(\"gpu\", map[string]string{\"host\": \"b\"})\n\tengine.MustAddSeries(\"mem\", map[string]string{\"host\": \"z\"})\n\n\t// Collect series IDs.\n\tseriesIDMap := map[string]tsdb.SeriesID{}\n\tvar e tsdb.SeriesIDElem\n\tvar err error\n\n\titr := engine.sfile.SeriesIDIterator()\n\tfor e, err = itr.Next(); ; e, err = itr.Next() {\n\t\tif err != nil {\n\t\t\tt.Fatal(err)\n\t\t} else if e.SeriesID.IsZero() {\n\t\t\tbreak\n\t\t}\n\n\t\tname, tags := tsdb.ParseSeriesKey(engine.sfile.SeriesKey(e.SeriesID))\n\t\tkey := fmt.Sprintf(\"%s%s\", name, tags.HashKey())\n\t\tseriesIDMap[key] = e.SeriesID\n\t}\n\n\tfor _, id := range seriesIDMap {\n\t\tif !engine.SeriesIDSet().Contains(id) {\n\t\t\tt.Fatalf(\"bitmap does not contain ID: %d\", id)\n\t\t}\n\t}\n\n\t// Drop all the series for the gpu measurement and they should no longer\n\t// be in the series ID set.\n\tif err := engine.DeleteMeasurement([]byte(\"gpu\")); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tif engine.SeriesIDSet().Contains(seriesIDMap[\"gpu\"]) {\n\t\tt.Fatalf(\"bitmap does not contain ID: %d for key %s, but should\", seriesIDMap[\"gpu\"], \"gpu\")\n\t} else if engine.SeriesIDSet().Contains(seriesIDMap[\"gpu,host=b\"]) {\n\t\tt.Fatalf(\"bitmap does not contain ID: %d for key %s, but should\", seriesIDMap[\"gpu,host=b\"], \"gpu,host=b\")\n\t}\n\tdelete(seriesIDMap, \"gpu\")\n\tdelete(seriesIDMap, \"gpu,host=b\")\n\n\t// Drop the specific mem series\n\tditr := &seriesIterator{keys: [][]byte{[]byte(\"mem,host=z\")}}\n\tif err := engine.DeleteSeriesRange(ditr, math.MinInt64, math.MaxInt64); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tif engine.SeriesIDSet().Contains(seriesIDMap[\"mem,host=z\"]) {\n\t\tt.Fatalf(\"bitmap does not contain ID: %d for key %s, but should\", 
seriesIDMap[\"mem,host=z\"], \"mem,host=z\")\n\t}\n\tdelete(seriesIDMap, \"mem,host=z\")\n\n\t// The rest of the keys should still be in the set.\n\tfor key, id := range seriesIDMap {\n\t\tif !engine.SeriesIDSet().Contains(id) {\n\t\t\tt.Fatalf(\"bitmap does not contain ID: %d for key %s, but should\", id, key)\n\t\t}\n\t}\n\n\t// Reopen the engine, and the series should be re-added to the bitmap.\n\tif err := engine.Reopen(); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t// Check bitset is expected.\n\texpected := tsdb.NewSeriesIDSet()\n\tfor _, id := range seriesIDMap {\n\t\texpected.Add(id)\n\t}\n\n\tif !engine.SeriesIDSet().Equals(expected) {\n\t\tt.Fatalf(\"got bitset %s, expected %s\", engine.SeriesIDSet().String(), expected.String())\n\t}\n}" ]
[ "0.5328413", "0.48285127", "0.46915764", "0.460706", "0.45859408", "0.45038688", "0.4317314", "0.43106538", "0.4235155", "0.42342368", "0.42137128", "0.41937977", "0.41885242", "0.41864616", "0.41690555", "0.4165227", "0.41583925", "0.41418397", "0.41260913", "0.41253287", "0.41217098", "0.41095707", "0.41057232", "0.40949655", "0.40918368", "0.40892765", "0.40840143", "0.40777856", "0.40609792", "0.40491024", "0.40390414", "0.40334505", "0.40270102", "0.4021322", "0.40086365", "0.4008617", "0.40041596", "0.4004142", "0.3996454", "0.39925405", "0.3988772", "0.39771202", "0.39658627", "0.39635772", "0.39557546", "0.39268655", "0.39151704", "0.3909075", "0.390739", "0.390492", "0.39017266", "0.38969705", "0.3890941", "0.38896367", "0.3881301", "0.3871456", "0.38688153", "0.38638106", "0.38625348", "0.38542196", "0.38471228", "0.3845318", "0.38369405", "0.38313797", "0.38254425", "0.38252723", "0.38212258", "0.38183287", "0.38118747", "0.3809889", "0.38080397", "0.38061434", "0.38042086", "0.38032156", "0.37942684", "0.37903547", "0.37838894", "0.3778488", "0.3778108", "0.37744382", "0.37701714", "0.3768992", "0.3767646", "0.37670982", "0.3765687", "0.37606084", "0.37596768", "0.3755687", "0.37511492", "0.3745524", "0.37369764", "0.37364498", "0.37364498", "0.37346548", "0.37331414", "0.3730449", "0.37300745", "0.37297153", "0.37297025", "0.37290096" ]
0.7659749
0
MaxIndexCidSize specifies the maximum allowed size for indexed CIDs in bytes. Indexing a CID with larger than the allowed size results in ErrCidTooLarge error.
MaxIndexCidSize указывает максимальный допустимый размер индексированных CIDs в байтах. Индексирование CID, размер которого превышает допустимый, приводит к ошибке ErrCidTooLarge.
func MaxIndexCidSize(s uint64) Option { return func(o *Options) { o.MaxIndexCidSize = s } }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (c *Cache) MaxSize() (maxSize int64) {\n\tfor _, shard := range c.shards {\n\t\tmaxSize += shard.maxSize\n\t}\n\treturn int64(bytesToMB(int(maxSize)))\n}", "func (p *MessagePartition) calculateMaxMessageIdFromIndex(fileId uint64) (uint64, error) {\n\tstat, err := os.Stat(p.indexFilenameByMessageId(fileId))\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\tentriesInIndex := uint64(stat.Size() / int64(INDEX_ENTRY_SIZE))\n\n\treturn (entriesInIndex - 1 + fileId), nil\n}", "func MaxValSize(max int) Option {\n\treturn func(lc cacheWithOpts) error {\n\t\treturn lc.setMaxValSize(max)\n\t}\n}", "func (o ClusterNodeGroupOptionsOutput) MaxSize() pulumi.IntPtrOutput {\n\treturn o.ApplyT(func(v ClusterNodeGroupOptions) *int { return v.MaxSize }).(pulumi.IntPtrOutput)\n}", "func MaxValSize(max int) Option {\n\treturn func(lc *memoryCache) error {\n\t\tlc.maxValueSize = max\n\t\treturn nil\n\t}\n}", "func (r *Redis) MaxSize() int64 {\n\treturn r.maxSize\n}", "func GetMaxIndexKey(shardID uint64, key []byte) []byte {\n\tkey = getKeySlice(key, idKeyLength)\n\treturn getIDKey(maxIndexSuffix, shardID, key)\n}", "func (ch *clientSecureChannel) MaxMessageSize() uint32 {\n\treturn ch.maxMessageSize\n}", "func (cc *ContinueCompress) MaxMessageSize() int {\n\treturn cc.maxMessageSize\n}", "func (cd *ContinueDecompress) MaxMessageSize() int {\n\treturn cd.maxMessageSize\n}", "func MaxValSize(max int) Option {\n\treturn func(lc *loadingCache) error {\n\t\tlc.maxValueSize = max\n\t\treturn nil\n\t}\n}", "func MaxBufferSize(size int) Options {\n\treturn func(c *config) {\n\t\tc.maxBufferSize = size\n\t}\n}", "func (d *DHCPv4) MaxMessageSize() (uint16, error) {\n\treturn GetUint16(OptionMaximumDHCPMessageSize, d.Options)\n}", "func (c PktCnf1) MaxLen() int {\n\treturn int(c & 0xff)\n}", "func (o ClusterNodeGroupOptionsPtrOutput) MaxSize() pulumi.IntPtrOutput {\n\treturn o.ApplyT(func(v *ClusterNodeGroupOptions) *int {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn 
v.MaxSize\n\t}).(pulumi.IntPtrOutput)\n}", "func GetMaxIndexes() int {\r\n\treturn converter.StrToInt(SysString(MaxIndexes))\r\n}", "func (c *Cache) SizeMaxBytes() int {\n\tn := 0\n\tfor _, shard := range c.shards {\n\t\tn += shard.SizeMaxBytes()\n\t}\n\treturn n\n}", "func RaggedCountSparseOutputMaxlength(value int64) RaggedCountSparseOutputAttr {\n\treturn func(m optionalAttr) {\n\t\tm[\"maxlength\"] = value\n\t}\n}", "func (group *NodeGroup) MaxSize() int {\n\tdefer group.lk.Unlock()\n\tgroup.lk.Lock()\n\treturn group.maxSize\n}", "func GetMaxBlockSize() int64 {\r\n\treturn converter.StrToInt64(SysString(MaxBlockSize))\r\n}", "func (ch *clientSecureChannel) MaxChunkCount() uint32 {\n\treturn ch.maxChunkCount\n}", "func SparseCountSparseOutputMaxlength(value int64) SparseCountSparseOutputAttr {\n\treturn func(m optionalAttr) {\n\t\tm[\"maxlength\"] = value\n\t}\n}", "func MaxLen(n int) PktCnf1 {\n\treturn PktCnf1(n & 0xff)\n}", "func (builder *Builder) MaxSizeInKb(maxSizeInKb uint64) *Builder {\n\tbuilder.maxSizeInKb = maxSizeInKb\n\treturn builder\n}", "func (o *VolumeInfinitevolAttributesType) SetMaxNamespaceConstituentSize(newValue SizeType) *VolumeInfinitevolAttributesType {\n\to.MaxNamespaceConstituentSizePtr = &newValue\n\treturn o\n}", "func getMaxSize() int {\n\tMaxInt := 1 << 31\n\tcount := 0\n\tfor MaxInt > 0 {\n\t\tMaxInt /= 10\n\t\tcount++\n\t}\n\treturn count\n}", "func MaxAllowedHeaderSize(max uint64) Option {\n\treturn func(o *Options) {\n\t\to.MaxAllowedHeaderSize = max\n\t}\n}", "func (w *Whisper) MaxMessageSize() uint32 {\n\tval, _ := w.settings.Load(maxMsgSizeIdx)\n\treturn val.(uint32)\n}", "func (c *Config) MaxSize(stream string) (uint, error) {\n\tkey, err := keyName(stream, \"maxsize\")\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\n\treturn c.v.GetSizeInBytes(key), nil\n}", "func (m Logon) SetMaxMessageSize(v int) {\n\tm.Set(field.NewMaxMessageSize(v))\n}", "func (context *context) SetMaxSegmentLength(n uint) 
{\n\tcontext.params.SetMaxSegmentLength(int(n))\n}", "func (c Conn) MaxLength() int {\n\treturn MaxLength\n}", "func (o *VolumeInfinitevolAttributesType) SetMaxDataConstituentSize(newValue SizeType) *VolumeInfinitevolAttributesType {\n\to.MaxDataConstituentSizePtr = &newValue\n\treturn o\n}", "func NamespaceIDSize(size int) Option {\n\tif size < 0 || size > namespace.IDMaxSize {\n\t\tpanic(\"Got invalid namespace.IDSize. Expected 0 <= size <= namespace.IDMaxSize.\")\n\t}\n\treturn func(opts *Options) {\n\t\topts.NamespaceIDSize = namespace.IDSize(size)\n\t}\n}", "func NewMaxMessageSize(val int) MaxMessageSizeField {\n\treturn MaxMessageSizeField{quickfix.FIXInt(val)}\n}", "func (c ClientProperties) MaxMsgLength() uint32 {\n\treturn binary.LittleEndian.Uint32(c[:4])\n}", "func (asg *Asg) MaxSize() int {\n\treturn asg.maxSize\n}", "func MaxSize32(length int) int {\n\tnumControlBytes := (length + 3) / 4\n\tmaxNumDataBytes := 4 * length\n\treturn numControlBytes + maxNumDataBytes\n}", "func MaxKeys(max int) Option {\n\treturn func(lc *memoryCache) error {\n\t\tlc.maxKeys = max\n\t\treturn nil\n\t}\n}", "func (d *Decoder) SetMaxArraySize(size uint) {\n\td.maxArraySize = int(size)\n}", "func (r *Search) MaxConcurrentShardRequests(maxconcurrentshardrequests string) *Search {\n\tr.values.Set(\"max_concurrent_shard_requests\", maxconcurrentshardrequests)\n\n\treturn r\n}", "func (o *VolumeInfinitevolAttributesType) MaxNamespaceConstituentSize() SizeType {\n\tvar r SizeType\n\tif o.MaxNamespaceConstituentSizePtr == nil {\n\t\treturn r\n\t}\n\tr = *o.MaxNamespaceConstituentSizePtr\n\treturn r\n}", "func (indexer Indexer) GetIndexSize() int {\n\tindex := *indexer.GetIndex()\n\tsize := 8 * len(index)\n\tfor _, postings := range index {\n\t\tsize += 16 * len(postings)\n\t}\n\tkb := int(math.Pow(2, 10))\n\tsize = int(size / kb)\n\treturn size\n}", "func DenseCountSparseOutputMaxlength(value int64) DenseCountSparseOutputAttr {\n\treturn func(m optionalAttr) {\n\t\tm[\"maxlength\"] 
= value\n\t}\n}", "func (query *ContractCallQuery) SetMaxResultSize(size uint64) *ContractCallQuery {\n\tquery.pb.MaxResultSize = int64(size)\n\treturn query\n}", "func MaxMsgSize(s int) server.Option {\n\treturn server.SetOption(maxMsgSizeKey{}, s)\n}", "func (ng *NodeGroup) MaxSize() int {\n\treturn int(ng.MaxNodes)\n}", "func MaxKeys(max int) Option {\n\treturn func(lc cacheWithOpts) error {\n\t\treturn lc.setMaxKeys(max)\n\t}\n}", "func (s *FilesystemStore) MaxLength(l int) {\n\tfor _, c := range s.Codecs {\n\t\tif codec, ok := c.(*securecookie.SecureCookie); ok {\n\t\t\tcodec.MaxLength(l)\n\t\t}\n\t}\n}", "func MaxAllowedSectionSize(max uint64) Option {\n\treturn func(o *Options) {\n\t\to.MaxAllowedSectionSize = max\n\t}\n}", "func (f *AnalyzerFingerprint) MaxOutputSize(maxOutputSize int) *AnalyzerFingerprint {\n\tf.maxOutputSize = &maxOutputSize\n\treturn f\n}", "func (e *BaseExecutor) SetMaxChunkSize(size int) {\n\te.maxChunkSize = size\n}", "func AthensMaxConcurrency() int {\n\tdefaultMaxConcurrency := runtime.NumCPU()\n\tmaxConcurrencyEnv, err := envy.MustGet(\"ATHENS_MAX_CONCURRENCY\")\n\tif err != nil {\n\t\treturn defaultMaxConcurrency\n\t}\n\n\tmc, err := strconv.Atoi(maxConcurrencyEnv)\n\tif err != nil {\n\t\treturn defaultMaxConcurrency\n\t}\n\n\treturn mc\n}", "func MaxKeys(max int) Option {\n\treturn func(lc *cacheImpl) error {\n\t\tlc.maxKeys = max\n\t\treturn nil\n\t}\n}", "func MaxMessageSize(size int64) Option {\n\tif size < 0 {\n\t\tpanic(\"size must be non-negative\")\n\t}\n\treturn func(ws *websocket) {\n\t\tws.options.maxMessageSize = size\n\t}\n}", "func (o *VolumeInfinitevolAttributesType) MaxDataConstituentSize() SizeType {\n\tvar r SizeType\n\tif o.MaxDataConstituentSizePtr == nil {\n\t\treturn r\n\t}\n\tr = *o.MaxDataConstituentSizePtr\n\treturn r\n}", "func FixMaxEntryIndex(rdb *Store, profile *pb.Profile) error {\n\tuuid1, err := uuid.FromString(profile.Uuid)\n\tif err != nil {\n\t\treturn err\n\t}\n\t// MAX Delimiter Key\n\tkey := 
MaxUUIDFlakeKey(TableEntryIndex, uuid1)\n\treturn rdb.Put(key.Bytes(), []byte(\"0000\"))\n}", "func getMaxID() int {\n\n\tif len(cdb.classMap) != 0 {\n\t\tkeys := make([]int, 0, len(cdb.classMap))\n\t\tfor k := range cdb.classMap {\n\t\t\tkeys = append(keys, k)\n\t\t}\n\t\tsort.Ints(keys)\n\t\treturn keys[len(keys)-1]\n\t}\n\n\treturn -1\n\n}", "func AllocateIndexID(tblInfo *model.TableInfo) int64 {\n\ttblInfo.MaxIndexID++\n\treturn tblInfo.MaxIndexID\n}", "func (st *Settings) MaxHeaderListSize() uint32 {\n\treturn st.headerSize\n}", "func (st *Settings) MaxHeaderListSize() uint32 {\n\treturn st.headerSize\n}", "func SectorDealsMax(size abi.SectorSize) uint64 {\n\treturn max64(256, uint64(size/DealLimitDenominator))\n}", "func (o StorageClusterSpecCloudStorageCapacitySpecsOutput) MaxCapacityInGiB() pulumi.IntPtrOutput {\n\treturn o.ApplyT(func(v StorageClusterSpecCloudStorageCapacitySpecs) *int { return v.MaxCapacityInGiB }).(pulumi.IntPtrOutput)\n}", "func (e SszNetworkEncoder) GetMaxChunkSize() uint64 {\n\treturn MaxChunkSize\n}", "func MaxHeaderBytes(v int) Option {\n\treturn optionSetter(func(opt *Options) {\n\t\topt.MaxHeaderBytes = v\n\t})\n}", "func MaxCacheEntries(n int) CacheOption { return maxEntriesOption(n) }", "func (opts *FIFOCompactionOptions) GetMaxTableFilesSize() uint64 {\n\treturn uint64(C.rocksdb_fifo_compaction_options_get_max_table_files_size(opts.c))\n}", "func MaxCallRecvMsgSize(v int) Configer {\n\treturn func(c *clientv3.Config) {\n\t\tc.MaxCallRecvMsgSize = v\n\t}\n}", "func MaxSizeBatchOption(size int) BatchOption {\n\treturn func(o *batchOptions) {\n\t\to.maxSize = size\n\t}\n}", "func MaxKeys(max int) Option {\n\treturn func(lc *loadingCache) error {\n\t\tlc.maxKeys = max\n\t\treturn nil\n\t}\n}", "func (m *MessageReplies) SetMaxID(value int) {\n\tm.Flags.Set(2)\n\tm.MaxID = value\n}", "func (iob *IndexOptionsBuilder) Max(max float64) *IndexOptionsBuilder {\n\tiob.document = append(iob.document, bson.E{\"max\", max})\n\treturn iob\n}", 
"func MaxRequestMaxBytes(max int) ConsumerOption {\n\treturn func(o *api.ConsumerConfig) error {\n\t\to.MaxRequestMaxBytes = max\n\t\treturn nil\n\t}\n}", "func CfgSessionIdLength(length int64) ManagerConfigOpt {\n\treturn func(config *ManagerConfig) {\n\t\tconfig.SessionIDLength = length\n\t}\n}", "func IndexLimits(ctx context.Context, data id.ID, count int, size int, littleEndian bool) (*IndexRange, error) {\n\tobj, err := database.Build(ctx, &IndexLimitsResolvable{\n\t\tIndexSize: uint64(size),\n\t\tCount: uint64(count),\n\t\tLittleEndian: littleEndian,\n\t\tData: path.NewBlob(data),\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn obj.(*IndexRange), nil\n}", "func (b *IndexBuilder) ContentSize() uint32 {\n\t// Add the name too so we don't skip building index if we have\n\t// lots of empty files.\n\treturn b.contentEnd + b.nameEnd\n}", "func MaxRecvMsgSize(s int) client.Option {\n\treturn func(o *client.Options) {\n\t\tif o.Context == nil {\n\t\t\to.Context = context.Background()\n\t\t}\n\t\to.Context = context.WithValue(o.Context, maxRecvMsgSizeKey{}, s)\n\t}\n}", "func (m Logon) GetMaxMessageSize() (v int, err quickfix.MessageRejectError) {\n\tvar f field.MaxMessageSizeField\n\tif err = m.Get(&f); err == nil {\n\t\tv = f.Value()\n\t}\n\treturn\n}", "func (o OceanOutput) MaxSize() pulumi.IntOutput {\n\treturn o.ApplyT(func(v *Ocean) pulumi.IntOutput { return v.MaxSize }).(pulumi.IntOutput)\n}", "func (u *UserStories) SetMaxReadID(value int) {\n\tu.Flags.Set(0)\n\tu.MaxReadID = value\n}", "func MaxMsgSize(n int) Option {\n\treturn func(o *Options) {\n\t\to.MaxMsgSize = n\n\t}\n}", "func idIndex() mgo.Index {\n\treturn mgo.Index{\n\t\tKey: []string{\"id\"},\n\t\tUnique: true,\n\t\tDropDups: true,\n\t\tBackground: true,\n\t\tSparse: true,\n\t}\n}", "func (_SmartTgStats *SmartTgStatsCaller) MaxRequestID(opts *bind.CallOpts) (*big.Int, error) {\n\tvar (\n\t\tret0 = new(*big.Int)\n\t)\n\tout := ret0\n\terr := _SmartTgStats.contract.Call(opts, out, 
\"maxRequestID\")\n\treturn *ret0, err\n}", "func (mim *metricIDMapping) GetMaxSeriesIDsLimit() uint32 {\n\treturn mim.maxSeriesIDsLimit.Load()\n}", "func (o ClusterScalingConfigurationOutput) MaxCapacity() pulumi.IntPtrOutput {\n\treturn o.ApplyT(func(v ClusterScalingConfiguration) *int { return v.MaxCapacity }).(pulumi.IntPtrOutput)\n}", "func MaxConcurrency(n int) ParallelOption {\n\treturn func(p *ParallelConfig) *ParallelConfig {\n\t\tp.maxConcurrency = n\n\t\treturn p\n\t}\n}", "func MaxBlockLen(ct CompressionType) uint64 {\n\tif ct == Snappy {\n\t\t// https://github.com/golang/snappy/blob/2a8bb927dd31d8daada140a5d09578521ce5c36a/encode.go#L76\n\t\treturn 6 * (0xffffffff - 32) / 7\n\t}\n\treturn math.MaxUint64\n}", "func (gq *Dispatch) MaxLen() int {\n return gq.maxlength\n}", "func (e *Lint) MaxConcurrency() int {\n\tvar limit syscall.Rlimit\n\terr := syscall.Getrlimit(syscall.RLIMIT_NOFILE, &limit)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\toldLimit := limit.Cur\n\tlimit.Cur = limit.Max\n\terr = syscall.Setrlimit(syscall.RLIMIT_NOFILE, &limit)\n\tif err != nil {\n\t\tlog.Debug(err)\n\t\treturn convertLimit(oldLimit)\n\t}\n\n\treturn convertLimit(limit.Cur)\n}", "func (m *MailTips) GetMaxMessageSize()(*int32) {\n val, err := m.GetBackingStore().Get(\"maxMessageSize\")\n if err != nil {\n panic(err)\n }\n if val != nil {\n return val.(*int32)\n }\n return nil\n}", "func (c *gcsCore) getContainerIDFromIndex(index uint32) string {\n\tc.containerIndexMutex.Lock()\n\tdefer c.containerIndexMutex.Unlock()\n\n\tif int(index) < len(c.containerIndex) {\n\t\treturn c.containerIndex[index]\n\t}\n\n\treturn \"\"\n}", "func NewLabelSizesIndex(size, label uint64) dvid.IndexBytes {\n\tindex := make([]byte, 17)\n\tindex[0] = byte(KeyLabelSizes)\n\tbinary.BigEndian.PutUint64(index[1:9], size)\n\tbinary.BigEndian.PutUint64(index[9:17], label)\n\treturn dvid.IndexBytes(index)\n}", "func (_Contract *ContractSession) MaxOptions() (*big.Int, error) {\n\treturn 
_Contract.Contract.MaxOptions(&_Contract.CallOpts)\n}", "func (m *MailTips) SetMaxMessageSize(value *int32)() {\n err := m.GetBackingStore().Set(\"maxMessageSize\", value)\n if err != nil {\n panic(err)\n }\n}", "func (_SmartTgStats *SmartTgStatsSession) MaxRequestID() (*big.Int, error) {\n\treturn _SmartTgStats.Contract.MaxRequestID(&_SmartTgStats.CallOpts)\n}", "func (this AliasCodec) MaxEncodedLen(srcLen int) int {\n\treturn srcLen + 1024\n}", "func (c *ColumnMap) SetMaxSize(size int) *ColumnMap {\n\tc.MaxSize = size\n\treturn c\n}", "func (c *ColumnMap) SetMaxSize(size int) *ColumnMap {\n\tc.MaxSize = size\n\treturn c\n}", "func (o *KubernetesNodeGroupProfile) SetMaxsize(v int64) {\n\to.Maxsize = &v\n}", "func (s *Set) SetMaxLineSize(i int) {\n\ts.MaxLineSize = i\n}" ]
[ "0.54128367", "0.5239222", "0.5225754", "0.5207175", "0.517067", "0.5167994", "0.51654756", "0.514978", "0.5139812", "0.5123632", "0.51004505", "0.5087268", "0.50867826", "0.507669", "0.5049227", "0.50329137", "0.5025627", "0.5024108", "0.49574798", "0.49260557", "0.49127924", "0.4910644", "0.49033746", "0.48947075", "0.4872246", "0.48620787", "0.48423564", "0.48419514", "0.4841062", "0.4829615", "0.48268786", "0.47892594", "0.4788429", "0.47796482", "0.4775992", "0.47722614", "0.47634223", "0.4760452", "0.47583646", "0.47499043", "0.4729253", "0.4720951", "0.47206277", "0.47096175", "0.47038263", "0.46909288", "0.468887", "0.46855697", "0.4684044", "0.46717122", "0.46707758", "0.46693173", "0.46468368", "0.46363613", "0.46346977", "0.46225426", "0.46199554", "0.46146235", "0.46120372", "0.45987394", "0.45987394", "0.45959568", "0.4592378", "0.4588514", "0.4588392", "0.45825782", "0.45725843", "0.45687947", "0.45644632", "0.4564417", "0.45592505", "0.45562232", "0.45523256", "0.45332283", "0.45264214", "0.4516632", "0.4503763", "0.45023876", "0.44988713", "0.4493675", "0.4488355", "0.44858772", "0.44771847", "0.44752157", "0.44736332", "0.44629294", "0.44612032", "0.44571182", "0.44530383", "0.444591", "0.44380736", "0.4436111", "0.44211733", "0.44202057", "0.4418277", "0.44143802", "0.44136763", "0.44136763", "0.4413555", "0.43959624" ]
0.8566187
0
WithTraversalPrototypeChooser specifies the prototype chooser that should be used when performing traversals in writes from a linksystem.
WithTraversalPrototypeChooser определяет прототип-выборщик, который должен использоваться при выполнении обходов в записях из системы ссылок.
func WithTraversalPrototypeChooser(t traversal.LinkTargetNodePrototypeChooser) Option { return func(o *Options) { o.TraversalPrototypeChooser = t } }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func NewSocketsTraversalExtension() *SocketsTraversalExtension {\n\treturn &SocketsTraversalExtension{\n\t\tSocketsToken: traversalSocketsToken,\n\t}\n}", "func WithSortingByPathAscAndRevisionDesc() GetImplementationOption {\n\treturn func(options *ListImplementationRevisionsOptions) {\n\t\toptions.sortByPathAscAndRevisionDesc = true\n\t}\n}", "func (result *Result) WithGraphTraversal(graphTraversals []*GraphTraversal) *Result {\n\tresult.GraphTraversals = graphTraversals\n\treturn result\n}", "func (shim *QueryDirectClient) Traversal(ctx context.Context, in *GraphQuery, opts ...grpc.CallOption) (Query_TraversalClient, error) {\n md, _ := metadata.FromOutgoingContext(ctx)\n ictx := metadata.NewIncomingContext(ctx, md)\n\n\tw := &directQueryTraversal{ictx, make(chan *QueryResult, 100), in, nil}\n if shim.streamServerInt != nil {\n go func() {\n defer w.close()\n info := grpc.StreamServerInfo{\n FullMethod: \"/gripql.Query/Traversal\",\n IsServerStream: true,\n }\n w.e = shim.streamServerInt(shim.server, w, &info, _Query_Traversal_Handler)\n } ()\n return w, nil\n }\n\tgo func() {\n defer w.close()\n\t\tw.e = shim.server.Traversal(in, w)\n\t}()\n\treturn w, nil\n}", "func (_options *CreateConfigurationOptions) SetConfigurationPrototype(configurationPrototype ConfigurationPrototypeIntf) *CreateConfigurationOptions {\n\t_options.ConfigurationPrototype = configurationPrototype\n\treturn _options\n}", "func WithTransferCallback(callback base.TransferCallback) Option {\n\treturn func(node base.Node) {\n\t\tnode.SetTransferCallback(callback)\n\t}\n}", "func NewTraversal() (g String) {\n\tg.string = \"g\"\n\tg.buffer = bytes.NewBufferString(\"\")\n\treturn\n}", "func WithPrinter(p io.Writer) Option {\n\treturn func(s *initSpec) {\n\t\ts.Printer = p\n\t}\n}", "func AddIndependentPropertyGeneratorsForVirtualNetworkGateway_Spec(gens map[string]gopter.Gen) {\n\tgens[\"ActiveActive\"] = gen.PtrOf(gen.Bool())\n\tgens[\"AzureName\"] = gen.AlphaString()\n\tgens[\"EnableBgp\"] = 
gen.PtrOf(gen.Bool())\n\tgens[\"EnableDnsForwarding\"] = gen.PtrOf(gen.Bool())\n\tgens[\"EnablePrivateIpAddress\"] = gen.PtrOf(gen.Bool())\n\tgens[\"GatewayType\"] = gen.PtrOf(gen.OneConstOf(VirtualNetworkGatewayPropertiesFormat_GatewayType_ExpressRoute, VirtualNetworkGatewayPropertiesFormat_GatewayType_LocalGateway, VirtualNetworkGatewayPropertiesFormat_GatewayType_Vpn))\n\tgens[\"Location\"] = gen.PtrOf(gen.AlphaString())\n\tgens[\"Tags\"] = gen.MapOf(gen.AlphaString(), gen.AlphaString())\n\tgens[\"VpnGatewayGeneration\"] = gen.PtrOf(gen.OneConstOf(VirtualNetworkGatewayPropertiesFormat_VpnGatewayGeneration_Generation1, VirtualNetworkGatewayPropertiesFormat_VpnGatewayGeneration_Generation2, VirtualNetworkGatewayPropertiesFormat_VpnGatewayGeneration_None))\n\tgens[\"VpnType\"] = gen.PtrOf(gen.OneConstOf(VirtualNetworkGatewayPropertiesFormat_VpnType_PolicyBased, VirtualNetworkGatewayPropertiesFormat_VpnType_RouteBased))\n}", "func WithTracesURLPath(urlPath string) Option {\n\treturn wrappedOption{otlpconfig.WithTracesURLPath(urlPath)}\n}", "func WithProtocol(protocol Protocol) OptionPortScanner {\n\treturn protocolOption(protocol)\n}", "func (o *Outbound) Chooser() peer.Chooser {\n\treturn o.chooser\n}", "func (o *Object) SetPrototype(proto *Object) error {\n\treturn o.runtime.try(func() {\n\t\to.self.setProto(proto, true)\n\t})\n}", "func (_options *CreateConfigurationActionOptions) SetConfigActionPrototype(configActionPrototype ConfigurationActionPrototypeIntf) *CreateConfigurationActionOptions {\n\t_options.ConfigActionPrototype = configActionPrototype\n\treturn _options\n}", "func NavigateExtensionApprovalFlow(ctx context.Context, cr *chrome.Chrome, tconn *chrome.TestConn, bt browser.Type, parentEmail, parentPassword string) error {\n\ttesting.ContextLog(ctx, \"Adding extension as a supervised user\")\n\n\t// Reserve ten seconds for cleanup.\n\tcleanupCtx := ctx\n\tctx, cancel := ctxutil.Shorten(ctx, 10*time.Second)\n\tdefer cancel()\n\n\t// Set up 
browser.\n\tbr, closeBrowser, err := browserfixt.SetUp(ctx, cr, bt)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"failed to set up browser\")\n\t}\n\tdefer closeBrowser(cleanupCtx)\n\n\t// Open webstore in browser.\n\tconst extensionID = \"djflhoibgkdhkhhcedjiklpkjnoahfmg\" // Google-developed extension from Chrome Store.\n\tconst extensionURL = \"https://chrome.google.com/webstore/detail/\" + extensionID + \"?hl=en\"\n\tconn, err := br.NewConn(ctx, extensionURL, browser.WithNewWindow())\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"failed to open webstore\")\n\t}\n\tdefer conn.Close()\n\n\t// Load page contents.\n\tui := uiauto.New(tconn).WithTimeout(time.Minute)\n\n\t// Install extension parent permission flow.\n\ttesting.ContextLog(ctx, \"Finding button that adds the extension\")\n\taddButton := nodewith.Name(\"Add to Chrome\").Role(role.Button).First()\n\tif err := ui.WaitUntilExists(addButton)(ctx); err != nil {\n\t\treturn errors.Wrap(err, \"failed to load page\")\n\t}\n\n\ttesting.ContextLog(ctx, \"Clicking button that adds the extension\")\n\tif err := ui.LeftClick(addButton)(ctx); err != nil {\n\t\treturn errors.Wrap(err, \"failed to click add extension\")\n\t}\n\n\ttesting.ContextLog(ctx, \"Clicking ask parent\")\n\taskParentButton := nodewith.Name(\"Ask a parent\").Role(role.Button)\n\t// The \"Ask parent\" button may not immediately be clickable.\n\tif err := ui.LeftClickUntil(askParentButton, ui.Gone(askParentButton))(ctx); err != nil {\n\t\treturn errors.Wrap(err, \"failed to click ask parent\")\n\t}\n\n\ttesting.ContextLog(ctx, \"Selecting parent email\"+strings.ToLower(parentEmail))\n\tparentEmailRadio := nodewith.Name(strings.ToLower(parentEmail)).Role(role.RadioButton)\n\tparentEmailText := nodewith.Name(strings.ToLower(parentEmail))\n\t// If there are two parents, the dialog contains a radio button with both parent emails.\n\tif err := ui.LeftClick(parentEmailRadio)(ctx); err != nil {\n\t\t// If there is no radio button, this indicates 
that there is only one parent. Verify\n\t\t// that the email is present as text, and return an error if it is not present.\n\t\tif err := ui.Exists(parentEmailText)(ctx); err != nil {\n\t\t\treturn errors.Wrapf(err, \"failed to find parent email %q\", parentEmail)\n\t\t}\n\t}\n\n\ttesting.ContextLog(ctx, \"Clicking the parent password text field\")\n\tparentPasswordField := nodewith.Name(\"Enter password\").Role(role.TextField)\n\tif err := ui.LeftClick(parentPasswordField)(ctx); err != nil {\n\t\treturn errors.Wrap(err, \"failed to click parent password text\")\n\t}\n\n\ttesting.ContextLog(ctx, \"Setting up keyboard\")\n\tkb, err := input.Keyboard(ctx)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"failed to get keyboard\")\n\t}\n\tdefer kb.Close()\n\n\ttesting.ContextLog(ctx, \"Typing the parent password\")\n\tif err := kb.Type(ctx, parentPassword); err != nil {\n\t\treturn errors.Wrap(err, \"failed to type parent password\")\n\t}\n\n\ttesting.ContextLog(ctx, \"Verifying Approve and Cancel buttons enabled\")\n\tapproveButton := nodewith.Name(\"Approve\").Role(role.Button)\n\tif err := ui.CheckRestriction(approveButton, restriction.None)(ctx); err != nil {\n\t\treturn errors.Wrap(err, \"failed to verify Approve button enabled\")\n\t}\n\tcancelButton := nodewith.Name(\"Cancel\").Role(role.Button)\n\tif err := ui.CheckRestriction(cancelButton, restriction.None)(ctx); err != nil {\n\t\treturn errors.Wrap(err, \"failed to verify Cancel button enabled\")\n\t}\n\n\treturn nil\n}", "func EchoPresenterWith(w io.Writer) Presenter {\n\treturn func(r interacter.Res) interacter.Res {\n\t\tbuf := []byte(fmt.Sprintln(r))\n\t\tw.Write(buf)\n\t\treturn r\n\t}\n}", "func (p Path) TraverseWithOptions(opts TraverseOptions) (creds.Creds, error) {\n\tlogger.InfoMsgf(\"traversing path %+v with options %+v\", p, opts)\n\n\terr := clearEnvironment()\n\tif err != nil {\n\t\treturn creds.Creds{}, err\n\t}\n\n\tprofileHop, stack := p[0], p[1:]\n\tlogger.InfoMsgf(\"loading origin hop: 
%+v\", profileHop)\n\tprofileCreds, err := opts.Store.Lookup(profileHop.Profile)\n\tif err != nil {\n\t\treturn creds.Creds{}, err\n\t}\n\n\tuai := []creds.UserAgentItem{{\n\t\tName: \"voyager\",\n\t\tVersion: version.Version,\n\t}}\n\tfor _, x := range opts.UserAgentItems {\n\t\tuai = append(uai, x)\n\t}\n\n\tc := creds.Creds{\n\t\tAccessKey: profileCreds.AccessKeyID,\n\t\tSecretKey: profileCreds.SecretAccessKey,\n\t\tUserAgentItems: uai,\n\t}\n\n\tfor _, thisHop := range stack {\n\t\tc, err = thisHop.Traverse(c, opts)\n\t\tif err != nil {\n\t\t\tbreak\n\t\t}\n\t}\n\treturn c, err\n}", "func WithTransHandlerFactory(f remote.ServerTransHandlerFactory) Option {\n\treturn Option{F: func(o *internal_server.Options, di *utils.Slice) {\n\t\to.Once.OnceOrPanic()\n\t\tdi.Push(fmt.Sprintf(\"WithTransHandlerFactory(%T)\", f))\n\n\t\to.RemoteOpt.SvrHandlerFactory = f\n\t}}\n}", "func AddRelatedPropertyGeneratorsForVirtualNetworkGateway_Spec(gens map[string]gopter.Gen) {\n\tgens[\"BgpSettings\"] = gen.PtrOf(BgpSettingsGenerator())\n\tgens[\"CustomRoutes\"] = gen.PtrOf(AddressSpaceGenerator())\n\tgens[\"ExtendedLocation\"] = gen.PtrOf(ExtendedLocationGenerator())\n\tgens[\"GatewayDefaultSite\"] = gen.PtrOf(SubResourceGenerator())\n\tgens[\"IpConfigurations\"] = gen.SliceOf(VirtualNetworkGatewayIPConfigurationGenerator())\n\tgens[\"Sku\"] = gen.PtrOf(VirtualNetworkGatewaySkuGenerator())\n\tgens[\"VpnClientConfiguration\"] = gen.PtrOf(VpnClientConfigurationGenerator())\n}", "func (codec *LibvirtProviderConfigCodec) EncodeToProviderSpec(in runtime.Object) (*machinev1.ProviderSpec, error) {\n\tvar buf bytes.Buffer\n\tif err := codec.encoder.Encode(in, &buf); err != nil {\n\t\treturn nil, fmt.Errorf(\"encoding failed: %v\", err)\n\t}\n\treturn &machinev1.ProviderSpec{\n\t\tValue: &runtime.RawExtension{Raw: buf.Bytes()},\n\t}, nil\n}", "func WithRoundTripper(roundTripper nethttp.RoundTripper) Option {\n\treturn func(p *Protocol) error {\n\t\tif p == nil {\n\t\t\treturn 
fmt.Errorf(\"http round tripper option can not set nil protocol\")\n\t\t}\n\t\tp.roundTripper = roundTripper\n\t\treturn nil\n\t}\n}", "func (_options *CreateSecretOptions) SetSecretPrototype(secretPrototype SecretPrototypeIntf) *CreateSecretOptions {\n\t_options.SecretPrototype = secretPrototype\n\treturn _options\n}", "func (m *PrinterDefaults) SetDuplexMode(value *PrintDuplexMode)() {\n err := m.GetBackingStore().Set(\"duplexMode\", value)\n if err != nil {\n panic(err)\n }\n}", "func WithPager(p string) Option {\n\treturn option{\n\t\ttable: func(enc *TableEncoder) error {\n\t\t\tenc.pagerCmd = p\n\t\t\treturn nil\n\t\t},\n\t\texpanded: func(enc *ExpandedEncoder) error {\n\t\t\tenc.pagerCmd = p\n\t\t\treturn nil\n\t\t},\n\t}\n}", "func AddIndependentPropertyGeneratorsForVirtualNetworks_VirtualNetworkPeering_Spec(gens map[string]gopter.Gen) {\n\tgens[\"AllowForwardedTraffic\"] = gen.PtrOf(gen.Bool())\n\tgens[\"AllowGatewayTransit\"] = gen.PtrOf(gen.Bool())\n\tgens[\"AllowVirtualNetworkAccess\"] = gen.PtrOf(gen.Bool())\n\tgens[\"AzureName\"] = gen.AlphaString()\n\tgens[\"DoNotVerifyRemoteGateways\"] = gen.PtrOf(gen.Bool())\n\tgens[\"OriginalVersion\"] = gen.AlphaString()\n\tgens[\"PeeringState\"] = gen.PtrOf(gen.AlphaString())\n\tgens[\"UseRemoteGateways\"] = gen.PtrOf(gen.Bool())\n}", "func WithWriter(w io.Writer) PrintingOpt {\n\treturn func(p *Printing) {\n\t\tp.writer = w\n\t}\n}", "func ImplementationWrapAlgCopy(pointer unsafe.Pointer) (Alg, error) {\n\tctx := (*C.vscf_impl_t)(pointer)\n\tshallowCopy := C.vscf_impl_shallow_copy(ctx)\n\treturn ImplementationWrapAlg(unsafe.Pointer(shallowCopy))\n}", "func WithProxy(p proxy.BackwardProxy) Option {\n\treturn Option{F: func(o *internal_server.Options, di *utils.Slice) {\n\t\to.Once.OnceOrPanic()\n\t\tdi.Push(fmt.Sprintf(\"WithProxy(%T)\", p))\n\n\t\tif o.Proxy != nil {\n\t\t\tpanic(fmt.Errorf(\"reassignment of Proxy is not allowed: %T -> %T\", o.Proxy, p))\n\t\t}\n\t\to.Proxy = p\n\t}}\n}", "func (v *vAVL) 
Traverser(root *vAVLNode, ch chan<- *Venue) {\r\n\tv.mu.RLock()\r\n\tdefer v.mu.RUnlock()\r\n\tv.traverse(root, ch)\r\n\tclose(ch)\r\n}", "func WithFollowing(following *url.URL) Opt {\n\treturn func(opts *Options) {\n\t\topts.Following = following\n\t}\n}", "func (s *BasevhdlListener) EnterSubprogram_kind(ctx *Subprogram_kindContext) {}", "func (_options *CreateSecretActionOptions) SetSecretActionPrototype(secretActionPrototype SecretActionPrototypeIntf) *CreateSecretActionOptions {\n\t_options.SecretActionPrototype = secretActionPrototype\n\treturn _options\n}", "func WithConnectObserver(observer gocql.ConnectObserver) Option {\n\treturn optionFunc(func(cfg *config) {\n\t\tcfg.connectObserver = observer\n\t})\n}", "func (p *Printer) WriteProtoFlow(f v1.Flow) error {\n\tswitch p.opts.output {\n\tcase TabOutput:\n\t\tif p.line == 0 {\n\t\t\t_, err := fmt.Fprint(p.tw,\n\t\t\t\t\"TIMESTAMP\", tab,\n\t\t\t\t\"SOURCE\", tab,\n\t\t\t\t\"DESTINATION\", tab,\n\t\t\t\t\"TYPE\", tab,\n\t\t\t\t\"VERDICT\", tab,\n\t\t\t\t\"SUMMARY\", newline,\n\t\t\t)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t\tsrc, dst := p.GetHostNames(f)\n\t\t_, err := fmt.Fprint(p.tw,\n\t\t\tgetTimestamp(f), tab,\n\t\t\tsrc, tab,\n\t\t\tdst, tab,\n\t\t\tGetFlowType(f), tab,\n\t\t\tf.GetVerdict().String(), tab,\n\t\t\tf.GetSummary(), newline,\n\t\t)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"failed to write out packet: %v\", err)\n\t\t}\n\tcase DictOutput:\n\t\tif p.line != 0 {\n\t\t\t// TODO: line length?\n\t\t\t_, err := fmt.Fprintln(p.opts.w, \"------------\")\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t\tsrc, dst := p.GetHostNames(f)\n\t\t// this is a little crude, but will do for now. 
should probably find the\n\t\t// longest header and auto-format the keys\n\t\t_, err := fmt.Fprint(p.opts.w,\n\t\t\t\" TIMESTAMP: \", getTimestamp(f), newline,\n\t\t\t\" SOURCE: \", src, newline,\n\t\t\t\"DESTINATION: \", dst, newline,\n\t\t\t\" TYPE: \", GetFlowType(f), newline,\n\t\t\t\" VERDICT: \", f.GetVerdict().String(), newline,\n\t\t\t\" SUMMARY: \", f.GetSummary(), newline,\n\t\t)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"failed to write out packet: %v\", err)\n\t\t}\n\tcase CompactOutput:\n\t\tsrc, dst := p.GetHostNames(f)\n\t\t_, err := fmt.Fprintf(p.opts.w,\n\t\t\t\"%s [%s]: %s -> %s %s %s (%s)\\n\",\n\t\t\tgetTimestamp(f),\n\t\t\tf.GetNodeName(),\n\t\t\tsrc,\n\t\t\tdst,\n\t\t\tGetFlowType(f),\n\t\t\tf.GetVerdict().String(),\n\t\t\tf.GetSummary(),\n\t\t)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"failed to write out packet: %v\", err)\n\t\t}\n\tcase JSONOutput:\n\t\treturn p.jsonEncoder.Encode(f)\n\t}\n\tp.line++\n\treturn nil\n}", "func WithIPPUSBDescriptors() Option {\n\treturn WithDescriptors(\"ippusb_printer.json\")\n}", "func (client *Client) DescribeExplorerWithCallback(request *DescribeExplorerRequest, callback func(response *DescribeExplorerResponse, err error)) <-chan int {\n\tresult := make(chan int, 1)\n\terr := client.AddAsyncTask(func() {\n\t\tvar response *DescribeExplorerResponse\n\t\tvar err error\n\t\tdefer close(result)\n\t\tresponse, err = client.DescribeExplorer(request)\n\t\tcallback(response, err)\n\t\tresult <- 1\n\t})\n\tif err != nil {\n\t\tdefer close(result)\n\t\tcallback(nil, err)\n\t\tresult <- 0\n\t}\n\treturn result\n}", "func OptionUpstreamURLScheme(scheme string) Option {\n\treturn func(cfg *gwconfig) {\n\t\tcfg.upstreamURLScheme = scheme\n\t}\n}", "func WithFormatter(formatter Formatter) Option {\n\treturn option{\n\t\ttable: func(enc *TableEncoder) error {\n\t\t\tenc.formatter = formatter\n\t\t\treturn nil\n\t\t},\n\t\texpanded: func(enc *ExpandedEncoder) error {\n\t\t\tenc.formatter = 
formatter\n\t\t\treturn nil\n\t\t},\n\t\tjson: func(enc *JSONEncoder) error {\n\t\t\tenc.formatter = formatter\n\t\t\treturn nil\n\t\t},\n\t\tunaligned: func(enc *UnalignedEncoder) error {\n\t\t\tenc.formatter = formatter\n\t\t\treturn nil\n\t\t},\n\t\ttemplate: func(enc *TemplateEncoder) error {\n\t\t\tenc.formatter = formatter\n\t\t\treturn nil\n\t\t},\n\t\tcrosstab: func(view *CrosstabView) error {\n\t\t\tview.formatter = formatter\n\t\t\treturn nil\n\t\t},\n\t}\n}", "func HandlerWithProtocPath(protocPath string) HandlerOption {\n\treturn func(handlerOptions *handlerOptions) {\n\t\thandlerOptions.protocPath = protocPath\n\t}\n}", "func (_options *CreateSecretVersionActionOptions) SetSecretVersionActionPrototype(secretVersionActionPrototype SecretVersionActionPrototypeIntf) *CreateSecretVersionActionOptions {\n\t_options.SecretVersionActionPrototype = secretVersionActionPrototype\n\treturn _options\n}", "func (e *Extension) Outgoing(ms *bayeux.Message) {\n\tswitch ms.Channel {\n\tcase bayeux.MetaHandshake:\n\t\text := ms.GetExt(true)\n\t\text[ExtensionName] = true\n\tcase bayeux.MetaSubscribe:\n\t\tif e.isSupported() {\n\t\t\text := ms.GetExt(true)\n\t\t\text[ExtensionName] = e.replayStore.AsMap()\n\t\t}\n\t}\n}", "func ProtoToOSPolicyAssignment(p *osconfigpb.OsconfigOSPolicyAssignment) *osconfig.OSPolicyAssignment {\n\tobj := &osconfig.OSPolicyAssignment{\n\t\tName: dcl.StringOrNil(p.GetName()),\n\t\tDescription: dcl.StringOrNil(p.GetDescription()),\n\t\tInstanceFilter: ProtoToOsconfigOSPolicyAssignmentInstanceFilter(p.GetInstanceFilter()),\n\t\tRollout: ProtoToOsconfigOSPolicyAssignmentRollout(p.GetRollout()),\n\t\tRevisionId: dcl.StringOrNil(p.GetRevisionId()),\n\t\tRevisionCreateTime: dcl.StringOrNil(p.GetRevisionCreateTime()),\n\t\tEtag: dcl.StringOrNil(p.GetEtag()),\n\t\tRolloutState: ProtoToOsconfigOSPolicyAssignmentRolloutStateEnum(p.GetRolloutState()),\n\t\tBaseline: dcl.Bool(p.GetBaseline()),\n\t\tDeleted: dcl.Bool(p.GetDeleted()),\n\t\tReconciling: 
dcl.Bool(p.GetReconciling()),\n\t\tUid: dcl.StringOrNil(p.GetUid()),\n\t\tProject: dcl.StringOrNil(p.GetProject()),\n\t\tLocation: dcl.StringOrNil(p.GetLocation()),\n\t\tSkipAwaitRollout: dcl.Bool(p.GetSkipAwaitRollout()),\n\t}\n\tfor _, r := range p.GetOsPolicies() {\n\t\tobj.OSPolicies = append(obj.OSPolicies, *ProtoToOsconfigOSPolicyAssignmentOSPolicies(r))\n\t}\n\treturn obj\n}", "func ProvokingVertex(mode uint32) {\n\tsyscall.Syscall(gpProvokingVertex, 1, uintptr(mode), 0, 0)\n}", "func (t *Link) PrependPreviewObject(v ObjectType) {\n\tt.preview = append([]*previewIntermediateType{&previewIntermediateType{Object: v}}, t.preview...)\n\n}", "func WithDescriptors(path string) Option {\n\treturn func(o *config) error {\n\t\tif len(path) == 0 {\n\t\t\treturn errors.New(\"empty descriptors path\")\n\t\t}\n\t\to.args = append(o.args, \"--descriptors_path=\"+absoluteConfigPath(path))\n\t\to.descriptors = absoluteConfigPath(path)\n\t\treturn nil\n\t}\n}", "func (o *GetOrganizationPrototypePermissionsParams) WithHTTPClient(client *http.Client) *GetOrganizationPrototypePermissionsParams {\n\to.SetHTTPClient(client)\n\treturn o\n}", "func WithDataPageV2() FileWriterOption {\n\treturn func(fw *FileWriter) {\n\t\tfw.newPageFunc = newDataPageV2Writer\n\t}\n}", "func AddRelatedPropertyGeneratorsForVirtualMachine_Spec(gens map[string]gopter.Gen) {\n\tgens[\"AdditionalCapabilities\"] = gen.PtrOf(AdditionalCapabilitiesGenerator())\n\tgens[\"ApplicationProfile\"] = gen.PtrOf(ApplicationProfileGenerator())\n\tgens[\"AvailabilitySet\"] = gen.PtrOf(SubResourceGenerator())\n\tgens[\"BillingProfile\"] = gen.PtrOf(BillingProfileGenerator())\n\tgens[\"CapacityReservation\"] = gen.PtrOf(CapacityReservationProfileGenerator())\n\tgens[\"DiagnosticsProfile\"] = gen.PtrOf(DiagnosticsProfileGenerator())\n\tgens[\"ExtendedLocation\"] = gen.PtrOf(ExtendedLocationGenerator())\n\tgens[\"HardwareProfile\"] = gen.PtrOf(HardwareProfileGenerator())\n\tgens[\"Host\"] = 
gen.PtrOf(SubResourceGenerator())\n\tgens[\"HostGroup\"] = gen.PtrOf(SubResourceGenerator())\n\tgens[\"Identity\"] = gen.PtrOf(VirtualMachineIdentityGenerator())\n\tgens[\"NetworkProfile\"] = gen.PtrOf(NetworkProfileGenerator())\n\tgens[\"OsProfile\"] = gen.PtrOf(OSProfileGenerator())\n\tgens[\"Plan\"] = gen.PtrOf(PlanGenerator())\n\tgens[\"ProximityPlacementGroup\"] = gen.PtrOf(SubResourceGenerator())\n\tgens[\"ScheduledEventsProfile\"] = gen.PtrOf(ScheduledEventsProfileGenerator())\n\tgens[\"SecurityProfile\"] = gen.PtrOf(SecurityProfileGenerator())\n\tgens[\"StorageProfile\"] = gen.PtrOf(StorageProfileGenerator())\n\tgens[\"VirtualMachineScaleSet\"] = gen.PtrOf(SubResourceGenerator())\n}", "func newSeparatorFromNative(obj unsafe.Pointer) interface{} {\n\tsep := &Separator{}\n\tsep.object = C.to_GtkSeparator(obj)\n\n\tif gobject.IsObjectFloating(sep) {\n\t\tgobject.RefSink(sep)\n\t} else {\n\t\tgobject.Ref(sep)\n\t}\n\tsep.Widget = NewWidget(obj)\n\tsep.Orientable = newOrientableFromNative(obj).(*Orientable)\n\tseparatorFinalizer(sep)\n\n\treturn sep\n}", "func (o *GetOrganizationPrototypePermissionsParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {\n\n\tif err := r.SetTimeout(o.timeout); err != nil {\n\t\treturn err\n\t}\n\tvar res []error\n\n\t// path param orgname\n\tif err := r.SetPathParam(\"orgname\", o.Orgname); err != nil {\n\t\treturn err\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}", "func (t *Link) AppendPreviewObject(v ObjectType) {\n\tt.preview = append(t.preview, &previewIntermediateType{Object: v})\n\n}", "func WithKnowledgeGraphProcessor(s kgProcessor) Option {\n\treturn func(o *options) {\n\t\to.kg = s\n\t}\n}", "func ImplementationWrapAlgInfoSerializerCopy(pointer unsafe.Pointer) (AlgInfoSerializer, error) {\n\tctx := (*C.vscf_impl_t)(pointer)\n\tshallowCopy := C.vscf_impl_shallow_copy(ctx)\n\treturn 
ImplementationWrapAlgInfoSerializer(unsafe.Pointer(shallowCopy))\n}", "func WithGenerator(g *Generator) OptionFunc {\n\treturn func(b *Bot) {\n\t\tb.generator = g\n\t}\n}", "func (_DelegationController *DelegationControllerFilterer) WatchDelegationProposed(opts *bind.WatchOpts, sink chan<- *DelegationControllerDelegationProposed) (event.Subscription, error) {\n\n\tlogs, sub, err := _DelegationController.contract.WatchLogs(opts, \"DelegationProposed\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn event.NewSubscription(func(quit <-chan struct{}) error {\n\t\tdefer sub.Unsubscribe()\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase log := <-logs:\n\t\t\t\t// New log arrived, parse the event and forward to the user\n\t\t\t\tevent := new(DelegationControllerDelegationProposed)\n\t\t\t\tif err := _DelegationController.contract.UnpackLog(event, \"DelegationProposed\", log); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tevent.Raw = log\n\n\t\t\t\tselect {\n\t\t\t\tcase sink <- event:\n\t\t\t\tcase err := <-sub.Err():\n\t\t\t\t\treturn err\n\t\t\t\tcase <-quit:\n\t\t\t\t\treturn nil\n\t\t\t\t}\n\t\t\tcase err := <-sub.Err():\n\t\t\t\treturn err\n\t\t\tcase <-quit:\n\t\t\t\treturn nil\n\t\t\t}\n\t\t}\n\t}), nil\n}", "func (qs ControlQS) OrderByKpDesc() ControlQS {\n\tqs.order = append(qs.order, `\"kp\" DESC`)\n\n\treturn qs\n}", "func (t *Link) PrependPreviewLink(v LinkType) {\n\tt.preview = append([]*previewIntermediateType{&previewIntermediateType{Link: v}}, t.preview...)\n\n}", "func WithTracing(tracing bool) Option {\n\treturn func(b *builder) {\n\t\tb.useTracing = tracing\n\t}\n}", "func AddIndependentPropertyGeneratorsForVirtualNetworkGatewaySku(gens map[string]gopter.Gen) {\n\tgens[\"Name\"] = 
gen.PtrOf(gen.OneConstOf(\n\t\tVirtualNetworkGatewaySku_Name_Basic,\n\t\tVirtualNetworkGatewaySku_Name_ErGw1AZ,\n\t\tVirtualNetworkGatewaySku_Name_ErGw2AZ,\n\t\tVirtualNetworkGatewaySku_Name_ErGw3AZ,\n\t\tVirtualNetworkGatewaySku_Name_HighPerformance,\n\t\tVirtualNetworkGatewaySku_Name_Standard,\n\t\tVirtualNetworkGatewaySku_Name_UltraPerformance,\n\t\tVirtualNetworkGatewaySku_Name_VpnGw1,\n\t\tVirtualNetworkGatewaySku_Name_VpnGw1AZ,\n\t\tVirtualNetworkGatewaySku_Name_VpnGw2,\n\t\tVirtualNetworkGatewaySku_Name_VpnGw2AZ,\n\t\tVirtualNetworkGatewaySku_Name_VpnGw3,\n\t\tVirtualNetworkGatewaySku_Name_VpnGw3AZ,\n\t\tVirtualNetworkGatewaySku_Name_VpnGw4,\n\t\tVirtualNetworkGatewaySku_Name_VpnGw4AZ,\n\t\tVirtualNetworkGatewaySku_Name_VpnGw5,\n\t\tVirtualNetworkGatewaySku_Name_VpnGw5AZ))\n\tgens[\"Tier\"] = gen.PtrOf(gen.OneConstOf(\n\t\tVirtualNetworkGatewaySku_Tier_Basic,\n\t\tVirtualNetworkGatewaySku_Tier_ErGw1AZ,\n\t\tVirtualNetworkGatewaySku_Tier_ErGw2AZ,\n\t\tVirtualNetworkGatewaySku_Tier_ErGw3AZ,\n\t\tVirtualNetworkGatewaySku_Tier_HighPerformance,\n\t\tVirtualNetworkGatewaySku_Tier_Standard,\n\t\tVirtualNetworkGatewaySku_Tier_UltraPerformance,\n\t\tVirtualNetworkGatewaySku_Tier_VpnGw1,\n\t\tVirtualNetworkGatewaySku_Tier_VpnGw1AZ,\n\t\tVirtualNetworkGatewaySku_Tier_VpnGw2,\n\t\tVirtualNetworkGatewaySku_Tier_VpnGw2AZ,\n\t\tVirtualNetworkGatewaySku_Tier_VpnGw3,\n\t\tVirtualNetworkGatewaySku_Tier_VpnGw3AZ,\n\t\tVirtualNetworkGatewaySku_Tier_VpnGw4,\n\t\tVirtualNetworkGatewaySku_Tier_VpnGw4AZ,\n\t\tVirtualNetworkGatewaySku_Tier_VpnGw5,\n\t\tVirtualNetworkGatewaySku_Tier_VpnGw5AZ))\n}", "func (p *ZkEstablishAccept) Encode(w io.Writer, pver uint32) error {\n\treturn WriteElements(w,\n\t\tp.EscrowTxid,\n\t\tp.ToSelfDelay,\n\t\tp.MerchPayoutPk,\n\t\tp.MerchChildPk,\n\t\tp.ChannelState)\n}", "func (*PacketBrokerRoutingPolicyUplink) Descriptor() ([]byte, []int) {\n\treturn file_ttn_lorawan_v3_packetbrokeragent_proto_rawDescGZIP(), []int{9}\n}", "func (o *GenericSorting) 
WithFlavor(flavor sql.DbFlavor) *GenericSorting {\n\to.Flavor = flavor\n\treturn o\n}", "func (m *BusinessScenarioPlanner) SetPlanConfiguration(value PlannerPlanConfigurationable)() {\n err := m.GetBackingStore().Set(\"planConfiguration\", value)\n if err != nil {\n panic(err)\n }\n}", "func AddIndependentPropertyGeneratorsForOriginGroupOverride(gens map[string]gopter.Gen) {\n\tgens[\"ForwardingProtocol\"] = gen.PtrOf(gen.AlphaString())\n}", "func (g *Generation) ApplySurvivorSelection(outgoingParents []Individual,\n\tchildren []Individual) ([]Individual, error) {\n\n\tswitch g.engine.Parameters.Selection.Survivor.Type {\n\tcase SurvivorSelectionHalfAndHalf:\n\t\treturn HalfAndHalfSurvivorSelection(outgoingParents, children, g.engine.Parameters.Selection.Survivor.SurvivorPercentage, g.engine.Parameters.EachPopulationSize)\n\tcase SurvivorSelectionParentVsChild:\n\t\treturn ParentVsChildSurvivorSelection(outgoingParents, children, g.engine.Parameters)\n\tdefault:\n\t\treturn nil, fmt.Errorf(\"invalid survivor selection selected\")\n\t}\n}", "func (p *Peer) negotiateOutboundProtocol() error {\n\tif err := p.writeLocalVersionMsg(); err != nil {\n\t\treturn err\n\t}\n\n\tif err := p.readRemoteVersionMsg(); err != nil {\n\t\treturn err\n\t}\n\n\tvar protoVersion uint32\n\tp.flagsMtx.Lock()\n\tprotoVersion = p.protocolVersion\n\tp.flagsMtx.Unlock()\n\n\tif err := p.writeSendAddrV2Msg(protoVersion); err != nil {\n\t\treturn err\n\t}\n\n\terr := p.writeMessage(wire.NewMsgVerAck(), wire.LatestEncoding)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// Finish the negotiation by waiting for negotiable messages or verack.\n\treturn p.waitToFinishNegotiation(protoVersion)\n}", "func (pp *PathProcessor) SetChainProviderIfApplicable(chainProvider provider.ChainProvider) bool {\n\tif chainProvider == nil {\n\t\treturn false\n\t}\n\tif pp.pathEnd1.info.ChainID == chainProvider.ChainId() {\n\t\tpp.pathEnd1.chainProvider = chainProvider\n\n\t\tif pp.isLocalhost 
{\n\t\t\tpp.pathEnd2.chainProvider = chainProvider\n\t\t}\n\n\t\treturn true\n\t} else if pp.pathEnd2.info.ChainID == chainProvider.ChainId() {\n\t\tpp.pathEnd2.chainProvider = chainProvider\n\n\t\tif pp.isLocalhost {\n\t\t\tpp.pathEnd1.chainProvider = chainProvider\n\t\t}\n\n\t\treturn true\n\t}\n\treturn false\n}", "func NewCustomTraversal(str string) (g String) {\n\tg.string = str\n\tg.buffer = bytes.NewBufferString(\"\")\n\treturn g\n}", "func (w *Writer) SetTransferSyntax(bo binary.ByteOrder, implicit bool) {\n\tw.bo = bo\n\tw.implicit = implicit\n}", "func NewTraverser(config *config.WeaviateConfig, locks locks,\n\tlogger logrus.FieldLogger, authorizer authorizer,\n\tvectorSearcher VectorSearcher,\n\texplorer explorer, schemaGetter schema.SchemaGetter) *Traverser {\n\treturn &Traverser{\n\t\tconfig: config,\n\t\tlocks: locks,\n\t\tlogger: logger,\n\t\tauthorizer: authorizer,\n\t\tvectorSearcher: vectorSearcher,\n\t\texplorer: explorer,\n\t\tschemaGetter: schemaGetter,\n\t}\n}", "func (m *VirtualEndpoint) SetExternalPartnerSettings(value []CloudPcExternalPartnerSettingable)() {\n err := m.GetBackingStore().Set(\"externalPartnerSettings\", value)\n if err != nil {\n panic(err)\n }\n}", "func (v SetInterceptFileChooserDialogParams) MarshalEasyJSON(w *jwriter.Writer) {\n\teasyjsonC5a4559bEncodeGithubComChromedpCdprotoPage11(w, v)\n}", "func (p *Proxy) SetProtoProxy(proxy *proto.Proxy) { p.protoProxy = proxy }", "func (d *RPCFactory) CreateGRPCDispatcherForOutbound(\n\tcallerName string,\n\tserviceName string,\n\thostName string,\n) (*yarpc.Dispatcher, error) {\n\treturn d.createOutboundDispatcher(callerName, serviceName, hostName, d.grpc.NewSingleOutbound(hostName))\n}", "func (prf *proof) prover(p Predicate, sval map[string]abstract.Scalar,\n\tpval map[string]abstract.Point,\n\tchoice map[Predicate]int) Prover {\n\n\treturn Prover(func(ctx ProverContext) error {\n\t\treturn prf.prove(p, sval, pval, choice, ctx)\n\t})\n}", "func (t *Link) AppendPreviewLink(v 
LinkType) {\n\tt.preview = append(t.preview, &previewIntermediateType{Link: v})\n\n}", "func WithTracePropagation() Option {\n\treturn func(i interface{}) error {\n\t\treturn nil\n\t}\n}", "func (w *Walker) DescendentsWith(predicate Predicate) *Walker {\n\tif w == nil {\n\t\treturn nil\n\t}\n\tif predicate == nil {\n\t\tw.pipe.errors <- ErrInvalidFilter\n\t} else {\n\t\terr := w.appendFilterForTask(descendentsWith, predicate, 5) // need a helper queue\n\t\tif err != nil { // this should never happen here\n\t\t\tT().Errorf(err.Error())\n\t\t\tpanic(err) // for debugging as long as this is unstable\n\t\t}\n\t}\n\treturn w\n}", "func WithIntentGraphProcessor(s intentGraphProcessor) Option {\n\treturn func(o *options) {\n\t\to.intentGraph = s\n\t}\n}", "func (m *RemoteAssistancePartner) SetOnboardingUrl(value *string)() {\n m.onboardingUrl = value\n}", "func EncodeProtoDescriptorSource(value string) EncodeProtoAttr {\n\treturn func(m optionalAttr) {\n\t\tm[\"descriptor_source\"] = value\n\t}\n}", "func WithTracerProvider(provider trace.TracerProvider) Option {\n\treturn func(p *otelPlugin) {\n\t\tp.provider = provider\n\t}\n}", "func ProtoToOsconfigOSPolicyAssignmentInstanceFilter(p *osconfigpb.OsconfigOSPolicyAssignmentInstanceFilter) *osconfig.OSPolicyAssignmentInstanceFilter {\n\tif p == nil {\n\t\treturn nil\n\t}\n\tobj := &osconfig.OSPolicyAssignmentInstanceFilter{\n\t\tAll: dcl.Bool(p.GetAll()),\n\t}\n\tfor _, r := range p.GetInclusionLabels() {\n\t\tobj.InclusionLabels = append(obj.InclusionLabels, *ProtoToOsconfigOSPolicyAssignmentInstanceFilterInclusionLabels(r))\n\t}\n\tfor _, r := range p.GetExclusionLabels() {\n\t\tobj.ExclusionLabels = append(obj.ExclusionLabels, *ProtoToOsconfigOSPolicyAssignmentInstanceFilterExclusionLabels(r))\n\t}\n\tfor _, r := range p.GetInventories() {\n\t\tobj.Inventories = append(obj.Inventories, *ProtoToOsconfigOSPolicyAssignmentInstanceFilterInventories(r))\n\t}\n\treturn obj\n}", "func compileTraversal(\n\tfs *Bcpfs,\n\tshares 
Exports,\n\treals RealExports,\n) RealExports {\n\ttravs := make([]ExportEntry, 0, len(reals)*5)\n\ttravsByRealpath := make(map[string]int)\n\n\t// Allow directory traversal along realpaths.\n\tfor _, r := range reals {\n\t\tacl := NewTraversalAclWithGroups(r.Acl.Groups())\n\t\tparts := strings.Split(r.Path, \"/\")\n\n\t\t// Do not add `--x` to `<srvdir>/<srv>` but only to subdirs, so\n\t\t// that srv group membership is required to access realpath.\n\t\tbegin := 2\n\t\tif fs.IsServiceRealpath(r.Path) {\n\t\t\tbegin = 3\n\t\t}\n\t\tfor i := begin; i < len(parts); i++ {\n\t\t\tpath := slashpath.Join(parts[:i]...)\n\t\t\tif idx, ok := travsByRealpath[path]; ok {\n\t\t\t\t// Update existing.\n\t\t\t\ttravs[idx].Acl = travs[idx].Acl.Union(acl)\n\t\t\t} else {\n\t\t\t\t// Append new.\n\t\t\t\ttravsByRealpath[path] = len(travs)\n\t\t\t\ttravs = append(travs, ExportEntry{\n\t\t\t\t\tPath: path,\n\t\t\t\t\tAcl: acl,\n\t\t\t\t\tManagingGroups: r.ManagingGroups,\n\t\t\t\t})\n\t\t\t}\n\t\t}\n\t}\n\n\t// Allow traversal of ou toplevel directories to reach symlinks.\n\tfor _, shr := range shares {\n\t\tacl := NewTraversalAclWithGroups(shr.Acl.Groups())\n\t\tou := strings.Split(shr.Path, \"/\")[0]\n\t\tpath := slashpath.Join(fs.OrgUnitDir, ou)\n\t\tif idx, ok := travsByRealpath[path]; ok {\n\t\t\t// Update existing.\n\t\t\ttravs[idx].Acl = travs[idx].Acl.Union(acl)\n\t\t} else {\n\t\t\t// Append new.\n\t\t\ttravsByRealpath[path] = len(travs)\n\t\t\ttravs = append(travs, ExportEntry{\n\t\t\t\tPath: path,\n\t\t\t\tAcl: acl,\n\t\t\t})\n\t\t}\n\t}\n\n\treturn travs\n}", "func NewWithPrinter(h *human.Printer) *Printer {\n\treturn &Printer{Printer: h}\n}", "func (d *RPCFactory) CreateDispatcherForOutbound(\n\tcallerName string,\n\tserviceName string,\n\thostName string,\n) (*yarpc.Dispatcher, error) {\n\treturn d.createOutboundDispatcher(callerName, serviceName, hostName, d.ch.NewSingleOutbound(hostName))\n}", "func (local *Node) traverseBackpointers(neighbors []RemoteNode, level int) 
(err error) {\n\tif level >= 0 {\n\t\t// copy neighbors set as basis for nextNeighbors\n\t\tnextNeighbors := make([]RemoteNode, 0)\n\n\t\tfor _, neighbor := range neighbors {\n\t\t\t// for each neighbor, grab all backpointers it has\n\t\t\t// that exist at this level in that neighbor's routing table\n\t\t\t// (also pass our node so that that node can add us to it's routing table)\n\t\t\tbackpointers, err := neighbor.GetBackpointersRPC(local.node, level)\n\t\t\tif err != nil {\n\t\t\t\t// continue to try and add more backpointers if we get an err,\n\t\t\t\t// but return error if nothing else occurs\n\t\t\t\tError.Printf(\"Unreachable node notice: %v unreachable while traversing backpointers for %v\", neighbor, local)\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\t// append ALL the backpointers, as a set\n\t\t\tnextNeighbors = InsertWithoutDuplicates(nextNeighbors, backpointers)\n\t\t}\n\n\t\t// add all to our routing table\n\t\tfor _, neighbor := range nextNeighbors {\n\t\t\t//Debug.Printf(\"Added %v to %v via backpointer traversal\", neighbor, local.node)\n\t\t\tlocal.addRoute(neighbor)\n\t\t}\n\n\t\t// trim the list of neighbors to K (they're already sorted such\n\t\t// that the best (closest) K are at the front, and duplicates have\n\t\t// been removed (i.e. 
ignored when adding)\n\t\tnextNeighbors = local.SortListByCloseness(nextNeighbors)\n\t\tif len(nextNeighbors) > K {\n\t\t\tnextNeighbors = nextNeighbors[:K]\n\t\t}\n\n\t\t// move on to the next level, using our updated set of neighbors as a starting point\n\t\terr = local.traverseBackpointers(nextNeighbors, level-1)\n\t}\n\n\treturn\n}", "func AddRelatedPropertyGeneratorsForVirtualNetworks_VirtualNetworkPeering_Spec(gens map[string]gopter.Gen) {\n\tgens[\"RemoteAddressSpace\"] = gen.PtrOf(AddressSpaceGenerator())\n\tgens[\"RemoteBgpCommunities\"] = gen.PtrOf(VirtualNetworkBgpCommunitiesGenerator())\n\tgens[\"RemoteVirtualNetwork\"] = gen.PtrOf(SubResourceGenerator())\n}", "func (gui *Gui) setUpstreamToBranch(g *gocui.Gui, v *gocui.View) error {\n\tmaxX, maxY := g.Size()\n\n\te := gui.getSelectedRepository()\n\tv, err := g.SetView(confirmationViewFeature.Name, maxX/2-30, maxY/2-2, maxX/2+30, maxY/2+2)\n\tif err != nil {\n\t\tif err != gocui.ErrUnknownView {\n\t\t\treturn err\n\t\t}\n\t\tfmt.Fprintln(v, \"branch.\"+e.Branch.Name+\".\"+\"remote\"+\"=\"+e.Remote.Name)\n\t\tfmt.Fprintln(v, \"branch.\"+e.Branch.Name+\".\"+\"merge\"+\"=\"+e.Branch.Reference.Name().String())\n\t}\n\treturn gui.focusToView(confirmationViewFeature.Name)\n}", "func AddIndependentPropertyGeneratorsForSshPublicKeySpec(gens map[string]gopter.Gen) {\n\tgens[\"KeyData\"] = gen.PtrOf(gen.AlphaString())\n\tgens[\"Path\"] = gen.PtrOf(gen.AlphaString())\n}", "func WithCount(count int) Option {\n\treturn option{\n\t\ttable: func(enc *TableEncoder) error {\n\t\t\tenc.count = count\n\t\t\treturn nil\n\t\t},\n\t\texpanded: func(enc *ExpandedEncoder) error {\n\t\t\tenc.count = count\n\t\t\treturn nil\n\t\t},\n\t}\n}", "func (t *DesktopTracer) TraceConfiguration(ctx context.Context) (*service.DeviceTraceConfiguration, error) {\n\tapis := make([]*service.TraceTypeCapabilities, 0, 1)\n\tif len(t.b.Instance().GetConfiguration().GetDrivers().GetVulkan().GetPhysicalDevices()) > 0 {\n\t\tapis = append(apis, 
tracer.VulkanTraceOptions())\n\t}\n\n\tpreferredRoot, err := t.b.GetWorkingDirectory(ctx)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tisLocal, err := t.b.IsLocal(ctx)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif t.b.SupportsPerfetto(ctx) {\n\t\tapis = append(apis, tracer.PerfettoTraceOptions())\n\t}\n\n\treturn &service.DeviceTraceConfiguration{\n\t\tApis: apis,\n\t\tServerLocalPath: isLocal,\n\t\tCanSpecifyCwd: true,\n\t\tCanUploadApplication: false,\n\t\tCanSpecifyEnv: true,\n\t\tPreferredRootUri: preferredRoot,\n\t\tHasCache: false,\n\t}, nil\n}", "func AddRelatedPropertyGeneratorsForServerfarm_Spec(gens map[string]gopter.Gen) {\n\tgens[\"ExtendedLocation\"] = gen.PtrOf(ExtendedLocationGenerator())\n\tgens[\"HostingEnvironmentProfile\"] = gen.PtrOf(HostingEnvironmentProfileGenerator())\n\tgens[\"KubeEnvironmentProfile\"] = gen.PtrOf(KubeEnvironmentProfileGenerator())\n\tgens[\"Sku\"] = gen.PtrOf(SkuDescriptionGenerator())\n}", "func (t *TablePrinter) PrintObjWithKind(kind string, obj interface{}, writer io.Writer) error {\n\titemsValue := reflect.ValueOf(obj)\n\tif itemsValue.Kind() != reflect.Slice {\n\t\treturn errors.Errorf(\"table printer expects a slice but the kind was %v\", itemsValue.Kind())\n\t}\n\n\tif itemsValue.Len() == 0 {\n\t\tw := bufio.NewWriter(writer)\n\t\tif _, err := w.WriteString(fmt.Sprintf(\"No %s found\\n\", strings.ToLower(kind))); err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn w.Flush()\n\t}\n\n\treturn t.table.Render(obj, writer, t.columnames...)\n}", "func (m *PrinterCreateOperation) SetPrinter(value Printerable)() {\n m.printer = value\n}", "func Proto(p Protocol) Option {\n\treturn func(o *Options) {\n\t\to.Protocol = p\n\t}\n}", "func WithSeparator(sep rune) Option {\n\treturn option{\n\t\tunaligned: func(enc *UnalignedEncoder) error {\n\t\t\tenc.sep = sep\n\t\t\treturn nil\n\t\t},\n\t}\n}", "func WithTracerProvider(provider trace.TracerProvider) Option {\n\treturn func(cfg *config) {\n\t\tcfg.TracerProvider = 
provider\n\t}\n}", "func (m *PrinterCreateOperation) SetPrinter(value Printerable)() {\n err := m.GetBackingStore().Set(\"printer\", value)\n if err != nil {\n panic(err)\n }\n}", "func (self *TraitPixbuf) SaveToCallbackv(save_func C.GdkPixbufSaveFunc, user_data unsafe.Pointer, type_ string, option_keys []string, option_values []string) (return__ bool, __err__ error) {\n\t__cgo__type_ := C.CString(type_)\n\t__header__option_keys := (*reflect.SliceHeader)(unsafe.Pointer(&option_keys))\n\t__header__option_values := (*reflect.SliceHeader)(unsafe.Pointer(&option_values))\n\tvar __cgo_error__ *C.GError\n\tvar __cgo__return__ C.gboolean\n\t__cgo__return__ = C.gdk_pixbuf_save_to_callbackv(self.CPointer, save_func, (C.gpointer)(user_data), __cgo__type_, (**C.char)(unsafe.Pointer(__header__option_keys.Data)), (**C.char)(unsafe.Pointer(__header__option_values.Data)), &__cgo_error__)\n\tC.free(unsafe.Pointer(__cgo__type_))\n\treturn__ = __cgo__return__ == C.gboolean(1)\n\tif __cgo_error__ != nil {\n\t\t__err__ = errors.New(C.GoString((*C.char)(unsafe.Pointer(__cgo_error__.message))))\n\t}\n\treturn\n}" ]
[ "0.39877933", "0.39078513", "0.38892928", "0.374556", "0.37302673", "0.36932385", "0.35834628", "0.33725303", "0.336618", "0.33358172", "0.33044896", "0.32975402", "0.32719445", "0.32489514", "0.32471502", "0.32446983", "0.32418078", "0.3223724", "0.32211462", "0.3219564", "0.31985813", "0.31830966", "0.31760514", "0.31588054", "0.31440192", "0.3141801", "0.30836493", "0.30783927", "0.30701286", "0.306409", "0.30614546", "0.3061356", "0.30604845", "0.30558637", "0.30522615", "0.30431074", "0.3038779", "0.30383423", "0.30345133", "0.30331722", "0.30304193", "0.3024443", "0.30201948", "0.3014918", "0.3009031", "0.30077714", "0.30028412", "0.3002485", "0.2988408", "0.2982713", "0.2980789", "0.2973464", "0.29722822", "0.29675153", "0.29651174", "0.2960937", "0.2960134", "0.29594392", "0.2953175", "0.2952642", "0.29522535", "0.2949601", "0.2948989", "0.29449996", "0.29408625", "0.29389837", "0.29282105", "0.29272437", "0.29197353", "0.29152918", "0.2908451", "0.29057407", "0.2902107", "0.29001072", "0.28938612", "0.28936002", "0.28916973", "0.28898177", "0.2882875", "0.2882114", "0.28814328", "0.2878907", "0.2875215", "0.28685427", "0.2862765", "0.2862166", "0.2861266", "0.28551176", "0.28550905", "0.28549674", "0.2854254", "0.28509104", "0.28503555", "0.28488305", "0.2845224", "0.28386188", "0.2837562", "0.2834852", "0.28341997", "0.28318638" ]
0.7862286
0
WithTrustedCAR specifies whether CIDs match the block data as they are read from the CAR files.
WithTrustedCAR определяет, соответствуют ли CID данным блока, как они читаются из файлов CAR.
func WithTrustedCAR(t bool) Option { return func(o *Options) { o.TrustedCAR = t } }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func isSpecTrustedCASet(proxyConfig *configv1.ProxySpec) bool {\n\treturn len(proxyConfig.TrustedCA.Name) > 0\n}", "func WithTrusted(trusted bool) Option {\n\treturn func(linter *Linter) {\n\t\tlinter.trusted = trusted\n\t}\n}", "func (_Casper *CasperTransactor) SetTrusted(opts *bind.TransactOpts, addr common.Address) (*types.Transaction, error) {\n\treturn _Casper.contract.Transact(opts, \"setTrusted\", addr)\n}", "func RequireTrusted(req bool) Opt {\n\treturn func(p *params) { p.requireTrust = req }\n}", "func (_Casper *CasperSession) SetTrusted(addr common.Address) (*types.Transaction, error) {\n\treturn _Casper.Contract.SetTrusted(&_Casper.TransactOpts, addr)\n}", "func (_Casper *CasperTransactorSession) SetTrusted(addr common.Address) (*types.Transaction, error) {\n\treturn _Casper.Contract.SetTrusted(&_Casper.TransactOpts, addr)\n}", "func TrustedOrigins(origins []string) Option {\n\treturn func(cs *csrf) {\n\t\tcs.opts.TrustedOrigins = origins\n\t}\n}", "func findTrustedCerts(cfg *Config, objects []*Object) ([]*x509.Certificate, error) {\n\tvar out []*x509.Certificate\n\n\tcerts := filterObjectsByClass(objects, \"CKO_CERTIFICATE\")\n\ttrusts := filterObjectsByClass(objects, \"CKO_NSS_TRUST\")\n\n\tfor _, cert := range certs {\n\t\tderBytes := cert.attrs[\"CKA_VALUE\"].value\n\t\thash := sha1.New()\n\t\thash.Write(derBytes)\n\t\tdigest := hash.Sum(nil)\n\n\t\tx509, err := x509.ParseCertificate(derBytes)\n\t\tif err != nil {\n\t\t\t// This is known to occur because of a broken certificate in NSS.\n\t\t\t// https://bugzilla.mozilla.org/show_bug.cgi?id=707995\n\t\t\tcontinue\n\t\t}\n\n\t\t// TODO(agl): wtc tells me that Mozilla might get rid of the\n\t\t// SHA1 records in the future and use issuer and serial number\n\t\t// to match trust records to certificates (which is what NSS\n\t\t// currently uses). 
This needs some changes to the crypto/x509\n\t\t// package to keep the raw names around.\n\n\t\tvar trust *Object\n\t\tfor _, possibleTrust := range trusts {\n\t\t\tif bytes.Equal(digest, possibleTrust.attrs[\"CKA_CERT_SHA1_HASH\"].value) {\n\t\t\t\ttrust = possibleTrust\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\n\t\ttrustType := trust.attrs[\"CKA_TRUST_SERVER_AUTH\"].value\n\n\t\tvar trusted bool\n\t\tswitch string(trustType) {\n\t\tcase \"CKT_NSS_NOT_TRUSTED\":\n\t\t\t// An explicitly distrusted cert\n\t\t\ttrusted = false\n\t\tcase \"CKT_NSS_TRUSTED_DELEGATOR\":\n\t\t\t// A cert trusted for issuing SSL server certs.\n\t\t\ttrusted = true\n\t\tcase \"CKT_NSS_TRUST_UNKNOWN\", \"CKT_NSS_MUST_VERIFY_TRUST\":\n\t\t\t// A cert not trusted for issuing SSL server certs, but is trusted for other purposes.\n\t\t\ttrusted = false\n\t\t}\n\n\t\tif !trusted && !cfg.IncludedUntrustedFlag {\n\t\t\tcontinue\n\t\t}\n\n\t\tout = append(out, x509)\n\t}\n\n\treturn out, nil\n}", "func (_Casper *CasperCaller) Trusted(opts *bind.CallOpts) (common.Address, error) {\n\tvar (\n\t\tret0 = new(common.Address)\n\t)\n\tout := ret0\n\terr := _Casper.contract.Call(opts, out, \"trusted\")\n\treturn *ret0, err\n}", "func newLightClientAttackEvidence(conflicted, trusted, common *types.LightBlock) *types.LightClientAttackEvidence {\n\tev := &types.LightClientAttackEvidence{ConflictingBlock: conflicted}\n\t// if this is an equivocation or amnesia attack, i.e. 
the validator sets are the same, then we\n\t// return the height of the conflicting block else if it is a lunatic attack and the validator sets\n\t// are not the same then we send the height of the common header.\n\tif ev.ConflictingHeaderIsInvalid(trusted.Header) {\n\t\tev.CommonHeight = common.Height\n\t\tev.Timestamp = common.Time\n\t\tev.TotalVotingPower = common.ValidatorSet.TotalVotingPower()\n\t} else {\n\t\tev.CommonHeight = trusted.Height\n\t\tev.Timestamp = trusted.Time\n\t\tev.TotalVotingPower = trusted.ValidatorSet.TotalVotingPower()\n\t}\n\tev.ByzantineValidators = ev.GetByzantineValidators(common.ValidatorSet, trusted.SignedHeader)\n\treturn ev\n}", "func VerifyLightClientAttack(e *types.LightClientAttackEvidence, commonHeader, trustedHeader *types.SignedHeader,\n\tcommonVals *types.ValidatorSet, now time.Time, trustPeriod time.Duration) error {\n\t// In the case of lunatic attack there will be a different commonHeader height. Therefore the node perform a single\n\t// verification jump between the common header and the conflicting one\n\tif commonHeader.Height != e.ConflictingBlock.Height {\n\t\terr := commonVals.VerifyCommitLightTrusting(trustedHeader.ChainID, e.ConflictingBlock.Commit, light.DefaultTrustLevel)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"skipping verification of conflicting block failed: %w\", err)\n\t\t}\n\n\t\t// In the case of equivocation and amnesia we expect all header hashes to be correctly derived\n\t} else if e.ConflictingHeaderIsInvalid(trustedHeader.Header) {\n\t\treturn errors.New(\"common height is the same as conflicting block height so expected the conflicting\" +\n\t\t\t\" block to be correctly derived yet it wasn't\")\n\t}\n\n\t// Verify that the 2/3+ commits from the conflicting validator set were for the conflicting header\n\tif err := e.ConflictingBlock.ValidatorSet.VerifyCommitLight(trustedHeader.ChainID, e.ConflictingBlock.Commit.BlockID,\n\t\te.ConflictingBlock.Height, e.ConflictingBlock.Commit); err != nil 
{\n\t\treturn fmt.Errorf(\"invalid commit from conflicting block: %w\", err)\n\t}\n\n\t// Assert the correct amount of voting power of the validator set\n\tif evTotal, valsTotal := e.TotalVotingPower, commonVals.TotalVotingPower(); evTotal != valsTotal {\n\t\treturn fmt.Errorf(\"total voting power from the evidence and our validator set does not match (%d != %d)\",\n\t\t\tevTotal, valsTotal)\n\t}\n\n\t// check in the case of a forward lunatic attack that monotonically increasing time has been violated\n\tif e.ConflictingBlock.Height > trustedHeader.Height && e.ConflictingBlock.Time.After(trustedHeader.Time) {\n\t\treturn fmt.Errorf(\"conflicting block doesn't violate monotonically increasing time (%v is after %v)\",\n\t\t\te.ConflictingBlock.Time, trustedHeader.Time,\n\t\t)\n\n\t\t// In all other cases check that the hashes of the conflicting header and the trusted header are different\n\t} else if bytes.Equal(trustedHeader.Hash(), e.ConflictingBlock.Hash()) {\n\t\treturn fmt.Errorf(\"trusted header hash matches the evidence's conflicting header hash: %X\",\n\t\t\ttrustedHeader.Hash())\n\t}\n\n\treturn validateABCIEvidence(e, commonVals, trustedHeader)\n}", "func ExampleMicroStellar_AllowTrust() {\n\t// Create a new MicroStellar client connected to a fake network. 
To\n\t// use a real network replace \"fake\" below with \"test\" or \"public\".\n\tms := New(\"fake\")\n\n\t// Custom USD asset issued by specified issuer.\n\tUSD := NewAsset(\"USD\", \"GAIUIQNMSXTTR4TGZETSQCGBTIF32G2L5P4AML4LFTMTHKM44UHIN6XQ\", Credit4Type)\n\n\t// Issuer sets AUTH_REQUIRED flag on account.\n\terr := ms.SetFlags(\"SDPLQEABOETMI7PPKJZYBHHW2BSA3424CI3V5ZRNN3NP2H7KYQOKY5ST\", FlagAuthRequired)\n\tif err != nil {\n\t\tlog.Fatalf(\"SetFlags: %v\", ErrorString(err))\n\t}\n\n\t// Customer creates a trustline to the custom asset with no limit.\n\terr = ms.CreateTrustLine(\"SCSMBQYTXKZYY7CLVT6NPPYWVDQYDOQ6BB3QND4OIXC7762JYJYZ3RMK\", USD, \"\")\n\tif err != nil {\n\t\tlog.Fatalf(\"CreateTrustLine: %v\", err)\n\t}\n\n\t// Issuer then authorizes the trustline that was just created.\n\terr = ms.AllowTrust(\"SDPLQEABOETMI7PPKJZYBHHW2BSA3424CI3V5ZRNN3NP2H7KYQOKY5ST\",\n\t\t\"GAIUIQNMSXTTR4TGZETSQCGBTIF32G2L5P4AML4LFTMTHKM44UHIN6XQ\", \"USD\", true)\n\tif err != nil {\n\t\tlog.Fatalf(\"AllowTrust: %v\", err)\n\t}\n\n\tfmt.Printf(\"ok\")\n\t// Output: ok\n}", "func (c *ClientWithResponses) GetaspecificTrustedSourceWithResponse(ctx context.Context, id string) (*GetaspecificTrustedSourceResponse, error) {\n\trsp, err := c.GetaspecificTrustedSource(ctx, id)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn ParseGetaspecificTrustedSourceResponse(rsp)\n}", "func (c *ClientWithResponses) ChangeaspecificTrustedSourceWithBodyWithResponse(ctx context.Context, id string, contentType string, body io.Reader) (*ChangeaspecificTrustedSourceResponse, error) {\n\trsp, err := c.ChangeaspecificTrustedSourceWithBody(ctx, id, contentType, body)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn ParseChangeaspecificTrustedSourceResponse(rsp)\n}", "func runERC20Lock(ctx *action.Context, tx action.RawTx) (bool, action.Response) {\n\terc20lock := &ERC20Lock{}\n\n\terr := erc20lock.Unmarshal(tx.Data)\n\tif err != nil {\n\t\tctx.Logger.Error(\"wrong tx type\", err)\n\t\treturn 
false, action.Response{Log: \"wrong tx type\"}\n\t}\n\n\tethTx, err := ethchaindriver.DecodeTransaction(erc20lock.ETHTxn)\n\tif err != nil {\n\t\tctx.Logger.Error(\"decode eth txn err\", err)\n\t\treturn false, action.Response{\n\t\t\tLog: \"decode eth txn error\" + err.Error(),\n\t\t}\n\t}\n\n\tethOptions, err := ctx.GovernanceStore.GetETHChainDriverOption()\n\tif err != nil {\n\t\treturn helpers.LogAndReturnFalse(ctx.Logger, gov.ErrGetEthOptions, erc20lock.Tags(), err)\n\t}\n\ttoken, err := ethchaindriver.GetToken(ethOptions.TokenList, *ethTx.To())\n\tif err != nil {\n\t\treturn false, action.Response{\n\t\t\tLog: err.Error(),\n\t\t}\n\t}\n\n\tok, err := ethchaindriver.VerfiyERC20Lock(erc20lock.ETHTxn, token.TokAbi, ethOptions.ERCContractAddress)\n\tif err != nil {\n\t\tctx.Logger.Error(\"Unable to verify ERC LOCK transaction\")\n\t\treturn false, action.Response{\n\t\t\tLog: \"Unable to verify transaction\" + err.Error(),\n\t\t}\n\t}\n\n\tif !ok {\n\t\tctx.Logger.Error(\"To field of Transaction does not match OneLedger Contract Address\")\n\t\treturn false, action.Response{\n\t\t\tLog: \"To field of Transaction does not match OneLedger Contract Address\" + err.Error(),\n\t\t}\n\t}\n\n\twitnesses, err := ctx.Witnesses.GetWitnessAddresses(chain.ETHEREUM)\n\tif err != nil {\n\t\tctx.Logger.Error(\"err in getting witness address\", err)\n\t\treturn false, action.Response{Log: \"error in getting validator addresses\" + err.Error()}\n\t}\n\n\tcurr, ok := ctx.Currencies.GetCurrencyByName(token.TokName)\n\tif !ok {\n\t\treturn false, action.Response{Log: fmt.Sprintf(\"Token not Supported : %s \", token.TokName)}\n\t}\n\n\terc20Params, err := ethchaindriver.ParseErc20Lock(ethOptions.TokenList, erc20lock.ETHTxn)\n\tif err != nil {\n\t\treturn false, action.Response{\n\t\t\tLog: err.Error(),\n\t\t}\n\t}\n\n\tlockToken := curr.NewCoinFromString(erc20Params.TokenAmount.String())\n\t// Adding lock amount to common address to maintain count of total oToken 
minted\n\ttokenSupply := action.Address(ethOptions.TotalSupplyAddr)\n\n\tbalCoin, err := ctx.Balances.GetBalanceForCurr(tokenSupply, &curr)\n\tif err != nil {\n\t\treturn false, action.Response{Log: fmt.Sprintf(\"Unable to get Eth lock total balance %s\", erc20lock.Locker)}\n\t}\n\n\ttotalSupplyToken := curr.NewCoinFromString(token.TokTotalSupply)\n\tif !balCoin.Plus(lockToken).LessThanEqualCoin(totalSupplyToken) {\n\t\treturn false, action.Response{Log: fmt.Sprintf(\"Token lock exceeded limit ,for Token : %s \", token.TokName)}\n\t}\n\n\ttracker := ethereum.NewTracker(\n\t\tethereum.ProcessTypeLockERC,\n\t\terc20lock.Locker,\n\t\terc20lock.ETHTxn,\n\t\tethcommon.BytesToHash(erc20lock.ETHTxn),\n\t\twitnesses,\n\t)\n\n\terr = ctx.ETHTrackers.WithPrefixType(ethereum.PrefixOngoing).Set(tracker)\n\tif err != nil {\n\t\tctx.Logger.Error(\"error saving eth tracker\", err)\n\t\treturn false, action.Response{Log: \"error saving eth tracker: \" + err.Error()}\n\t}\n\n\treturn true, action.Response{\n\t\tEvents: action.GetEvent(erc20lock.Tags(), \"erc20_lock\"),\n\t}\n}", "func TrustedTag(tag string) bool {\n\tif tag == core.DNS || tag == core.CERT || tag == core.ARCHIVE || tag == core.AXFR {\n\t\treturn true\n\t}\n\treturn false\n}", "func withCar(node *Car) carOption {\n\treturn func(m *CarMutation) {\n\t\tm.oldValue = func(context.Context) (*Car, error) {\n\t\t\treturn node, nil\n\t\t}\n\t\tm.id = &node.ID\n\t}\n}", "func withCar(node *Car) carOption {\n\treturn func(m *CarMutation) {\n\t\tm.oldValue = func(context.Context) (*Car, error) {\n\t\t\treturn node, nil\n\t\t}\n\t\tm.id = &node.ID\n\t}\n}", "func DoAllowTrust(gClient gasservice.GasServiceClient, trustorAccountAddress string, assetCode string, issuerAddress string,\n\tauthorize bool, cClient crypto_client.CryptoServiceClient) (error, string) {\n\n\tLOGGER.Debugf(\"DoAllowTrust\")\n\tif GetAssetType(assetCode) != model.AssetAssetTypeDO && GetAssetType(assetCode) != model.AssetAssetTypeDA {\n\t\tmsg := \"Asset code 
is not DO nor DA: \" + assetCode\n\t\tLOGGER.Error(msg)\n\t\treturn errors.New(\"1201\"), msg\n\t}\n\n\tLOGGER.Debugf(\"DoAllowTrust Transaction issuingAccount =%v, assetCode =%v, trustorAddr=%v, authorize=%v\",\n\t\tissuerAddress, assetCode, trustorAccountAddress, authorize)\n\n\t//check AccountFlags\n\tif !isAccountFlagsValid(issuerAddress) {\n\t\tmsg := \"Issuing account's flags is not valid: \" + issuerAddress\n\t\tLOGGER.Errorf(msg)\n\t\treturn errors.New(\"1211\"), msg\n\t}\n\n\t/*\n\t\tSubmit a AllowTrust operation with authorize=boolean\n\t*/\n\n\tstellarNetwork := comn.GetStellarNetwork(os.Getenv(global_environment.ENV_KEY_STELLAR_NETWORK))\n\n\t//Get IBM gas account\n\tibmAccount, sequenceNum, err := gClient.GetAccountAndSequence()\n\n\ttx, err := b.Transaction(\n\t\tb.SourceAccount{AddressOrSeed: ibmAccount},\n\t\tstellarNetwork,\n\t\tb.Sequence{Sequence: sequenceNum},\n\t\t//\tSubmit a AllowTrust operation with authorize=boolean\n\t\tb.AllowTrust(\n\t\t\tb.SourceAccount{AddressOrSeed: issuerAddress},\n\t\t\tb.Trustor{Address: trustorAccountAddress},\n\t\t\tb.AllowTrustAsset{Code: assetCode},\n\t\t\tb.Authorize{Value: authorize}),\n\t)\n\tif err != nil {\n\t\tmsg := \"Error while allowing trust: \" + err.Error()\n\t\tLOGGER.Error(msg)\n\t\treturn errors.New(\"1222\"), msg\n\t}\n\tvar txe b.TransactionEnvelopeBuilder\n\terr = txe.Mutate(tx)\n\n\tif err != nil {\n\t\tmsg := \"Error during building Mutate\"\n\t\tLOGGER.Error(msg)\n\t\treturn errors.New(\"1208\"), msg\n\t}\n\n\ttxeB64, err := txe.Base64()\n\t//TBD: will have to integrate with gas service\n\txdrB, _ := base64.StdEncoding.DecodeString(txeB64)\n\n\t//Get signed by issuing account on crypto service\n\tsigXdr, errorMsg, status, _ := cClient.ParticipantSignXdr(comn.ISSUING, xdrB)\n\n\tif status != http.StatusCreated {\n\t\tLOGGER.Errorf(\"Error creating allow trust %v\", errorMsg.Error())\n\t\treturn errors.New(\"1208\"), errorMsg.Error()\n\t}\n\tLOGGER.Debugf(\"signed transaction: %v\", 
base64.StdEncoding.EncodeToString(sigXdr))\n\n\tif errorMsg != nil {\n\t\tmsg := \"Signing trust went through. Error during encoding\"\n\t\tLOGGER.Error(msg)\n\t\treturn errors.New(\"1209\"), msg\n\t}\n\n\ttxeB64 = base64.StdEncoding.EncodeToString(sigXdr)\n\n\t//Post to gas service\n\thash, ledger, err := gClient.SubmitTxe(txeB64)\n\tif err != nil {\n\t\tLOGGER.Warningf(\"AllowTrust failed gas service error... %v \", err.Error())\n\t\treturn err, \"AllowTrust failed:\" + err.Error()\n\t}\n\tLOGGER.Debugf(\"Hash:%v, Ledger:%v\", hash, ledger)\n\n\tmsg := \"Transaction posted in ledger: \" + hash\n\treturn nil, msg\n}", "func (t *osCinderCSITranslator) CanSupportInline(volume *v1.Volume) bool {\n\treturn volume != nil && volume.Cinder != nil\n}", "func HasCarWith(preds ...predicate.Car) predicate.User {\n\treturn predicate.User(func(s *sql.Selector) {\n\t\tstep := sqlgraph.NewStep(\n\t\t\tsqlgraph.From(Table, FieldID),\n\t\t\tsqlgraph.To(CarInverseTable, CarFieldID),\n\t\t\tsqlgraph.Edge(sqlgraph.O2M, false, CarTable, CarColumn),\n\t\t)\n\t\tsqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) {\n\t\t\tfor _, p := range preds {\n\t\t\t\tp(s)\n\t\t\t}\n\t\t})\n\t})\n}", "func HasCarWith(preds ...predicate.Car) predicate.User {\n\treturn predicate.User(func(s *sql.Selector) {\n\t\tstep := sqlgraph.NewStep(\n\t\t\tsqlgraph.From(Table, FieldID),\n\t\t\tsqlgraph.To(CarInverseTable, CarFieldID),\n\t\t\tsqlgraph.Edge(sqlgraph.O2M, false, CarTable, CarColumn),\n\t\t)\n\t\tsqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) {\n\t\t\tfor _, p := range preds {\n\t\t\t\tp(s)\n\t\t\t}\n\t\t})\n\t})\n}", "func bindSafeERC20(address common.Address, caller bind.ContractCaller, transactor bind.ContractTransactor, filterer bind.ContractFilterer) (*bind.BoundContract, error) {\n\tparsed, err := abi.JSON(strings.NewReader(SafeERC20ABI))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn bind.NewBoundContract(address, parsed, caller, transactor, filterer), nil\n}", "func 
bindSafeERC20(address common.Address, caller bind.ContractCaller, transactor bind.ContractTransactor, filterer bind.ContractFilterer) (*bind.BoundContract, error) {\n\tparsed, err := abi.JSON(strings.NewReader(SafeERC20ABI))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn bind.NewBoundContract(address, parsed, caller, transactor, filterer), nil\n}", "func bindSafeERC20(address common.Address, caller bind.ContractCaller, transactor bind.ContractTransactor, filterer bind.ContractFilterer) (*bind.BoundContract, error) {\n\tparsed, err := abi.JSON(strings.NewReader(SafeERC20ABI))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn bind.NewBoundContract(address, parsed, caller, transactor, filterer), nil\n}", "func validateClientCertificate(certificate *x509.Certificate, trustedCertsFile string,\n\tsuppressCertificateTimeInvalid, suppressCertificateChainIncomplete bool) (bool, error) {\n\tif certificate == nil {\n\t\treturn false, ua.BadCertificateInvalid\n\t}\n\tvar intermediates, roots *x509.CertPool\n\tif buf, err := os.ReadFile(trustedCertsFile); err == nil {\n\t\tfor len(buf) > 0 {\n\t\t\tvar block *pem.Block\n\t\t\tblock, buf = pem.Decode(buf)\n\t\t\tif block == nil {\n\t\t\t\t// maybe its der\n\t\t\t\tcert, err := x509.ParseCertificate(buf)\n\t\t\t\tif err == nil {\n\t\t\t\t\t// is self-signed?\n\t\t\t\t\tif bytes.Equal(cert.RawIssuer, cert.RawSubject) {\n\t\t\t\t\t\tif roots == nil {\n\t\t\t\t\t\t\troots = x509.NewCertPool()\n\t\t\t\t\t\t}\n\t\t\t\t\t\troots.AddCert(cert)\n\t\t\t\t\t} else {\n\t\t\t\t\t\tif intermediates == nil {\n\t\t\t\t\t\t\tintermediates = x509.NewCertPool()\n\t\t\t\t\t\t}\n\t\t\t\t\t\tintermediates.AddCert(cert)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tif block.Type != \"CERTIFICATE\" || len(block.Headers) != 0 {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tcert, err := x509.ParseCertificate(block.Bytes)\n\t\t\tif err != nil {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\t// is self-signed?\n\t\t\tif bytes.Equal(cert.RawIssuer, 
cert.RawSubject) {\n\t\t\t\tif roots == nil {\n\t\t\t\t\troots = x509.NewCertPool()\n\t\t\t\t}\n\t\t\t\troots.AddCert(cert)\n\t\t\t} else {\n\t\t\t\tif intermediates == nil {\n\t\t\t\t\tintermediates = x509.NewCertPool()\n\t\t\t\t}\n\t\t\t\tintermediates.AddCert(cert)\n\t\t\t}\n\t\t}\n\t}\n\n\topts := x509.VerifyOptions{\n\t\tIntermediates: intermediates,\n\t\tRoots: roots,\n\t\tKeyUsages: []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth},\n\t}\n\n\tif suppressCertificateTimeInvalid {\n\t\topts.CurrentTime = certificate.NotAfter // causes test to pass\n\t}\n\n\tif suppressCertificateChainIncomplete {\n\t\tif opts.Roots == nil {\n\t\t\topts.Roots = x509.NewCertPool()\n\t\t}\n\t\topts.Roots.AddCert(certificate)\n\t}\n\n\t// build chain and verify\n\tif _, err := certificate.Verify(opts); err != nil {\n\t\tswitch se := err.(type) {\n\t\tcase x509.CertificateInvalidError:\n\t\t\tswitch se.Reason {\n\t\t\tcase x509.Expired:\n\t\t\t\treturn false, ua.BadCertificateTimeInvalid\n\t\t\tcase x509.IncompatibleUsage:\n\t\t\t\treturn false, ua.BadCertificateUseNotAllowed\n\t\t\tdefault:\n\t\t\t\treturn false, ua.BadSecurityChecksFailed\n\t\t\t}\n\t\tcase x509.UnknownAuthorityError:\n\t\t\treturn false, ua.BadSecurityChecksFailed\n\t\tdefault:\n\t\t\treturn false, ua.BadSecurityChecksFailed\n\t\t}\n\t}\n\treturn true, nil\n}", "func withCarInspection(node *CarInspection) carinspectionOption {\n\treturn func(m *CarInspectionMutation) {\n\t\tm.oldValue = func(context.Context) (*CarInspection, error) {\n\t\t\treturn node, nil\n\t\t}\n\t\tm.id = &node.ID\n\t}\n}", "func (_Casper *CasperSession) Trusted() (common.Address, error) {\n\treturn _Casper.Contract.Trusted(&_Casper.CallOpts)\n}", "func bindClinic(address common.Address, caller bind.ContractCaller, transactor bind.ContractTransactor, filterer bind.ContractFilterer) (*bind.BoundContract, error) {\n\tparsed, err := abi.JSON(strings.NewReader(ClinicABI))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn 
bind.NewBoundContract(address, parsed, caller, transactor, filterer), nil\n}", "func AllowTrustDomainWorkload(trustDomain string) ValidationMode {\n\treturn validationMode{\n\t\toptions: validationOptions{\n\t\t\ttrustDomain: trustDomain,\n\t\t\ttrustDomainRequired: true,\n\t\t\tidType: workloadId,\n\t\t},\n\t}\n}", "func CarmaintenanceContainsFold(v string) predicate.CarRepairrecord {\n\treturn predicate.CarRepairrecord(func(s *sql.Selector) {\n\t\ts.Where(sql.ContainsFold(s.C(FieldCarmaintenance), v))\n\t})\n}", "func (o FluxConfigurationBlobStorageServicePrincipalOutput) ClientCertificateSendChain() pulumi.BoolPtrOutput {\n\treturn o.ApplyT(func(v FluxConfigurationBlobStorageServicePrincipal) *bool { return v.ClientCertificateSendChain }).(pulumi.BoolPtrOutput)\n}", "func WithTrustForwardHeader(enable bool) Option {\n\treturn func(o *Options) {\n\t\to.TrustForwardHeader = enable\n\t}\n}", "func (uc *UserCreate) AddCarIDs(ids ...string) *UserCreate {\n\tuc.mutation.AddCarIDs(ids...)\n\treturn uc\n}", "func (_Casper *CasperCallerSession) Trusted() (common.Address, error) {\n\treturn _Casper.Contract.Trusted(&_Casper.CallOpts)\n}", "func withCarID(id int) carOption {\n\treturn func(m *CarMutation) {\n\t\tvar (\n\t\t\terr error\n\t\t\tonce sync.Once\n\t\t\tvalue *Car\n\t\t)\n\t\tm.oldValue = func(ctx context.Context) (*Car, error) {\n\t\t\tonce.Do(func() {\n\t\t\t\tif m.done {\n\t\t\t\t\terr = fmt.Errorf(\"querying old values post mutation is not allowed\")\n\t\t\t\t} else {\n\t\t\t\t\tvalue, err = m.Client().Car.Get(ctx, id)\n\t\t\t\t}\n\t\t\t})\n\t\t\treturn value, err\n\t\t}\n\t\tm.id = &id\n\t}\n}", "func withCarID(id int) carOption {\n\treturn func(m *CarMutation) {\n\t\tvar (\n\t\t\terr error\n\t\t\tonce sync.Once\n\t\t\tvalue *Car\n\t\t)\n\t\tm.oldValue = func(ctx context.Context) (*Car, error) {\n\t\t\tonce.Do(func() {\n\t\t\t\tif m.done {\n\t\t\t\t\terr = fmt.Errorf(\"querying old values post mutation is not allowed\")\n\t\t\t\t} else {\n\t\t\t\t\tvalue, 
err = m.Client().Car.Get(ctx, id)\n\t\t\t\t}\n\t\t\t})\n\t\t\treturn value, err\n\t\t}\n\t\tm.id = &id\n\t}\n}", "func (c *ClientWithResponses) GetTrustedSourceitemsWithResponse(ctx context.Context, params *GetTrustedSourceitemsParams) (*GetTrustedSourceitemsResponse, error) {\n\trsp, err := c.GetTrustedSourceitems(ctx, params)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn ParseGetTrustedSourceitemsResponse(rsp)\n}", "func TrustAsset(hclient *equator.Client, seed, code, issuer string) error {\n\ttx, err := b.Transaction(\n\t\tb.SourceAccount{AddressOrSeed: seed},\n\t\tb.TestNetwork,\n\t\tb.AutoSequence{SequenceProvider: hclient},\n\t\tb.Trust(code, issuer),\n\t)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"building tx\")\n\t}\n\t_, err = SignAndSubmitTx(hclient, tx, seed)\n\treturn err\n}", "func NewRawCardSignerWithCtx(pointer unsafe.Pointer) *RawCardSigner {\n\tctx := (*C.vssc_raw_card_signer_t /*ct2*/)(pointer)\n\tobj := &RawCardSigner{\n\t\tcCtx: ctx,\n\t}\n\truntime.SetFinalizer(obj, (*RawCardSigner).Delete)\n\treturn obj\n}", "func WithTlsCaCert(with string) wrapping.Option {\n\treturn func() interface{} {\n\t\treturn OptionFunc(func(o *options) error {\n\t\t\to.withTlsCaCert = with\n\t\t\treturn nil\n\t\t})\n\t}\n}", "func (r *Resolver) Trusted() []int {\n\tm := r.Modules[THETAFD].(*ThetafdModule)\n\treturn m.Trusted()\n}", "func (c *Conn) EnableTrustedSchema(b bool) (bool, error) {\n\tif C.SQLITE_VERSION_NUMBER < 3031000 {\n\t\t// SQLITE_DBCONFIG_TRUSTED_SCHEMA was added in SQLite 3.31.0:\n\t\t// https://github.com/sqlite/sqlite/commit/b77da374ab6dfeaac5def640da91f219da7fa5c0\n\t\treturn false, errors.New(\"SQLITE_DBCONFIG_TRUSTED_SCHEMA isn't present in the called SQLite library\")\n\t}\n\treturn c.queryOrSetEnableDbConfig(C.SQLITE_DBCONFIG_TRUSTED_SCHEMA, btocint(b))\n}", "func (cic *CarInspectionCreate) AddCarrepairrecordIDs(ids ...int) *CarInspectionCreate {\n\tcic.mutation.AddCarrepairrecordIDs(ids...)\n\treturn cic\n}", "func (c *ca) 
TrustAnchors() []byte {\n\treturn c.bundle.TrustAnchors\n}", "func (m *MockConfig) TrustedCAFile() string {\n\targs := m.Called()\n\treturn args.String(0)\n}", "func (ftyp FileTypeBox) IsCR3() bool {\n\treturn ftyp.MajorBrand == brandCrx\n}", "func (mp *TxPool) verifyCRRelatedTx(txn *Transaction) ErrCode {\n\tswitch txn.TxType {\n\tcase RegisterCR:\n\t\tp, ok := txn.Payload.(*payload.CRInfo)\n\t\tif !ok {\n\t\t\tlog.Error(\"register CR payload cast failed, tx:\", txn.Hash())\n\t\t\treturn ErrCRProcessing\n\t\t}\n\t\tif err := mp.verifyDuplicateCRAndProducer(p.CID, p.Code, p.NickName); err != nil {\n\t\t\tlog.Warn(err)\n\t\t\treturn ErrCRProcessing\n\t\t}\n\tcase UpdateCR:\n\t\tp, ok := txn.Payload.(*payload.CRInfo)\n\t\tif !ok {\n\t\t\tlog.Error(\"update CR payload cast failed, tx:\", txn.Hash())\n\t\t\treturn ErrCRProcessing\n\t\t}\n\t\tif err := mp.verifyDuplicateCRAndNickname(p.CID, p.NickName); err != nil {\n\t\t\tlog.Warn(err)\n\t\t\treturn ErrCRProcessing\n\t\t}\n\tcase UnregisterCR:\n\t\tp, ok := txn.Payload.(*payload.UnregisterCR)\n\t\tif !ok {\n\t\t\tlog.Error(\"update producer payload cast failed, tx:\", txn.Hash())\n\t\t\treturn ErrCRProcessing\n\t\t}\n\t\tif err := mp.verifyDuplicateCR(p.CID); err != nil {\n\t\t\tlog.Warn(err)\n\t\t\treturn ErrCRProcessing\n\t\t}\n\tcase ReturnCRDepositCoin:\n\t\terr := mp.verifyDuplicateCode(BytesToHexString(txn.Programs[0].Code))\n\t\tif err != nil {\n\t\t\tlog.Warn(err)\n\t\t\treturn ErrCRProcessing\n\t\t}\n\t}\n\n\treturn Success\n}", "func NeedsLicense(kind string) bool {\n\treturn kind == \"car\" || kind == \"truck\"\n}", "func (_Privileges *PrivilegesTransactor) SetTrusted(opts *bind.TransactOpts, addr common.Address) (*types.Transaction, error) {\n\treturn _Privileges.contract.Transact(opts, \"setTrusted\", addr)\n}", "func (k *Keeper) SetCiphertext(ctx sdk.Context, ctShare *types.CiphertextShare) sdk.Error {\n\tdefer func() {\n\t\tif r := recover(); r != nil {\n\t\t\tlog.Println(\"PANIC:\", 
r)\n\t\t}\n\t}()\n\tif ctShare.EntropyProvider.Empty() {\n\t\treturn sdk.ErrInvalidAddress(\"entropy provider can't be empty!\")\n\t}\n\tround := k.CurrentRound(ctx)\n\tstage := k.GetStage(ctx, round)\n\tpubKey, err1 := k.GetCommonPublicKey(ctx)\n\tif err1 != nil {\n\t\treturn err1\n\t}\n\terr := elgamal.CEVerify(P256, k.group.Point().Base(), pubKey, ctShare.Ciphertext.PointA, ctShare.Ciphertext.PointB, ctShare.CEproof)\n\tif err != nil {\n\t\treturn sdk.ErrUnknownRequest(fmt.Sprintf(\"CE proof isn't correct: %v\", err))\n\t}\n\n\tif k.CurrentRound(ctx) == 0 && stage == stageUnstarted {\n\t\terr1 = k.InitializeVerificationKeys(ctx)\n\t\tif err1 != nil {\n\t\t\treturn err1\n\t\t}\n\t\tstage = stageCtCollecting\n\t\tk.setStage(ctx, round, stage)\n\t}\n\n\tif stage != stageCtCollecting {\n\t\treturn sdk.ErrUnknownRequest(fmt.Sprintf(\"round is not on the ciphertext collecting stage. Current stage: %v\", stage))\n\t}\n\tctStore := ctx.KVStore(k.storeCiphertextSharesKey)\n\tkeyBytesAllCt := []byte(fmt.Sprintf(\"rd_%d\", round))\n\tt, err1 := k.GetThresholdCiphertexts(ctx)\n\tif err1 != nil {\n\t\treturn err1\n\t}\n\n\tvar addrList []string\n\tif ctStore.Has(keyBytesAllCt) {\n\t\taddrListBytes := ctStore.Get(keyBytesAllCt)\n\t\terr := k.cdc.UnmarshalJSON(addrListBytes, &addrList)\n\t\tif err != nil {\n\t\t\treturn sdk.ErrUnknownRequest(fmt.Sprintf(\"can't unmarshal list of all addresses from the store: %v\", err))\n\t\t}\n\t}\n\taddrList = append(addrList, ctShare.EntropyProvider.String())\n\tnewAddrListBytes, err := k.cdc.MarshalJSON(addrList)\n\tif err != nil {\n\t\treturn sdk.ErrUnknownRequest(fmt.Sprintf(\"can't marshal list of all addresses: %v\", err))\n\t}\n\n\tkeyBytesCt := createKeyBytesByAddr(round, ctShare.EntropyProvider)\n\tif ctStore.Has(keyBytesCt) {\n\t\treturn sdk.ErrInvalidAddress(\"entropy provider has already sentf ciphertext share\")\n\t}\n\tctJSON, err := types.NewCiphertextShareJSON(ctShare)\n\tif err != nil {\n\t\treturn 
sdk.ErrUnknownRequest(fmt.Sprintf(\"can't serialize ctShare: %v\", err))\n\t}\n\tctBytes, err := k.cdc.MarshalJSON(ctJSON)\n\tif err != nil {\n\t\treturn sdk.ErrUnknownRequest(fmt.Sprintf(\"can't marshall ctShare: %v\", err))\n\t}\n\taggregatedCt, err1 := k.GetAggregatedCiphertext(ctx, round)\n\tif err1 != nil {\n\t\treturn err1\n\t}\n\tvar newAggregatedCt elgamal.Ciphertext\n\tif aggregatedCt == nil {\n\t\tnewAggregatedCt = ctShare.Ciphertext\n\t} else {\n\t\tnewAggregatedCt = elgamal.AggregateCiphertext(P256, []elgamal.Ciphertext{ctShare.Ciphertext, *aggregatedCt})\n\t}\n\terr1 = k.SetAggregatedCiphertext(ctx, round, &newAggregatedCt)\n\tif err1 != nil {\n\t\treturn err1\n\t}\n\tctStore.Set(keyBytesCt, ctBytes)\n\tctStore.Set(keyBytesAllCt, newAddrListBytes)\n\n\tif uint64(len(addrList)) >= t {\n\t\tk.setStage(ctx, round, stageDSCollecting)\n\t}\n\treturn nil\n}", "func (d *DataEncryptor) EncryptWithClientID(clientID, data []byte, setting config.ColumnEncryptionSetting) ([]byte, error) {\n\tif d.needSkipEncryptionFunc(setting) {\n\t\treturn data, nil\n\t}\n\t// skip already encrypted AcraBlock\n\tif _, _, err := ExtractAcraBlockFromData(data); err == nil {\n\t\treturn data, nil\n\t}\n\tif setting.ShouldReEncryptAcraStructToAcraBlock() {\n\t\t// decrypt AcraStruct to encrypt it with AcraBlock\n\t\tif err := acrastruct.ValidateAcraStructLength(data); err == nil {\n\t\t\tdataContext := base.NewDataProcessorContext(d.keyStore)\n\t\t\taccessContext := base.NewAccessContext(base.WithClientID(clientID))\n\t\t\tdataContext.Context = base.SetAccessContextToContext(context.Background(), accessContext)\n\t\t\tdecrypted, err := base.DecryptProcessor{}.Process(data, dataContext)\n\t\t\tif err != nil {\n\t\t\t\treturn data, err\n\t\t\t}\n\t\t\tdata = decrypted\n\t\t}\n\t}\n\tkeys, err := d.keyStore.GetClientIDSymmetricKey(clientID)\n\tif err != nil {\n\t\treturn data, err\n\t}\n\treturn CreateAcraBlock(data, keys, nil)\n}", "func withCarRepairrecord(node *CarRepairrecord) 
carrepairrecordOption {\n\treturn func(m *CarRepairrecordMutation) {\n\t\tm.oldValue = func(context.Context) (*CarRepairrecord, error) {\n\t\t\treturn node, nil\n\t\t}\n\t\tm.id = &node.ID\n\t}\n}", "func (cfg *Config) TrustedCACerts() []string {\n\tcerts := make([]string, 0, len(cfg.TrustedCAs))\n\tfor _, ca := range cfg.TrustedCAs {\n\t\tcerts = append(certs, ca.Cert)\n\t}\n\treturn certs\n}", "func (c *ClientWithResponses) ReplacechangeaspecificTrustedSourceWithBodyWithResponse(ctx context.Context, id string, contentType string, body io.Reader) (*ReplacechangeaspecificTrustedSourceResponse, error) {\n\trsp, err := c.ReplacechangeaspecificTrustedSourceWithBody(ctx, id, contentType, body)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn ParseReplacechangeaspecificTrustedSourceResponse(rsp)\n}", "func (o *PublicRemoveTrustedDeviceV4Params) WithContext(ctx context.Context) *PublicRemoveTrustedDeviceV4Params {\n\to.SetContext(ctx)\n\treturn o\n}", "func (uu *UserUpdate) AddCarrepairrecordIDs(ids ...int) *UserUpdate {\n\tuu.mutation.AddCarrepairrecordIDs(ids...)\n\treturn uu\n}", "func bindSmartchef(address common.Address, caller bind.ContractCaller, transactor bind.ContractTransactor, filterer bind.ContractFilterer) (*bind.BoundContract, error) {\n\tparsed, err := abi.JSON(strings.NewReader(SmartchefABI))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn bind.NewBoundContract(address, parsed, caller, transactor, filterer), nil\n}", "func trustedOriginExists(d *schema.ResourceData, m interface{}) (bool, error) {\n\tclient := m.(*Config).articulateOktaClient\n\t_, _, err := client.TrustedOrigins.GetTrustedOrigin(d.Id())\n\n\tif client.OktaErrorCode == \"E0000007\" {\n\t\treturn false, nil\n\t}\n\tif err != nil {\n\t\treturn false, fmt.Errorf(\"[ERROR] Error Getting Trusted Origin in Okta: %v\", err)\n\t}\n\treturn true, nil\n}", "func (o *PublicRemoveTrustedDeviceV4Params) WithTimeout(timeout time.Duration) *PublicRemoveTrustedDeviceV4Params 
{\n\to.SetTimeout(timeout)\n\treturn o\n}", "func HasCarinspectionWith(preds ...predicate.CarInspection) predicate.CarRepairrecord {\n\treturn predicate.CarRepairrecord(func(s *sql.Selector) {\n\t\tstep := sqlgraph.NewStep(\n\t\t\tsqlgraph.From(Table, FieldID),\n\t\t\tsqlgraph.To(CarinspectionInverseTable, FieldID),\n\t\t\tsqlgraph.Edge(sqlgraph.M2O, true, CarinspectionTable, CarinspectionColumn),\n\t\t)\n\t\tsqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) {\n\t\t\tfor _, p := range preds {\n\t\t\t\tp(s)\n\t\t\t}\n\t\t})\n\t})\n}", "func (c *Client) CarsDriven(custID *string) (*CarsDriven, *http.Response, error) {\n\tv := url.Values{\"custid\": {*custID}}\n\tcarsDriven := &CarsDriven{}\n\tresp, err := c.do(URLPathCarsDriven, &v, carsDriven)\n\treturn carsDriven, resp, err\n}", "func VerifyOnChain(msg []byte, signature []byte, signers EthAddresses,\n) (types.OracleID, error) {\n\tauthor, err := crypto.SigToPub(onChainHash(msg), signature)\n\tif err != nil {\n\t\treturn types.OracleID(-1), errors.Wrapf(err, \"while trying to recover \"+\n\t\t\t\"sender from sig %x on msg %+v\", signature, msg)\n\t}\n\toid, ok := signers[(*OnChainPublicKey)(author).Address()]\n\tif ok {\n\t\treturn oid, nil\n\t} else {\n\t\treturn types.OracleID(-1), errors.Errorf(\"signer is not on whitelist\")\n\t}\n}", "func AllowTrustDomain(trustDomain string) ValidationMode {\n\treturn validationMode{\n\t\toptions: validationOptions{\n\t\t\ttrustDomain: trustDomain,\n\t\t\ttrustDomainRequired: true,\n\t\t\tidType: trustDomainId,\n\t\t},\n\t}\n}", "func (o FluxConfigurationBlobStorageServicePrincipalPtrOutput) ClientCertificateSendChain() pulumi.BoolPtrOutput {\n\treturn o.ApplyT(func(v *FluxConfigurationBlobStorageServicePrincipal) *bool {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn v.ClientCertificateSendChain\n\t}).(pulumi.BoolPtrOutput)\n}", "func WithACRVerifier(verifier oidc.ACRVerifier) VerifierOption {\n\treturn func(v *idTokenVerifier) {\n\t\tv.acr = 
verifier\n\t}\n}", "func (tracker *PeerTracker) listTrusted() []*types.ChainInfo {\n\ttracker.mu.Lock()\n\tdefer tracker.mu.Unlock()\n\n\tvar tracked []*types.ChainInfo\n\tfor p, ci := range tracker.peers {\n\t\tif _, trusted := tracker.trusted[p]; trusted {\n\t\t\ttracked = append(tracked, ci)\n\t\t}\n\t}\n\tout := make([]*types.ChainInfo, len(tracked))\n\tcopy(out, tracked)\n\treturn out\n}", "func (s *Setting) IsTrusted(adr string) bool {\n\tok := false\n\tfor _, t := range s.TrustedNodes {\n\t\tif t == adr {\n\t\t\tok = true\n\t\t}\n\t}\n\treturn ok\n}", "func (drc *DummyRegistryClient) BecomeFoolishlyTrusting() {}", "func (s *SmartContract) ChangeCarOwner(ctx contractapi.TransactionContextInterface, carNumber string, newOwner string) error {\n\tcar, err := s.QueryCar(ctx, carNumber)\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tcar.Owner = newOwner\n\n\tcarAsBytes, _ := json.Marshal(car)\n\n\treturn ctx.GetStub().PutState(carNumber, carAsBytes)\n}", "func (_Privileges *PrivilegesCaller) Trusted(opts *bind.CallOpts) (common.Address, error) {\n\tvar (\n\t\tret0 = new(common.Address)\n\t)\n\tout := ret0\n\terr := _Privileges.contract.Call(opts, out, \"trusted\")\n\treturn *ret0, err\n}", "func (o *Operation) SecuredWith(name string, scopes ...string) *Operation {\n\to.Operation.SecuredWith(name, scopes...)\n\treturn o\n}", "func ClientIDContainsFold(v string) predicate.DeviceRequest {\n\treturn predicate.DeviceRequest(sql.FieldContainsFold(FieldClientID, v))\n}", "func (uq *UserQuery) WithCars(opts ...func(*CarQuery)) *UserQuery {\n\tquery := &CarQuery{config: uq.config}\n\tfor _, opt := range opts {\n\t\topt(query)\n\t}\n\tuq.withCars = query\n\treturn uq\n}", "func (o ServiceSecurityOutput) TlsRsaWithAes256CbcSha256CiphersEnabled() pulumi.BoolPtrOutput {\n\treturn o.ApplyT(func(v ServiceSecurity) *bool { return v.TlsRsaWithAes256CbcSha256CiphersEnabled }).(pulumi.BoolPtrOutput)\n}", "func bindMainnetCryptoCardsContract(address common.Address, caller 
bind.ContractCaller, transactor bind.ContractTransactor, filterer bind.ContractFilterer) (*bind.BoundContract, error) {\n\tparsed, err := abi.JSON(strings.NewReader(MainnetCryptoCardsContractABI))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn bind.NewBoundContract(address, parsed, caller, transactor, filterer), nil\n}", "func CompareCar(w http.ResponseWriter, r *http.Request) {\n\tallVehicles := services.GetAllVehicle()\n\tids := make(map[string]uint, 0)\n\n\tfor _, vehicle := range allVehicles {\n\t\tids[\"car\"+strconv.Itoa(int(vehicle.ID))] = vehicle.ID\n\t}\n\n\tfmt.Println(ids)\n\n\tcusttpl.ExecuteTemplate(w, \"compareCar.html\", struct {\n\t\tAllVehicles []model.Vehicle\n\t\tIds map[string]uint\n\t}{allVehicles, ids})\n}", "func discoverTrident() (installed bool, err error) {\n\tif installed, _, err = isCSITridentInstalled(); err != nil {\n\t\terr = fmt.Errorf(\"could not check if CSI Trident is installed; %v\", err)\n\t\treturn\n\t}\n\n\treturn\n}", "func (rc *RentalCreate) SetCar(c *Car) *RentalCreate {\n\treturn rc.SetCarID(c.ID)\n}", "func IsOwner(tree *dag.Dag, blockWithHeaders *chaintree.BlockWithHeaders) (bool, chaintree.CodedError) {\n\tctx := context.TODO()\n\tid, _, err := tree.Resolve(context.TODO(), []string{\"id\"})\n\tif err != nil {\n\t\treturn false, &consensus.ErrorCode{Memo: fmt.Sprintf(\"error: %v\", err), Code: consensus.ErrUnknown}\n\t}\n\n\theaders := &consensus.StandardHeaders{}\n\n\terr = typecaster.ToType(blockWithHeaders.Headers, headers)\n\tif err != nil {\n\t\treturn false, &consensus.ErrorCode{Memo: fmt.Sprintf(\"error: %v\", err), Code: consensus.ErrUnknown}\n\t}\n\n\tvar addrs []string\n\n\tuncastAuths, _, err := tree.Resolve(context.TODO(), strings.Split(\"tree/\"+consensus.TreePathForAuthentications, \"/\"))\n\tif err != nil {\n\t\treturn false, &consensus.ErrorCode{Code: consensus.ErrUnknown, Memo: fmt.Sprintf(\"err resolving: %v\", err)}\n\t}\n\t// If there are no authentications then the Chain Tree is still owned 
by its genesis key\n\tif uncastAuths == nil {\n\t\taddrs = []string{consensus.DidToAddr(id.(string))}\n\t} else {\n\t\terr = typecaster.ToType(uncastAuths, &addrs)\n\t\tif err != nil {\n\t\t\treturn false, &consensus.ErrorCode{Code: consensus.ErrUnknown, Memo: fmt.Sprintf(\"err casting: %v\", err)}\n\t\t}\n\t}\n\n\tfor _, addr := range addrs {\n\t\tisSigned, err := consensus.IsBlockSignedBy(ctx, blockWithHeaders, addr)\n\t\tif err != nil {\n\t\t\treturn false, &consensus.ErrorCode{Memo: fmt.Sprintf(\"error finding if signed: %v\", err), Code: consensus.ErrUnknown}\n\t\t}\n\n\t\tif isSigned {\n\t\t\treturn true, nil\n\t\t}\n\t}\n\n\treturn false, nil\n}", "func (i Identifiable) AsCivicStructure() (*CivicStructure, bool) {\n\treturn nil, false\n}", "func WithCORS(cfg CORSConfig) Middleware {\n\treturn func(next Handler) Handler {\n\t\treturn func(ctx context.Context, req Request) (interface{}, error) {\n\t\t\tallowMethods := strings.Join(cfg.AllowMethods, \",\")\n\t\t\tallowHeaders := strings.Join(cfg.AllowHeaders, \",\")\n\n\t\t\thttpReq := req.HTTPRequest()\n\t\t\torigin := httpReq.Header.Get(HeaderOrigin)\n\t\t\tallowOrigin := getAllowOrigin(origin, cfg)\n\t\t\theader := ResponseHeaderFromCtx(ctx)\n\n\t\t\t// non-OPTIONS requests\n\t\t\tif httpReq.Method != http.MethodOptions {\n\t\t\t\theader.Add(HeaderVary, HeaderOrigin)\n\t\t\t\theader.Set(HeaderAccessControlAllowOrigin, allowOrigin)\n\t\t\t\tif cfg.AllowCredentials {\n\t\t\t\t\theader.Set(HeaderAccessControlAllowCredentials, \"true\")\n\t\t\t\t}\n\t\t\t\treturn next(ctx, req)\n\t\t\t}\n\n\t\t\t// Preflight requests\n\t\t\theader.Add(HeaderVary, HeaderOrigin)\n\t\t\theader.Add(HeaderVary, HeaderAccessControlRequestMethod)\n\t\t\theader.Add(HeaderVary, HeaderAccessControlRequestHeaders)\n\t\t\theader.Set(HeaderAccessControlAllowOrigin, allowOrigin)\n\t\t\theader.Set(HeaderAccessControlAllowMethods, allowMethods)\n\t\t\tif cfg.AllowCredentials {\n\t\t\t\theader.Set(HeaderAccessControlAllowCredentials, 
\"true\")\n\t\t\t}\n\t\t\tif allowHeaders != \"\" {\n\t\t\t\theader.Set(HeaderAccessControlAllowHeaders, allowHeaders)\n\t\t\t} else {\n\t\t\t\th := httpReq.Header.Get(HeaderAccessControlRequestHeaders)\n\t\t\t\tif h != \"\" {\n\t\t\t\t\theader.Set(HeaderAccessControlAllowHeaders, h)\n\t\t\t\t}\n\t\t\t}\n\t\t\tif cfg.MaxAge > 0 {\n\t\t\t\theader.Set(HeaderAccessControlMaxAge, strconv.Itoa(cfg.MaxAge))\n\t\t\t}\n\n\t\t\treturn nil, nil\n\t\t}\n\t}\n}", "func (o KubernetesClusterHttpProxyConfigOutput) TrustedCa() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v KubernetesClusterHttpProxyConfig) *string { return v.TrustedCa }).(pulumi.StringPtrOutput)\n}", "func (o *OIDCIDVerifier) Verify(ctx context.Context, rawIDToken string) (*Claims, error) {\n\tidToken, err := o.Verifier.Verify(ctx, rawIDToken)\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"unable to verify rawIDToken\")\n\t}\n\tvar claims Claims\n\tif err = idToken.Claims(&claims); err != nil {\n\t\treturn nil, err\n\t}\n\treturn &claims, nil\n}", "func WithTLSConfig(tlsConfig *tls.Config) Option {\n\treturn func(opts *VDRI) {\n\t\topts.client.Transport = &http.Transport{\n\t\t\tTLSClientConfig: tlsConfig,\n\t\t}\n\t}\n}", "func WithChoriaConfig(bi BuildInfoProvider, c *config.Config) Option {\n\tcfg := Config{\n\t\tAllowList: c.Choria.CertnameAllowList,\n\t\tCA: c.Choria.FileSecurityCA,\n\t\tCertificate: c.Choria.FileSecurityCertificate,\n\t\tKey: c.Choria.FileSecurityKey,\n\t\tDisableTLSVerify: c.DisableTLSVerify,\n\t\tPrivilegedUsers: c.Choria.PrivilegedUsers,\n\t\tIdentity: c.Identity,\n\t\tRemoteSignerURL: c.Choria.RemoteSignerURL,\n\t\tRemoteSignerTokenFile: c.Choria.RemoteSignerTokenFile,\n\t\tRemoteSignerSeedFile: c.Choria.RemoteSignerTokenSeedFile,\n\t\tTLSConfig: tlssetup.TLSConfig(c),\n\t\tBackwardCompatVerification: c.Choria.SecurityAllowLegacyCerts,\n\t\tIdentitySuffix: bi.ClientIdentitySuffix(),\n\t}\n\n\tif cfg.IdentitySuffix == \"\" {\n\t\tcfg.IdentitySuffix = 
\"mcollective\"\n\t}\n\n\tif cn, ok := os.LookupEnv(\"MCOLLECTIVE_CERTNAME\"); ok {\n\t\tc.OverrideCertname = cn\n\t}\n\n\tif c.OverrideCertname != \"\" {\n\t\tcfg.Identity = c.OverrideCertname\n\t} else if !(runtimeOs() == \"windows\" || uid() == 0) {\n\t\tif u, ok := os.LookupEnv(\"USER\"); ok {\n\t\t\tcfg.Identity = fmt.Sprintf(\"%s.%s\", u, cfg.IdentitySuffix)\n\t\t}\n\t}\n\n\treturn WithConfig(&cfg)\n}", "func (_Privileges *PrivilegesTransactorSession) SetTrusted(addr common.Address) (*types.Transaction, error) {\n\treturn _Privileges.Contract.SetTrusted(&_Privileges.TransactOpts, addr)\n}", "func VerifyClientCertificateChain(verificationMode s2av2pb.ValidatePeerCertificateChainReq_VerificationMode, s2AStream stream.S2AStream) func(rawCerts [][]byte, verifiedChains [][]*x509.Certificate) error {\n\treturn func(rawCerts [][]byte, verifiedChains [][]*x509.Certificate) error {\n\t\t// Offload verification to S2Av2.\n\t\tif grpclog.V(1) {\n\t\t\tgrpclog.Infof(\"Sending request to S2Av2 for client peer cert chain validation.\")\n\t\t}\n\t\tif err := s2AStream.Send(&s2av2pb.SessionReq{\n\t\t\tReqOneof: &s2av2pb.SessionReq_ValidatePeerCertificateChainReq{\n\t\t\t\tValidatePeerCertificateChainReq: &s2av2pb.ValidatePeerCertificateChainReq{\n\t\t\t\t\tMode: verificationMode,\n\t\t\t\t\tPeerOneof: &s2av2pb.ValidatePeerCertificateChainReq_ClientPeer_{\n\t\t\t\t\t\tClientPeer: &s2av2pb.ValidatePeerCertificateChainReq_ClientPeer{\n\t\t\t\t\t\t\tCertificateChain: rawCerts,\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t}); err != nil {\n\t\t\tgrpclog.Infof(\"Failed to send request to S2Av2 for client peer cert chain validation.\")\n\t\t\treturn err\n\t\t}\n\n\t\t// Get the response from S2Av2.\n\t\tresp, err := s2AStream.Recv()\n\t\tif err != nil {\n\t\t\tgrpclog.Infof(\"Failed to receive client peer cert chain validation response from S2Av2.\")\n\t\t\treturn err\n\t\t}\n\n\t\t// Parse the response.\n\t\tif (resp.GetStatus() != nil) && (resp.GetStatus().Code != 
uint32(codes.OK)) {\n\t\t\treturn fmt.Errorf(\"failed to offload client cert verification to S2A: %d, %v\", resp.GetStatus().Code, resp.GetStatus().Details)\n\n\t\t}\n\n\t\tif resp.GetValidatePeerCertificateChainResp().ValidationResult != s2av2pb.ValidatePeerCertificateChainResp_SUCCESS {\n\t\t\treturn fmt.Errorf(\"client cert verification failed: %v\", resp.GetValidatePeerCertificateChainResp().ValidationDetails)\n\t\t}\n\n\t\treturn nil\n\t}\n}", "func (uu *UserUpdate) AddCarinspectionIDs(ids ...int) *UserUpdate {\n\tuu.mutation.AddCarinspectionIDs(ids...)\n\treturn uu\n}", "func CORSMiddleware(devMode bool, allowedOrigins []string) gin.HandlerFunc {\n\tcorsConfig := cors.DefaultConfig()\n\tif devMode {\n\t\tcorsConfig.AllowAllOrigins = true\n\t\tcorsConfig.AllowCredentials = false\n\t} else if !devMode && len(allowedOrigins) == 0 {\n\t\t// not dev mode, and no specified origins\n\t\t// so we should allow all\n\t\tcorsConfig.AllowAllOrigins = true\n\t\tcorsConfig.AllowOrigins = nil\n\t} else {\n\t\t// configure allowed origins\n\t\tcorsConfig.AllowOrigins = allowedOrigins\n\t}\n\t// allow the DELETE method, allowed methods are now\n\t// DELETE GET POST PUT HEAD\n\tcorsConfig.AddAllowMethods(\"DELETE\")\n\tcorsConfig.AddAllowHeaders(\"cache-control\", \"Authorization\", \"Content-Type\", \"X-Request-ID\")\n\treturn cors.New(corsConfig)\n}", "func InjectTrustedContext(ctx context.Context, t HeaderTrustHandler, r *http.Request) context.Context {\n\tif t.TrustEdgeContext(r) {\n\t\tctx = SetHeader(ctx, EdgeContextContextKey, r.Header.Get(EdgeContextHeader))\n\t}\n\n\tif t.TrustSpan(r) {\n\t\tfor k, v := range map[HeaderContextKey]string{\n\t\t\tTraceIDContextKey: r.Header.Get(TraceIDHeader),\n\t\t\tParentIDContextKey: r.Header.Get(ParentIDHeader),\n\t\t\tSpanIDContextKey: r.Header.Get(SpanIDHeader),\n\t\t\tSpanFlagsContextKey: r.Header.Get(SpanFlagsHeader),\n\t\t\tSpanSampledContextKey: r.Header.Get(SpanSampledHeader),\n\t\t} {\n\t\t\tctx = SetHeader(ctx, k, 
v)\n\t\t}\n\t}\n\n\treturn ctx\n}", "func (c *Client) detectDivergence(ctx context.Context, primaryTrace []*types.LightBlock, now time.Time) error {\n\tif primaryTrace == nil || len(primaryTrace) < 2 {\n\t\treturn errors.New(\"nil or single block primary trace\")\n\t}\n\tvar (\n\t\theaderMatched bool\n\t\tlastVerifiedHeader = primaryTrace[len(primaryTrace)-1].SignedHeader\n\t\twitnessesToRemove = make([]int, 0)\n\t)\n\tc.logger.Debug(\"Running detector against trace\", \"endBlockHeight\", lastVerifiedHeader.Height,\n\t\t\"endBlockHash\", lastVerifiedHeader.Hash, \"length\", len(primaryTrace))\n\n\tc.providerMutex.Lock()\n\tdefer c.providerMutex.Unlock()\n\n\tif len(c.witnesses) == 0 {\n\t\treturn ErrNoWitnesses\n\t}\n\n\t// launch one goroutine per witness to retrieve the light block of the target height\n\t// and compare it with the header from the primary\n\terrc := make(chan error, len(c.witnesses))\n\tfor i, witness := range c.witnesses {\n\t\tgo c.compareNewHeaderWithWitness(ctx, errc, lastVerifiedHeader, witness, i)\n\t}\n\n\t// handle errors from the header comparisons as they come in\n\tfor i := 0; i < cap(errc); i++ {\n\t\terr := <-errc\n\n\t\tswitch e := err.(type) {\n\t\tcase nil: // at least one header matched\n\t\t\theaderMatched = true\n\t\tcase errConflictingHeaders:\n\t\t\t// We have conflicting headers. 
This could possibly imply an attack on the light client.\n\t\t\t// First we need to verify the witness's header using the same skipping verification and then we\n\t\t\t// need to find the point that the headers diverge and examine this for any evidence of an attack.\n\t\t\t//\n\t\t\t// We combine these actions together, verifying the witnesses headers and outputting the trace\n\t\t\t// which captures the bifurcation point and if successful provides the information to create valid evidence.\n\t\t\terr := c.handleConflictingHeaders(ctx, primaryTrace, e.Block, e.WitnessIndex, now)\n\t\t\tif err != nil {\n\t\t\t\t// return information of the attack\n\t\t\t\treturn err\n\t\t\t}\n\t\t\t// if attempt to generate conflicting headers failed then remove witness\n\t\t\twitnessesToRemove = append(witnessesToRemove, e.WitnessIndex)\n\n\t\tcase errBadWitness:\n\t\t\t// these are all melevolent errors and should result in removing the\n\t\t\t// witness\n\t\t\tc.logger.Info(\"witness returned an error during header comparison, removing...\",\n\t\t\t\t\"witness\", c.witnesses[e.WitnessIndex], \"err\", err)\n\t\t\twitnessesToRemove = append(witnessesToRemove, e.WitnessIndex)\n\t\tdefault:\n\t\t\t// Benign errors which can be ignored unless there was a context\n\t\t\t// canceled\n\t\t\tif errors.Is(e, context.Canceled) || errors.Is(e, context.DeadlineExceeded) {\n\t\t\t\treturn e\n\t\t\t}\n\t\t\tc.logger.Info(\"error in light block request to witness\", \"err\", err)\n\t\t}\n\t}\n\n\t// remove witnesses that have misbehaved\n\tif err := c.removeWitnesses(witnessesToRemove); err != nil {\n\t\treturn err\n\t}\n\n\t// 1. If we had at least one witness that returned the same header then we\n\t// conclude that we can trust the header\n\tif headerMatched {\n\t\treturn nil\n\t}\n\n\t// 2. 
Else all witnesses have either not responded, don't have the block or sent invalid blocks.\n\treturn ErrFailedHeaderCrossReferencing\n}", "func AllowAnyTrustDomainWorkload() ValidationMode {\n\treturn validationMode{\n\t\toptions: validationOptions{\n\t\t\tidType: workloadId,\n\t\t},\n\t}\n}", "func (uuo *UserUpdateOne) AddCarrepairrecordIDs(ids ...int) *UserUpdateOne {\n\tuuo.mutation.AddCarrepairrecordIDs(ids...)\n\treturn uuo\n}", "func (tracker *PeerTracker) UpdateTrusted(ctx context.Context) error {\n\treturn tracker.updatePeers(ctx, tracker.trustedPeers()...)\n}", "func bindERC20(address common.Address, caller bind.ContractCaller, transactor bind.ContractTransactor, filterer bind.ContractFilterer) (*bind.BoundContract, error) {\n\tparsed, err := abi.JSON(strings.NewReader(ERC20ABI))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn bind.NewBoundContract(address, parsed, caller, transactor, filterer), nil\n}", "func bindERC20(address common.Address, caller bind.ContractCaller, transactor bind.ContractTransactor, filterer bind.ContractFilterer) (*bind.BoundContract, error) {\n\tparsed, err := abi.JSON(strings.NewReader(ERC20ABI))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn bind.NewBoundContract(address, parsed, caller, transactor, filterer), nil\n}", "func bindERC20(address common.Address, caller bind.ContractCaller, transactor bind.ContractTransactor, filterer bind.ContractFilterer) (*bind.BoundContract, error) {\n\tparsed, err := abi.JSON(strings.NewReader(ERC20ABI))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn bind.NewBoundContract(address, parsed, caller, transactor, filterer), nil\n}", "func (s *Server) isTrustedCluster(clusterID string) bool {\n\tfor _, trusted := range s.config.TrustedPeers {\n\t\tif trusted == clusterID || trusted == \"*\" {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func (rc *RentalCreate) SetCarID(u uuid.UUID) *RentalCreate {\n\trc.mutation.SetCarID(u)\n\treturn rc\n}" ]
[ "0.5050521", "0.46469218", "0.44809136", "0.4400281", "0.43353248", "0.43324968", "0.42241368", "0.41937608", "0.41843116", "0.41048527", "0.4037473", "0.40296867", "0.4016661", "0.40072775", "0.39855403", "0.39722", "0.39688164", "0.39688164", "0.3953601", "0.39267403", "0.39037538", "0.39037538", "0.38855118", "0.38855118", "0.38855118", "0.38792187", "0.38600627", "0.38358578", "0.3831689", "0.38294238", "0.38286698", "0.38278177", "0.3825741", "0.3823116", "0.38202572", "0.3819775", "0.3819775", "0.38182354", "0.3813514", "0.38132417", "0.38013384", "0.37956598", "0.37915957", "0.37778845", "0.37745786", "0.3773235", "0.37673563", "0.376591", "0.37615287", "0.37474442", "0.3746116", "0.37421405", "0.37405303", "0.3733487", "0.37323007", "0.3730742", "0.37241894", "0.3710357", "0.37065145", "0.36977935", "0.36957306", "0.3695522", "0.3693082", "0.36705357", "0.36693332", "0.36658245", "0.36569363", "0.36546367", "0.3645987", "0.3638687", "0.36348802", "0.36336762", "0.3626719", "0.36222655", "0.36163336", "0.36067876", "0.36067086", "0.36066255", "0.3606336", "0.3605582", "0.36048195", "0.35927635", "0.35826197", "0.35820895", "0.35797834", "0.35782483", "0.35743597", "0.35737845", "0.3567621", "0.35652193", "0.35619518", "0.35566851", "0.35464576", "0.35450062", "0.35422057", "0.35410666", "0.35410666", "0.35410666", "0.3540865", "0.3539437" ]
0.7244435
0
MaxAllowedHeaderSize overrides the default maximum size (of 32 MiB) that a CARv1 decode (including within a CARv2 container) will allow a header to be without erroring.
MaxAllowedHeaderSize переопределяет default максимальный размер (32 MiB) заголовка, который CARv1 decode (включая внутри CARv2 контейнера) позволит без ошибки.
func MaxAllowedHeaderSize(max uint64) Option { return func(o *Options) { o.MaxAllowedHeaderSize = max } }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (s *fseEncoder) maxHeaderSize() uint32 {\n\tif s.preDefined {\n\t\treturn 0\n\t}\n\tif s.useRLE {\n\t\treturn 8\n\t}\n\treturn (((uint32(s.symbolLen) * uint32(s.actualTableLog)) >> 3) + 3) * 8\n}", "func (*testObject) MaxHeaderLength() uint16 {\n\treturn 0\n}", "func (*endpoint) MaxHeaderLength() uint16 {\n\treturn header.EthernetMinimumSize\n}", "func (e *endpoint) MaxHeaderLength() uint16 {\n\treturn uint16(e.hdrSize)\n}", "func MaxHeaderBytes(v int) Option {\n\treturn optionSetter(func(opt *Options) {\n\t\topt.MaxHeaderBytes = v\n\t})\n}", "func (st *Settings) MaxHeaderListSize() uint32 {\n\treturn st.headerSize\n}", "func (st *Settings) MaxHeaderListSize() uint32 {\n\treturn st.headerSize\n}", "func HeaderFieldsTooLarge(message ...interface{}) Err {\n\treturn Boomify(http.StatusRequestHeaderFieldsTooLarge, message...)\n}", "func (c Config) MaxHeaderBytesOrDefault() int {\n\tif c.MaxHeaderBytes > 0 {\n\t\treturn c.MaxHeaderBytes\n\t}\n\treturn DefaultMaxHeaderBytes\n}", "func (st *Settings) SetMaxHeaderListSize(size uint32) {\n\tst.headerSize = size\n}", "func (st *Settings) SetMaxHeaderListSize(size uint32) {\n\tst.headerSize = size\n}", "func (r *Responder) RequestHeaderFieldsTooLarge() { r.write(http.StatusRequestHeaderFieldsTooLarge) }", "func (e *endpoint) MaxHeaderLength() uint16 {\n\treturn e.lower.MaxHeaderLength()\n}", "func (r *Route) MaxHeaderLength() uint16 {\n\treturn r.outgoingNIC.getNetworkEndpoint(r.NetProto()).MaxHeaderLength()\n}", "func (msg *MsgFetchSmartContractInfo) MaxPayloadLength(pver uint32) uint32 {\n\t// 10k. 
In theory this message is very small.\n\treturn 10240\n}", "func (s *Server) SetMaxHeaderBytes(b int) {\n\ts.config.MaxHeaderBytes = b\n}", "func estimatedHeaderWireSize(hs http.Header) (res int) {\n\tfor h, vs := range hs {\n\t\tres += len(h) + 4 // account for \": \" and \"\\r\\n\"\n\t\tfor _, v := range vs {\n\t\t\tres += len(v)\n\t\t\tbreak // no duplicates allowed\n\t\t}\n\t}\n\treturn res\n}", "func DecodeHeader(data []byte, bytesRead int, h *Header) (int, error) {\n\tif h == nil {\n\t\treturn 0, errors.New(\"Cannot decode bytes to nil Header\")\n\t}\n\n\tif len(data) < maxHeaderSize {\n\t\treturn 0, fmt.Errorf(\"Header bytes should be %d bytes, found %d\", maxHeaderSize, len(data))\n\t}\n\n\tvar err error\n\n\th.ID, bytesRead, err = decodeUint16(data, bytesRead)\n\tif err != nil {\n\t\treturn bytesRead, err\n\t}\n\n\tcurrentByte := data[bytesRead]\n\th.QR = QRType(getBitsAtIdx(currentByte, 0, 1))\n\th.OPCODE = Opcode(getBitsAtIdx(currentByte, 1, 4))\n\th.AA = getBitsAtIdx(currentByte, 5, 1)\n\th.TC = getBitsAtIdx(currentByte, 6, 1)\n\th.RD = getBitsAtIdx(currentByte, 7, 1)\n\tbytesRead++\n\n\tcurrentByte = data[bytesRead]\n\th.RA = getBitsAtIdx(currentByte, 0, 1)\n\th.Z = getBitsAtIdx(currentByte, 1, 3)\n\th.RCODE = ResponseCode(getBitsAtIdx(currentByte, 4, 4))\n\tbytesRead++\n\n\t// Set the remaining data\n\th.QDCOUNT, bytesRead, err = decodeUint16(data, bytesRead)\n\tif err != nil {\n\t\treturn bytesRead, err\n\t}\n\n\th.ANCOUNT, bytesRead, err = decodeUint16(data, bytesRead)\n\tif err != nil {\n\t\treturn bytesRead, err\n\t}\n\n\th.NSCOUNT, bytesRead, err = decodeUint16(data, bytesRead)\n\tif err != nil {\n\t\treturn bytesRead, err\n\t}\n\n\th.ARCOUNT, bytesRead, err = decodeUint16(data, bytesRead)\n\tif err != nil {\n\t\treturn bytesRead, err\n\t}\n\n\treturn bytesRead, err\n}", "func (f *frame) headerLength() int {\n\treturn 8 + 1 + 4\n}", "func writeHeaderSize(headerLength int) []byte {\n\ttotalHeaderLen := make([]byte, 4)\n\ttotalLen := 
uint32(headerLength)\n\tbinary.BigEndian.PutUint32(totalHeaderLen, totalLen)\n\treturn totalHeaderLen\n}", "func (this SnappyCodec) MaxEncodedLen(srcLen int) int {\n\treturn 32 + srcLen + srcLen/6\n}", "func (r Response) RequestHeaderFieldsTooLarge(code string, payload Payload, header ...ResponseHeader) {\n\tr.Response(code, http.RequestHeaderFieldsTooLarge, payload, header...)\n}", "func HeaderSize(h http.Header) int {\n\tl := 0\n\tfor field, value := range h {\n\t\tl += len(field)\n\t\tfor _, v := range value {\n\t\t\tl += len(v)\n\t\t}\n\t}\n\n\treturn l\n}", "func (z *Writer) writeHeader() error {\n\t// Default to 4Mb if BlockMaxSize is not set.\n\tif z.Header.BlockMaxSize == 0 {\n\t\tz.Header.BlockMaxSize = blockSize4M\n\t}\n\t// The only option that needs to be validated.\n\tbSize := z.Header.BlockMaxSize\n\tif !isValidBlockSize(z.Header.BlockMaxSize) {\n\t\treturn fmt.Errorf(\"lz4: invalid block max size: %d\", bSize)\n\t}\n\t// Allocate the compressed/uncompressed buffers.\n\t// The compressed buffer cannot exceed the uncompressed one.\n\tz.newBuffers()\n\tz.idx = 0\n\n\t// Size is optional.\n\tbuf := z.buf[:]\n\n\t// Set the fixed size data: magic number, block max size and flags.\n\tbinary.LittleEndian.PutUint32(buf[0:], frameMagic)\n\tflg := byte(Version << 6)\n\tflg |= 1 << 5 // No block dependency.\n\tif z.Header.BlockChecksum {\n\t\tflg |= 1 << 4\n\t}\n\tif z.Header.Size > 0 {\n\t\tflg |= 1 << 3\n\t}\n\tif !z.Header.NoChecksum {\n\t\tflg |= 1 << 2\n\t}\n\tbuf[4] = flg\n\tbuf[5] = blockSizeValueToIndex(z.Header.BlockMaxSize) << 4\n\n\t// Current buffer size: magic(4) + flags(1) + block max size (1).\n\tn := 6\n\t// Optional items.\n\tif z.Header.Size > 0 {\n\t\tbinary.LittleEndian.PutUint64(buf[n:], z.Header.Size)\n\t\tn += 8\n\t}\n\n\t// The header checksum includes the flags, block max size and optional Size.\n\tbuf[n] = byte(xxh32.ChecksumZero(buf[4:n]) >> 8 & 0xFF)\n\tz.checksum.Reset()\n\n\t// Header ready, write it out.\n\tif _, err := 
z.dst.Write(buf[0 : n+1]); err != nil {\n\t\treturn err\n\t}\n\tz.Header.done = true\n\tif debugFlag {\n\t\tdebug(\"wrote header %v\", z.Header)\n\t}\n\n\treturn nil\n}", "func (this X86Codec) MaxEncodedLen(srcLen int) int {\n\t// Since we do not check the dst index for each byte (for speed purpose)\n\t// allocate some extra buffer for incompressible data.\n\tif srcLen >= 1<<30 {\n\t\treturn srcLen\n\t}\n\n\tif srcLen <= 512 {\n\t\treturn srcLen + 32\n\t}\n\n\treturn srcLen + srcLen/16\n}", "func (p ZkEstablishAccept) MaxPayloadLength(uint32) uint32 {\n\treturn 65532\n}", "func (cd *ContinueDecompress) MaxMessageSize() int {\n\treturn cd.maxMessageSize\n}", "func (msg *MsgVersion) MaxPayloadSize(pver uint32) uint32 {\n\treturn 48\n}", "func (t ResponseHeader) Size(version int16) int32 {\n\tvar sz int32\n\tsz += sizeof.Int32 // CorrelationId\n\treturn sz\n}", "func (request *RequestNFrame) Size() int {\n\treturn request.Header.Size() + reqsSize\n}", "func (d *DHCPv4) MaxMessageSize() (uint16, error) {\n\treturn GetUint16(OptionMaximumDHCPMessageSize, d.Options)\n}", "func (e *encoder) configureHeader() error {\n\t// Header - Defaults\n\tif e.h.Depth == 0 {\n\t\te.h.Depth = 32\n\t}\n\tif e.h.Compression == \"\" {\n\t\te.h.Compression = CompressionGzip\n\t}\n\tif e.h.RasterMode == \"\" {\n\t\te.h.RasterMode = RasterModeNormal\n\t}\n\tif e.h.Format == \"\" {\n\t\tswitch e.m.ColorModel() {\n\t\tcase hdrcolor.RGBModel:\n\t\t\te.h.Format = FormatRGBE\n\t\tcase hdrcolor.XYZModel:\n\t\t\te.h.Format = FormatXYZE\n\t\tdefault:\n\t\t\treturn UnsupportedError(\"color model\")\n\t\t}\n\t}\n\n\t// Header - Format\n\tswitch e.h.Format {\n\tcase FormatRGBE:\n\t\te.channelSize = 1\n\t\te.nbOfchannel = 4\n\t\te.bytesAt = func(x, y int) []byte {\n\t\t\tr, g, b, _ := e.m.HDRAt(x, y).HDRRGBA()\n\t\t\treturn format.ToRadianceBytes(r, g, b)\n\t\t}\n\tcase FormatXYZE:\n\t\te.channelSize = 1\n\t\te.nbOfchannel = 4\n\t\te.bytesAt = func(x, y int) []byte {\n\t\t\txx, yy, zz, _ := e.m.HDRAt(x, 
y).HDRXYZA()\n\t\t\treturn format.ToRadianceBytes(xx, yy, zz)\n\t\t}\n\tcase FormatRGB:\n\t\te.channelSize = 4\n\t\te.nbOfchannel = 3\n\t\te.bytesAt = func(x, y int) []byte {\n\t\t\tr, g, b, _ := e.m.HDRAt(x, y).HDRRGBA()\n\t\t\treturn format.ToBytes(binary.LittleEndian, r, g, b)\n\t\t}\n\tcase FormatXYZ:\n\t\te.channelSize = 4\n\t\te.nbOfchannel = 3\n\t\te.bytesAt = func(x, y int) []byte {\n\t\t\txx, yy, zz, _ := e.m.HDRAt(x, y).HDRXYZA()\n\t\t\treturn format.ToBytes(binary.LittleEndian, xx, yy, zz)\n\t\t}\n\tcase FormatLogLuv:\n\t\te.channelSize = 1\n\t\te.nbOfchannel = 4\n\t\te.bytesAt = func(x, y int) []byte {\n\t\t\txx, yy, zz, _ := e.m.HDRAt(x, y).HDRXYZA()\n\t\t\treturn format.XYZToLogLuv(xx, yy, zz)\n\t\t}\n\t}\n\n\t// Header - Size\n\td := e.m.Bounds().Size()\n\te.h.Width = d.X\n\te.h.Height = d.Y\n\n\treturn nil\n}", "func Size(n uint32) uint32 {\n\treturn align(NEEDLE_HEADER_SIZE + n + NEEDLE_FOOTER_SIZE)\n}", "func (msg *MsgGetCFilterV2) MaxPayloadLength(pver uint32) uint32 {\n\t// Block hash.\n\treturn chainhash.HashSize\n}", "func validHeaderFieldByte(b byte) bool {\n\treturn int(b) < len(isTokenTable) && isTokenTable[b]\n}", "func (rw *RequestHeader) Size() int32 {\n\tencoder := NewSizingEncoder()\n\trw.Write(encoder)\n\treturn encoder.Size()\n}", "func Size(n int) int {\n\treturn int(align(_headerSize + int32(n) + _footerSize))\n}", "func (t RequestHeader) Size(version int16) int32 {\n\tvar sz int32\n\tsz += sizeof.Int16 // RequestApiKey\n\tsz += sizeof.Int16 // RequestApiVersion\n\tsz += sizeof.Int32 // CorrelationId\n\tif version >= 1 {\n\t\tsz += sizeof.String(t.ClientId) // ClientId\n\t}\n\treturn sz\n}", "func (cc *ContinueCompress) MaxMessageSize() int {\n\treturn cc.maxMessageSize\n}", "func headerWithNoFileMetaInformationGroupLength() (*headerData, error) {\n\theaderData := new(headerData)\n\n\telements := []*Element{\n\t\tmustNewElement(tag.MediaStorageSOPClassUID, 
[]string{\"SecondaryCapture\"}),\n\t\tmustNewElement(tag.MediaStorageSOPInstanceUID, []string{\"1.3.6.1.4.1.35190.4.1.20210608.607733549593\"}),\n\t\tmustNewElement(tag.TransferSyntaxUID, []string{\"=RLELossless\"}),\n\t\tmustNewElement(tag.ImplementationClassUID, []string{\"1.6.6.1.4.1.9590.100.1.0.100.4.0\"}),\n\t\tmustNewElement(tag.SOPInstanceUID, []string{\"1.3.6.1.4.1.35190.4.1.20210608.607733549593\"}),\n\t}\n\tdata, err := writeElements(elements)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Construct valid DICOM header preamble.\n\tmagicWord := []byte(\"DICM\")\n\tpreamble := make([]byte, 128)\n\tpreamble = append(preamble, magicWord...)\n\theaderBytes := append(preamble, data...)\n\theaderData.HeaderBytes = bytes.NewBuffer(headerBytes)\n\theaderData.Elements = elements[0 : len(elements)-1]\n\treturn headerData, nil\n}", "func MaxBlockLen(ct CompressionType) uint64 {\n\tif ct == Snappy {\n\t\t// https://github.com/golang/snappy/blob/2a8bb927dd31d8daada140a5d09578521ce5c36a/encode.go#L76\n\t\treturn 6 * (0xffffffff - 32) / 7\n\t}\n\treturn math.MaxUint64\n}", "func (request *RequestResponseFrame) Size() int {\n\treturn request.Header.Size() + request.Metadata.Size() + len(request.Data)\n}", "func (this AliasCodec) MaxEncodedLen(srcLen int) int {\n\treturn srcLen + 1024\n}", "func (cs *clientStream) frameScratchBufferLen(maxFrameSize int) int {\n\tconst max = 512 << 10\n\tn := int64(maxFrameSize)\n\tif n > max {\n\t\tn = max\n\t}\n\tif cl := cs.reqBodyContentLength; cl != -1 && cl+1 < n {\n\t\t// Add an extra byte past the declared content-length to\n\t\t// give the caller's Request.Body io.Reader a chance to\n\t\t// give us more bytes than they declared, so we can catch it\n\t\t// early.\n\t\tn = cl + 1\n\t}\n\tif n < 1 {\n\t\treturn 1\n\t}\n\treturn int(n) // doesn't truncate; max is 512K\n}", "func ErrRequestHeaderFieldsTooLargef(format string, arguments ...interface{}) *Status {\n\treturn &Status{Code: http.StatusRequestHeaderFieldsTooLarge, Text: 
fmt.Sprintf(format, arguments...)}\n}", "func (hdr *Header) Unmarshal(data []byte) error {\n\thdr.Source = binary.BigEndian.Uint16(data[0:2])\n\thdr.Destination = binary.BigEndian.Uint16(data[2:4])\n\thdr.SeqNum = binary.BigEndian.Uint32(data[4:8])\n\thdr.AckNum = binary.BigEndian.Uint32(data[8:12])\n\n\thdr.DataOffset = data[12] >> 4\n\thdr.ECN = byte(data[13] >> 6 & 7) // 3 bits\n\thdr.Ctrl = Flag(byte(data[13] & 0x3f)) // bottom 6 bits\n\n\thdr.Window = binary.BigEndian.Uint16(data[14:16])\n\thdr.Checksum = binary.BigEndian.Uint16(data[16:18])\n\thdr.Urgent = binary.BigEndian.Uint16(data[18:20])\n\n\thdr.Options = hdr.opts[:0]\n\n\tif hdr.DataOffset < 5 {\n\t\treturn fmt.Errorf(\"Invalid TCP data offset %d < 5\", hdr.DataOffset)\n\t}\n\n\tdataStart := int(hdr.DataOffset) * 4\n\tif dataStart > len(data) {\n\t\thdr.Payload = nil\n\t\t//hdr.Contents = data\n\t\treturn errors.New(\"TCP data offset greater than packet length\")\n\t}\n\t//hdr.Contents = data[:dataStart]\n\thdr.Payload = data[dataStart:]\n\t// From here on, data points just to the header options.\n\tdata = data[20:dataStart]\nLoop:\n\tfor len(data) > 0 {\n\t\tif hdr.Options == nil {\n\t\t\t// Pre-allocate to avoid allocating a slice.\n\t\t\thdr.Options = hdr.opts[:0]\n\t\t}\n\t\thdr.Options = append(hdr.Options, Option{OptionType: OptionKind(data[0])})\n\t\topt := &hdr.Options[len(hdr.Options)-1]\n\t\tswitch opt.OptionType {\n\t\tcase optionKindEndList: // End of options\n\t\t\topt.OptionLength = 1\n\t\t\thdr.Padding = data[1:]\n\t\t\tbreak Loop\n\t\tcase optionKindNop: // 1 byte padding\n\t\t\topt.OptionLength = 1\n\t\tdefault:\n\t\t\topt.OptionLength = data[1]\n\t\t\tif opt.OptionLength < 2 {\n\t\t\t\treturn fmt.Errorf(\"Invalid TCP option length %d < 2\", opt.OptionLength)\n\t\t\t} else if int(opt.OptionLength) > len(data) {\n\t\t\t\treturn fmt.Errorf(\"Ivalid TCP option length %d exceeds remaining %d bytes\", opt.OptionLength, len(data))\n\t\t\t}\n\t\t\topt.OptionData = 
data[2:opt.OptionLength]\n\t\t}\n\t\tdata = data[opt.OptionLength:]\n\t}\n\n\treturn nil\n}", "func FixedLengthRecordReaderV2HeaderBytes(value int64) FixedLengthRecordReaderV2Attr {\n\treturn func(m optionalAttr) {\n\t\tm[\"header_bytes\"] = value\n\t}\n}", "func MaxEncodedLen(ct CompressionType, srcLen uint64) (uint64, bool) {\n\tif ct == Snappy {\n\t\tif srcLen > MaxBlockLen(ct) {\n\t\t\treturn 0, false\n\t\t}\n\t\tsz := snappy.MaxEncodedLen(int(srcLen))\n\t\tif sz == -1 {\n\t\t\treturn 0, false\n\t\t}\n\t\treturn uint64(sz), true\n\t}\n\tpanic(\"not supported compression type\")\n}", "func (h *blockHeader) setSize(v uint32) {\n\tconst mask = 7\n\t*h = (*h)&mask | blockHeader(v<<3)\n}", "func parseAcceptedBlobSize(rangeHeader string) (int64, error) {\n\t// Range: Range indicating the current progress of the upload.\n\t// https://github.com/opencontainers/distribution-spec/blob/master/spec.md#get-blob-upload\n\tif rangeHeader == \"\" {\n\t\treturn 0, fmt.Errorf(\"range header required\")\n\t}\n\n\tparts := strings.SplitN(rangeHeader, \"-\", 2)\n\tif len(parts) != 2 {\n\t\treturn 0, fmt.Errorf(\"range header bad value: %s\", rangeHeader)\n\t}\n\n\tsize, err := strconv.ParseInt(parts[1], 10, 64)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\n\t// docker registry did '-1' in the response\n\tif size > 0 {\n\t\tsize = size + 1\n\t}\n\n\treturn size, nil\n}", "func (h literalsHeader) size() int {\n\treturn int(h >> 60)\n}", "func (m *SlimBlock) MaxPayloadLength(pver uint32) uint32 {\n\treturn MaxBlockPayload\n}", "func parseHeader(header []byte, frame *MP3Frame) bool {\n\n // MPEG version. (2 bits)\n frame.MPEGVersion = (header[1] & 0x18) >> 3\n if frame.MPEGVersion == MPEGVersionReserved {\n return false\n }\n\n // MPEG layer. (2 bits.)\n frame.MPEGLayer = (header[1] & 0x06) >> 1\n if frame.MPEGLayer == MPEGLayerReserved {\n return false\n }\n\n // CRC (cyclic redundency check) protection. 
(1 bit.)\n frame.CrcProtection = (header[1] & 0x01) == 0x00\n\n // Bit rate index. (4 bits.)\n bitRateIndex := (header[2] & 0xF0) >> 4\n if bitRateIndex == 0 || bitRateIndex == 15 {\n return false\n }\n\n // Bit rate.\n if frame.MPEGVersion == MPEGVersion1 {\n switch frame.MPEGLayer {\n case MPEGLayerI: frame.BitRate = v1l1_br[bitRateIndex] * 1000\n case MPEGLayerII: frame.BitRate = v1l2_br[bitRateIndex] * 1000\n case MPEGLayerIII: frame.BitRate = v1l3_br[bitRateIndex] * 1000\n }\n } else {\n switch frame.MPEGLayer {\n case MPEGLayerI: frame.BitRate = v2l1_br[bitRateIndex] * 1000\n case MPEGLayerII: frame.BitRate = v2l2_br[bitRateIndex] * 1000\n case MPEGLayerIII: frame.BitRate = v2l3_br[bitRateIndex] * 1000\n }\n }\n\n // Sampling rate index. (2 bits.)\n samplingRateIndex := (header[2] & 0x0C) >> 2\n if samplingRateIndex == 3 {\n return false\n }\n\n // Sampling rate.\n switch frame.MPEGVersion {\n case MPEGVersion1: frame.SamplingRate = v1_sr[samplingRateIndex]\n case MPEGVersion2: frame.SamplingRate = v2_sr[samplingRateIndex]\n case MPEGVersion2_5: frame.SamplingRate = v25_sr[samplingRateIndex]\n }\n\n // Padding bit. (1 bit.)\n frame.PaddingBit = (header[2] & 0x02) == 0x02\n\n // Private bit. (1 bit.)\n frame.PrivateBit = (header[2] & 0x01) == 0x01\n\n // Channel mode. (2 bits.)\n frame.ChannelMode = (header[3] & 0xC0) >> 6\n\n // Mode Extension. Valid only for Joint Stereo mode. (2 bits.)\n frame.ModeExtension = (header[3] & 0x30) >> 4\n if frame.ChannelMode != JointStereo && frame.ModeExtension != 0 {\n return false\n }\n\n // Copyright bit. (1 bit.)\n frame.CopyrightBit = (header[3] & 0x08) == 0x08\n\n // Original bit. (1 bit.)\n frame.OriginalBit = (header[3] & 0x04) == 0x04\n\n // Emphasis. (2 bits.)\n frame.Emphasis = (header[3] & 0x03)\n if frame.Emphasis == 2 {\n return false\n }\n\n // Number of samples in the frame. 
We need this to determine the frame size.\n if frame.MPEGVersion == MPEGVersion1 {\n switch frame.MPEGLayer {\n case MPEGLayerI: frame.SampleCount = 384\n case MPEGLayerII: frame.SampleCount = 1152\n case MPEGLayerIII: frame.SampleCount = 1152\n }\n } else {\n switch frame.MPEGLayer {\n case MPEGLayerI: frame.SampleCount = 384\n case MPEGLayerII: frame.SampleCount = 1152\n case MPEGLayerIII: frame.SampleCount = 576\n }\n }\n\n // If the padding bit is set we add an extra 'slot' to the frame length.\n // A layer I slot is 4 bytes long; layer II and III slots are 1 byte long.\n var padding int = 0\n\n if frame.PaddingBit {\n if frame.MPEGLayer == MPEGLayerI {\n padding = 4\n } else {\n padding = 1\n }\n }\n\n // Calculate the frame length in bytes. There's a lot of confusion online\n // about how to do this and definitive documentation is hard to find as\n // the official MP3 specification is not publicly available. The\n // basic formula seems to boil down to:\n //\n // bytes_per_sample = (bit_rate / sampling_rate) / 8\n // frame_length = sample_count * bytes_per_sample + padding\n //\n // In practice we need to rearrange this formula to avoid rounding errors.\n //\n // I can't find any definitive statement on whether this length is\n // supposed to include the 4-byte header and the optional 2-byte CRC.\n // Experimentation on mp3 files captured from the wild indicates that it\n // includes the header at least.\n frame.FrameLength =\n (frame.SampleCount / 8) * frame.BitRate / frame.SamplingRate + padding\n\n return true\n}", "func MaxSize32(length int) int {\n\tnumControlBytes := (length + 3) / 4\n\tmaxNumDataBytes := 4 * length\n\treturn numControlBytes + maxNumDataBytes\n}", "func defaultMaxInflightBytes(n int) option.ClientOption {\n\treturn &defaultInflightBytesSetting{maxBytes: n}\n}", "func (fixedLenByteArrayDecoderTraits) BytesRequired(n int) int {\n\treturn parquet.FixedLenByteArrayTraits.BytesRequired(n)\n}", "func readHeaderDataSize(reader *bytes.Reader, 
totalSize uint64) (*dataSizeHeader, error) {\n\thdrDataSize := dataSizeHeader{}\n\terr := binary.Read(reader, binary.LittleEndian, &hdrDataSize)\n\n\t/*\n\t * Check if data size header was read.\n\t */\n\tif err != nil {\n\t\tmsg := err.Error()\n\t\treturn nil, fmt.Errorf(\"Failed to read data size header: %s\", msg)\n\t} else {\n\t\tchunkId := hdrDataSize.ChunkID\n\t\tchunkSize := hdrDataSize.ChunkSize\n\t\tsizeRiff := hdrDataSize.SizeRIFF\n\t\texpectedRiffChunkSize := totalSize - 8\n\n\t\t/*\n\t\t * Check data size header for validity.\n\t\t */\n\t\tif chunkId != ID_DATASIZE {\n\t\t\treturn nil, fmt.Errorf(\"Data size header contains invalid chunk id. Expected %#08x, found %#08x.\", ID_DATASIZE, chunkId)\n\t\t} else if chunkSize < MIN_DATASIZE_CHUNK_SIZE {\n\t\t\treturn nil, fmt.Errorf(\"Data size header has too small size. Expected at least %#08x, found %#08x.\", MIN_DATASIZE_CHUNK_SIZE, chunkSize)\n\t\t} else if sizeRiff != expectedRiffChunkSize {\n\t\t\treturn nil, fmt.Errorf(\"Unexpected RIFF chunk size in data size header. 
Expected %#08x, found %0#8x.\", expectedRiffChunkSize, sizeRiff)\n\t\t} else {\n\t\t\treturn &hdrDataSize, nil\n\t\t}\n\n\t}\n\n}", "func (b IPv4Header) TotalLen() int {\n\treturn int(binary.BigEndian.Uint16(b[2:4]))\n}", "func (request *RequestChannelFrame) Size() int {\n\treturn request.Header.Size() + initReqsSize + request.Metadata.Size() + len(request.Data)\n}", "func (o BucketV2CorsRuleOutput) AllowedHeaders() pulumi.StringArrayOutput {\n\treturn o.ApplyT(func(v BucketV2CorsRule) []string { return v.AllowedHeaders }).(pulumi.StringArrayOutput)\n}", "func expectedResponseLenth(responseCode uint8, responseLength uint8) (byteCount int, err error) {\n\tswitch responseCode {\n\tcase fcReadHoldingRegisters,\n\t fcReadInputRegisters,\n\t fcReadCoils,\n\t fcReadDiscreteInputs: byteCount = int(responseLength)\n\tcase fcWriteSingleRegister,\n\t fcWriteMultipleRegisters,\n\t fcWriteSingleCoil,\n\t fcWriteMultipleCoils: byteCount = 3\n\tcase fcMaskWriteRegister: byteCount = 5\n\tcase fcReadHoldingRegisters | 0x80,\n\t fcReadInputRegisters | 0x80,\n\t fcReadCoils | 0x80,\n\t fcReadDiscreteInputs | 0x80,\n\t fcWriteSingleRegister | 0x80,\n\t fcWriteMultipleRegisters | 0x80,\n\t fcWriteSingleCoil | 0x80,\n\t fcWriteMultipleCoils | 0x80,\n\t fcMaskWriteRegister | 0x80: byteCount = 0\n\tdefault: err = fmt.Errorf(\"unexpected response code (%v)\", responseCode)\n\t}\n\n\treturn\n}", "func (request *RequestStreamFrame) Size() int {\n\treturn request.Header.Size() + initReqsSize + request.Metadata.Size() + len(request.Data)\n}", "func (f *Framer) ReadHeader() (head *frameHeader, err error) {\n\tv, err := f.r.ReadByte()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tversion := v & protoVersionMask\n\n\tif version < protoVersion1 || version > protoVersion4 {\n\t\treturn nil, fmt.Errorf(\"unsupported version: %x \", v)\n\t}\n\n\tf.proto = version\n\n\thead = &frameHeader{}\n\n\thead.Version = protoVersion(v)\n\n\tflag, err := f.r.ReadByte()\n\tif err != nil {\n\t\treturn nil, 
err\n\t}\n\thead.Flags = flag\n\n\tif version > protoVersion2 {\n\t\tstream, err := f.r.ReadNetUint16()\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\thead.Stream = int(stream)\n\n\t\tb, err := f.r.ReadByte()\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\thead.Op = FrameOp(b)\n\t\tl, err := f.r.ReadNetUint32()\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\thead.BodyLength = int(l)\n\t} else {\n\t\tstream, err := f.r.ReadNetUint8()\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\thead.Stream = int(stream)\n\n\t\tb, err := f.r.ReadByte()\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\thead.Op = FrameOp(b)\n\t\tl, err := f.r.ReadNetUint32()\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\thead.BodyLength = int(l)\n\t}\n\n\tif head.BodyLength < 0 {\n\t\treturn nil, fmt.Errorf(\"frame body length can not be less than 0: %d\", head.BodyLength)\n\t} else if head.BodyLength > maxFrameSize {\n\t\t// need to free up the connection to be used again\n\t\tlogp.Err(\"head length is too large\")\n\t\treturn nil, ErrFrameTooBig\n\t}\n\n\theadSize := f.r.BufferConsumed()\n\thead.HeadLength = headSize\n\n\tdebugf(\"header: %v\", head)\n\n\tf.Header = head\n\treturn head, nil\n}", "func (t DescribeAclsRequest) Size(version int16) int32 {\n\tvar sz int32\n\tsz += sizeof.Int8 // ResourceType\n\tsz += sizeof.String(t.ResourceNameFilter) // ResourceNameFilter\n\tif version >= 1 {\n\t\tsz += sizeof.Int8 // ResourcePatternType\n\t}\n\tsz += sizeof.String(t.PrincipalFilter) // PrincipalFilter\n\tsz += sizeof.String(t.HostFilter) // HostFilter\n\tsz += sizeof.Int8 // Operation\n\tsz += sizeof.Int8 // PermissionType\n\treturn sz\n}", "func (h *Header) MarshalFrameHeader() ([]byte, error) {\n\t// NOTE(jc): Header contains a uint32 but the protocol demands a uint24,\n\t// unavailable in Go, throw ErrFrameTooBig if given >uint24.\n\tif h.Length >= (1 << 24) {\n\t\treturn nil, ErrFrameTooBig\n\t}\n\n\tb := make([]byte, 
HeaderLength)\n\n\tputUint24(b, h.Length)\n\tb[3] = byte(h.Type)\n\tb[4] = byte(h.Flags)\n\tputUint31(b[5:], h.StreamID)\n\n\treturn b, nil\n}", "func (UTF8Decoder) Max() int { return utf8.UTFMax }", "func (v *blockValidator) headerEstimatedSerializedSize(header externalapi.BlockHeader) uint64 {\n\tsize := uint64(0)\n\tsize += 2 // Version (uint16)\n\n\tsize += 8 // number of block levels (uint64)\n\tfor _, blockLevelParents := range header.Parents() {\n\t\tsize += 8 // number of parents in the block level (uint64)\n\t\tsize += uint64(externalapi.DomainHashSize * len(blockLevelParents)) // parents\n\t}\n\n\tsize += externalapi.DomainHashSize // HashMerkleRoot\n\tsize += externalapi.DomainHashSize // AcceptedIDMerkleRoot\n\tsize += externalapi.DomainHashSize // UTXOCommitment\n\tsize += 8 // TimeInMilliseconds (int64)\n\tsize += 4 // Bits (uint32)\n\tsize += 8 // Nonce (uint64)\n\n\treturn size\n}", "func (w *response) requestTooLarge() {\n\tw.closeAfterReply = true\n\tw.requestBodyLimitHit = true\n\tif !w.wroteHeader {\n\t\tw.Header().Set(\"Connection\", \"close\")\n\t}\n}", "func (o BucketCorsConfigurationV2CorsRuleOutput) AllowedHeaders() pulumi.StringArrayOutput {\n\treturn o.ApplyT(func(v BucketCorsConfigurationV2CorsRule) []string { return v.AllowedHeaders }).(pulumi.StringArrayOutput)\n}", "func (hf *HeaderFrame) FrameType() byte { return 2 }", "func UnmarshalHeader(data []byte, m *MessageHeader) error {\n\tif len(data) < 16 {\n\t\treturn ErrMessageTooSmall\n\t}\n\td := decoder{buffer: data}\n\tm.Txid = uint32(d.readUint(4))\n\tm.Reserved = uint32(d.readUint(4))\n\tm.Flags = uint32(d.readUint(4))\n\tm.Ordinal = uint32(d.readUint(4))\n\treturn nil\n}", "func readFrame(r io.Reader, aead cipher.AEAD, maxEncryptedFrameSize, packetSize uint32) (int, frame, []byte, error) {\n\t// Read header.\n\theaderSize := encryptedHeaderSize(aead)\n\tencryptedHeader := make([]byte, headerSize)\n\tn1, err := io.ReadFull(r, encryptedHeader)\n\tif err, ok := err.(net.Error); ok && 
err.Timeout() {\n\t\treturn 0, frame{}, []byte{}, err // don't wrap timeout error\n\t}\n\tif err != nil {\n\t\treturn 0, frame{}, []byte{}, errors.AddContext(err, \"failed to read encrypted header\")\n\t}\n\t// Decrypt header.\n\theader, err := decryptFrameHeader(encryptedHeader, aead)\n\tif err != nil {\n\t\treturn 0, frame{}, []byte{}, errors.AddContext(err, \"failed to decrypt frame header\")\n\t}\n\t// Unmarshal header.\n\tvar fh frameHeader\n\tif err := fh.Unmarshal(header); err != nil {\n\t\treturn 0, frame{}, []byte{}, errors.AddContext(err, \"failed to unmarshal frame header\")\n\t}\n\t// Check payload length.\n\tmaxPayloadSize := maxFramePayloadSize(maxEncryptedFrameSize, aead)\n\tif fh.length > uint32(maxPayloadSize) {\n\t\treturn 0, frame{}, []byte{}, fmt.Errorf(\"frame payload is too large %v > %v\", fh.length, maxPayloadSize)\n\t}\n\t// Compute encrypted payload length. We expect the amount of padding after\n\t// the payload to be < packetSize.\n\tencryptionOverhead := aead.Overhead() + aead.NonceSize()\n\tencryptedPayloadSize := fh.length + uint32(encryptionOverhead)\n\tif mod := (encryptedPayloadSize + uint32(n1)) % packetSize; mod != 0 {\n\t\tencryptedPayloadSize += (packetSize - mod)\n\t}\n\t// Read payload.\n\tencryptedPayload := make([]byte, encryptedPayloadSize)\n\tn2, err := io.ReadFull(r, encryptedPayload)\n\tif err, ok := err.(net.Error); ok && err.Timeout() {\n\t\treturn 0, frame{}, []byte{}, err // don't wrap timeout error\n\t}\n\tif err != nil {\n\t\treturn 0, frame{}, []byte{}, errors.AddContext(err, \"failed to read encrypted payload\")\n\t}\n\t// Decrypt the payload.\n\tpayload, err := decryptFramePayload(encryptedPayload, aead)\n\tif err != nil {\n\t\treturn 0, frame{}, []byte{}, errors.AddContext(err, \"failed to decrypt payload\")\n\t}\n\tf := frame{\n\t\tframeHeader: fh,\n\t\tpayload: payload[:fh.length],\n\t}\n\treturn n1 + n2, f, payload[fh.length:], nil\n}", "func (d *Decoder) readFrame() (*Frame, error) {\n\theader := 
[10]byte{}\n\tn, err := io.ReadFull(d.r, header[:])\n\td.n += n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tallZero := [10]byte{}\n\n\tif bytes.Equal(header[:], allZero[:]) {\n\t\t// Reached padding. Exit.\n\t\treturn nil, nil\n\t}\n\n\t// Frame ID $xx xx xx xx (four characters)\n\t// Size $xx xx xx xx\n\t// Flags $xx xx\n\n\t// verify if the id is a valid string\n\tidRaw := header[0:4]\n\tfor _, c := range idRaw {\n\t\tif !(('A' <= c && c <= 'Z') || ('0' <= c && c <= '9')) {\n\t\t\treturn nil, fmt.Errorf(\"invalid header: %v\", idRaw)\n\t\t}\n\t}\n\n\tid := string(idRaw)\n\n\t// It's safe to represent size as a 32-bit signed int, even if the spec says\n\t// it uses 32-bit integer without specifying it's signed or unsigned,\n\t// because the Size section of tag header can only store an 28-bit signed\n\t// integer.\n\t//\n\t// See decodeTagSize for details.\n\t//\n\t// FIXME: find a way to read signed int directly, without explicit type conversion\n\tsize := int(binary.BigEndian.Uint32(header[4:8]))\n\tflags := binary.BigEndian.Uint16(header[8:10])\n\tdata := make([]byte, size)\n\t// In case of HTTP response body, r is a bufio.Reader, and in some cases\n\t// r.Read() may not fill the whole len(data). 
Using io.ReadFull ensures it\n\t// fills the whole len(data) slice.\n\tn, err = io.ReadFull(d.r, data)\n\n\td.n += n\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tframe := new(Frame)\n\tframe.ID = id\n\tframe.Flags = flags\n\tframe.Data = data\n\n\treturn frame, nil\n}", "func (h NalHeader) MarshalSize() int {\n\t// NOTE: Be careful to match the MarshalTo() method.\n\treturn 1\n}", "func (h *Header) Size() common.StorageSize {\n\treturn common.StorageSize(unsafe.Sizeof(*h)) + common.StorageSize(len(h.Extra)+\n\t\t(h.SnailNumber.BitLen()+h.Number.BitLen()+h.Time.BitLen())/8)\n}", "func (t ExpireDelegationTokenResponse) Size(version int16) int32 {\n\tvar sz int32\n\tsz += sizeof.Int16 // ErrorCode\n\tsz += sizeof.Int64 // ExpiryTimestampMs\n\tsz += sizeof.Int32 // ThrottleTimeMs\n\treturn sz\n}", "func (c PktCnf1) MaxLen() int {\n\treturn int(c & 0xff)\n}", "func ValidateMsgHeader(msgData []byte) bool {\n return len(msgData) >= HEADER_LEN_B\n}", "func (byteArrayDecoderTraits) BytesRequired(n int) int {\n\treturn parquet.ByteArrayTraits.BytesRequired(n)\n}", "func parseRequestHeader(bufHeader []byte) (RequestHeader, error) {\n\tret := RequestHeader{}\n\tbuf := bufHeader\n\n\tret.Magic = uint8(buf[0])\n\tif ret.Magic != MagicRequest {\n\t\treturn RequestHeader{}, fmt.Errorf(\"Magic byte is not 0x80: %x\", ret.Magic)\n\t}\n\tbuf = buf[1:]\n\n\tret.Opcode = uint8(buf[0])\n\t_, ok := OpHandler[ret.Opcode]\n\tif !ok {\n\t\treturn RequestHeader{}, fmt.Errorf(\"Opcode byte is not recognized: %x\", ret.Opcode)\n\t}\n\tbuf = buf[1:]\n\n\tret.KeyLength = GetUint16(buf)\n\tbuf = buf[2:]\n\n\tret.ExtraLength = uint8(buf[0])\n\tbuf = buf[1:]\n\n\tret.DataType = uint8(buf[0])\n\tif ret.DataType != 0x00 {\n\t\treturn RequestHeader{}, fmt.Errorf(\"DataType byte is supposed to be 0x00: %x\", ret.DataType)\n\t}\n\tbuf = buf[1:]\n\n\tret.VBucketID = GetUint16(buf)\n\tbuf = buf[2:]\n\n\tret.TotalBodyLength = GetUint32(buf)\n\tif uint64(ret.TotalBodyLength) < 
uint64(ret.KeyLength)+uint64(ret.ExtraLength) {\n\t\treturn RequestHeader{}, fmt.Errorf(\"TotaoBodyLength is supposed to be no less than KeyLength + ExtraLength: total: %d key: %d extra %d\", ret.TotalBodyLength, ret.KeyLength, ret.ExtraLength)\n\t}\n\tbuf = buf[4:]\n\n\tret.Opaque = GetUint32(buf)\n\tbuf = buf[4:]\n\n\tret.CAS = GetUint64(buf)\n\n\treturn ret, nil\n}", "func (f FormatHeader) BlockSize() uint16 {\n\treturn (f.BitsPerSample / 8) * f.NumChannels\n}", "func RenderHeaderFieldsTooLarge(w http.ResponseWriter, message ...interface{}) {\n\tRender(w, HeaderFieldsTooLarge(message...))\n}", "func (o NetworkPacketCaptureOutput) MaximumBytesPerPacket() pulumi.IntPtrOutput {\n\treturn o.ApplyT(func(v *NetworkPacketCapture) pulumi.IntPtrOutput { return v.MaximumBytesPerPacket }).(pulumi.IntPtrOutput)\n}", "func (msg *Message) parseFormat1HeaderBytes(data []byte) error {\n\tif headerSize := len(data); headerSize < Format1HeaderSize {\n\t\treturn fmt.Errorf(errorShortMessageSize, (headerSize + FrameHeaderSize), (Format1HeaderSize + FrameHeaderSize))\n\t}\n\n\t// SEOJ\n\n\tmsg.seoj[0] = data[0]\n\tmsg.seoj[1] = data[1]\n\tmsg.seoj[2] = data[2]\n\n\t// DEOJ\n\n\tmsg.deoj[0] = data[3]\n\tmsg.deoj[1] = data[4]\n\tmsg.deoj[2] = data[5]\n\n\t// ESV\n\n\tmsg.esv = ESV(data[6])\n\n\t// OPC\n\n\terr := msg.SetOPC(int(data[7]))\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}", "func (pk PacketBufferPtr) HeaderSize() int {\n\treturn pk.pushed + pk.consumed\n}", "func (fm *FieldModelMapInt32OptionalBytes) FBESize() int { return 4 }", "func MaxAllowedSectionSize(max uint64) Option {\n\treturn func(o *Options) {\n\t\to.MaxAllowedSectionSize = max\n\t}\n}", "func (msg *Message) parseFrameHeaderBytes(data []byte) error {\n\tif headerSize := len(data); headerSize < FrameHeaderSize {\n\t\treturn fmt.Errorf(errorShortMessageSize, headerSize, FrameHeaderSize)\n\t}\n\n\t// Check Headers\n\n\tif data[0] != EHD1Echonet {\n\t\treturn fmt.Errorf(errorInvalidMessageHeader, 0, 
data[0], EHD1Echonet)\n\t}\n\n\tif data[1] != EHD2Format1 {\n\t\treturn fmt.Errorf(errorInvalidMessageHeader, 1, data[1], EHD2Format1)\n\t}\n\n\t// TID\n\n\tmsg.tid[0] = data[2]\n\tmsg.tid[1] = data[3]\n\n\treturn nil\n}", "func TestSmallTagHeader(t *testing.T) {\n\tt.Parallel()\n\n\t_, err := parseHeader(bytes.NewReader([]byte{0, 0, 0}))\n\tif err != ErrSmallHeaderSize {\n\t\tt.Fatalf(\"Expected err contains %q, got %q\", \"less than expected\", err)\n\t}\n}", "func RequestHeaderContentLengthValidator(req http.Request, bodyMaxSize int64) int {\n\tvar contentLength int64\n\tcontentLengthHeader := req.Header.Get(\"Content-Length\")\n\tif contentLengthHeader != \"\" {\n\t\tvar err error\n\t\tcontentLength, err = strconv.ParseInt(contentLengthHeader, 10, 64)\n\t\tif err != nil {\n\t\t\treturn http.StatusBadRequest\n\t\t}\n\t}\n\tif contentLength > bodyMaxSize || req.ContentLength > bodyMaxSize {\n\t\treturn http.StatusRequestEntityTooLarge\n\t}\n\treturn 0\n}", "func DeserializeHeader(bytes []byte) (Header, error) {\n\tvar header Header\n\tif len(bytes) < 12 {\n\t\treturn header, errors.New(fmt.Sprint(\"bytes too short to deserialize dnsmessage.Header, expected at least 12 bytes but got\", len(bytes)))\n\t}\n\theader.ID = binary.BigEndian.Uint16(bytes[0:2])\n\theader.parseFlag(binary.BigEndian.Uint16(bytes[2:4]))\n\theader.QuestionCount = binary.BigEndian.Uint16(bytes[4:6])\n\theader.AnswerRecordCount = binary.BigEndian.Uint16(bytes[6:8])\n\theader.AuthorityRecordCount = binary.BigEndian.Uint16(bytes[8:10])\n\theader.AdditionalRecordCount = binary.BigEndian.Uint16(bytes[10:12])\n\treturn header, nil\n}", "func ReadHeader(buffer *BytePacketBuffer) (Header, error) {\n\tid, err := buffer.ReadU16()\n\tif err != nil {\n\t\treturn Header{}, err\n\t}\n\n\tflags, err := buffer.ReadU16()\n\tif err != nil {\n\t\treturn Header{}, err\n\t}\n\n\tlowFlags := uint8(flags >> 8)\n\trecursionDesired := lowFlags&(1<<0) > 0\n\ttruncatedMessage := lowFlags&(1<<1) > 
0\n\tauthoritativeAnswer := lowFlags&(1<<2) > 0\n\topcode := (lowFlags >> 3) & 0x0F\n\tresponse := lowFlags&(1<<7) > 0\n\n\thighFlags := uint8(flags & 0xFF)\n\trescode := ResultCode(highFlags & 0x0F)\n\tcheckingDisabled := highFlags&(1<<4) > 0\n\tauthedData := highFlags&(1<<5) > 0\n\tz := highFlags&(1<<6) > 0\n\trecursionAvailable := highFlags&(1<<7) > 0\n\n\tquestions, err := buffer.ReadU16()\n\tif err != nil {\n\t\treturn Header{}, err\n\t}\n\n\tanswers, err := buffer.ReadU16()\n\tif err != nil {\n\t\treturn Header{}, err\n\t}\n\n\tauthoritativeEntires, err := buffer.ReadU16()\n\tif err != nil {\n\t\treturn Header{}, err\n\t}\n\n\tresourceEntries, err := buffer.ReadU16()\n\tif err != nil {\n\t\treturn Header{}, err\n\t}\n\n\treturn Header{\n\t\tid: id,\n\t\trecursionDesired: recursionDesired,\n\t\ttruncatedMessage: truncatedMessage,\n\t\tauthoritativeAnswer: authoritativeAnswer,\n\t\topcode: opcode,\n\t\tresponse: response,\n\t\trescode: rescode,\n\t\tcheckingDisabled: checkingDisabled,\n\t\tauthedData: authedData,\n\t\tz: z,\n\t\trecursionAvailable: recursionAvailable,\n\t\tquestions: questions,\n\t\tanswers: answers,\n\t\tauthoritativeEntires: authoritativeEntires,\n\t\tresourceEntries: resourceEntries,\n\t}, nil\n}", "func MaxDataBytesNoEvidence(maxBytes int64, keyType crypto.KeyType, valsCount int) int64 {\n\tmaxDataBytes := maxBytes -\n\t\tMaxOverheadForBlock -\n\t\tMaxHeaderBytes -\n\t\tMaxCoreChainLockSize -\n\t\tMaxCommitOverheadBytes\n\n\tif maxDataBytes < 0 {\n\t\tpanic(fmt.Sprintf(\n\t\t\t\"Negative MaxDataBytesUnknownEvidence. 
Block.MaxBytes=%d is too small to accommodate header&lastCommit&evidence=%d\",\n\t\t\tmaxBytes,\n\t\t\t-(maxDataBytes - maxBytes),\n\t\t))\n\t}\n\n\treturn maxDataBytes\n}", "func (m *ModifyBearerResponse) SetLength() {\n\tm.Header.Length = uint16(m.MarshalLen() - 4)\n}", "func extractGIF1stFrame(bytes []byte) (int, error) {\n\tsize := len(bytes)\n\tif size < 13 {\n\t\treturn size, errors.New(\"too short header\")\n\t}\n\tflags := bytes[10]\n\tglobalColorTableFlag := (flags & 0x80) >> 7\n\tsizeOfGlobalColorTable := (flags & 0x07)\n\tvar offset = 13\n\tif globalColorTableFlag != 0 {\n\t\tcolorTableSize := int(math.Pow(2, float64(sizeOfGlobalColorTable+1)))\n\t\toffset += 3 * colorTableSize\n\t\tif size < offset {\n\t\t\treturn size, errors.New(\"too short global colorTable\")\n\t\t}\n\t}\n\tfor {\n\t\tif size < (offset + 1) {\n\t\t\treturn size, errors.New(\"missing separator\")\n\t\t}\n\t\tseparator := bytes[offset]\n\t\toffset++\n\t\tswitch separator {\n\t\tcase 0x3B: // Trailer\n\t\tcase 0x21: // Extention\n\t\t\tif size < (offset + 2) {\n\t\t\t\treturn size, errors.New(\"missing extention block header\")\n\t\t\t}\n\t\t\textensionBlockLabel := bytes[offset]\n\t\t\textensionDataSize := bytes[offset+1]\n\t\t\toffset += 2 + int(extensionDataSize)\n\t\t\tif size < offset {\n\t\t\t\treturn size, errors.New(\"too short extension block\")\n\t\t\t}\n\t\t\tif extensionBlockLabel == 0xff { // Application Extension\n\t\t\t\tfor {\n\t\t\t\t\tif size < (offset + 1) {\n\t\t\t\t\t\treturn size, errors.New(\"missing extension subblock size field\")\n\t\t\t\t\t}\n\t\t\t\t\tsubBlockSize := bytes[offset]\n\t\t\t\t\toffset++\n\t\t\t\t\tif subBlockSize == 0 {\n\t\t\t\t\t\tbreak\n\t\t\t\t\t}\n\t\t\t\t\toffset += int(subBlockSize)\n\t\t\t\t\tif size < offset {\n\t\t\t\t\t\treturn size, errors.New(\"to short extension subblock\")\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\toffset++ // extensionBlock Trailer\n\t\t\t}\n\t\tcase 0x2C: // Image\n\t\t\tif size < (offset + 9) 
{\n\t\t\t\treturn size, errors.New(\"too short image header\")\n\t\t\t}\n\t\t\tflags := bytes[offset+8]\n\t\t\tlocalColorTableFlag := (flags & 0x80) >> 7\n\t\t\tsizeOfLocalColorTable := (flags & 0x07)\n\t\t\toffset += 9\n\t\t\tif localColorTableFlag != 0 {\n\t\t\t\tcolorTableSize := int(math.Pow(2, float64(sizeOfLocalColorTable+1)))\n\t\t\t\toffset += 3 * colorTableSize\n\t\t\t\tif size < offset {\n\t\t\t\t\treturn size, errors.New(\"too short local colorTable\")\n\t\t\t\t}\n\t\t\t}\n\t\t\toffset++ // LZWMinimumCodeSize\n\t\t\tfor {\n\t\t\t\tif size < (offset + 1) {\n\t\t\t\t\treturn size, errors.New(\"missing image subblock size field\")\n\t\t\t\t}\n\t\t\t\tsubBlockSize := bytes[offset]\n\t\t\t\toffset++\n\t\t\t\tif subBlockSize == 0 {\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t\toffset += int(subBlockSize)\n\t\t\t\tif size < offset {\n\t\t\t\t\treturn size, errors.New(\"too short image subblock\")\n\t\t\t\t}\n\t\t\t}\n\t\t\tif size < (offset + 1) {\n\t\t\t\treturn size, errors.New(\"missing separator for trailer overwrite\")\n\t\t\t}\n\t\t\tbytes[offset] = 0x3B // trailer overwrite\n\t\tdefault:\n\t\t\t// nothing to do\n\t\t}\n\t\tif separator == 0x3B {\n\t\t\tbreak\n\t\t}\n\t}\n\treturn offset, nil\n}", "func readHeaderData(reader *bytes.Reader, totalSize uint64) (*dataHeader, error) {\n\thdrData := dataHeader{}\n\terr := binary.Read(reader, binary.LittleEndian, &hdrData)\n\n\t/*\n\t * Check if data header was read.\n\t */\n\tif err != nil {\n\t\tmsg := err.Error()\n\t\treturn nil, fmt.Errorf(\"Failed to read data header: %s\", msg)\n\t} else {\n\t\tmaxDataLength := totalSize - MIN_TOTAL_HEADER_SIZE\n\t\tmaxDataLength32 := uint32(maxDataLength)\n\t\tchunkId := hdrData.ChunkID\n\t\tchunkSize := hdrData.ChunkSize\n\n\t\t/*\n\t\t * Check data header for validity.\n\t\t */\n\t\tif chunkId != ID_DATA {\n\t\t\treturn nil, fmt.Errorf(\"Data header contains invalid chunk id. 
Expected %#08x, found %#08x.\", ID_DATA, chunkId)\n\t\t} else if (chunkSize > maxDataLength32) && (chunkSize != math.MaxUint32) {\n\t\t\treturn nil, fmt.Errorf(\"Data header contains invalid chunk size. Expected at most %#08x (or %#08x), found %#08x.\", maxDataLength32, uint32(math.MaxUint32), chunkSize)\n\t\t} else {\n\t\t\treturn &hdrData, nil\n\t\t}\n\n\t}\n\n}", "func handleConn(conn net.Conn) {\n\tdefer conn.Close()\n\n\tlog.Print(\"read request to buffer\")\n\tconst maxHeaderSize = 4096\n\treader := bufio.NewReaderSize(conn, maxHeaderSize)\n\twriter := bufio.NewWriter(conn)\n\tcounter := 0\n\tbuf := [maxHeaderSize]byte{}\n\t// naive header limit\n\tfor {\n\t\tif counter == maxHeaderSize {\n\t\t\tlog.Printf(\"too long request header\")\n\t\t\twriter.WriteString(\"HTTP/1.1 413 Payload Too Large\\r\\n\")\n\t\t\twriter.WriteString(\"Content-Length: 0\\r\\n\")\n\t\t\twriter.WriteString(\"Connection: close\\r\\n\")\n\t\t\twriter.WriteString(\"\\r\\n\")\n\t\t\twriter.Flush()\n\t\t\treturn\n\t\t}\n\n\t\tread, err := reader.ReadByte()\n\t\tif err != nil {\n\t\t\tlog.Printf(\"can't read request line: %v\", err)\n\t\t\twriter.WriteString(\"HTTP/1.1 400 Bad Request\\r\\n\")\n\t\t\twriter.WriteString(\"Content-Length: 0\\r\\n\")\n\t\t\twriter.WriteString(\"Connection: close\\r\\n\")\n\t\t\twriter.WriteString(\"\\r\\n\")\n\t\t\twriter.Flush()\n\t\t\treturn\n\t\t}\n\t\tbuf[counter] = read\n\t\tcounter++\n\n\t\tif counter < 4 {\n\t\t\tcontinue\n\t\t}\n\n\t\tif string(buf[counter-4:counter]) == \"\\r\\n\\r\\n\" {\n\t\t\tbreak\n\t\t}\n\t}\n\n\tlog.Print(\"headers found\")\n\theadersStr := string(buf[:counter - 4])\n\n\theaders := make(map[string]string) // TODO: в оригинале map[string][]string\n\trequestHeaderParts := strings.Split(headersStr, \"\\r\\n\")\n\n\tlog.Print(\"parse request line\")\n\trequestLine := requestHeaderParts[0]\n\tlog.Printf(\"request line: %s\", requestLine)\n\n\tlog.Print(\"parse headers\")\n\tfor _, headerLine := range requestHeaderParts[1:] 
{\n\t\theaderParts := strings.SplitN(headerLine, \": \", 2)\n\t\theaders[strings.TrimSpace(headerParts[0])] = strings.TrimSpace(headerParts[1]) // TODO: are we allow empty header?\n\t}\n\tlog.Printf(\"headers: %v\", headers)\n\n\thtml := fmt.Sprintf(`<!doctype html>\n<html lang=\"en\">\n<head>\n <meta charset=\"UTF-8\">\n <meta name=\"viewport\"\n content=\"width=device-width, user-scalable=no, initial-scale=1.0, maximum-scale=1.0, minimum-scale=1.0\">\n <meta http-equiv=\"X-UA-Compatible\" content=\"ie=edge\">\n <title>Document</title>\n</head>\n<body>\n <h1>Hello from golang %s</h1>\n</body>\n</html>`, runtime.Version())\n\n\tlog.Print(\"send response\")\n\twriter.WriteString(\"HTTP/1.1 200 OK\\r\\n\")\n\twriter.WriteString(fmt.Sprintf(\"Content-Length: %d\\r\\n\", len(html)))\n\twriter.WriteString(\"Connection: close\\r\\n\")\n\twriter.WriteString(\"\\r\\n\")\n\twriter.WriteString(html)\n\twriter.Flush()\n\n\tlog.Print(\"done\")\n\treturn\n}", "func MaxEncodedLen(b []byte) int {\n\tmaxlen := int(math.Ceil(float64(len(b)) / BitsPerDigit * 8))\n\treturn maxlen\n}", "func TestHeader(t *testing.T) {\n\n\thdr := Header{\"MyHdr1\", []byte(\"a string\")}\n\tif hdr.String() != \"MyHdr1=\\\"a string\\\"\" {\n\t\tt.Errorf(\"Unexpected: %s\", hdr.String())\n\t}\n\n\thdr = Header{\"MyHdr2\", []byte(\"a longer string that will be truncated right here <-- so you wont see this part.\")}\n\tif hdr.String() != \"MyHdr2=\\\"a longer string that will be truncated right here \\\"(30 more bytes)\" {\n\t\tt.Errorf(\"Unexpected: %s\", hdr.String())\n\t}\n\n\thdr = Header{\"MyHdr3\", []byte{1, 2, 3, 4}}\n\tif hdr.String() != \"MyHdr3=\\\"\\\\x01\\\\x02\\\\x03\\\\x04\\\"\" {\n\t\tt.Errorf(\"Unexpected: %s\", hdr.String())\n\t}\n\n}", "func MakeHeader(data []byte) ([]byte, error) {\n\theader := make([]byte, 4)\n\n\tlength := uint32(len(data))\n\n\tif length > 0x7fffffff {\n\t\treturn nil, errors.New(\"Data to large\")\n\t}\n\n\theader[0] = byte((length >> 24) & 0xff)\n\theader[1] = 
byte((length >> 16) & 0xff)\n\theader[2] = byte((length >> 8) & 0xff)\n\theader[3] = byte((length >> 0) & 0xff)\n\n\treturn header, nil\n}" ]
[ "0.67647296", "0.63822037", "0.60273457", "0.5796491", "0.5741116", "0.5712794", "0.5712794", "0.55037737", "0.54724437", "0.54210025", "0.54210025", "0.54165447", "0.53984994", "0.5197604", "0.5164738", "0.5153342", "0.50923467", "0.50772697", "0.5059694", "0.502238", "0.50169086", "0.49834615", "0.49759427", "0.4951584", "0.49227318", "0.49224046", "0.48794144", "0.48630556", "0.48425454", "0.4832467", "0.48213804", "0.47730905", "0.47366476", "0.47333872", "0.47256845", "0.47112405", "0.46951443", "0.4694762", "0.4691685", "0.4687955", "0.46761042", "0.4673901", "0.46635115", "0.46540776", "0.46343866", "0.46118063", "0.461048", "0.45955878", "0.45948973", "0.45898893", "0.45897633", "0.45674506", "0.45554063", "0.45526445", "0.45463008", "0.45450586", "0.45429006", "0.45399374", "0.45315114", "0.45217878", "0.4513446", "0.45129082", "0.4511414", "0.45107633", "0.4506571", "0.4495195", "0.44900244", "0.44867113", "0.44774127", "0.4475397", "0.44598675", "0.44526452", "0.44490567", "0.44461152", "0.44458944", "0.4440208", "0.44320333", "0.4429126", "0.4427023", "0.441777", "0.44159305", "0.44145042", "0.43947443", "0.43945086", "0.4393924", "0.43883517", "0.43822607", "0.43816468", "0.43802887", "0.43750298", "0.43741423", "0.43734777", "0.43724313", "0.4371097", "0.43698978", "0.43698528", "0.4366269", "0.43618563", "0.43614975", "0.4357589" ]
0.7392962
0
WriteAsCarV1 is a write option which makes a CAR interface (blockstore or storage) write the output as a CARv1 only, with no CARv2 header or index. Indexing is used internally during write but is discarded upon finalization. Note that this option only affects the storage interfaces (blockstore or storage), and is ignored by the root gocar/v2 package.
WriteAsCarV1 — это параметр записи, который заставляет интерфейс CAR (blockstore или storage) записывать выходные данные только в формате CARv1, без заголовка или индекса CARv2. Индексирование используется внутренне во время записи, но отбрасывается при завершении. Примечание: этот параметр влияет только на интерфейсы хранения (blockstore или storage), и игнорируется пакетом root gocar/v2.
func WriteAsCarV1(asCarV1 bool) Option { return func(o *Options) { o.WriteAsCarV1 = asCarV1 } }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (r *RAMOutputStream) WriteToV1(bytes []byte) error {\n\terr := r.flush()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tend := int(r.file.length)\n\tpos, buffer, bytesUpto := 0, 0, 0\n\n\tfor pos < end {\n\t\tlength := r.bufferSize\n\t\tnextPos := pos + length\n\t\tif nextPos > end {\n\t\t\tlength = end - pos\n\t\t}\n\n\t\tsrc := r.file.getBuffer(buffer)[:length]\n\t\tcopy(bytes[bytesUpto:], src)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tbuffer++\n\t\tbytesUpto += length\n\t\tpos = nextPos\n\t}\n\treturn nil\n}", "func (crc *CasbinRuleCreate) SetV1(s string) *CasbinRuleCreate {\n\tcrc.mutation.SetV1(s)\n\treturn crc\n}", "func (c ConfChange) AsV1() (ConfChange, bool) {\n\treturn c, true\n}", "func (_PBridge *PBridgeTransactor) UpgradeContractS1(opts *bind.TransactOpts) (*types.Transaction, error) {\n\treturn _PBridge.contract.Transact(opts, \"upgradeContractS1\")\n}", "func (c *Controller) Write(value byte) {\n\tc.strobe = value&1 == 1\n\tif c.strobe {\n\t\tc.index = 0\n\t}\n}", "func (d *Encoder) One(v interface{}) error {\n\theader := deriveHeader(v)\n\trecord := makeRecord(v, header)\n\tif !d.headWritten {\n\t\td.Csvwriter.Write(header)\n\t\td.headWritten = true\n\t}\n\n\terr := d.Csvwriter.Write(record)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}", "func (cc *CarCreate) SetCarNo(s string) *CarCreate {\n\tcc.mutation.SetCarNo(s)\n\treturn cc\n}", "func NewV1Encoder(b []byte) *V1Encoder {\n\treturn &V1Encoder{\n\t\tdata: b,\n\t}\n}", "func (a *Client) PutCredentialV1(params *PutCredentialV1Params) (*PutCredentialV1OK, error) {\n\t// TODO: Validate the params before sending\n\tif params == nil {\n\t\tparams = NewPutCredentialV1Params()\n\t}\n\n\tresult, err := a.transport.Submit(&runtime.ClientOperation{\n\t\tID: \"putCredentialV1\",\n\t\tMethod: \"PUT\",\n\t\tPathPattern: \"/v1/credentials\",\n\t\tProducesMediaTypes: []string{\"application/json\"},\n\t\tConsumesMediaTypes: []string{\"application/json\"},\n\t\tSchemes: 
[]string{\"http\", \"https\"},\n\t\tParams: params,\n\t\tReader: &PutCredentialV1Reader{formats: a.formats},\n\t\tContext: params.Context,\n\t\tClient: params.HTTPClient,\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn result.(*PutCredentialV1OK), nil\n\n}", "func (w *Writer) Put1(n int) *Writer {\n\tif n < 0 || 1<<8 <= n {\n\t\tpanic(\"stor.Writer.Put1 value outside range\")\n\t}\n\tw.buf = append(w.buf,\n\t\tbyte(n))\n\treturn w\n}", "func (w *RWWrapper) WriteHeader(statusCode int) {\n\tif w.statusWritten {\n\t\treturn\n\t}\n\n\tw.configureHeader()\n\tw.rw.WriteHeader(statusCode)\n\tw.statusWritten = true\n}", "func (m *DigicamControl) Write(version int) (output []byte, err error) {\n\tvar buffer bytes.Buffer\n\n\t// Ensure only Version 1 or Version 2 were specified\n\tif version != 1 && version != 2 {\n\t\terr = mavlink2.ErrUnsupportedVersion\n\t\treturn\n\t}\n\n\t// Don't attempt to Write V2 messages to V1 bodies\n\tif m.GetID() > 255 && version < 2 {\n\t\terr = mavlink2.ErrEncodeV2MessageV1Frame\n\t\treturn\n\t}\n\n\terr = binary.Write(&buffer, binary.LittleEndian, *m)\n\tif err != nil {\n\t\treturn\n\t}\n\n\toutput = buffer.Bytes()\n\n\t// V1 uses fixed message lengths and does not include any extension fields\n\t// Truncate the byte slice to the correct length\n\t// This also removes the trailing extra byte written for HasExtensionFieldValues\n\tif version == 1 {\n\t\toutput = output[:m.getV1Length()]\n\t}\n\n\t// V2 uses variable message lengths and includes extension fields\n\t// The variable length is caused by truncating any trailing zeroes from\n\t// the end of the message before it is added to a frame\n\tif version == 2 {\n\t\t// Set HasExtensionFieldValues to zero so that it doesn't interfere with V2 truncation\n\t\toutput[len(output)-1] = 0\n\t\toutput = util.TruncateV2(buffer.Bytes())\n\t}\n\n\treturn\n\n}", "func (mb *client) WriteSingleCoil(address, value uint16) (results []byte, err error) {\n\t// The requested ON/OFF state can only 
be 0xFF00 and 0x0000\n\tif value != 0xFF00 && value != 0x0000 {\n\t\terr = fmt.Errorf(\"modbus: state '%v' must be either 0xFF00 (ON) or 0x0000 (OFF)\", value)\n\t\treturn\n\t}\n\trequest := ProtocolDataUnit{\n\t\tFunctionCode: FuncCodeWriteSingleCoil,\n\t\tData: dataBlock(address, value),\n\t}\n\tresponse, err := mb.send(&request)\n\tif err != nil {\n\t\treturn\n\t}\n\t// Fixed response length\n\tif len(response.Data) != 4 {\n\t\terr = fmt.Errorf(\"modbus: response data size '%v' does not match expected '%v'\", len(response.Data), 4)\n\t\treturn\n\t}\n\trespValue := binary.BigEndian.Uint16(response.Data)\n\tif address != respValue {\n\t\terr = fmt.Errorf(\"modbus: response address '%v' does not match request '%v'\", respValue, address)\n\t\treturn\n\t}\n\tresults = response.Data[2:]\n\trespValue = binary.BigEndian.Uint16(results)\n\tif value != respValue {\n\t\terr = fmt.Errorf(\"modbus: response value '%v' does not match request '%v'\", respValue, value)\n\t\treturn\n\t}\n\treturn\n}", "func (_PBridge *PBridgeSession) UpgradeContractS1() (*types.Transaction, error) {\n\treturn _PBridge.Contract.UpgradeContractS1(&_PBridge.TransactOpts)\n}", "func PutBufioWriter1K(w *bufio.Writer) bool {\n\tif w == nil {\n\t\treturn false\n\t}\n\tif l := w.Size(); l < 1024 || l >= 2048 {\n\t\treturn PutBufioWriter(w)\n\t}\n\tw.Reset(nil) // to not keep the parent writer alive\n\tputw1K(w)\n\treturn true\n}", "func (t *Type1) Write(w io.Writer) error {\n\tvar err error\n\tfor i := 0; i < 3; i++ {\n\t\terr = t.writeSegment(w, i)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\tpostfix := []byte{128, 3}\n\t_, err = w.Write(postfix)\n\treturn err\n}", "func (r1cs *R1CS) WriteTo(w io.Writer) (int64, error) {\n\t_w := ioutils.WriterCounter{W: w} // wraps writer to count the bytes written\n\tencoder := cbor.NewEncoder(&_w)\n\n\t// encode our object\n\terr := encoder.Encode(r1cs)\n\treturn _w.N, err\n}", "func v1beta1Tov1(csc *storagev1beta1.CSIStorageCapacity) 
*storagev1.CSIStorageCapacity {\n\treturn &storagev1.CSIStorageCapacity{\n\t\tObjectMeta: csc.ObjectMeta,\n\t\tNodeTopology: csc.NodeTopology,\n\t\tStorageClassName: csc.StorageClassName,\n\t\tCapacity: csc.Capacity,\n\t\tMaximumVolumeSize: csc.MaximumVolumeSize,\n\t}\n}", "func V1() (*v1.Client, error) {\n\treturn nil, docker.ErrDockerNotCompiled\n}", "func (_PBridge *PBridgeTransactorSession) UpgradeContractS1() (*types.Transaction, error) {\n\treturn _PBridge.Contract.UpgradeContractS1(&_PBridge.TransactOpts)\n}", "func (a *api) DescribeClassroomV1(ctx context.Context,\n\treq *grpcApi.DescribeClassroomV1Request) (res *grpcApi.DescribeClassroomV1Response, err error) {\n\n\tdefer utils.LogGrpcCall(\"CreateClassroomV1\", &req, &res, &err)\n\n\tif err = req.Validate(); err != nil {\n\n\t\terr = status.Error(codes.InvalidArgument, err.Error())\n\t\treturn nil, err\n\t}\n\n\tclassroom, err := a.classroomRepo.DescribeClassroom(ctx, req.ClassroomId)\n\tif err != nil {\n\n\t\terr = status.Error(codes.Unavailable, err.Error())\n\t\treturn nil, err\n\t}\n\n\tprotoClassroom := classroom.ToProtoClassroom()\n\n\tres = &grpcApi.DescribeClassroomV1Response{Classroom: protoClassroom}\n\treturn res, nil\n}", "func (s *SmartContract) CreateCar(ctx contractapi.TransactionContextInterface, carNumber string, make string) error {\n\tcar := Car{\n\t\tMessage: make,\n\t}\n\n\tcarAsBytes, _ := json.Marshal(car)\n\n\treturn ctx.GetStub().PutState(carNumber, carAsBytes)\n}", "func (r *restApiImpl) V1() (v1.CoreV1) {\n return r.v1\n}", "func ImplementationWrapAsn1WriterCopy(pointer unsafe.Pointer) (Asn1Writer, error) {\n\tctx := (*C.vscf_impl_t)(pointer)\n\tshallowCopy := C.vscf_impl_shallow_copy(ctx)\n\treturn ImplementationWrapAsn1Writer(unsafe.Pointer(shallowCopy))\n}", "func (c *CardScanClient) UpdateOne(cs *CardScan) *CardScanUpdateOne {\n\tmutation := newCardScanMutation(c.config, OpUpdateOne, withCardScan(cs))\n\treturn &CardScanUpdateOne{config: c.config, hooks: c.Hooks(), mutation: 
mutation}\n}", "func (c *CompressingResponseWriter) WriteHeader(status int) {\n\tc.writer.WriteHeader(status)\n}", "func (a *Client) CreateIOAExclusionsV1(params *CreateIOAExclusionsV1Params, opts ...ClientOption) (*CreateIOAExclusionsV1OK, error) {\n\t// TODO: Validate the params before sending\n\tif params == nil {\n\t\tparams = NewCreateIOAExclusionsV1Params()\n\t}\n\top := &runtime.ClientOperation{\n\t\tID: \"createIOAExclusionsV1\",\n\t\tMethod: \"POST\",\n\t\tPathPattern: \"/policy/entities/ioa-exclusions/v1\",\n\t\tProducesMediaTypes: []string{\"application/json\"},\n\t\tConsumesMediaTypes: []string{\"application/json\"},\n\t\tSchemes: []string{\"https\"},\n\t\tParams: params,\n\t\tReader: &CreateIOAExclusionsV1Reader{formats: a.formats},\n\t\tContext: params.Context,\n\t\tClient: params.HTTPClient,\n\t}\n\tfor _, opt := range opts {\n\t\topt(op)\n\t}\n\n\tresult, err := a.transport.Submit(op)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tsuccess, ok := result.(*CreateIOAExclusionsV1OK)\n\tif ok {\n\t\treturn success, nil\n\t}\n\t// unexpected success response\n\t// safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue\n\tmsg := fmt.Sprintf(\"unexpected success response for createIOAExclusionsV1: API contract not enforced by server. 
Client expected to get an error, but got: %T\", result)\n\tpanic(msg)\n}", "func registerModelBridge1Flags(depth int, cmdPrefix string, cmd *cobra.Command) error {\n\n\tif err := registerBridge1ID(depth, cmdPrefix, cmd); err != nil {\n\t\treturn err\n\t}\n\n\tif err := registerBridge1Interfaces(depth, cmdPrefix, cmd); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}", "func (c *Clientset) RbacV1() rbacv1.RbacV1Interface {\n\treturn c.rbacV1\n}", "func (mr *MockInterfaceMockRecorder) BkbcsV1() *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"BkbcsV1\", reflect.TypeOf((*MockInterface)(nil).BkbcsV1))\n}", "func (c *Controller) Write(path string, v interface{}) error {\n\n\tvar buf bytes.Buffer\n\tvar err error\n\n\tswitch strings.ToLower(path[strings.LastIndex(path, \".\"):]) {\n\tcase \".json\":\n\t\terr = json.NewEncoder(&buf).Encode(v)\n\tcase \".gob\":\n\t\terr = gob.NewEncoder(&buf).Encode(v)\n\tdefault:\n\t\treturn errors.New(\"invalid file extension\")\n\t}\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tr := bytes.NewReader(buf.Bytes())\n\n\tinput := &s3.PutObjectInput{\n\t\tBody: aws.ReadSeekCloser(r),\n\t\tBucket: aws.String(c.bucket),\n\t\tKey: aws.String(path),\n\t\tServerSideEncryption: aws.String(\"AES256\"),\n\t}\n\n\tresult, err := c.c3svc.PutObject(input)\n\tif err != nil {\n\t\treturn util.Err(err)\n\t}\n\n\tc.verIDs[path] = result.VersionId\n\n\treturn nil\n}", "func NewMBC1(rom []byte) *MBC1 {\n\treturn &MBC1{\n\t\trom: rom,\n\t\tram: make([]byte, 0x8000),\n\t\tromBankNumber: 1,\n\t\tromBanking: true,\n\t}\n}", "func (m *MockInterface) BkbcsV1() v1.BkbcsV1Interface {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"BkbcsV1\")\n\tret0, _ := ret[0].(v1.BkbcsV1Interface)\n\treturn ret0\n}", "func (es *ElasticClientV5) WriteDirect(index string, id string,\n\ttyp string, v interface{}) error {\n\t_, err := es.client.Index().Index(index).Type(typ).Id(id).BodyJson(v).Do(context.Background())\n\treturn 
err\n}", "func (rc *RentalCreate) SetCar(c *Car) *RentalCreate {\n\treturn rc.SetCarID(c.ID)\n}", "func (w *reqResWriter) writeArg1(arg Output) error {\n\treturn w.writeArg(arg, false, reqResWriterPreArg1, reqResWriterPreArg2)\n}", "func (c *Car) Name() string {\n\treturn \"car\"\n}", "func (c *ChannelData) WriteHeader() {\n\tif len(c.Raw) < channelDataHeaderSize {\n\t\t// Making WriteHeader call valid even when c.Raw\n\t\t// is nil or len(c.Raw) is less than needed for header.\n\t\tc.grow(channelDataHeaderSize)\n\t}\n\t// Early bounds check to guarantee safety of writes below.\n\t_ = c.Raw[:channelDataHeaderSize]\n\tbinary.BigEndian.PutUint16(c.Raw[:channelDataNumberSize], uint16(c.Number))\n\tbinary.BigEndian.PutUint16(c.Raw[channelDataNumberSize:channelDataHeaderSize],\n\t\tuint16(len(c.Data)),\n\t)\n}", "func (mapper *MapperMMC1) WriteByte(addr uint16, data byte) {\n\tif addr < 0x6000 {\n\t\tif addr <= 0x0FFF {\n\t\t\tmapper.memory.cartridge.chr[mapper.getCHR1Index(addr)] = data\n\t\t} else if addr <= 0x1FFF {\n\t\t\tmapper.memory.cartridge.chr[mapper.getCHR2Index(addr)] = data\n\t\t} else if addr <= 0x2FFF {\n\t\t\tmapper.memory.ppu.vram[TranslateVRamAddress(addr, mapper.mirrorMode)] = data\n\t\t}\n\t} else if addr <= 0x7FFF {\n\t\tif mapper.registerPRG&0x10 == 0 {\n\t\t\tmapper.prgRAM[addr-0x6000] = data\n\t\t}\n\t} else {\n\t\tif data&0x80 > 0 {\n\t\t\t// clear shift register\n\t\t\tmapper.shiftNumber = 0\n\t\t\tmapper.shiftRegister = 0\n\t\t} else {\n\t\t\t// add to shift register\n\t\t\tmapper.shiftRegister = mapper.shiftRegister | ((data & 0x1) << uint(mapper.shiftNumber))\n\t\t\tmapper.shiftNumber++\n\t\t}\n\n\t\tif mapper.shiftNumber == 5 {\n\t\t\tswitch (addr >> 13) & 0x3 {\n\t\t\tcase 0:\n\t\t\t\tmapper.registerControl = mapper.shiftRegister\n\n\t\t\t\tswitch mapper.registerControl & 0x3 {\n\t\t\t\tcase 0:\n\t\t\t\t\tmapper.mirrorMode = MirrorSingleA\n\t\t\t\tcase 1:\n\t\t\t\t\tmapper.mirrorMode = MirrorSingleB\n\t\t\t\tcase 
2:\n\t\t\t\t\tmapper.mirrorMode = MirrorVertical\n\t\t\t\tcase 3:\n\t\t\t\t\tmapper.mirrorMode = MirrorHorizontal\n\t\t\t\t}\n\t\t\tcase 1:\n\t\t\t\tmapper.registerCHR0 = mapper.shiftRegister\n\t\t\tcase 2:\n\t\t\t\tmapper.registerCHR1 = mapper.shiftRegister\n\t\t\tcase 3:\n\t\t\t\tmapper.registerPRG = mapper.shiftRegister\n\t\t\t}\n\t\t\tmapper.shiftNumber = 0\n\t\t\tmapper.shiftRegister = 0\n\t\t}\n\t}\n}", "func (t *Type1) writeSegment(w io.Writer, segment int) error {\n\tl := len(t.Segments[segment])\n\tvar asciiBinary byte\n\tif segment == 1 {\n\t\tasciiBinary = 2\n\t} else {\n\t\tasciiBinary = 1\n\t}\n\tprefix := []byte{128, asciiBinary, byte(l & 0xFF), byte(l >> 8 & 0xFF), byte(l >> 16 & 0xFF), byte(l >> 24 & 0xFF)}\n\t_, err := w.Write(prefix)\n\tif err != nil {\n\t\treturn err\n\t}\n\t_, err = w.Write(t.Segments[segment])\n\treturn err\n}", "func (c *Client) IsV1API() bool {\n\treturn c.isV1\n}", "func (w *BodylessResponseWriter) WriteHeader(s int) {\n\tif w.wroteHeader {\n\t\treturn\n\t}\n\tw.wroteHeader = true\n\tw.status = s\n\n\tw.Header().Del(\"Content-Type\")\n\tw.ResponseWriter.WriteHeader(s)\n}", "func (rc *RentalCreate) SetCarID(u uuid.UUID) *RentalCreate {\n\trc.mutation.SetCarID(u)\n\treturn rc\n}", "func (c *Clientset) CoreV1() corev1.CoreV1Interface {\n\treturn c.coreV1\n}", "func AddCar(Myconn *adabas.Connection, vendeur string, modele string, couleur string) string {\n\t// creating Store Request with MAP\n\tstoreRequest, cerr := Myconn.CreateMapStoreRequest(&Carinfo{})\n\tif cerr != nil {\n\t\treturn cerr.Error()\n\t}\n\t// Assigning query's fields\n\terr := storeRequest.StoreFields(\"Vendor,Model,Color\")\n\tif err != nil {\n\t\treturn err.Error()\n\t}\n\tenreg := &carrec{Vendor: vendeur, Model: modele, Color: couleur}\n\tsterr := storeRequest.StoreData(enreg)\n\tif sterr != nil {\n\t\treturn sterr.Error()\n\t}\n\ttranserr := storeRequest.EndTransaction()\n\tif transerr != nil {\n\t\treturn transerr.Error()\n\t}\n\treturn \"\"\n}", "func (a 
*api) CreateClassroomV1(ctx context.Context,\n\treq *grpcApi.CreateClassroomV1Request) (res *grpcApi.CreateClassroomV1Response, err error) {\n\n\tdefer utils.LogGrpcCall(\"CreateClassroomV1\", &req, &res, &err)\n\tdefer func() {\n\t\t_ = a.logProducer.Send(producer.Created, req, res, err)\n\t}()\n\n\tif err = req.Validate(); err != nil {\n\n\t\terr = status.Error(codes.InvalidArgument, err.Error())\n\t\treturn nil, err\n\t}\n\n\tclassroomId, err := a.classroomRepo.AddClassroom(ctx, models.Classroom{\n\t\tTenantId: req.TenantId,\n\t\tCalendarId: req.CalendarId,\n\t})\n\tif err != nil {\n\n\t\terr = status.Error(codes.Unavailable, err.Error())\n\t\treturn nil, err\n\t}\n\n\tmetrics.IncCreateCounter()\n\n\tres = &grpcApi.CreateClassroomV1Response{ClassroomId: classroomId}\n\treturn res, nil\n}", "func writeResponseHeader(header ResponseHeader, rw *bufio.ReadWriter) error {\n\terr := rw.WriteByte(header.Magic)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = rw.WriteByte(header.Opcode)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = rw.WriteByte(GetNthByteFromUint16(header.KeyLength, 0))\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = rw.WriteByte(GetNthByteFromUint16(header.KeyLength, 1))\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = rw.WriteByte(header.ExtraLength)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = rw.WriteByte(header.DataType)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = rw.WriteByte(GetNthByteFromUint16(header.Status, 0))\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = rw.WriteByte(GetNthByteFromUint16(header.Status, 1))\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfor pos := 0; pos < 4; pos++ {\n\t\terr = rw.WriteByte(GetNthByteFromUint32(header.TotalBodyLength, pos))\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tfor pos := 0; pos < 4; pos++ {\n\t\terr = rw.WriteByte(GetNthByteFromUint32(header.Opaque, pos))\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tl := uint32(header.CAS >> 32)\n\tr := uint32(header.CAS & 
0x00000000ffffffff)\n\tfor pos := 0; pos < 4; pos++ {\n\t\terr = rw.WriteByte(GetNthByteFromUint32(l, pos))\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\tfor pos := 0; pos < 4; pos++ {\n\t\terr = rw.WriteByte(GetNthByteFromUint32(r, pos))\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}", "func (r *Response) WriteHeader(statusCode int) {\n\tif r.committed {\n\t\treturn\n\t}\n\tr.committed = true\n\tr.statusCode = statusCode\n\tr.ResponseWriter.WriteHeader(r.statusCode)\n}", "func (d *Device) SetDCDC1VoltageSet(a uint8) {\n\td.write1Byte(RegDCDC1VoltageSet, a)\n}", "func (c *V1) Encode(w io.Writer, prettify bool) error {\n\tencoder := json.NewEncoder(w)\n\tif prettify {\n\t\tencoder.SetIndent(\"\", strings.Repeat(\" \", 2))\n\t}\n\treturn encoder.Encode(c)\n}", "func InitV1(opts *InitOpts) error {\n\tctx := context.Background()\n\n\tctxResource := &model.ContextResource{}\n\tif err := ctxResource.UpdateNamespace(opts.Namespace); err != nil {\n\t\treturn err\n\t}\n\n\tif err := ctxResource.UpdateContext(opts.Context); err != nil {\n\t\treturn err\n\t}\n\tctxOptions := &contextCMD.ContextOptions{\n\t\tContext: ctxResource.Context,\n\t\tNamespace: ctxResource.Namespace,\n\t\tShow: true,\n\t}\n\tif err := contextCMD.NewContextCommand().Run(ctx, ctxOptions); err != nil {\n\t\treturn err\n\t}\n\n\topts.Language = os.Getenv(model.OktetoLanguageEnvVar)\n\n\tcwd, err := os.Getwd()\n\tif err != nil {\n\t\treturn err\n\t}\n\topts.Workdir = cwd\n\n\tmc := &ManifestCommand{}\n\tif err := mc.RunInitV1(ctx, opts); err != nil {\n\t\treturn err\n\t}\n\n\toktetoLog.Success(fmt.Sprintf(\"okteto manifest (%s) created\", opts.DevPath))\n\n\tif opts.DevPath == utils.DefaultManifest {\n\t\toktetoLog.Information(\"Run 'okteto up' to activate your development container\")\n\t} else {\n\t\toktetoLog.Information(\"Run 'okteto up -f %s' to activate your development container\", opts.DevPath)\n\t}\n\treturn nil\n}", "func (c *Clientset) CoordinationV1() 
coordinationv1.CoordinationV1Interface {\n\treturn c.coordinationV1\n}", "func ImplementationWrapAsn1Writer(pointer unsafe.Pointer) (Asn1Writer, error) {\n\tctx := (*C.vscf_impl_t)(pointer)\n\tif !C.vscf_asn1_writer_is_implemented(ctx) {\n\t\treturn nil, &FoundationError{-1, \"Given C implementation does not implement interface Asn1Writer.\"}\n\t}\n\n\timplTag := C.vscf_impl_tag(ctx)\n\tswitch implTag {\n\tcase C.vscf_impl_tag_ASN1WR:\n\t\treturn NewAsn1wrWithCtx(unsafe.Pointer(ctx)), nil\n\tdefault:\n\t\treturn nil, &FoundationError{-1, \"Unexpected C implementation cast to the Go implementation.\"}\n\t}\n}", "func (c *CarBuilder) SetStructure() BuildProcess {\n\tc.v.Structure = \"Car\"\n\treturn c\n}", "func (p *Plugin) IsV1() bool {\n\treturn false\n}", "func GenGenesisCar(cfg *GenesisCfg, out io.Writer) (*RenderedGenInfo, error) {\n\tctx := context.Background()\n\n\tbstore := blockstoreutil.WrapIDStore(blockstore.NewBlockstore(ds.NewMapDatastore()))\n\tdserv := dag.NewDAGService(bserv.New(bstore, offline.Exchange(bstore)))\n\tinfo, err := GenGen(ctx, cfg, bstore)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\t// Ignore cids that make it on chain but that should not be read through\n\t// and therefore don't have corresponding blocks in store\n\tignore := cid.NewSet()\n\tfor _, m := range cfg.Miners {\n\t\tfor _, comm := range m.CommittedSectors {\n\t\t\tignore.Add(comm.CommR)\n\t\t\tignore.Add(comm.CommD)\n\t\t\tignore.Add(comm.DealCfg.CommP)\n\t\t}\n\t}\n\n\tignoreWalkFunc := func(nd format.Node) (out []*format.Link, err error) {\n\t\tlinks := nd.Links()\n\t\tvar filteredLinks []*format.Link\n\t\tfor _, l := range links {\n\t\t\tif ignore.Has(l.Cid) {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tfilteredLinks = append(filteredLinks, l)\n\t\t}\n\n\t\treturn filteredLinks, nil\n\t}\n\n\treturn info, car.WriteCarWithWalker(ctx, dserv, []cid.Cid{info.GenesisCid}, out, ignoreWalkFunc)\n}", "func NewMBC1(rom []byte, ram []byte) (*MBC1, error) {\n\tif rom == nil || ram == nil 
{\n\t\tpanic(fmt.Errorf(\"the rom or ram are nil\"))\n\t}\n\n\tif len(rom) < 2*romBankSize {\n\t\treturn nil, errors.E(\"rom size insufficient: must contain at least two banks\", errors.Cart)\n\t}\n\n\t// The ROM bank is initialized to 0x01 to avoid access to ROM banks 0x00, 0x20, 0x40 and 0x60\n\t// from the switchable ROM addresses on startup.\n\t// The SetByte method verifies that the lower two bits of the bank are also != 00 to impose this\n\t// after startup.\n\treturn &MBC1{rom: rom, ram: ram, romBank: 0x01}, nil\n}", "func (s *SmartContract) CreateCar(ctx contractapi.TransactionContextInterface, carNumber string, make string, model string, colour string, owner string) error {\n\tcar := Car{\n\t\tMake: make,\n\t\tModel: model,\n\t\tColour: colour,\n\t\tOwner: owner,\n\t}\n\n\tcarAsBytes, _ := json.Marshal(car)\n\n\treturn ctx.GetStub().PutState(carNumber, carAsBytes)\n}", "func (d *Device) SetGPIO1Control(a uint8) {\n\td.write1Byte(RegGPIO1Control, a)\n}", "func (z *zpoolctl) Upgrade1(ctx context.Context, v bool) *execute {\n\targs := []string{\"upgrade\"}\n\tif v {\n\t\targs = append(args, \"-v\")\n\t}\n\treturn &execute{ctx: ctx, name: z.cmd, args: args}\n}", "func (r *response) WriteHeader(status int) {\n\tr.wrote = true\n\tr.rw.WriteHeader(status)\n}", "func NewV1Client(region string) (*vpcv1.VpcV1, error) {\n\tsvcEndpoint := \"https://\" + region + \".iaas.cloud.ibm.com/v1\"\n\n\treturn vpcv1.NewVpcV1(&vpcv1.VpcV1Options{\n\t\tServiceName: \"vpcs\",\n\t\tAuthenticator: iam.GetIAMAuth(),\n\t\tURL: svcEndpoint,\n\t})\n}", "func RVSA1() representation.Chooser {\n\treturn rvsa1{}\n}", "func (a *Client) UpdateIOAExclusionsV1(params *UpdateIOAExclusionsV1Params, opts ...ClientOption) (*UpdateIOAExclusionsV1OK, error) {\n\t// TODO: Validate the params before sending\n\tif params == nil {\n\t\tparams = NewUpdateIOAExclusionsV1Params()\n\t}\n\top := &runtime.ClientOperation{\n\t\tID: \"updateIOAExclusionsV1\",\n\t\tMethod: \"PATCH\",\n\t\tPathPattern: 
\"/policy/entities/ioa-exclusions/v1\",\n\t\tProducesMediaTypes: []string{\"application/json\"},\n\t\tConsumesMediaTypes: []string{\"application/json\"},\n\t\tSchemes: []string{\"https\"},\n\t\tParams: params,\n\t\tReader: &UpdateIOAExclusionsV1Reader{formats: a.formats},\n\t\tContext: params.Context,\n\t\tClient: params.HTTPClient,\n\t}\n\tfor _, opt := range opts {\n\t\topt(op)\n\t}\n\n\tresult, err := a.transport.Submit(op)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tsuccess, ok := result.(*UpdateIOAExclusionsV1OK)\n\tif ok {\n\t\treturn success, nil\n\t}\n\t// unexpected success response\n\t// safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue\n\tmsg := fmt.Sprintf(\"unexpected success response for updateIOAExclusionsV1: API contract not enforced by server. Client expected to get an error, but got: %T\", result)\n\tpanic(msg)\n}", "func (t *ssh2Server) WriteHeader(s *Stream, md metadata.MD) error {\n\n\tlogrus.Debugln(\"WriteHeader\")\n\treturn nil\n\n\t// =================================== original code ======================================\n\t// s.mu.Lock()\n\t// if s.headerOk || s.state == streamDone {\n\t// \ts.mu.Unlock()\n\t// \treturn ErrIllegalHeaderWrite\n\t// }\n\t// s.headerOk = true\n\t// s.mu.Unlock()\n\t// if _, err := wait(s.ctx, t.shutdownChan, t.writableChan); err != nil {\n\t// \treturn err\n\t// }\n\t// t.hBuf.Reset()\n\t// t.hEnc.WriteField(hpack.HeaderField{Name: \":status\", Value: \"200\"})\n\t// t.hEnc.WriteField(hpack.HeaderField{Name: \"content-type\", Value: \"application/grpc\"})\n\t// for k, v := range md {\n\t// \tt.hEnc.WriteField(hpack.HeaderField{Name: k, Value: v})\n\t// }\n\t// if err := t.writeHeaders(s, t.hBuf, false); err != nil {\n\t// \treturn err\n\t// }\n\t// t.writableChan <- 0\n\t// return nil\n}", "func (c *Conn) WriteHeader(header protocommon.Header) error {\n\terr := protocommon.HeaderEncode(c.writeBuf, header)\n\tif err != nil 
{\n\t\treturn err\n\t}\n\treturn c.writeBuf.Flush()\n}", "func (cs *Cars) ListCars(stub shim.ChaincodeStubInterface) ([]byte, error) {\n\tidxCarsByte, _ := stub.GetState(\"idx_Cars\")\n\tcarIDs := strings.Split(string(idxCarsByte), \",\")\n\tcarList := \"{\\\"Cars\\\":\"\n\tfor i, carID := range carIDs {\n\t\tif i != 0 {\n\t\t\tcarList = carList + \",\"\n\t\t}\n\t\tcJsonIndent, _ := stub.GetState(carID)\n\t\tcarList = carList + string(cJsonIndent)\n\t}\n\tcarList = carList + \"\\n}\"\n\treturn []byte(carList), nil\n}", "func (w *responseWriter) WriteHeader(s int) {\n\tif w.wroteHeader {\n\t\treturn\n\t}\n\tw.wroteHeader = true\n\tw.status = s\n\n\tif s == http.StatusNoContent {\n\t\tw.ResponseWriter = &BodylessResponseWriter{ResponseWriter: w.ResponseWriter}\n\t}\n\n\t// Set Content-Type header if missing and not using the BodylessResponseWriter.\n\tif _, ok := w.ResponseWriter.(*BodylessResponseWriter); !ok && w.Header().Get(\"Content-Type\") == \"\" {\n\t\tw.Header().Set(\"Content-Type\", \"text/plain\")\n\t}\n\tw.ResponseWriter.WriteHeader(s)\n}", "func ConvertToCAR(ctx context.Context, in io.Reader, out io.Writer) (cid.Cid, uint64, error) {\n\treturn convertToCAR(ctx, in, out, false)\n}", "func (w *Writer) WriteHeader(hdr *index.Header) error {\n\t// Flush out preceding file's content before starting new range.\n\tif !w.first {\n\t\tif err := w.tw.Flush(); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\tw.first = false\n\t// Setup index header for next file.\n\t// (bryce) might want to deep copy the passed in header.\n\tw.hdr = &index.Header{\n\t\tHdr: hdr.Hdr,\n\t\tIdx: &index.Index{DataOp: &index.DataOp{}},\n\t}\n\tw.cw.StartRange(w.callback(w.hdr))\n\tif err := w.tw.WriteHeader(w.hdr.Hdr); err != nil {\n\t\treturn err\n\t}\n\t// Setup first tag for header.\n\tw.hdr.Idx.DataOp.Tags = []*index.Tag{&index.Tag{Id: headerTag, SizeBytes: w.cw.RangeSize()}}\n\treturn nil\n}", "func (d *Encoder) All(v interface{}) error {\n\theader := deriveHeader(v)\n\trecord := 
makeRecords(v, header)\n\tif !d.headWritten {\n\t\td.Csvwriter.Write(header)\n\t\td.headWritten = true\n\t}\n\n\terr := d.Csvwriter.WriteAll(record)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}", "func (w *Writer) WriteHeader(hdr *Header) (err error) {\n\tif w.closed {\n\t\treturn ErrWriteAfterClose\n\t}\n\tif w.err == nil {\n\t\tw.Flush()\n\t}\n\tif w.err != nil {\n\t\treturn w.err\n\t}\n\n\tif hdr.Name != headerEOF {\n\t\t// TODO: should we be mutating hdr here?\n\t\t// ensure all inodes are unique\n\t\tw.inode++\n\t\tif hdr.Inode == 0 {\n\t\t\thdr.Inode = w.inode\n\t\t}\n\n\t\t// ensure file type is set\n\t\tif hdr.Mode&^ModePerm == 0 {\n\t\t\thdr.Mode |= ModeRegular\n\t\t}\n\n\t\t// ensure regular files have at least 1 inbound link\n\t\tif hdr.Links < 1 && hdr.Mode.IsRegular() {\n\t\t\thdr.Links = 1\n\t\t}\n\t}\n\n\tw.nb = hdr.Size\n\tw.pad, w.err = writeSVR4Header(w.w, hdr)\n\treturn\n}", "func (rr *responseRecorder) WriteHeader(statusCode int) {\n\tif rr.wroteHeader {\n\t\treturn\n\t}\n\n\t// save statusCode always, in case HTTP middleware upgrades websocket\n\t// connections by manually setting headers and writing status 101\n\trr.statusCode = statusCode\n\n\t// 1xx responses aren't final; just informational\n\tif statusCode < 100 || statusCode > 199 {\n\t\trr.wroteHeader = true\n\n\t\t// decide whether we should buffer the response\n\t\tif rr.shouldBuffer == nil {\n\t\t\trr.stream = true\n\t\t} else {\n\t\t\trr.stream = !rr.shouldBuffer(rr.statusCode, rr.ResponseWriterWrapper.Header())\n\t\t}\n\t}\n\n\t// if informational or not buffered, immediately write header\n\tif rr.stream || (100 <= statusCode && statusCode <= 199) {\n\t\trr.ResponseWriterWrapper.WriteHeader(statusCode)\n\t}\n}", "func (c *ResponseCapture) WriteHeader(statusCode int) {\n\tc.status = statusCode\n\tc.wroteHeader = true\n\tc.ResponseWriter.WriteHeader(statusCode)\n}", "func (oo *OmciCC) SendCreateDot1PMapper(ctx context.Context, timeout int, highPrio bool,\n\taInstID uint16, 
rxChan chan Message) (*me.ManagedEntity, error) {\n\ttid := oo.GetNextTid(highPrio)\n\tlogger.Debugw(ctx, \"send .1pMapper-Create-msg:\", log.Fields{\"device-id\": oo.deviceID,\n\t\t\"SequNo\": strconv.FormatInt(int64(tid), 16), \"InstId\": strconv.FormatInt(int64(aInstID), 16)})\n\n\tmeParams := me.ParamData{\n\t\tEntityID: aInstID,\n\t\tAttributes: me.AttributeValueMap{\n\t\t\t//workaround for unsuitable omci-lib default values, cmp VOL-3729\n\t\t\tme.Ieee8021PMapperServiceProfile_TpPointer: 0xFFFF,\n\t\t\tme.Ieee8021PMapperServiceProfile_InterworkTpPointerForPBitPriority0: 0xFFFF,\n\t\t\tme.Ieee8021PMapperServiceProfile_InterworkTpPointerForPBitPriority1: 0xFFFF,\n\t\t\tme.Ieee8021PMapperServiceProfile_InterworkTpPointerForPBitPriority2: 0xFFFF,\n\t\t\tme.Ieee8021PMapperServiceProfile_InterworkTpPointerForPBitPriority3: 0xFFFF,\n\t\t\tme.Ieee8021PMapperServiceProfile_InterworkTpPointerForPBitPriority4: 0xFFFF,\n\t\t\tme.Ieee8021PMapperServiceProfile_InterworkTpPointerForPBitPriority5: 0xFFFF,\n\t\t\tme.Ieee8021PMapperServiceProfile_InterworkTpPointerForPBitPriority6: 0xFFFF,\n\t\t\tme.Ieee8021PMapperServiceProfile_InterworkTpPointerForPBitPriority7: 0xFFFF,\n\t\t},\n\t}\n\tmeInstance, omciErr := me.NewIeee8021PMapperServiceProfile(meParams)\n\tif omciErr.GetError() == nil {\n\t\t//we have to set all 'untouched' parameters to default by some additional option parameter!!\n\t\tomciLayer, msgLayer, err := oframe.EncodeFrame(meInstance, omci.CreateRequestType,\n\t\t\toframe.TransactionID(tid), oframe.AddDefaults(true))\n\t\tif err != nil {\n\t\t\tlogger.Errorw(ctx, \"Cannot encode .1pMapper for create\", log.Fields{\n\t\t\t\t\"Err\": err, \"device-id\": oo.deviceID})\n\t\t\treturn nil, err\n\t\t}\n\n\t\tpkt, err := SerializeOmciLayer(ctx, omciLayer, msgLayer)\n\t\tif err != nil {\n\t\t\tlogger.Errorw(ctx, \"Cannot serialize .1pMapper create\", log.Fields{\n\t\t\t\t\"Err\": err, \"device-id\": oo.deviceID})\n\t\t\treturn nil, err\n\t\t}\n\n\t\tomciRxCallbackPair := 
CallbackPair{\n\t\t\tCbKey: tid,\n\t\t\tCbEntry: CallbackPairEntry{rxChan, oo.receiveOmciResponse, true},\n\t\t}\n\t\terr = oo.Send(ctx, pkt, timeout, CDefaultRetries, highPrio, omciRxCallbackPair)\n\t\tif err != nil {\n\t\t\tlogger.Errorw(ctx, \"Cannot send .1pMapper create\", log.Fields{\n\t\t\t\t\"Err\": err, \"device-id\": oo.deviceID})\n\t\t\treturn nil, err\n\t\t}\n\t\tlogger.Debug(ctx, \"send .1pMapper-create-msg done\")\n\t\treturn meInstance, nil\n\t}\n\tlogger.Errorw(ctx, \"Cannot generate .1pMapper\", log.Fields{\n\t\t\"Err\": omciErr.GetError(), \"device-id\": oo.deviceID})\n\treturn nil, omciErr.GetError()\n}", "func (x *MQQueueManager) Put1(good *MQOD, gomd *MQMD,\n\tgopmo *MQPMO, buffer []byte) error {\n\tvar mqrc C.MQLONG\n\tvar mqcc C.MQLONG\n\tvar mqmd C.MQMD\n\tvar mqpmo C.MQPMO\n\tvar mqod C.MQOD\n\tvar ptr C.PMQVOID\n\n\terr := checkMD(gomd, \"MQPUT1\")\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tcopyODtoC(&mqod, good)\n\tcopyMDtoC(&mqmd, gomd)\n\tcopyPMOtoC(&mqpmo, gopmo)\n\n\tbufflen := len(buffer)\n\n\tif bufflen > 0 {\n\t\tptr = (C.PMQVOID)(unsafe.Pointer(&buffer[0]))\n\t} else {\n\t\tptr = nil\n\t}\n\n\tC.MQPUT1(x.hConn, (C.PMQVOID)(unsafe.Pointer(&mqod)),\n\t\t(C.PMQVOID)(unsafe.Pointer(&mqmd)),\n\t\t(C.PMQVOID)(unsafe.Pointer(&mqpmo)),\n\t\t(C.MQLONG)(bufflen),\n\t\tptr,\n\t\t&mqcc, &mqrc)\n\n\tcopyODfromC(&mqod, good)\n\tcopyMDfromC(&mqmd, gomd)\n\tcopyPMOfromC(&mqpmo, gopmo)\n\n\tmqreturn := MQReturn{MQCC: int32(mqcc),\n\t\tMQRC: int32(mqrc),\n\t\tverb: \"MQPUT1\",\n\t}\n\n\tif mqcc != C.MQCC_OK {\n\t\treturn &mqreturn\n\t}\n\n\treturn nil\n\n}", "func (mb *client) WriteSingleRegister(address, value uint16) (results []byte, err error) {\n\trequest := ProtocolDataUnit{\n\t\tFunctionCode: FuncCodeWriteSingleRegister,\n\t\tData: dataBlock(address, value),\n\t}\n\tresponse, err := mb.send(&request)\n\tif err != nil {\n\t\treturn\n\t}\n\t// Fixed response length\n\tif len(response.Data) != 4 {\n\t\terr = fmt.Errorf(\"modbus: response data 
size '%v' does not match expected '%v'\", len(response.Data), 4)\n\t\treturn\n\t}\n\trespValue := binary.BigEndian.Uint16(response.Data)\n\tif address != respValue {\n\t\terr = fmt.Errorf(\"modbus: response address '%v' does not match request '%v'\", respValue, address)\n\t\treturn\n\t}\n\tresults = response.Data[2:]\n\trespValue = binary.BigEndian.Uint16(results)\n\tif value != respValue {\n\t\terr = fmt.Errorf(\"modbus: response value '%v' does not match request '%v'\", respValue, value)\n\t\treturn\n\t}\n\treturn\n}", "func VersionCar() string {\n\tversions()\n\tmessage := \"Aayez anhy Version ya Amar?\\n\"\n\n\tfor key, _ := range CarVersionMap {\n\t\tfmt.Println(key)\n\t\tmessage += \" \" + key + \" ,\\n\"\n\t}\n\treturn message\n\n}", "func (w responseWriterNoBody) WriteHeader(statusCode int) {\n\tw.ResponseWriter.WriteHeader(statusCode)\n}", "func NewObjectCDNV1(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) {\n\teo.ApplyDefaults(\"rax:object-cdn\")\n\turl, err := client.EndpointLocator(eo)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &gophercloud.ServiceClient{ProviderClient: client, Endpoint: url}, nil\n}", "func (a *api) UpdateClassroomV1(ctx context.Context,\n\treq *grpcApi.UpdateClassroomV1Request) (res *grpcApi.UpdateClassroomV1Response, err error) {\n\n\tdefer utils.LogGrpcCall(\"UpdateClassroomV1\", &req, &res, &err)\n\tdefer func() {\n\t\t_ = a.logProducer.Send(producer.Updated, req, res, err)\n\t}()\n\n\tif err = req.Validate(); err != nil {\n\n\t\terr = status.Error(codes.InvalidArgument, err.Error())\n\t\treturn nil, err\n\t}\n\n\tclassroom := models.FromProtoClassroom(req.Classroom)\n\n\tfound, err := a.classroomRepo.UpdateClassroom(ctx, *classroom)\n\tif err != nil {\n\n\t\terr = status.Error(codes.Unavailable, err.Error())\n\t\treturn nil, err\n\t}\n\n\tif found {\n\t\tmetrics.IncUpdateCounter()\n\t}\n\n\tres = &grpcApi.UpdateClassroomV1Response{Found: found}\n\treturn res, 
nil\n}", "func (res Responder) WriteOne() int {\n\tn, _ := res.b.Write(binONE)\n\treturn n\n}", "func (a *Client) DeleteIOAExclusionsV1(params *DeleteIOAExclusionsV1Params, opts ...ClientOption) (*DeleteIOAExclusionsV1OK, error) {\n\t// TODO: Validate the params before sending\n\tif params == nil {\n\t\tparams = NewDeleteIOAExclusionsV1Params()\n\t}\n\top := &runtime.ClientOperation{\n\t\tID: \"deleteIOAExclusionsV1\",\n\t\tMethod: \"DELETE\",\n\t\tPathPattern: \"/policy/entities/ioa-exclusions/v1\",\n\t\tProducesMediaTypes: []string{\"application/json\"},\n\t\tConsumesMediaTypes: []string{\"application/json\"},\n\t\tSchemes: []string{\"https\"},\n\t\tParams: params,\n\t\tReader: &DeleteIOAExclusionsV1Reader{formats: a.formats},\n\t\tContext: params.Context,\n\t\tClient: params.HTTPClient,\n\t}\n\tfor _, opt := range opts {\n\t\topt(op)\n\t}\n\n\tresult, err := a.transport.Submit(op)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tsuccess, ok := result.(*DeleteIOAExclusionsV1OK)\n\tif ok {\n\t\treturn success, nil\n\t}\n\t// unexpected success response\n\t// safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue\n\tmsg := fmt.Sprintf(\"unexpected success response for deleteIOAExclusionsV1: API contract not enforced by server. 
Client expected to get an error, but got: %T\", result)\n\tpanic(msg)\n}", "func CreateCar(w http.ResponseWriter, r *http.Request) {\n\t// Set the way we will serve data between frontend and backend\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\t// Allow cross origin connections making the routes accessible for everyone\n\tw.Header().Set(\"Access-Control-Allow-Origin\", \"*\")\n\t// Allow the server to perform post operation\n\tw.Header().Set(\"Access-Control-Allow-Methods\", \"POST\")\n\t// Allow the content type that is specified by client to be processed on server\n\tw.Header().Set(\"Access-Control-Allow-Headers\", \"Content-Type\")\n\t// Declare an empty car\n\tvar car models.Car\n\t// Take the car json from the client and decode it into car struct\n\t_ = json.NewDecoder(r.Body).Decode(&car)\n\tpayload := createCar(car)\n\tjson.NewEncoder(w).Encode(payload)\n}", "func (a *PipelineControllerApiService) CancelPipelineUsingPUT1(ctx _context.Context, id string) apiCancelPipelineUsingPUT1Request {\n\treturn apiCancelPipelineUsingPUT1Request{\n\t\tapiService: a,\n\t\tctx: ctx,\n\t\tid: id,\n\t}\n}", "func (a *Client) V1Version(params *V1VersionParams) (*V1VersionOK, error) {\n\t// TODO: Validate the params before sending\n\tif params == nil {\n\t\tparams = NewV1VersionParams()\n\t}\n\n\tresult, err := a.transport.Submit(&runtime.ClientOperation{\n\t\tID: \"V1Version\",\n\t\tMethod: \"GET\",\n\t\tPathPattern: \"/v1/version\",\n\t\tProducesMediaTypes: []string{\"text/plain\"},\n\t\tConsumesMediaTypes: []string{\"text/plain\"},\n\t\tSchemes: []string{\"http\", \"https\"},\n\t\tParams: params,\n\t\tReader: &V1VersionReader{formats: a.formats},\n\t\tContext: params.Context,\n\t\tClient: params.HTTPClient,\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn result.(*V1VersionOK), nil\n\n}", "func carRegister(c router.Context) (interface{}, error) {\n\t// arg name defined in router method definition\n\tp := c.Arg(`car`).(CarPayload)\n\n\tt, _ := 
c.Time() // tx time\n\tcar := &Car{ // data for chaincode state\n\t\tId: p.Id,\n\t\tTitle: p.Title,\n\t\tOwner: p.Owner,\n\t\tUpdatedAt: t,\n\t}\n\n\t// trigger event\n\tc.SetEvent(CarRegisteredEvent, car)\n\n\treturn car, // peer.Response payload will be json serialized car data\n\t\t//put json serialized data to state\n\t\t// create composite key using CarKeyPrefix and car.Id\n\t\tc.State().Insert(car)\n}", "func (o *IndicatorCreateV1Params) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {\n\n\tif err := r.SetTimeout(o.timeout); err != nil {\n\t\treturn err\n\t}\n\tvar res []error\n\tif o.Body != nil {\n\t\tif err := r.SetBodyParam(o.Body); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tif o.IgnoreWarnings != nil {\n\n\t\t// query param ignore_warnings\n\t\tvar qrIgnoreWarnings bool\n\n\t\tif o.IgnoreWarnings != nil {\n\t\t\tqrIgnoreWarnings = *o.IgnoreWarnings\n\t\t}\n\t\tqIgnoreWarnings := swag.FormatBool(qrIgnoreWarnings)\n\t\tif qIgnoreWarnings != \"\" {\n\n\t\t\tif err := r.SetQueryParam(\"ignore_warnings\", qIgnoreWarnings); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\n\tif o.Retrodetects != nil {\n\n\t\t// query param retrodetects\n\t\tvar qrRetrodetects bool\n\n\t\tif o.Retrodetects != nil {\n\t\t\tqrRetrodetects = *o.Retrodetects\n\t\t}\n\t\tqRetrodetects := swag.FormatBool(qrRetrodetects)\n\t\tif qRetrodetects != \"\" {\n\n\t\t\tif err := r.SetQueryParam(\"retrodetects\", qRetrodetects); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}", "func (b *CPUBus) Write(addr uint16, v uint8) {\n\t// Gate VRAM and OAM off from the CPU if necessary.\n\tif b.mmu.ppu != nil {\n\t\tif addr >= AddrVRAM && addr < AddrCartRAM && !b.mmu.ppu.VRAMAccessible() {\n\t\t\treturn\n\t\t}\n\t\tif addr >= AddrOAM && addr < AddrOAM && !b.mmu.ppu.OAMAccessible() {\n\t\t\treturn\n\t\t}\n\t}\n\n\tb.mmu.write(addr, v)\n}", "func (a *Client) 
CmsBlockRepositoryV1SavePut(params *CmsBlockRepositoryV1SavePutParams) (*CmsBlockRepositoryV1SavePutOK, error) {\n\t// TODO: Validate the params before sending\n\tif params == nil {\n\t\tparams = NewCmsBlockRepositoryV1SavePutParams()\n\t}\n\n\tresult, err := a.transport.Submit(&runtime.ClientOperation{\n\t\tID: \"cmsBlockRepositoryV1SavePut\",\n\t\tMethod: \"PUT\",\n\t\tPathPattern: \"/V1/cmsBlock/{id}\",\n\t\tProducesMediaTypes: []string{\"\"},\n\t\tConsumesMediaTypes: []string{\"\"},\n\t\tSchemes: []string{\"http\"},\n\t\tParams: params,\n\t\tReader: &CmsBlockRepositoryV1SavePutReader{formats: a.formats},\n\t\tContext: params.Context,\n\t\tClient: params.HTTPClient,\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn result.(*CmsBlockRepositoryV1SavePutOK), nil\n\n}", "func (a *Client) V1Version(params *V1VersionParams) (*V1VersionOK, error) {\n\t// TODO: Validate the params before sending\n\tif params == nil {\n\t\tparams = NewV1VersionParams()\n\t}\n\n\tresult, err := a.transport.Submit(&runtime.ClientOperation{\n\t\tID: \"V1Version\",\n\t\tMethod: \"GET\",\n\t\tPathPattern: \"/v1/version\",\n\t\tProducesMediaTypes: []string{\"text/plain\"},\n\t\tConsumesMediaTypes: []string{\"text/plain\"},\n\t\tSchemes: []string{\"http\", \"https\"},\n\t\tParams: params,\n\t\tReader: &V1VersionReader{formats: a.formats},\n\t\tContext: params.Context,\n\t\tClient: params.HTTPClient,\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tsuccess, ok := result.(*V1VersionOK)\n\tif ok {\n\t\treturn success, nil\n\t}\n\t// unexpected success response\n\tunexpectedSuccess := result.(*V1VersionDefault)\n\treturn nil, runtime.NewAPIError(\"unexpected success response: content available as default response in error\", unexpectedSuccess, unexpectedSuccess.Code())\n}", "func writeBitacora(file *os.File, index int64, log *bitacora) {\n\tfile.Seek(index, 0)\n\t//Empezamos el proceso de guardar en binario la data en memoria del struct\n\tvar binaryDisc 
bytes.Buffer\n\tbinary.Write(&binaryDisc, binary.BigEndian, log)\n\twriteNextBytes(file, binaryDisc.Bytes())\n}", "func registerModelTraffic1Flags(depth int, cmdPrefix string, cmd *cobra.Command) error {\n\n\tif err := registerTraffic1Download(depth, cmdPrefix, cmd); err != nil {\n\t\treturn err\n\t}\n\n\tif err := registerTraffic1Time(depth, cmdPrefix, cmd); err != nil {\n\t\treturn err\n\t}\n\n\tif err := registerTraffic1Upload(depth, cmdPrefix, cmd); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}", "func (k *Keptn) APIV1() api.KeptnInterface {\n\treturn k.api\n}", "func PostCar(w http.ResponseWriter, r *http.Request) {\n\tclaims := GetToken(jwtauth.TokenFromHeader(r))\n\n\tvar cars []Car\n\tbody, err := ioutil.ReadAll(r.Body)\n\tif err != nil {\n\t\thttp.Error(w, \"can't read body\", http.StatusBadRequest)\n\t\tpanic(err)\n\t}\n\n\terr = json.Unmarshal(body, &cars)\n\tif err != nil {\n\t\thttp.Error(w, \"wrong body structure\", http.StatusBadRequest)\n\t\tpanic(err)\n\t}\n\n\tfor i := range cars {\n\t\tsql := \"INSERT INTO public.cars(\" +\n\t\t\t\"model, manufacturer, plate, color, caradded, year, fk_user, vin)\" +\n\t\t\t\"VALUES ($1, $2, $3, $4, CURRENT_DATE, $5, $6, $7);\"\n\n\t\terr = Database.QueryRow(sql, cars[i].Model, cars[i].Manufacturer, cars[i].Plate, cars[i].Color, cars[i].Year, claims[\"id\"], cars[i].Vin).Err()\n\t\tif err != nil {\n\t\t\thttp.Error(w, \"wrong body structure\", http.StatusBadRequest)\n\t\t\tpanic(err)\n\t\t}\n\t}\n\tw.WriteHeader(http.StatusCreated)\n}", "func (asc *AsenaSmartContract) QueryAllCars(ctx contractapi.TransactionContextInterface, args []string) error {\n\n\treturn nil\n}", "func (*Car) Descriptor() ([]byte, []int) {\n\treturn file_carz_proto_rawDescGZIP(), []int{1}\n}", "func (j *JSendWriterBuffer) WriteHeader(statusCode int) {\n\tj.responseWriter.WriteHeader(statusCode)\n}", "func (_e *MockWriteBufferJsonBased_Expecter) WriteInt16(logicalName interface{}, bitLength interface{}, value interface{}, writerArgs 
...interface{}) *MockWriteBufferJsonBased_WriteInt16_Call {\n\treturn &MockWriteBufferJsonBased_WriteInt16_Call{Call: _e.mock.On(\"WriteInt16\",\n\t\tappend([]interface{}{logicalName, bitLength, value}, writerArgs...)...)}\n}", "func (a *Client) CreateCredentialV1(params *CreateCredentialV1Params) (*CreateCredentialV1OK, error) {\n\t// TODO: Validate the params before sending\n\tif params == nil {\n\t\tparams = NewCreateCredentialV1Params()\n\t}\n\n\tresult, err := a.transport.Submit(&runtime.ClientOperation{\n\t\tID: \"createCredentialV1\",\n\t\tMethod: \"POST\",\n\t\tPathPattern: \"/v1/credentials\",\n\t\tProducesMediaTypes: []string{\"application/json\"},\n\t\tConsumesMediaTypes: []string{\"application/json\"},\n\t\tSchemes: []string{\"http\", \"https\"},\n\t\tParams: params,\n\t\tReader: &CreateCredentialV1Reader{formats: a.formats},\n\t\tContext: params.Context,\n\t\tClient: params.HTTPClient,\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn result.(*CreateCredentialV1OK), nil\n\n}" ]
[ "0.48664775", "0.44362253", "0.43605676", "0.43476164", "0.4318693", "0.42778337", "0.42568678", "0.4225701", "0.4217781", "0.41908678", "0.41819924", "0.41723937", "0.41719002", "0.41454837", "0.41401184", "0.4108826", "0.41082826", "0.4107811", "0.41051665", "0.40967038", "0.40580535", "0.40521723", "0.40388066", "0.40175205", "0.3992928", "0.3955306", "0.39389408", "0.39358416", "0.39285725", "0.39253452", "0.3910767", "0.39009103", "0.38891062", "0.38713565", "0.38712138", "0.38668376", "0.38611388", "0.38584313", "0.38524273", "0.38515586", "0.38501245", "0.38452137", "0.38435796", "0.38339466", "0.38333103", "0.38329363", "0.38163406", "0.38143417", "0.38127536", "0.38055724", "0.3801852", "0.38014343", "0.38002422", "0.3791948", "0.3775124", "0.37738147", "0.3768505", "0.37660995", "0.3757572", "0.37467152", "0.37372935", "0.37348953", "0.3734253", "0.37306267", "0.37279645", "0.37208253", "0.37201226", "0.3718483", "0.37142712", "0.37126788", "0.37081617", "0.37064162", "0.3696917", "0.36968547", "0.3696271", "0.36872035", "0.36833364", "0.36832616", "0.36801222", "0.3679878", "0.3679603", "0.36761034", "0.3670744", "0.36676556", "0.36652827", "0.36617583", "0.3651883", "0.3648777", "0.36483365", "0.36449426", "0.36401987", "0.3639062", "0.36377156", "0.36301425", "0.3627037", "0.36238784", "0.3622887", "0.36224213", "0.3619914", "0.36159506" ]
0.8315218
0
AllowDuplicatePuts is a write option which makes a CAR interface (blockstore or storage) not deduplicate blocks in Put and PutMany. The default is to deduplicate, which matches the current semantics of goipfsblockstore v1. Note that this option only affects the storage interfaces (blockstore or storage), and is ignored by the root gocar/v2 package.
AllowDuplicatePuts — это опция записи, которая заставляет интерфейс CAR (blockstore или storage) не удалять дубликаты блоков при Put и PutMany. По умолчанию происходит удаление дубликатов, что соответствует текущей семантике goipfsblockstore v1. Приметь, что данная опция влияет только на интерфейсы хранения (blockstore или storage), и игнорируется пакетом root gocar/v2.
func AllowDuplicatePuts(allow bool) Option { return func(o *Options) { o.BlockstoreAllowDuplicatePuts = allow } }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func DisallowDuplicateKey() DecodeOption {\n\treturn func(d *Decoder) error {\n\t\td.disallowDuplicateKey = true\n\t\treturn nil\n\t}\n}", "func (c *Client) PutDuplicate(oldName, newName upspin.PathName) (*upspin.DirEntry, error) {\n\tconst op errors.Op = \"client.PutDuplicate\"\n\tm, s := newMetric(op)\n\tdefer m.Done()\n\n\treturn c.dupOrRename(op, oldName, newName, false, s)\n}", "func (blk *Block) duplicate() *Block {\n\tdup := &Block{}\n\n\t// Copy over.\n\t*dup = *blk\n\n\tdupContents := contentstream.ContentStreamOperations{}\n\tfor _, op := range *blk.contents {\n\t\tdupContents = append(dupContents, op)\n\t}\n\tdup.contents = &dupContents\n\n\treturn dup\n}", "func (handle Handle) Duplicate(src, dest Handle, access DuplicateAccess) (Handle, error) {\n\tvar destHandle Handle\n\terrno, _, err := duplicateHandle.Call(\n\t\tuintptr(src),\n\t\tuintptr(handle),\n\t\tuintptr(dest),\n\t\tuintptr(unsafe.Pointer(&destHandle)),\n\t\tuintptr(access),\n\t\t0,\n\t\t0,\n\t)\n\tif winerrno.Errno(errno) != winerrno.Success {\n\t\treturn destHandle, nil\n\t}\n\treturn Handle(0), os.NewSyscallError(\"DuplicateHandle\", err)\n}", "func AllowOverwrite(existing, new Source) bool {\n\tswitch existing {\n\n\t// KubeAPIServer state can only be overwritten by other kube-apiserver\n\t// state.\n\tcase KubeAPIServer:\n\t\treturn new == KubeAPIServer\n\n\t// Local state can only be overwritten by other local state or\n\t// kube-apiserver state.\n\tcase Local:\n\t\treturn new == Local || new == KubeAPIServer\n\n\t// KVStore can be overwritten by other kvstore, local state, or\n\t// kube-apiserver state.\n\tcase KVStore:\n\t\treturn new == KVStore || new == Local || new == KubeAPIServer\n\n\t// Custom-resource state can be overwritten by everything except\n\t// generated, unspecified and Kubernetes (non-CRD) state\n\tcase CustomResource:\n\t\treturn new != Generated && new != Unspec && new != Kubernetes\n\n\t// Kubernetes state can be overwritten by everything except generated\n\t// 
and unspecified state\n\tcase Kubernetes:\n\t\treturn new != Generated && new != Unspec\n\n\t// Generated can be overwritten by everything except by Unspecified\n\tcase Generated:\n\t\treturn new != Unspec\n\n\t// Unspecified state can be overwritten by everything\n\tcase Unspec:\n\t\treturn true\n\t}\n\n\treturn true\n}", "func (o TransferJobTransferSpecTransferOptionsOutput) OverwriteObjectsAlreadyExistingInSink() pulumi.BoolPtrOutput {\n\treturn o.ApplyT(func(v TransferJobTransferSpecTransferOptions) *bool { return v.OverwriteObjectsAlreadyExistingInSink }).(pulumi.BoolPtrOutput)\n}", "func (f *PushFilter) Duplicate() *PushFilter {\n\n\tnf := NewPushFilter()\n\n\tfor id, types := range f.Identities {\n\t\tnf.FilterIdentity(id, types...)\n\t}\n\n\tfor k, v := range f.Params {\n\t\tnf.SetParameter(k, v...)\n\t}\n\n\treturn nf\n}", "func (o TransferJobTransferSpecTransferOptionsPtrOutput) OverwriteObjectsAlreadyExistingInSink() pulumi.BoolPtrOutput {\n\treturn o.ApplyT(func(v *TransferJobTransferSpecTransferOptions) *bool {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn v.OverwriteObjectsAlreadyExistingInSink\n\t}).(pulumi.BoolPtrOutput)\n}", "func IsDup(err error) bool {\n\twriteException, ok := err.(mongo.WriteException)\n\n\tif !ok {\n\t\treturn false\n\t}\n\n\tfor _, writeError := range writeException.WriteErrors {\n\t\treturn writeError.Code == 11000 || writeError.Code == 11001 || writeError.Code == 12582 || writeError.Code == 16460 && strings.Contains(writeError.Message, \" E11000 \")\n\t}\n\n\treturn false\n}", "func (me TxsdFeConvolveMatrixTypeEdgeMode) IsDuplicate() bool { return me.String() == \"duplicate\" }", "func (cache *diskBlockCacheWrapped) Put(ctx context.Context, tlfID tlf.ID,\n\tblockID kbfsblock.ID, buf []byte,\n\tserverHalf kbfscrypto.BlockCryptKeyServerHalf) error {\n\t// This is a write operation but we are only reading the pointers to the\n\t// caches. 
So we use a read lock.\n\tcache.mtx.RLock()\n\tdefer cache.mtx.RUnlock()\n\tif cache.config.IsSyncedTlf(tlfID) && cache.syncCache != nil {\n\t\tworkingSetCache := cache.workingSetCache\n\t\terr := cache.syncCache.Put(ctx, tlfID, blockID, buf, serverHalf)\n\t\tif err == nil {\n\t\t\tgo workingSetCache.Delete(ctx, []kbfsblock.ID{blockID})\n\t\t\treturn nil\n\t\t}\n\t\t// Otherwise drop through and put it into the working set cache.\n\t}\n\t// TODO: Allow more intelligent transitioning from the sync cache to\n\t// the working set cache.\n\tif cache.syncCache != nil {\n\t\tsyncCache := cache.syncCache\n\t\tgo syncCache.Delete(ctx, []kbfsblock.ID{blockID})\n\t}\n\treturn cache.workingSetCache.Put(ctx, tlfID, blockID, buf, serverHalf)\n}", "func CheckDupe(domain, instance, class, id string) error {\n\n\tfileName := strings.ToLower(fileName(domain, instance, class, id))\n\n\tmux.Lock()\n\tdefer mux.Unlock()\n\n\t_, found := sources[fileName]\n\tif found {\n\t\treturn ErrDup\n\t}\n\n\tsources[fileName] = true\n\n\treturn nil\n}", "func (rs *replicationScheme) ensureBlockIsReplicated(ctx context.Context, id ulid.ULID) error {\n\tblockID := id.String()\n\tchunksDir := path.Join(blockID, thanosblock.ChunksDirname)\n\tindexFile := path.Join(blockID, thanosblock.IndexFilename)\n\tmetaFile := path.Join(blockID, thanosblock.MetaFilename)\n\n\tlevel.Debug(rs.logger).Log(\"msg\", \"ensuring block is replicated\", \"block_uuid\", blockID)\n\n\toriginMetaFile, err := rs.fromBkt.Get(ctx, metaFile)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"get meta file from origin bucket: %w\", err)\n\t}\n\n\tdefer runutil.CloseWithLogOnErr(rs.logger, originMetaFile, \"close original meta file\")\n\n\ttargetMetaFile, err := rs.toBkt.Get(ctx, metaFile)\n\tif targetMetaFile != nil {\n\t\tdefer runutil.CloseWithLogOnErr(rs.logger, targetMetaFile, \"close target meta file\")\n\t}\n\n\tif err != nil && !rs.toBkt.IsObjNotFoundErr(err) && err != io.EOF {\n\t\treturn fmt.Errorf(\"get meta file from target 
bucket: %w\", err)\n\t}\n\n\toriginMetaFileContent, err := ioutil.ReadAll(originMetaFile)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"read origin meta file: %w\", err)\n\t}\n\n\tif targetMetaFile != nil && !rs.toBkt.IsObjNotFoundErr(err) {\n\t\ttargetMetaFileContent, err := ioutil.ReadAll(targetMetaFile)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"read target meta file: %w\", err)\n\t\t}\n\n\t\tif bytes.Equal(originMetaFileContent, targetMetaFileContent) {\n\t\t\t// If the origin meta file content and target meta file content is\n\t\t\t// equal, we know we have already successfully replicated\n\t\t\t// previously.\n\t\t\tlevel.Debug(rs.logger).Log(\"msg\", \"skipping block as already replicated\", \"block_uuid\", id.String())\n\t\t\trs.metrics.blocksAlreadyReplicated.Inc()\n\n\t\t\treturn nil\n\t\t}\n\t}\n\n\tif err := rs.fromBkt.Iter(ctx, chunksDir, func(objectName string) error {\n\t\terr := rs.ensureObjectReplicated(ctx, objectName)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"replicate object %v: %w\", objectName, err)\n\t\t}\n\n\t\treturn nil\n\t}); err != nil {\n\t\treturn err\n\t}\n\n\tif err := rs.ensureObjectReplicated(ctx, indexFile); err != nil {\n\t\treturn fmt.Errorf(\"replicate index file: %w\", err)\n\t}\n\n\tlevel.Debug(rs.logger).Log(\"msg\", \"replicating meta file\", \"object\", metaFile)\n\n\tif err := rs.toBkt.Upload(ctx, metaFile, bytes.NewReader(originMetaFileContent)); err != nil {\n\t\treturn fmt.Errorf(\"upload meta file: %w\", err)\n\t}\n\n\trs.metrics.blocksReplicated.Inc()\n\n\treturn nil\n}", "func (o BucketOutput) LifecycleRuleAllowSameActionOverlap() pulumi.BoolPtrOutput {\n\treturn o.ApplyT(func(v *Bucket) pulumi.BoolPtrOutput { return v.LifecycleRuleAllowSameActionOverlap }).(pulumi.BoolPtrOutput)\n}", "func IsDuplicate(err error) bool {\n\tvar e mongo.WriteException\n\tif errors.As(err, &e) {\n\t\tfor _, we := range e.WriteErrors {\n\t\t\tif we.Code == 11000 {\n\t\t\t\treturn true\n\t\t\t}\n\t\t}\n\t}\n\treturn false\n}", 
"func TestAddExportFileSkipsDuplicates(t *testing.T) {\n\tt.Parallel()\n\n\ttestDB := database.NewTestDatabase(t)\n\texportDB := New(testDB)\n\tctx := context.Background()\n\n\t// Add foreign key records.\n\tec := &model.ExportConfig{Period: time.Hour}\n\tif err := exportDB.AddExportConfig(ctx, ec); err != nil {\n\t\tt.Fatal(err)\n\t}\n\teb := &model.ExportBatch{ConfigID: ec.ConfigID, Status: model.ExportBatchOpen}\n\tif err := exportDB.AddExportBatches(ctx, []*model.ExportBatch{eb}); err != nil {\n\t\tt.Fatal(err)\n\t}\n\t// Lease the batch to get the ID.\n\teb, err := exportDB.LeaseBatch(ctx, time.Hour, time.Now())\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\twantBucketName := \"bucket-1\"\n\tef := &model.ExportFile{\n\t\tFilename: \"file\",\n\t\tBucketName: wantBucketName,\n\t\tBatchID: eb.BatchID,\n\t}\n\n\t// Add a record.\n\terr = testDB.InTx(ctx, pgx.Serializable, func(tx pgx.Tx) error {\n\t\tif err := addExportFile(ctx, tx, ef); err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t})\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t// Check that the row is present.\n\tgot, err := exportDB.LookupExportFile(ctx, ef.Filename)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif got.BucketName != wantBucketName {\n\t\tt.Fatalf(\"bucket name mismatch got %q, want %q\", got.BucketName, wantBucketName)\n\t}\n\n\t// Add a second record with same filename, must return ErrKeyConflict, and not overwrite.\n\tef.BucketName = \"bucket-2\"\n\terr = testDB.InTx(ctx, pgx.Serializable, func(tx pgx.Tx) error {\n\t\tif err := addExportFile(ctx, tx, ef); err != nil {\n\t\t\tif err == database.ErrKeyConflict {\n\t\t\t\treturn nil // Expected result.\n\t\t\t}\n\t\t\treturn err\n\t\t}\n\t\treturn errors.New(\"missing expected ErrKeyConflict\")\n\t})\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t// Row must not be updated.\n\tgot, err = exportDB.LookupExportFile(ctx, ef.Filename)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif got.BucketName != wantBucketName 
{\n\t\tt.Fatalf(\"bucket name mismatch got %q, want %q\", got.BucketName, wantBucketName)\n\t}\n}", "func NoConcurrentDupes(f OnMissHandler) (OnMissHandler, chan<- bool) {\n\terrClosed := errors.New(\"NoConcurrentDupes wrapper has been closed\")\n\topchan := make(chan reqGet)\n\tgo nocondupesMainloop(f, opchan)\n\tquit := make(chan bool, 1)\n\twrap := func(key string) (Cacheable, error) {\n\t\tif opchan == nil {\n\t\t\treturn nil, errClosed\n\t\t}\n\t\tselect {\n\t\tcase <-quit:\n\t\t\tclose(opchan)\n\t\t\topchan = nil\n\t\t\treturn nil, errClosed\n\t\tdefault:\n\t\t}\n\t\treplychan := make(chan replyGet)\n\t\topchan <- reqGet{key, replychan}\n\t\treply := <-replychan\n\t\treturn reply.val, reply.err\n\t}\n\treturn wrap, quit\n}", "func (w *binWriter) WriteAvoidRepetitionWhenPossible(v interface{}) {\n\tif w.err != nil {\n\t\treturn\n\t}\n\tif w.err = binary.Write(w.w, binary.LittleEndian, v); w.err == nil {\n\t\tw.size += int64(binary.Size(v))\n\t}\n}", "func (mw *Writer) DedupWriteIsDup(v interface{}) (res bool, err error) {\n\tdefer func() {\n\t\t// This recover allows test 911 (_generated/gen_test.go:67) to run green.\n\t\t// It turns indexing by []byte msgp.Raw into a no-op. 
Which it\n\t\t// should be.\n\t\tif recover() != nil {\n\t\t\treturn\n\t\t}\n\t}()\n\tif v == nil || reflect.ValueOf(v).IsNil() {\n\t\treturn false, nil\n\t}\n\tk, dup := mw.ptrWrit[v]\n\tif !dup {\n\t\tmw.ptrWrit[v] = mw.ptrCountNext\n\t\t//fmt.Printf(\"\\n\\n $$$ NOT dup write %p -> k=%v / %#v\\n\\n\", v, mw.ptrCountNext, v)\n\t\tmw.ptrCountNext++\n\t\treturn false, nil\n\t} else {\n\t\t//fmt.Printf(\"\\n\\n $$$ DUP write %p -> k=%v / %#v\\n\\n\", v, k, v)\n\t}\n\treturn true, mw.DedupWriteExt(k)\n}", "func (me *Container) Duplicate(r ...Registries) *Container {\n\tinstance := Container{sync.Mutex{}, make(map[string]interface{})}\n\n\tfor k, v := range globalContainerInstance.Container.bag {\n\t\tinstance.bag[k] = v\n\t}\n\n\tif len(r) > 0 {\n\t\tfor _, v := range r {\n\t\t\tinstance.Register(v)\n\t\t}\n\t}\n\n\treturn &instance\n}", "func (bs *GasChargeBlockStore) Put(ctx context.Context, blk blocks.Block) error {\n\tbs.gasTank.Charge(bs.pricelist.OnIpldPut(len(blk.RawData())), \"%s storage put %d bytes\", blk.Cid(), len(blk.RawData()))\n\n\tif err := bs.inner.Put(ctx, blk); err != nil {\n\t\tpanic(xerrors.WithMessage(err, \"failed to write data to disk\"))\n\t}\n\treturn nil\n}", "func (fc finderClient) Overwrite(ctx context.Context,\n\thost, index, shard string,\n\txs []*objects.VObject,\n) ([]RepairResponse, error) {\n\treturn fc.cl.OverwriteObjects(ctx, host, index, shard, xs)\n}", "func (endpointSliceStrategy) AllowCreateOnUpdate() bool {\n\treturn false\n}", "func (d *Duplicator) Duplicate(in chan Any, count int) (outs []chan Any) {\n\t// Create duplicate channels\n\touts = make([]chan Any, 0, count)\n\tfor i := 0; i < count; i++ {\n\t\touts = append(outs, make(chan Any))\n\t}\n\n\t// Pipe input to all of the outputs\n\tgo func(outs []chan Any) {\n\t\tfor x := range in {\n\t\t\tfor _, o := range outs {\n\t\t\t\to <- x\n\t\t\t}\n\t\t}\n\t\tfor _, o := range outs {\n\t\t\tclose(o)\n\t\t}\n\t}(outs)\n\n\treturn outs\n}", "func TestMoveMultipleToSameBlock(t 
*testing.T) {\n\tt.Parallel()\n\n\ts, db, teardown, err := testStore()\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tdefer teardown()\n\n\tdbtx, err := db.BeginReadWriteTx()\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tdefer dbtx.Commit()\n\tns := dbtx.ReadWriteBucket(namespaceKey)\n\n\tb100 := BlockMeta{\n\t\tBlock: Block{Height: 100},\n\t\tTime: time.Now(),\n\t}\n\n\tcb := newCoinBase(20e8, 30e8)\n\tcbRec, err := NewTxRecordFromMsgTx(cb, b100.Time)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t// Insert coinbase and mark both outputs as credits.\n\terr = s.InsertTx(ns, cbRec, &b100)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\terr = s.AddCredit(ns, cbRec, &b100, 0, false)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\terr = s.AddCredit(ns, cbRec, &b100, 1, false)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t// Create and insert two unmined transactions which spend both coinbase\n\t// outputs.\n\tspenderATime := time.Now()\n\tspenderA := spendOutput(&cbRec.Hash, 0, 1e8, 2e8, 18e8)\n\tspenderARec, err := NewTxRecordFromMsgTx(spenderA, spenderATime)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\terr = s.InsertTx(ns, spenderARec, nil)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\terr = s.AddCredit(ns, spenderARec, nil, 0, false)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\terr = s.AddCredit(ns, spenderARec, nil, 1, false)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tspenderBTime := time.Now()\n\tspenderB := spendOutput(&cbRec.Hash, 1, 4e8, 8e8, 18e8)\n\tspenderBRec, err := NewTxRecordFromMsgTx(spenderB, spenderBTime)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\terr = s.InsertTx(ns, spenderBRec, nil)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\terr = s.AddCredit(ns, spenderBRec, nil, 0, false)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\terr = s.AddCredit(ns, spenderBRec, nil, 1, false)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tcoinbaseMaturity := int32(chaincfg.TestNet3Params.CoinbaseMaturity)\n\n\t// Mine both transactions in the block that matures the 
coinbase.\n\tbMaturity := BlockMeta{\n\t\tBlock: Block{Height: b100.Height + coinbaseMaturity},\n\t\tTime: time.Now(),\n\t}\n\terr = s.InsertTx(ns, spenderARec, &bMaturity)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\terr = s.InsertTx(ns, spenderBRec, &bMaturity)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t// Check that both transactions can be queried at the maturity block.\n\tdetailsA, err := s.UniqueTxDetails(ns, &spenderARec.Hash, &bMaturity.Block)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif detailsA == nil {\n\t\tt.Fatal(\"No details found for first spender\")\n\t}\n\tdetailsB, err := s.UniqueTxDetails(ns, &spenderBRec.Hash, &bMaturity.Block)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif detailsB == nil {\n\t\tt.Fatal(\"No details found for second spender\")\n\t}\n\n\t// Verify that the balance was correctly updated on the block record\n\t// append and that no unmined transactions remain.\n\tbalTests := []struct {\n\t\theight int32\n\t\tminConf int32\n\t\tbal btcutil.Amount\n\t}{\n\t\t// Maturity height\n\t\t{\n\t\t\theight: bMaturity.Height,\n\t\t\tminConf: 0,\n\t\t\tbal: 15e8,\n\t\t},\n\t\t{\n\t\t\theight: bMaturity.Height,\n\t\t\tminConf: 1,\n\t\t\tbal: 15e8,\n\t\t},\n\t\t{\n\t\t\theight: bMaturity.Height,\n\t\t\tminConf: 2,\n\t\t\tbal: 0,\n\t\t},\n\n\t\t// Next block after maturity height\n\t\t{\n\t\t\theight: bMaturity.Height + 1,\n\t\t\tminConf: 0,\n\t\t\tbal: 15e8,\n\t\t},\n\t\t{\n\t\t\theight: bMaturity.Height + 1,\n\t\t\tminConf: 2,\n\t\t\tbal: 15e8,\n\t\t},\n\t\t{\n\t\t\theight: bMaturity.Height + 1,\n\t\t\tminConf: 3,\n\t\t\tbal: 0,\n\t\t},\n\t}\n\tfor i, tst := range balTests {\n\t\tbal, err := s.Balance(ns, tst.minConf, tst.height)\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"Balance test %d: Store.Balance failed: %v\", i, err)\n\t\t}\n\t\tif bal != tst.bal {\n\t\t\tt.Errorf(\"Balance test %d: Got %v Expected %v\", i, bal, tst.bal)\n\t\t}\n\t}\n\tif t.Failed() {\n\t\tt.Fatal(\"Failed balance checks after moving both coinbase 
spenders\")\n\t}\n\tunminedTxs, err := s.UnminedTxs(ns)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif len(unminedTxs) != 0 {\n\t\tt.Fatalf(\"Should have no unmined transactions mining both, found %d\", len(unminedTxs))\n\t}\n}", "func (kv *ShardKV) isDuplicateRequest(clientId int64, requestId int64) bool {\n\toperationContext, ok := kv.lastOperations[clientId]\n\treturn ok && requestId <= operationContext.MaxAppliedCommandId\n}", "func BenchmarkPut(b *testing.B) {\n\tdefer cleanTestFiles()\n\n\tblk, err := createTestBlock()\n\tif err != nil {\n\t\tb.Fatal(err)\n\t}\n\n\tdefer blk.Close()\n\n\trpos, err := blk.New()\n\tif err != nil {\n\t\tb.Fatal(err)\n\t}\n\n\tpld := []byte{1, 2, 3, 4}\n\n\tb.ResetTimer()\n\tfor i := 0; i < b.N; i++ {\n\t\tblk.Put(rpos, 2, pld)\n\t}\n}", "func Duplicate(h handle.Handle, pid uint32, access handle.DuplicateAccess) (handle.Handle, error) {\n\ttargetPs, err := process.Open(process.DupHandle, false, pid)\n\tif err != nil {\n\t\treturn ^handle.Handle(0), err\n\t}\n\tdefer targetPs.Close()\n\tcurrentPs, err := process.Open(process.DupHandle, false, uint32(os.Getpid()))\n\tif err != nil {\n\t\treturn ^handle.Handle(0), err\n\t}\n\tdefer currentPs.Close()\n\t// duplicate the remote handle in the current process's address space.\n\t// Note that for certain handle types this operation might fail\n\t// as they don't permit duplicate operations\n\tdup, err := h.Duplicate(targetPs, currentPs, access)\n\tif err != nil {\n\t\treturn ^handle.Handle(0), fmt.Errorf(\"couldn't duplicate handle: %v\", err)\n\t}\n\treturn dup, nil\n}", "func (mp *TxPool) replaceDuplicateSideChainPowTx(txn *Transaction) {\n\tvar replaceList []*Transaction\n\n\tfor _, v := range mp.txnList {\n\t\tif v.TxType == SideChainPow {\n\t\t\toldPayload := v.Payload.Data(payload.SideChainPowVersion)\n\t\t\toldGenesisHashData := oldPayload[32:64]\n\n\t\t\tnewPayload := txn.Payload.Data(payload.SideChainPowVersion)\n\t\t\tnewGenesisHashData := newPayload[32:64]\n\n\t\t\tif 
bytes.Equal(oldGenesisHashData, newGenesisHashData) {\n\t\t\t\treplaceList = append(replaceList, v)\n\t\t\t}\n\t\t}\n\t}\n\n\tfor _, txn := range replaceList {\n\t\ttxid := txn.Hash()\n\t\tlog.Info(\"replace sidechainpow transaction, txid=\", txid.String())\n\t\tmp.removeTransaction(txn)\n\t}\n}", "func (mcc *mapChunkCache) Put(chnks []nbs.CompressedChunk) bool {\n\tmcc.mu.Lock()\n\tdefer mcc.mu.Unlock()\n\n\tfor i := 0; i < len(chnks); i++ {\n\t\tc := chnks[i]\n\t\th := c.Hash()\n\n\t\tif curr, ok := mcc.hashToChunk[h]; ok {\n\t\t\tif !curr.IsEmpty() {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t}\n\n\t\tif mcc.cm.CapacityExceeded(len(c.FullCompressedChunk)) {\n\t\t\treturn true\n\t\t}\n\n\t\tmcc.hashToChunk[h] = c\n\n\t\tif !c.IsEmpty() {\n\t\t\tmcc.toFlush[h] = c\n\t\t}\n\t}\n\n\treturn false\n}", "func checkDuplicate(recvPath []string, confs []*Config) bool {\n\tif len(confs) == 0 {\n\t\treturn false\n\t}\n\n\tfor _, c := range confs {\n\t\tif reflect.DeepEqual(c.Path, recvPath) {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func TestCheckDuplicateConfigs(t *testing.T) {\n\ttestCases := []struct {\n\t\tqConfigs []queueConfig\n\t\texpectedErrCode APIErrorCode\n\t}{\n\t\t// Error for duplicate queue configs.\n\t\t{\n\t\t\tqConfigs: []queueConfig{\n\t\t\t\t{\n\t\t\t\t\tQueueARN: \"arn:minio:sqs:us-east-1:1:redis\",\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tQueueARN: \"arn:minio:sqs:us-east-1:1:redis\",\n\t\t\t\t},\n\t\t\t},\n\t\t\texpectedErrCode: ErrOverlappingConfigs,\n\t\t},\n\t\t// Valid queue configs.\n\t\t{\n\t\t\tqConfigs: []queueConfig{\n\t\t\t\t{\n\t\t\t\t\tQueueARN: \"arn:minio:sqs:us-east-1:1:redis\",\n\t\t\t\t},\n\t\t\t},\n\t\t\texpectedErrCode: ErrNone,\n\t\t},\n\t}\n\n\t// ... 
validate for duplicate queue configs.\n\tfor i, testCase := range testCases {\n\t\terrCode := checkDuplicateQueueConfigs(testCase.qConfigs)\n\t\tif errCode != testCase.expectedErrCode {\n\t\t\tt.Errorf(\"Test %d: Expected %d, got %d\", i+1, testCase.expectedErrCode, errCode)\n\t\t}\n\t}\n}", "func (t *ACLRole) DBCreateIgnoreDuplicate(ctx context.Context, db DB) (sql.Result, error) {\n\tq := \"INSERT INTO `acl_role` (`acl_role`.`id`,`acl_role`.`checksum`,`acl_role`.`name`,`acl_role`.`description`,`acl_role`.`admin_user_id`,`acl_role`.`customer_id`,`acl_role`.`created_at`,`acl_role`.`updated_at`) VALUES (?,?,?,?,?,?,?,?) ON DUPLICATE KEY UPDATE `id` = `id`\"\n\tchecksum := t.CalculateChecksum()\n\tif t.GetChecksum() == checksum {\n\t\treturn nil, nil\n\t}\n\tt.Checksum = &checksum\n\treturn db.ExecContext(ctx, q,\n\t\torm.ToSQLString(t.ID),\n\t\torm.ToSQLString(t.Checksum),\n\t\torm.ToSQLString(t.Name),\n\t\torm.ToSQLString(t.Description),\n\t\torm.ToSQLString(t.AdminUserID),\n\t\torm.ToSQLString(t.CustomerID),\n\t\torm.ToSQLInt64(t.CreatedAt),\n\t\torm.ToSQLInt64(t.UpdatedAt),\n\t)\n}", "func (b Bucket) Put(args ...Params) error {\n\theader, query := getHeaderQuery(args)\n\tif b.ACL != \"\" {\n\t\theader.Set(\"x-oss-acl\", b.ACL)\n\t}\n\tvar body interface{}\n\tif b.Location != \"\" {\n\t\tbody = CreateBucketConfiguration{b.Location}\n\t}\n\treturn b.Do(\"PUT\", \"\", body, nil, header, query)\n}", "func SaveMissedBlock(vals []types.Validator, validatorSets []types.ValidatorOfValidatorSet, block types.BlockResult) {\n\theight, _ := utils.ParseInt(block.Block.Header.Height)\n\tvalidatorSetsFormat := client.FormatValidatorSetPubkeyToIndex(validatorSets)\n\tfor _, validator := range vals {\n\t\tif val, ok := validatorSetsFormat[validator.ConsensusPubkey.Key]; ok {\n\t\t\tif len(block.Block.LastCommit.Signatures) > 0 {\n\t\t\t\tsignedInfo := block.Block.LastCommit.Signatures[val]\n\t\t\t\tif signedInfo.Signature == \"\" {\n\t\t\t\t\tb := 
schema.NewMissedBlock(schema.MissedBlock{\n\t\t\t\t\t\tHeight: height,\n\t\t\t\t\t\tOperatorAddr: validator.OperatorAddress,\n\t\t\t\t\t\tTimestamp: block.Block.Header.Time,\n\t\t\t\t\t})\n\t\t\t\t\torm.Save(\"missed_block\", b)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n}", "func DupSecOpt(src string) ([]string, error) {\n\treturn dupSecOpt(src)\n}", "func (c *cache) Put(ctx context.Context, hash string, data []byte) error {\n\tc.mu.Lock()\n\tdefer c.mu.Unlock()\n\n\tif err := c.bs.Put(ctx, hash, data); err != nil {\n\t\treturn err\n\t}\n\n\tif err := c.blobsCache.Add(hash, data); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}", "func IsDuplicated(err error) bool {\n\tif we, ok := err.(mongo.WriteException); ok {\n\t\tfor _, e := range we.WriteErrors {\n\t\t\tif e.Code == 11000 {\n\t\t\t\treturn true\n\t\t\t}\n\t\t}\n\t}\n\treturn false\n}", "func RegisterDeepCopies(scheme *runtime.Scheme) error {\n\treturn scheme.AddGeneratedDeepCopyFuncs(\n\t\tconversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error {\n\t\t\tin.(*PodPolicy).DeepCopyInto(out.(*PodPolicy))\n\t\t\treturn nil\n\t\t}, InType: reflect.TypeOf(&PodPolicy{})},\n\t\tconversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error {\n\t\t\tin.(*Redis).DeepCopyInto(out.(*Redis))\n\t\t\treturn nil\n\t\t}, InType: reflect.TypeOf(&Redis{})},\n\t\tconversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error {\n\t\t\tin.(*RedisList).DeepCopyInto(out.(*RedisList))\n\t\t\treturn nil\n\t\t}, InType: reflect.TypeOf(&RedisList{})},\n\t\tconversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error {\n\t\t\tin.(*SentinelSpec).DeepCopyInto(out.(*SentinelSpec))\n\t\t\treturn nil\n\t\t}, InType: reflect.TypeOf(&SentinelSpec{})},\n\t\tconversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error 
{\n\t\t\tin.(*SentinelStatus).DeepCopyInto(out.(*SentinelStatus))\n\t\t\treturn nil\n\t\t}, InType: reflect.TypeOf(&SentinelStatus{})},\n\t\tconversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error {\n\t\t\tin.(*ServerCondition).DeepCopyInto(out.(*ServerCondition))\n\t\t\treturn nil\n\t\t}, InType: reflect.TypeOf(&ServerCondition{})},\n\t\tconversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error {\n\t\t\tin.(*ServerSpec).DeepCopyInto(out.(*ServerSpec))\n\t\t\treturn nil\n\t\t}, InType: reflect.TypeOf(&ServerSpec{})},\n\t\tconversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error {\n\t\t\tin.(*ServerStatus).DeepCopyInto(out.(*ServerStatus))\n\t\t\treturn nil\n\t\t}, InType: reflect.TypeOf(&ServerStatus{})},\n\t\tconversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error {\n\t\t\tin.(*SlaveSpec).DeepCopyInto(out.(*SlaveSpec))\n\t\t\treturn nil\n\t\t}, InType: reflect.TypeOf(&SlaveSpec{})},\n\t\tconversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error {\n\t\t\tin.(*SlaveStatus).DeepCopyInto(out.(*SlaveStatus))\n\t\t\treturn nil\n\t\t}, InType: reflect.TypeOf(&SlaveStatus{})},\n\t)\n}", "func (c *CopyCmd) Replicate(ctx context.Context, opt *Option, srcFs, dstFs *Firestore) error {\n\tvar err error\n\tif c.IsDelete {\n\t\tPrintInfof(opt.Stdout, \"delete original document? 
(y/n) \\n\")\n\t\tyes := askForConfirmation(opt)\n\t\tif !yes {\n\t\t\treturn errors.New(\"exit\")\n\t\t}\n\t}\n\n\treaderList, err := srcFs.Scan(ctx, c.FirestorePath)\n\tif err != nil {\n\t\treturn err\n\t}\n\tfor k, reader := range readerList {\n\t\tdstPath := strings.Replace(c.DestinationFirestorePath, \"*\", k, -1)\n\t\tsrcPath := strings.Replace(c.FirestorePath, \"*\", k, -1)\n\t\tDebugf(\"save with : %v from %v \\n\", srcPath, srcPath)\n\n\t\tvar m map[string]interface{}\n\t\terr = json.NewDecoder(reader).Decode(&m)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tom := dstFs.InterpretationEachValueForTime(m)\n\n\t\terr = dstFs.SaveData(ctx, opt, dstPath, om)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tif c.IsDelete {\n\t\t\terr = dstFs.DeleteData(ctx, opt, srcPath)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\tPrintInfof(opt.Stdout, \"Copy complete! \\n\\n\")\n\treturn nil\n}", "func StatsdDuplicate(watchType string, watchID string) {\n\tif DogStatsd {\n\t\tstatsd, _ := godspeed.NewDefault()\n\t\tdefer statsd.Conn.Close()\n\t\ttags := makeTags(watchType, watchID, \"\", \"\")\n\t\tmetricName := fmt.Sprintf(\"%s.duplicate\", MetricPrefix)\n\t\tstatsd.Incr(metricName, tags)\n\t}\n\tLog(fmt.Sprintf(\"dogstatsd='%t' %s='%s' action='duplicate'\", DogStatsd, watchType, watchID), \"debug\")\n}", "func recordAdaptorDuplicateBidIDs(metricsEngine metrics.MetricsEngine, adapterBids map[openrtb_ext.BidderName]*entities.PbsOrtbSeatBid) bool {\n\tbidIDCollisionFound := false\n\tif nil == adapterBids {\n\t\treturn false\n\t}\n\tfor bidder, bid := range adapterBids {\n\t\tbidIDColisionMap := make(map[string]int, len(adapterBids[bidder].Bids))\n\t\tfor _, thisBid := range bid.Bids {\n\t\t\tif collisions, ok := bidIDColisionMap[thisBid.Bid.ID]; ok {\n\t\t\t\tbidIDCollisionFound = true\n\t\t\t\tbidIDColisionMap[thisBid.Bid.ID]++\n\t\t\t\tglog.Warningf(\"Bid.id %v :: %v collision(s) [imp.id = %v] for bidder '%v'\", thisBid.Bid.ID, collisions, 
thisBid.Bid.ImpID, string(bidder))\n\t\t\t\tmetricsEngine.RecordAdapterDuplicateBidID(string(bidder), 1)\n\t\t\t} else {\n\t\t\t\tbidIDColisionMap[thisBid.Bid.ID] = 1\n\t\t\t}\n\t\t}\n\t}\n\treturn bidIDCollisionFound\n}", "func (pb *PBServer) SubPut(args *PutArgs, reply *PutReply) error {\n pb.mu.Lock()\n\n fmt.Printf(\"sub put %s received at %s\\n\", args, pb.me)\n \n if pb.view.Backup != pb.me {\n reply.Err = ErrWrongServer\n pb.mu.Unlock()\n return nil\n }\n\n // filter duplicated requests\n if pb.processed[args.Id] {\n reply.Err = OK\n pb.mu.Unlock()\n return nil\n }\n\n //pb.processed[args.Id] = true\n pb.doPut(args, reply)\n \n reply.Err = OK\n pb.mu.Unlock()\n return nil\n}", "func TestPut(t *testing.T) {\n\tconf := withTmpBoltStore(t, defaultConf(t, secret))\n\tcases := []struct {\n\t\tname string\n\t\tinput bits.ChunkReader\n\t\tconf bits.Config\n\t\tminKeys int\n\t\texpectedErr string\n\t\tkeyw bits.KeyWriter\n\t}{{\n\t\t\"9MiB_random_default_conf\", //chunker max size is 8Mib, so expect at least 2 chunks\n\t\trandBytesInput(bytes.NewBuffer(randb(9*1024*1024)), secret),\n\t\tconf,\n\t\t2,\n\t\t\"\",\n\t\tnil,\n\t}, {\n\t\t\"1MiB_random_storage_failed\",\n\t\trandBytesInput(bytes.NewBuffer(randb(1024*1024)), secret),\n\t\twithStore(t, defaultConf(t, secret), &failingStore{}),\n\t\t0,\n\t\t\"storage_failed\",\n\t\tnil,\n\t}, {\n\t\t\"1MiB_random_chunker_failed\",\n\t\t&failingChunker{},\n\t\tconf,\n\t\t0,\n\t\t\"chunking_failed\",\n\t\tnil,\n\t}, {\n\t\t\"1MiB_chunking_fail\",\n\t\t&failingChunker{},\n\t\tconf,\n\t\t0,\n\t\t\"chunking_failed\",\n\t\tnil,\n\t}, {\n\t\t\"1MiB_handler_failed\",\n\t\trandBytesInput(bytes.NewBuffer(randb(1024*1024)), secret),\n\t\tconf,\n\t\t0,\n\t\t\"handler_failed\",\n\t\t&failingKeyHandler{},\n\t}}\n\n\tfor _, c := range cases {\n\t\tt.Run(c.name, func(t *testing.T) {\n\n\t\t\tvar keys []bits.K\n\t\t\tvar err error\n\t\t\tif c.keyw == nil {\n\t\t\t\th := bitskeys.NewMemIterator()\n\t\t\t\terr = bits.Put(c.input, h, 
c.conf)\n\t\t\t\tkeys = h.Keys\n\n\t\t\t\tif len(keys) < c.minKeys {\n\t\t\t\t\tt.Errorf(\"expected at least '%d' keys, got: '%d'\", c.minKeys, len(keys))\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\terr = bits.Put(c.input, c.keyw, c.conf)\n\t\t\t}\n\n\t\t\tif err != nil {\n\t\t\t\tif c.expectedErr == \"\" {\n\t\t\t\t\tt.Errorf(\"splitting shouldnt fail but got: %v\", err)\n\t\t\t\t} else if !strings.Contains(err.Error(), c.expectedErr) {\n\t\t\t\t\tt.Errorf(\"expected an error that contains message '%s', got: %v\", c.expectedErr, err)\n\t\t\t\t}\n\t\t\t} else if c.expectedErr != \"\" {\n\t\t\t\tt.Errorf(\"expected an error, got success\")\n\t\t\t}\n\t\t})\n\t}\n}", "func (this *Block) Unique(b []int) {\n\tfor _, vb := range b {\n\t\tfor i, dup := range this.Possible {\n\t\t\tif dup == vb {\n\t\t\t\tthis.Possible = append(this.Possible[:i], this.Possible[i+1:]...)\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t}\n}", "func (bfs *BruteForceService) duplicate(sub string) bool {\n\tbfs.Lock()\n\tdefer bfs.Unlock()\n\n\tif _, found := bfs.subdomains[sub]; found {\n\t\treturn true\n\t}\n\tbfs.subdomains[sub] = struct{}{}\n\treturn false\n}", "func (ihs *IPHistoryService) duplicate(domain string) bool {\n\tihs.Lock()\n\tdefer ihs.Unlock()\n\n\tif _, found := ihs.filter[domain]; found {\n\t\treturn true\n\t}\n\tihs.filter[domain] = struct{}{}\n\treturn false\n}", "func DuplicateFile(n *net_node.Node, filename string, send_to_idx int32) {\n\t// First, determine if the file we are putting actually exists\n\tf, err := os.Stat(filename)\n\tif os.IsNotExist(err) {\n\t\tfmt.Println(filename, \"does not exist ,cant duplicate this file\")\n\t\treturn\n\t}\n\tfile_size := f.Size()\n\n\t// Do not begin writing until we have waited for all\n\t// other writes and reads on the file to finish and notified\n\t// other servers that we are writing\n\n\tacquire_distributed_write_lock(n, filename)\n\n\tSend_file_tcp(n, send_to_idx, filename, filename, file_size, \"\", false)\n\n\t// Send a message to the 
remaining servers that the file has been put\n\tservers := n.Files[filename].Servers\n\tfor _, idx := range servers {\n\t\tif idx == -1 {\n\t\t\tcontinue\n\t\t}\n\t\tif n.Table[idx].Status != net_node.ACTIVE {\n\t\t\tn.Files[filename].Servers[idx] = send_to_idx\n\t\t}\n\t}\n\tnotify_servers_of_file_put_complete(n, servers, filename, file_size)\n}", "func BenchmarkBTreeDeleteInsertCloneOnce(b *testing.B) {\n\tforBenchmarkSizes(b, func(b *testing.B, count int) {\n\t\tinsertP := perm(count)\n\t\tvar tr btree\n\t\tfor _, item := range insertP {\n\t\t\ttr.Set(item)\n\t\t}\n\t\ttr = tr.Clone()\n\t\tb.ResetTimer()\n\t\tfor i := 0; i < b.N; i++ {\n\t\t\titem := insertP[i%count]\n\t\t\ttr.Delete(item)\n\t\t\ttr.Set(item)\n\t\t}\n\t})\n}", "func (_ BufferPtrPool2M) Put(b *[]byte) {\n\tPutBytesSlicePtr2M(b)\n}", "func duplicateExists(vs []string) bool {\n\tm := make(map[string]bool, len(vs))\n\n\tfor _, v := range vs {\n\t\tif _, ok := m[v]; ok {\n\t\t\treturn true\n\t\t}\n\t\tm[v] = true\n\t}\n\treturn false\n}", "func (meta *predicateMetadata) ShallowCopy() algorithm.PredicateMetadata {\n\tnewPredMeta := &predicateMetadata{\n\t\tpod: meta.pod,\n\t\tpodBestEffort: meta.podBestEffort,\n\t\tpodRequest: meta.podRequest,\n\t\tserviceAffinityInUse: meta.serviceAffinityInUse,\n\t\tignoredExtendedResources: meta.ignoredExtendedResources,\n\t}\n\tnewPredMeta.podPorts = append([]*v1.ContainerPort(nil), meta.podPorts...)\n\tnewPredMeta.matchingAntiAffinityTerms = map[string][]matchingPodAntiAffinityTerm{}\n\tfor k, v := range meta.matchingAntiAffinityTerms {\n\t\tnewPredMeta.matchingAntiAffinityTerms[k] = append([]matchingPodAntiAffinityTerm(nil), v...)\n\t}\n\tnewPredMeta.nodeNameToMatchingAffinityPods = make(map[string][]*v1.Pod)\n\tfor k, v := range meta.nodeNameToMatchingAffinityPods {\n\t\tnewPredMeta.nodeNameToMatchingAffinityPods[k] = append([]*v1.Pod(nil), v...)\n\t}\n\tnewPredMeta.nodeNameToMatchingAntiAffinityPods = make(map[string][]*v1.Pod)\n\tfor k, v := range 
meta.nodeNameToMatchingAntiAffinityPods {\n\t\tnewPredMeta.nodeNameToMatchingAntiAffinityPods[k] = append([]*v1.Pod(nil), v...)\n\t}\n\tnewPredMeta.serviceAffinityMatchingPodServices = append([]*v1.Service(nil),\n\t\tmeta.serviceAffinityMatchingPodServices...)\n\tnewPredMeta.serviceAffinityMatchingPodList = append([]*v1.Pod(nil),\n\t\tmeta.serviceAffinityMatchingPodList...)\n\treturn (algorithm.PredicateMetadata)(newPredMeta)\n}", "func (d *VaultPKIQuery) CanShare() bool {\n\treturn false\n}", "func (ei ei) Share(cfg upspin.Config, readers []upspin.PublicKey, packdata []*[]byte) {\n}", "func ValidateNoDuplicateNetworkRules(attribute string, rules []*NetworkRule) error {\n\n\ttype indexedRule struct {\n\t\tindex int\n\t\trule *NetworkRule\n\t}\n\tseen := make(map[[sha256.Size]byte]*indexedRule, len(rules))\n\tfor iRule, rule := range rules {\n\n\t\tif rule == nil {\n\t\t\tcontinue\n\t\t}\n\n\t\thash := sha256.New()\n\n\t\t// hash the action\n\t\tfmt.Fprintf(hash, \"%s/\", rule.Action)\n\n\t\t// hash the object\n\t\tobj := make([]string, len(rule.Object))\n\t\tfor i, subExpr := range rule.Object {\n\t\t\tcpy := append([]string{}, subExpr...)\n\t\t\tsort.Strings(cpy)\n\t\t\tobj[i] = strings.Join(cpy, \"/\")\n\t\t}\n\t\tsort.Strings(obj)\n\t\tfor _, subExpr := range obj {\n\t\t\tfmt.Fprintf(hash, \"[%s]/\", subExpr)\n\t\t}\n\n\t\t// hash the ports\n\t\tprotoPortCpy := append([]string{}, rule.ProtocolPorts...)\n\t\tfor i, port := range protoPortCpy {\n\t\t\tprotoPortCpy[i] = strings.ToLower(port)\n\t\t}\n\t\tsort.Strings(protoPortCpy)\n\t\tfor _, port := range protoPortCpy {\n\t\t\tfmt.Fprintf(hash, \"%s/\", port)\n\t\t}\n\n\t\t// check if hash was seen before\n\t\tvar digest [sha256.Size]byte\n\t\tcopy(digest[:], hash.Sum(nil))\n\t\tif prevRule, ok := seen[digest]; ok {\n\t\t\treturn makeValidationError(\n\t\t\t\tattribute,\n\t\t\t\tfmt.Sprintf(\"Duplicate network rules at the following indexes: [%d, %d]\", prevRule.index+1, 
iRule+1),\n\t\t\t)\n\t\t}\n\n\t\tseen[digest] = &indexedRule{index: iRule, rule: rule}\n\t}\n\n\treturn nil\n}", "func (cp *ComposerPool) Put(composer *Composer) (result bool) {\n\tlogger.Debug(\"Entering ComposerPool.Put\", composer)\n\tdefer func() { logger.Debug(\"Exiting ComposerPool.Put\", result) }()\n\n\tcp.PoolMutex.Lock()\n\tcp.Pool <- composer\n\tcp.PoolMutex.Unlock()\n\treturn true\n}", "func (k *keeper) StoreMultiWithoutBlocking(items []Item) error {\n\tif k.disableCaching {\n\t\treturn nil\n\t}\n\n\tclient := k.connPool.Get()\n\tdefer client.Close()\n\n\terr := client.Send(\"MULTI\")\n\tif err != nil {\n\t\treturn err\n\t}\n\tfor _, item := range items {\n\t\terr = client.Send(\"SETEX\", item.GetKey(), k.decideCacheTTL(item), item.GetValue())\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\t_, err = client.Do(\"EXEC\")\n\treturn err\n}", "func Put(\n\tctx context.Context, batch engine.ReadWriter, cArgs CommandArgs, resp roachpb.Response,\n) (result.Result, error) {\n\targs := cArgs.Args.(*roachpb.PutRequest)\n\th := cArgs.Header\n\tms := cArgs.Stats\n\n\tvar ts hlc.Timestamp\n\tif !args.Inline {\n\t\tts = h.Timestamp\n\t}\n\tif h.DistinctSpans {\n\t\tif b, ok := batch.(engine.Batch); ok {\n\t\t\t// Use the distinct batch for both blind and normal ops so that we don't\n\t\t\t// accidentally flush mutations to make them visible to the distinct\n\t\t\t// batch.\n\t\t\tbatch = b.Distinct()\n\t\t\tdefer batch.Close()\n\t\t}\n\t}\n\tif args.Blind {\n\t\treturn result.Result{}, engine.MVCCBlindPut(ctx, batch, ms, args.Key, ts, args.Value, h.Txn)\n\t}\n\treturn result.Result{}, engine.MVCCPut(ctx, batch, ms, args.Key, ts, args.Value, h.Txn)\n}", "func (c *Client) ShareSecret() {\n\tgen := c.g.Point().Base()\n\trand := c.suite.RandomStream()\n\tsecret1 := c.g.Scalar().Pick(rand)\n\tsecret2 := c.g.Scalar().Pick(rand)\n\tpublic1 := c.g.Point().Mul(secret1, gen)\n\tpublic2 := c.g.Point().Mul(secret2, gen)\n\n\t//generate share secrets via Diffie-Hellman w/ 
all servers\n\t//one used for masks, one used for one-time pad\n\tcs1 := ClientDH{\n\t\tPublic: MarshalPoint(public1),\n\t\tId: c.id,\n\t}\n\tcs2 := ClientDH{\n\t\tPublic: MarshalPoint(public2),\n\t\tId: c.id,\n\t}\n\n\tmasks := make([][]byte, len(c.servers))\n\tsecrets := make([][]byte, len(c.servers))\n\n\tvar wg sync.WaitGroup\n\tfor i, rpcServer := range c.rpcServers {\n\t\twg.Add(1)\n\t\tgo func(i int, rpcServer *rpc.Client, cs1 ClientDH, cs2 ClientDH) {\n\t\t\tdefer wg.Done()\n\t\t\tservPub1 := make([]byte, SecretSize)\n\t\t\tservPub2 := make([]byte, SecretSize)\n\t\t\tservPub3 := make([]byte, SecretSize)\n\t\t\tcall1 := rpcServer.Go(\"Server.ShareMask\", &cs1, &servPub1, nil)\n\t\t\tcall2 := rpcServer.Go(\"Server.ShareSecret\", &cs2, &servPub2, nil)\n\t\t\tcall3 := rpcServer.Go(\"Server.GetEphKey\", 0, &servPub3, nil)\n\t\t\t<-call1.Done\n\t\t\t<-call2.Done\n\t\t\t<-call3.Done\n\t\t\tmasks[i] = MarshalPoint(c.g.Point().Mul(secret1, UnmarshalPoint(c.g, servPub1)))\n\t\t\t// c.masks[i] = make([]byte, SecretSize)\n\t\t\t// c.masks[i][c.id] = 1\n\t\t\tsecrets[i] = MarshalPoint(c.g.Point().Mul(secret2, UnmarshalPoint(c.g, servPub2)))\n\t\t\t//secrets[i] = make([]byte, SecretSize)\n\t\t\tc.ephKeys[i] = UnmarshalPoint(c.suite, servPub3)\n\t\t}(i, rpcServer, cs1, cs2)\n\t}\n\twg.Wait()\n\n\tfor r := range c.secretss {\n\t\tfor i := range c.secretss[r] {\n\t\t\tif r == 0 {\n\t\t\t\tsha3.ShakeSum256(c.secretss[r][i], secrets[i])\n\t\t\t} else {\n\t\t\t\tsha3.ShakeSum256(c.secretss[r][i], c.secretss[r-1][i])\n\t\t\t}\n\t\t}\n\t}\n\n\tfor r := range c.maskss {\n\t\tfor i := range c.maskss[r] {\n\t\t\tif r == 0 {\n\t\t\t\tsha3.ShakeSum256(c.maskss[r][i], masks[i])\n\t\t\t} else {\n\t\t\t\tsha3.ShakeSum256(c.maskss[r][i], c.maskss[r-1][i])\n\t\t\t}\n\t\t}\n\t}\n\n}", "func (s *s3ManifestService) Put(ctx context.Context, manifest distribution.Manifest, options ...distribution.ManifestServiceOption) (godigest.Digest, error) {\n\tif err := s.r.init(); err != nil 
{\n\t\treturn \"\", err\n\t}\n\tmediaType, payload, err := manifest.Payload()\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tdgst := godigest.FromBytes(payload)\n\tblob := fmt.Sprintf(\"/v2/%s/blobs/%s\", s.r.repoName, dgst)\n\n\tif err := s.r.conditionalUpload(&s3manager.UploadInput{\n\t\tBucket: aws.String(s.r.bucket),\n\t\tContentType: aws.String(mediaType),\n\t\tBody: bytes.NewBuffer(payload),\n\t\tKey: aws.String(blob),\n\t}, dgst.String()); err != nil {\n\t\treturn \"\", err\n\t}\n\n\t// set manifests\n\ttags := []string{dgst.String()}\n\tfor _, option := range options {\n\t\tif opt, ok := option.(distribution.WithTagOption); ok {\n\t\t\ttags = append(tags, opt.Tag)\n\t\t}\n\t}\n\tfor _, tag := range tags {\n\t\tif _, err := s.r.s3.CopyObject(&s3.CopyObjectInput{\n\t\t\tBucket: aws.String(s.r.bucket),\n\t\t\tContentType: aws.String(mediaType),\n\t\t\tCopySource: aws.String(path.Join(s.r.bucket, blob)),\n\t\t\tKey: aws.String(fmt.Sprintf(\"/v2/%s/manifests/%s\", s.r.repoName, tag)),\n\t\t}); err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t}\n\treturn dgst, nil\n}", "func createDuplicates(cfg *duplicate.Config) {\n\tif cfg.File == \"\" {\n\t\tfmt.Println(\"Please specify the original file path (flag \\\"-file\\\")\")\n\t} else {\n\t\tfmt.Println(\"Functionality in development\")\n\t}\n}", "func (c *ConfigMapVault) Put(key, val string, createOnly bool) error {\n\tc.storeLock.Lock()\n\tdefer c.storeLock.Unlock()\n\tapiObj := &api_v1.ConfigMap{\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tName: c.name,\n\t\t\tNamespace: c.namespace,\n\t\t},\n\t}\n\tcfgMapKey := fmt.Sprintf(\"%v/%v\", c.namespace, c.name)\n\n\titem, exists, err := c.configMapStore.GetByKey(cfgMapKey)\n\tif err == nil && exists {\n\t\tdata := item.(*api_v1.ConfigMap).Data\n\t\tif createOnly {\n\t\t\treturn fmt.Errorf(\"failed to create configmap %v, it is already existed with data %v.\", cfgMapKey, data)\n\t\t}\n\t\texistingVal, ok := data[key]\n\t\tif ok && existingVal == val {\n\t\t\t// duplicate, 
no need to update.\n\t\t\treturn nil\n\t\t}\n\t\tdata[key] = val\n\t\tapiObj.Data = data\n\t\tif existingVal != val {\n\t\t\tklog.Infof(\"Configmap %v has key %v but wrong value %v, updating to %v\", cfgMapKey, key, existingVal, val)\n\t\t} else {\n\t\t\tklog.Infof(\"Configmap %v will be updated with %v = %v\", cfgMapKey, key, val)\n\t\t}\n\t\tif err := c.configMapStore.Update(apiObj); err != nil {\n\t\t\treturn fmt.Errorf(\"failed to update %v: %v\", cfgMapKey, err)\n\t\t}\n\t} else {\n\t\tapiObj.Data = map[string]string{key: val}\n\t\tif err := c.configMapStore.Add(apiObj); err != nil {\n\t\t\treturn fmt.Errorf(\"failed to add %v: %v\", cfgMapKey, err)\n\t\t}\n\t}\n\tklog.Infof(\"Successfully stored key %v = %v in config map %v\", key, val, cfgMapKey)\n\treturn nil\n}", "func (pmp *PrivateMarketplace) Copy() *PrivateMarketplace {\n\tpmpCopy := *pmp\n\n\tif pmp.Deals != nil {\n\t\tpmpCopy.Deals = []*Deal{}\n\t\tfor i := range pmp.Deals {\n\t\t\tpmpCopy.Deals = append(pmpCopy.Deals, pmp.Deals[i].Copy())\n\t\t}\n\t}\n\n\treturn &pmpCopy\n}", "func BenchmarkSigsOnlyWriter64K(t *testing.B) {\n\tconst totalinput = 10 << 20\n\tinput := getBufferSize(totalinput)\n\n\tconst size = 64 << 10\n\tb := input.Bytes()\n\t// Create some duplicates\n\tfor i := 0; i < 50; i++ {\n\t\t// Read from 10 first blocks\n\t\tsrc := b[(i%10)*size : (i%10)*size+size]\n\t\t// Write into the following ones\n\t\tdst := b[(10+i)*size : (i+10)*size+size]\n\t\tcopy(dst, src)\n\t}\n\tt.ResetTimer()\n\tt.SetBytes(totalinput)\n\tfor i := 0; i < t.N; i++ {\n\t\tinput = bytes.NewBuffer(b)\n\t\tw, _ := dedup.NewWriter(ioutil.Discard, ioutil.Discard, dedup.ModeSignaturesOnly, size, 0)\n\t\tio.Copy(w, input)\n\t\terr := w.Close()\n\t\tif err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t}\n}", "func (_ BufferPtrPool16M) Put(b *[]byte) {\n\tPutBytesSlicePtr16M(b)\n}", "func (ms Mutations) Put(m Mutation) Mutations {\n\tif m.Context == immutable {\n\t\treturn ms\n\t}\n\tif ms == nil {\n\t\treturn 
map[Context][]MutatorFunc{m.Context: {m.mutator}}\n\t}\n\n\tif _, ok := ms[m.Context]; !ok {\n\t\tms[m.Context] = []MutatorFunc{m.mutator}\n\t} else {\n\t\tms[m.Context] = append(ms[m.Context], m.mutator)\n\t}\n\n\treturn ms\n}", "func BenchmarkDupMap(b *testing.B) {\n\tdupInit(b)\n\tfor n := 0; n < b.N; n++ {\n\t\tdupIntMapData.Dup()\n\t}\n}", "func RegisterDeepCopies(scheme *runtime.Scheme) error {\n\treturn scheme.AddGeneratedDeepCopyFuncs(\n\t\tconversion.GeneratedDeepCopyFunc{Fn: DeepCopy_route_Route, InType: reflect.TypeOf(&Route{})},\n\t\tconversion.GeneratedDeepCopyFunc{Fn: DeepCopy_route_RouteIngress, InType: reflect.TypeOf(&RouteIngress{})},\n\t\tconversion.GeneratedDeepCopyFunc{Fn: DeepCopy_route_RouteIngressCondition, InType: reflect.TypeOf(&RouteIngressCondition{})},\n\t\tconversion.GeneratedDeepCopyFunc{Fn: DeepCopy_route_RouteList, InType: reflect.TypeOf(&RouteList{})},\n\t\tconversion.GeneratedDeepCopyFunc{Fn: DeepCopy_route_RoutePort, InType: reflect.TypeOf(&RoutePort{})},\n\t\tconversion.GeneratedDeepCopyFunc{Fn: DeepCopy_route_RouteSpec, InType: reflect.TypeOf(&RouteSpec{})},\n\t\tconversion.GeneratedDeepCopyFunc{Fn: DeepCopy_route_RouteStatus, InType: reflect.TypeOf(&RouteStatus{})},\n\t\tconversion.GeneratedDeepCopyFunc{Fn: DeepCopy_route_RouteTargetReference, InType: reflect.TypeOf(&RouteTargetReference{})},\n\t\tconversion.GeneratedDeepCopyFunc{Fn: DeepCopy_route_RouterShard, InType: reflect.TypeOf(&RouterShard{})},\n\t\tconversion.GeneratedDeepCopyFunc{Fn: DeepCopy_route_TLSConfig, InType: reflect.TypeOf(&TLSConfig{})},\n\t)\n}", "func RenameDuplicateColumns(RenameDuplicateColumns bool) ConfigFunc {\n\treturn func(c *Config) {\n\t\tc.RenameDuplicateColumns = RenameDuplicateColumns\n\t}\n}", "func CheckDuplicateMountPoint(mounts []*types.MountPoint, destination string) bool {\n\tfor _, sm := range mounts {\n\t\tif sm.Destination == destination {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func (c *Cache) PutBlock(k Key, b Block) 
{\n\tidx := uint64(0)\n\tif len(c.shards) > 1 {\n\t\th := k.hashUint64()\n\t\tidx = h % uint64(len(c.shards))\n\t}\n\tshard := c.shards[idx]\n\tshard.PutBlock(k, b)\n}", "func (m *Setting) SetOverwriteAllowed(value *bool)() {\n err := m.GetBackingStore().Set(\"overwriteAllowed\", value)\n if err != nil {\n panic(err)\n }\n}", "func (b *BlockProcessorQueue) Put(block uint64) bool {\n\n\tresp := make(chan bool)\n\treq := Request{\n\t\tBlockNumber: block,\n\t\tResponseChan: resp,\n\t}\n\n\tb.PutChan <- req\n\treturn <-resp\n\n}", "func (s *Sync) fixDupes(claims []jsonrpc.Claim) (bool, error) {\n\tstart := time.Now()\n\tdefer func(start time.Time) {\n\t\ttiming.TimedComponent(\"fixDupes\").Add(time.Since(start))\n\t}(start)\n\tabandonedClaims := false\n\tvideoIDs := make(map[string]jsonrpc.Claim)\n\tfor _, c := range claims {\n\t\tif !isYtsyncClaim(c, s.DbChannelData.ChannelClaimID) {\n\t\t\tcontinue\n\t\t}\n\t\ttn := c.Value.GetThumbnail().GetUrl()\n\t\tvideoID := tn[strings.LastIndex(tn, \"/\")+1:]\n\n\t\tcl, ok := videoIDs[videoID]\n\t\tif !ok || cl.ClaimID == c.ClaimID {\n\t\t\tvideoIDs[videoID] = c\n\t\t\tcontinue\n\t\t}\n\t\t// only keep the most recent one\n\t\tclaimToAbandon := c\n\t\tvideoIDs[videoID] = cl\n\t\tif c.Height > cl.Height {\n\t\t\tclaimToAbandon = cl\n\t\t\tvideoIDs[videoID] = c\n\t\t}\n\t\t//it's likely that all we need is s.DbChannelData.PublishAddress.IsMine but better be safe than sorry I guess\n\t\tif (claimToAbandon.Address != s.DbChannelData.PublishAddress.Address || s.DbChannelData.PublishAddress.IsMine) && !s.syncedVideos[videoID].Transferred {\n\t\t\tlog.Debugf(\"abandoning %+v\", claimToAbandon)\n\t\t\t_, err := s.daemon.StreamAbandon(claimToAbandon.Txid, claimToAbandon.Nout, nil, true)\n\t\t\tif err != nil {\n\t\t\t\treturn true, err\n\t\t\t}\n\t\t\tabandonedClaims = true\n\t\t} else {\n\t\t\tlog.Debugf(\"claim is not ours. 
Have the user run this: lbrynet stream abandon --txid=%s --nout=%d\", claimToAbandon.Txid, claimToAbandon.Nout)\n\t\t}\n\t}\n\treturn abandonedClaims, nil\n}", "func Copy(source KVStore, target KVStore) error {\n\n\tvar innerErr error\n\tif err := source.Iterate(EmptyPrefix, func(key, value Value) bool {\n\t\tif err := target.Set(key, value); err != nil {\n\t\t\tinnerErr = err\n\t\t}\n\n\t\treturn innerErr == nil\n\t}); err != nil {\n\t\treturn err\n\t}\n\n\tif innerErr != nil {\n\t\treturn innerErr\n\t}\n\n\treturn target.Flush()\n}", "func (_ BufferPtrPool512) Put(b *[]byte) {\n\tPutBytesSlicePtr512(b)\n}", "func duplicateIP(ip net.IP) net.IP {\n\tdup := make(net.IP, len(ip))\n\tcopy(dup, ip)\n\treturn dup\n}", "func (_ BufferPtrPool512K) Put(b *[]byte) {\n\tPutBytesSlicePtr512K(b)\n}", "func (dao *blockDAO) putBlock(blk *block.Block) error {\n\tbatch := db.NewBatch()\n\n\theight := byteutil.Uint64ToBytes(blk.Height())\n\thash := blk.HashBlock()\n\tserHeader, err := blk.Header.Serialize()\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"failed to serialize block header\")\n\t}\n\tserBody, err := blk.Body.Serialize()\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"failed to serialize block body\")\n\t}\n\tserFooter, err := blk.Footer.Serialize()\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"failed to serialize block footer\")\n\t}\n\tif dao.compressBlock {\n\t\ttimer := dao.timerFactory.NewTimer(\"compress_header\")\n\t\tserHeader, err = compress.Compress(serHeader)\n\t\ttimer.End()\n\t\tif err != nil {\n\t\t\treturn errors.Wrapf(err, \"error when compressing a block header\")\n\t\t}\n\t\ttimer = dao.timerFactory.NewTimer(\"compress_body\")\n\t\tserBody, err = compress.Compress(serBody)\n\t\ttimer.End()\n\t\tif err != nil {\n\t\t\treturn errors.Wrapf(err, \"error when compressing a block body\")\n\t\t}\n\t\ttimer = dao.timerFactory.NewTimer(\"compress_footer\")\n\t\tserFooter, err = compress.Compress(serFooter)\n\t\ttimer.End()\n\t\tif err != nil 
{\n\t\t\treturn errors.Wrapf(err, \"error when compressing a block footer\")\n\t\t}\n\t}\n\tbatch.Put(blockHeaderNS, hash[:], serHeader, \"failed to put block header\")\n\tbatch.Put(blockBodyNS, hash[:], serBody, \"failed to put block body\")\n\tbatch.Put(blockFooterNS, hash[:], serFooter, \"failed to put block footer\")\n\n\thashKey := append(hashPrefix, hash[:]...)\n\tbatch.Put(blockHashHeightMappingNS, hashKey, height, \"failed to put hash -> height mapping\")\n\n\theightKey := append(heightPrefix, height...)\n\tbatch.Put(blockHashHeightMappingNS, heightKey, hash[:], \"failed to put height -> hash mapping\")\n\n\tvalue, err := dao.kvstore.Get(blockNS, topHeightKey)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"failed to get top height\")\n\t}\n\ttopHeight := enc.MachineEndian.Uint64(value)\n\tif blk.Height() > topHeight {\n\t\tbatch.Put(blockNS, topHeightKey, height, \"failed to put top height\")\n\t}\n\n\tvalue, err = dao.kvstore.Get(blockNS, totalActionsKey)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"failed to get total actions\")\n\t}\n\ttotalActions := enc.MachineEndian.Uint64(value)\n\ttotalActions += uint64(len(blk.Actions))\n\ttotalActionsBytes := byteutil.Uint64ToBytes(totalActions)\n\tbatch.Put(blockNS, totalActionsKey, totalActionsBytes, \"failed to put total actions\")\n\n\tif !dao.writeIndex {\n\t\treturn dao.kvstore.Commit(batch)\n\t}\n\tif err := indexBlock(dao.kvstore, blk, batch); err != nil {\n\t\treturn err\n\t}\n\treturn dao.kvstore.Commit(batch)\n}", "func (stack *StackNode) performDuplicate() bool {\n\ttop, err := stack.Top()\n\tif err != nil {\n\t\t//\t\tfmt.Println(err)\n\t\treturn false\n\t}\n\terr = stack.Push(top)\n\tif err != nil {\n\t\t//\t\tfmt.Println(err)\n\t\treturn false\n\t}\n\treturn true\n}", "func handleWriteBlock(w http.ResponseWriter, r *http.Request) {\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\tvar m model.Message\n\n\t// Decode http request into message struct\n\tdecoder := 
json.NewDecoder(r.Body)\n\tif err := decoder.Decode(&m); err != nil {\n\t\trespondWithJSON(w, r, http.StatusBadRequest, r.Body)\n\t\treturn\n\t}\n\n\t// checks if the password is correct\n\t// if !authenticate(m.Password) {\n\t// \trespondWithJSON(w, r, http.StatusUnauthorized, r.Body)\n\t// }\n\n\tdefer r.Body.Close()\n\n\t//ensure atomicity when creating new block\n\tvar mutex = &sync.Mutex{}\n\tmutex.Lock()\n\tnewBlock := blockchainhelpers.GenerateBlock(model.Blockchain[len(model.Blockchain)-1], m.BPM)\n\tmutex.Unlock()\n\n\tif blockchainhelpers.IsBlockValid(newBlock, model.Blockchain[len(model.Blockchain)-1]) {\n\t\tmodel.Blockchain = append(model.Blockchain, newBlock)\n\t\tspew.Dump(model.Blockchain)\n\t}\n\n\trespondWithJSON(w, r, http.StatusCreated, newBlock)\n\n}", "func AddCopy(ctx context.Context, config *config.Config, mgr manager.Manager) error {\n\tctx = ctxlog.NewContextWithRecorder(ctx, \"copy-reconciler\", mgr.GetEventRecorderFor(\"copy-recorder\"))\n\tlog := ctxlog.ExtractLogger(ctx)\n\tr := NewCopyReconciler(ctx, config, mgr, credsgen.NewInMemoryGenerator(log), controllerutil.SetControllerReference)\n\n\tc, err := controller.New(\"copy-controller\", mgr, controller.Options{\n\t\tReconciler: r,\n\t\tMaxConcurrentReconciles: config.MaxQuarksSecretWorkers,\n\t})\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"Adding copy controller to manager failed.\")\n\t}\n\n\tnsPred := newNSPredicate(ctx, mgr.GetClient(), config.MonitoredID)\n\n\t// Watch for changes to the copied status of QuarksSecrets\n\tp := predicate.Funcs{\n\t\tCreateFunc: func(e event.CreateEvent) bool { return false },\n\t\tDeleteFunc: func(e event.DeleteEvent) bool { return false },\n\t\tGenericFunc: func(e event.GenericEvent) bool { return false },\n\t\tUpdateFunc: func(e event.UpdateEvent) bool {\n\t\t\tn := e.ObjectNew.(*qsv1a1.QuarksSecret)\n\n\t\t\tif n.Status.Copied != nil {\n\t\t\t\tctxlog.Debugf(ctx, \"Skipping QuarksSecret '%s', if copy status '%v' is true\", n.Name, 
*n.Status.Copied)\n\t\t\t\treturn !(*n.Status.Copied)\n\t\t\t}\n\n\t\t\treturn true\n\t\t},\n\t}\n\terr = c.Watch(&source.Kind{Type: &qsv1a1.QuarksSecret{}}, &handler.EnqueueRequestForObject{}, nsPred, p)\n\tif err != nil {\n\t\treturn errors.Wrapf(err, \"Watching quarks secrets failed in copy controller.\")\n\t}\n\n\t// Watch for changes to user created secrets\n\tp = predicate.Funcs{\n\t\tCreateFunc: func(e event.CreateEvent) bool { return false },\n\t\tDeleteFunc: func(e event.DeleteEvent) bool { return false },\n\t\tGenericFunc: func(e event.GenericEvent) bool { return false },\n\t\tUpdateFunc: func(e event.UpdateEvent) bool {\n\t\t\tn := e.ObjectNew.(*corev1.Secret)\n\t\t\to := e.ObjectOld.(*corev1.Secret)\n\n\t\t\tshouldProcessReconcile := isUserCreatedSecret(n)\n\t\t\tif reflect.DeepEqual(n.Data, o.Data) && reflect.DeepEqual(n.Labels, o.Labels) &&\n\t\t\t\treflect.DeepEqual(n.Annotations, o.Annotations) {\n\t\t\t\treturn false\n\t\t\t}\n\n\t\t\treturn shouldProcessReconcile\n\t\t},\n\t}\n\terr = c.Watch(&source.Kind{Type: &corev1.Secret{}}, handler.EnqueueRequestsFromMapFunc(\n\t\tfunc(a crc.Object) []reconcile.Request {\n\t\t\tsecret := a.(*corev1.Secret)\n\n\t\t\tif skip.Reconciles(ctx, mgr.GetClient(), secret) {\n\t\t\t\treturn []reconcile.Request{}\n\t\t\t}\n\n\t\t\treconciles, err := listQuarksSecretsReconciles(ctx, mgr.GetClient(), secret, secret.Namespace)\n\t\t\tif err != nil {\n\t\t\t\tctxlog.Errorf(ctx, \"Failed to calculate reconciles for secret '%s/%s': %v\", secret.Namespace, secret.Name, err)\n\t\t\t}\n\t\t\tif len(reconciles) > 0 {\n\t\t\t\treturn reconciles\n\t\t\t}\n\n\t\t\treturn reconciles\n\t\t}), nsPred, p)\n\tif err != nil {\n\t\treturn errors.Wrapf(err, \"Watching user defined secrets failed in copy controller.\")\n\t}\n\n\treturn nil\n}", "func (b Board) SafePut(pos Position, sap SideAndPiece) {\n\tif b.Has(pos) {\n\t\tpanic(fmt.Sprintf(\"%s must be empty.\", pos.String()))\n\t}\n\tb[pos] = sap\n}", "func (h *KVHandler) Put(kvPair 
*api.KVPair, wOptions *api.WriteOptions) (*api.WriteMeta, error) {\n\ttxnItem := &api.KVTxnOp{\n\t\tVerb: api.KVSet,\n\t\tKey: kvPair.Key,\n\t\tValue: kvPair.Value,\n\t}\n\th.KVTxnOps = append(h.KVTxnOps, txnItem)\n\treturn nil, nil\n}", "func (r *KeyRing) isDuplicate(e *openpgp.Entity) bool {\n\tfor _, re := range r.entities {\n\t\tif re.PrimaryKey.Fingerprint == e.PrimaryKey.Fingerprint {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func (s *Set) ShallowCopy() *Set {\n\tcopiedSet := NewSet()\n\n\tfor k := range s.set {\n\t\tcopiedSet.Add(k)\n\t}\n\n\treturn copiedSet\n}", "func WriteManifest(manifestWriter io.Writer, compression *pwr.CompressionSettings, container *tlc.Container, blockHashes *BlockHashMap) error {\n\trawWire := wire.NewWriteContext(manifestWriter)\n\terr := rawWire.WriteMagic(pwr.ManifestMagic)\n\tif err != nil {\n\t\treturn errors.WithStack(err)\n\t}\n\n\terr = rawWire.WriteMessage(&pwr.ManifestHeader{\n\t\tCompression: compression,\n\t\tAlgorithm: pwr.HashAlgorithm_SHAKE128_32,\n\t})\n\tif err != nil {\n\t\treturn errors.WithStack(err)\n\t}\n\n\twire, err := pwr.CompressWire(rawWire, compression)\n\tif err != nil {\n\t\treturn errors.WithStack(err)\n\t}\n\n\terr = wire.WriteMessage(container)\n\tif err != nil {\n\t\treturn errors.WithStack(err)\n\t}\n\n\tsh := &pwr.SyncHeader{}\n\tmbh := &pwr.ManifestBlockHash{}\n\n\tfor fileIndex, f := range container.Files {\n\t\tsh.Reset()\n\t\tsh.FileIndex = int64(fileIndex)\n\t\terr = wire.WriteMessage(sh)\n\t\tif err != nil {\n\t\t\treturn errors.WithStack(err)\n\t\t}\n\n\t\tnumBlocks := ComputeNumBlocks(f.Size)\n\n\t\tfor blockIndex := int64(0); blockIndex < numBlocks; blockIndex++ {\n\t\t\tloc := BlockLocation{FileIndex: int64(fileIndex), BlockIndex: blockIndex}\n\t\t\thash := blockHashes.Get(loc)\n\t\t\tif hash == nil {\n\t\t\t\terr = fmt.Errorf(\"missing BlockHash for block %+v\", loc)\n\t\t\t\treturn errors.WithStack(err)\n\t\t\t}\n\n\t\t\tmbh.Reset()\n\t\t\tmbh.Hash = hash\n\n\t\t\terr = 
wire.WriteMessage(mbh)\n\t\t\tif err != nil {\n\t\t\t\treturn errors.WithStack(err)\n\t\t\t}\n\t\t}\n\t}\n\n\terr = wire.Close()\n\tif err != nil {\n\t\treturn errors.WithStack(err)\n\t}\n\n\treturn nil\n}", "func IsDuplicate(b types.ButtonEvent, ElevSlice []types.Elevator, ID int) bool {\n\n\tbtnInt := types.ButtonMap[b.Button]\n\n\tif btnInt == 2 {\n\t\treturn (ElevSlice[ID].Orders[b.Floor][btnInt] == 1)\n\t}\n\tfor elevIndex := 0; elevIndex < types.NumElevators; elevIndex++ {\n\t\tif ElevSlice[ID].Orders[b.Floor][btnInt] == 1 {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func Share(mod *big.Int, nPieces int, secret *big.Int) []*big.Int {\n\tif nPieces == 0 {\n\t\tpanic(\"Number of shares must be at least 1\")\n\t} else if nPieces == 1 {\n\t\treturn []*big.Int{secret}\n\t}\n\n\tout := make([]*big.Int, nPieces)\n\n\tacc := new(big.Int)\n\tfor i := 0; i < nPieces-1; i++ {\n\t\tout[i] = utils.RandInt(mod)\n\n\t\tacc.Add(acc, out[i])\n\t}\n\n\tacc.Sub(secret, acc)\n\tacc.Mod(acc, mod)\n\tout[nPieces-1] = acc\n\n\treturn out\n}", "func spannerBatchPut(ctx context.Context, db string, m []*spanner.Mutation) error {\n\tclient, err := spanner.NewClient(ctx, db)\n\tif err != nil {\n\t\tlog.Fatalf(\"Failed to create client %v\", err)\n\t\treturn err\n\t}\n\tdefer client.Close()\n\n\tif _, err = client.Apply(ctx, m); err != nil {\n\t\treturn errors.New(\"ResourceNotFoundException: \" + err.Error())\n\t}\n\treturn nil\n}", "func validateAffinityGroupDuplicate(agList []ovirt.AffinityGroup) field.ErrorList {\n\tallErrs := field.ErrorList{}\n\tfor i, ag1 := range agList {\n\t\tfor _, ag2 := range agList[i+1:] {\n\t\t\tif ag1.Name == ag2.Name {\n\t\t\t\tif ag1.Priority != ag2.Priority ||\n\t\t\t\t\tag1.Description != ag2.Description ||\n\t\t\t\t\tag1.Enforcing != ag2.Enforcing {\n\t\t\t\t\tallErrs = append(\n\t\t\t\t\t\tallErrs,\n\t\t\t\t\t\t&field.Error{\n\t\t\t\t\t\t\tType: field.ErrorTypeDuplicate,\n\t\t\t\t\t\t\tBadValue: errors.Errorf(\"Error validating affinity 
groups: found same \"+\n\t\t\t\t\t\t\t\t\"affinity group defined twice with different fields %v anf %v\", ag1, ag2)})\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\treturn allErrs\n}", "func (_Ethdkg *EthdkgTransactor) DistributeShares(opts *bind.TransactOpts, encrypted_shares []*big.Int, commitments [][2]*big.Int) (*types.Transaction, error) {\n\treturn _Ethdkg.contract.Transact(opts, \"distribute_shares\", encrypted_shares, commitments)\n}", "func (f *FakeImagesClient) Put(ctx context.Context, putOpts *images.PutRequest, opts ...grpc.CallOption) (*googleprotobuf.Empty, error) {\n\tf.Lock()\n\tdefer f.Unlock()\n\tf.appendCalled(\"put\", putOpts)\n\tif err := f.getError(\"put\"); err != nil {\n\t\treturn nil, err\n\t}\n\tf.ImageList[putOpts.Image.Name] = putOpts.Image\n\treturn &googleprotobuf.Empty{}, nil\n}", "func (h *Handle) HandleWrite(\n\tctx context.Context,\n\ttxn txnif.AsyncTxn,\n\treq *db.WriteReq,\n\tresp *db.WriteResp) (err error) {\n\tdefer func() {\n\t\tif req.Cancel != nil {\n\t\t\treq.Cancel()\n\t\t}\n\t}()\n\tctx = perfcounter.WithCounterSetFrom(ctx, h.db.Opts.Ctx)\n\tswitch req.PkCheck {\n\tcase db.FullDedup:\n\t\ttxn.SetDedupType(txnif.FullDedup)\n\tcase db.IncrementalDedup:\n\t\tif h.db.Opts.IncrementalDedup {\n\t\t\ttxn.SetDedupType(txnif.IncrementalDedup)\n\t\t} else {\n\t\t\ttxn.SetDedupType(txnif.FullSkipWorkSpaceDedup)\n\t\t}\n\tcase db.FullSkipWorkspaceDedup:\n\t\ttxn.SetDedupType(txnif.FullSkipWorkSpaceDedup)\n\t}\n\tcommon.DoIfDebugEnabled(func() {\n\t\tlogutil.Debugf(\"[precommit] handle write typ: %v, %d-%s, %d-%s txn: %s\",\n\t\t\treq.Type, req.TableID,\n\t\t\treq.TableName, req.DatabaseId, req.DatabaseName,\n\t\t\ttxn.String(),\n\t\t)\n\t\tlogutil.Debugf(\"[precommit] write batch: %s\", common.DebugMoBatch(req.Batch))\n\t})\n\tvar dbase handle.Database\n\tvar tb handle.Relation\n\tdefer func() {\n\t\tcommon.DoIfDebugEnabled(func() {\n\t\t\tlogutil.Debugf(\"[precommit] handle write end txn: %s\", txn.String())\n\t\t})\n\t\tif err != nil && 
moerr.IsMoErrCode(err, moerr.ErrDuplicateEntry) && (strings.HasPrefix(req.TableName, \"bmsql\") || strings.HasPrefix(req.TableName, \"sbtest\")) {\n\t\t\tlogutil.Infof(\"[precommit] dup handle catalog on dup %s \", tb.GetMeta().(*catalog2.TableEntry).PPString(common.PPL1, 0, \"\"))\n\t\t}\n\t}()\n\n\tdbase, err = txn.GetDatabaseByID(req.DatabaseId)\n\tif err != nil {\n\t\treturn\n\t}\n\n\ttb, err = dbase.GetRelationByID(req.TableID)\n\tif err != nil {\n\t\treturn\n\t}\n\n\tif req.Type == db.EntryInsert {\n\t\t//Add blocks which had been bulk-loaded into S3 into table.\n\t\tif req.FileName != \"\" {\n\t\t\tlocations := make([]objectio.Location, 0)\n\t\t\tfor _, metLoc := range req.MetaLocs {\n\t\t\t\tlocation, err := blockio.EncodeLocationFromString(metLoc)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tlocations = append(locations, location)\n\t\t\t}\n\t\t\terr = tb.AddBlksWithMetaLoc(ctx, locations)\n\t\t\treturn\n\t\t}\n\t\t//check the input batch passed by cn is valid.\n\t\tlen := 0\n\t\tfor i, vec := range req.Batch.Vecs {\n\t\t\tif vec == nil {\n\t\t\t\tlogutil.Errorf(\"the vec:%d in req.Batch is nil\", i)\n\t\t\t\tpanic(\"invalid vector : vector is nil\")\n\t\t\t}\n\t\t\tif vec.Length() == 0 {\n\t\t\t\tlogutil.Errorf(\"the vec:%d in req.Batch is empty\", i)\n\t\t\t\tpanic(\"invalid vector: vector is empty\")\n\t\t\t}\n\t\t\tif i == 0 {\n\t\t\t\tlen = vec.Length()\n\t\t\t}\n\t\t\tif vec.Length() != len {\n\t\t\t\tlogutil.Errorf(\"the length of vec:%d in req.Batch is not equal to the first vec\", i)\n\t\t\t\tpanic(\"invalid batch : the length of vectors in batch is not the same\")\n\t\t\t}\n\t\t}\n\t\t//Appends a batch of data into table.\n\t\terr = AppendDataToTable(ctx, tb, req.Batch)\n\t\treturn\n\t}\n\n\t//handle delete\n\tif req.FileName != \"\" {\n\t\t//wait for loading deleted row-id done.\n\t\tnctx := context.Background()\n\t\tif deadline, ok := ctx.Deadline(); ok {\n\t\t\t_, req.Cancel = context.WithTimeout(nctx, 
time.Until(deadline))\n\t\t}\n\t\trowidIdx := 0\n\t\tpkIdx := 1\n\t\tfor _, key := range req.DeltaLocs {\n\t\t\tvar location objectio.Location\n\t\t\tlocation, err = blockio.EncodeLocationFromString(key)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tvar ok bool\n\t\t\tvar bat *batch.Batch\n\t\t\tbat, err = blockio.LoadTombstoneColumns(\n\t\t\t\tctx,\n\t\t\t\t[]uint16{uint16(rowidIdx), uint16(pkIdx)},\n\t\t\t\tnil,\n\t\t\t\th.db.Runtime.Fs.Service,\n\t\t\t\tlocation,\n\t\t\t\tnil,\n\t\t\t)\n\t\t\tif err != nil {\n\t\t\t\treturn\n\t\t\t}\n\t\t\tblkids := getBlkIDsFromRowids(bat.Vecs[0])\n\t\t\tid := tb.GetMeta().(*catalog2.TableEntry).AsCommonID()\n\t\t\tif len(blkids) == 1 {\n\t\t\t\tfor blkID := range blkids {\n\t\t\t\t\tid.BlockID = blkID\n\t\t\t\t}\n\t\t\t\tok, err = tb.TryDeleteByDeltaloc(id, location)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tif ok {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tlogutil.Warnf(\"blk %v try delete by deltaloc failed\", id.BlockID.String())\n\t\t\t} else {\n\t\t\t\tlogutil.Warnf(\"multiply blocks in one deltalocation\")\n\t\t\t}\n\t\t\trowIDVec := containers.ToTNVector(bat.Vecs[0])\n\t\t\tdefer rowIDVec.Close()\n\t\t\tpkVec := containers.ToTNVector(bat.Vecs[1])\n\t\t\t//defer pkVec.Close()\n\t\t\tif err = tb.DeleteByPhyAddrKeys(rowIDVec, pkVec); err != nil {\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t\treturn\n\t}\n\tif len(req.Batch.Vecs) != 2 {\n\t\tpanic(fmt.Sprintf(\"req.Batch.Vecs length is %d, should be 2\", len(req.Batch.Vecs)))\n\t}\n\trowIDVec := containers.ToTNVector(req.Batch.GetVector(0))\n\tdefer rowIDVec.Close()\n\tpkVec := containers.ToTNVector(req.Batch.GetVector(1))\n\t//defer pkVec.Close()\n\terr = tb.DeleteByPhyAddrKeys(rowIDVec, pkVec)\n\treturn\n}", "func (k *KV) Put(p *KVPair, q *WriteOptions) (*WriteMeta, error) {\n\tparams := make(map[string]string, 1)\n\tif p.Flags != 0 {\n\t\tparams[\"flags\"] = strconv.FormatUint(p.Flags, 10)\n\t}\n\t_, wm, err := k.put(p.Key, params, p.Value, 
q)\n\treturn wm, err\n}", "func (_ BufferPtrPool1M) Put(b *[]byte) {\n\tPutBytesSlicePtr1M(b)\n}", "func AllowPartialUpdates() BatchOption {\n\treturn batchOptionFunc(func(b Batch) Batch {\n\t\tb.AllowPartialUpdates = true\n\t\treturn b\n\t})\n}", "func (_ BufferPtrPool256K) Put(b *[]byte) {\n\tPutBytesSlicePtr256K(b)\n}", "func ProcessDuplicates(file *File, flag bool) error {\n\tif flag {\n\t\terr := os.Remove(file.Path)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"can't remove file: %s by this error: %v\",\n\t\t\t\tfile.Path, err)\n\t\t}\n\t\treturn nil\n\t}\n\tfmt.Printf(\"Duplicate: %s, with size %d byte(-s);\\n\",\n\t\tfile.Path,\n\t\tfile.Size)\n\treturn nil\n}", "func WithShares(shares *url.URL) Opt {\n\treturn func(opts *Options) {\n\t\topts.Shares = shares\n\t}\n}" ]
[ "0.5433435", "0.51779854", "0.5121265", "0.45189086", "0.44898877", "0.43579203", "0.43035212", "0.42886138", "0.4212644", "0.4183098", "0.4140767", "0.4131019", "0.40938774", "0.40888754", "0.40803897", "0.40694353", "0.40229222", "0.40060905", "0.4005224", "0.39877382", "0.39833277", "0.39755702", "0.3975145", "0.3966641", "0.39559793", "0.39407322", "0.39376116", "0.39349532", "0.39329475", "0.39174366", "0.390392", "0.39012468", "0.38975948", "0.38917676", "0.3886696", "0.38827488", "0.38791662", "0.38659146", "0.38654685", "0.38630858", "0.38547602", "0.38509023", "0.38452682", "0.38410434", "0.38353196", "0.38326362", "0.3821901", "0.38206184", "0.38186675", "0.3818266", "0.38182202", "0.38098136", "0.3806894", "0.38016403", "0.3799871", "0.3796002", "0.37925392", "0.37900123", "0.37887105", "0.37867868", "0.3780418", "0.37682056", "0.37662706", "0.37661535", "0.37596896", "0.37563112", "0.37508842", "0.37455806", "0.37425756", "0.37409097", "0.3727431", "0.37252766", "0.37230128", "0.37182", "0.37171265", "0.3712487", "0.370881", "0.3701506", "0.37001193", "0.36981037", "0.3696458", "0.36953008", "0.36948428", "0.36875415", "0.36875114", "0.3685966", "0.3684511", "0.36804813", "0.36782986", "0.3675728", "0.3674411", "0.36696813", "0.3669003", "0.36680636", "0.3665064", "0.3660059", "0.3657303", "0.36561692", "0.36529803", "0.3650922" ]
0.8344131
0
Nanoseconds returns the duration as an integer nanosecond count.
Наносекунды возвращает продолжительность в виде целого числа наносекунд.
func (d Duration) Nanoseconds() int64 { return time.Duration(d).Nanoseconds() }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (s Stopwatch) Nanoseconds() int64 {\n\treturn s.acc.Nanoseconds()\n}", "func (ft *filetime) Nanoseconds() int64 {\n\t// 100-nanosecond intervals since January 1, 1601\n\tnsec := int64(ft.HighDateTime)<<32 + int64(ft.LowDateTime)\n\t// change starting time to the Epoch (00:00:00 UTC, January 1, 1970)\n\tnsec -= 116444736000000000\n\t// convert into nanoseconds\n\tnsec *= 100\n\treturn nsec\n}", "func (f *Formatter) Nanoseconds() string {\n\tvar format string\n\tif f.withoutUnit {\n\t\tformat = \"%d\\n\"\n\t} else {\n\t\tformat = \"%d nanoseconds\\n\"\n\t}\n\treturn fmt.Sprintf(format, f.duration.Nanoseconds())\n}", "func (t Time) Nanosecond() int {}", "func nanotime() int64", "func nanotime() int64", "func nanotime() int64", "func nanotime() int64", "func (dt DateTime) Nanosecond() int {\n\treturn dt.Time().Nanosecond()\n}", "func TimeUnitNano(unit string) int64 {\n\tswitch unit {\n\tcase TimeUnitSeconds:\n\t\treturn int64(time.Second)\n\tcase TimeUnitMilliseconds:\n\t\treturn int64(time.Millisecond)\n\tcase TimeUnitMicroseconds:\n\t\treturn int64(time.Microsecond)\n\tdefault:\n\t\treturn int64(time.Nanosecond)\n\t}\n}", "func (dt DateTime) Nanosecond() int {\n\treturn dt.src.Nanosecond()\n}", "func (xt XSDTime) Nanosecond() int {\n\treturn xt.innerTime.Nanosecond()\n}", "func TimevalToNsec(tv Timeval) int64 { return tv.Nano() }", "func NsMicroseconds(count int64) int64 { return count * 1e3 }", "func run_timeNano() int64", "func (t Time) Microseconds() int64 {\n\treturn time.Time(t).UnixNano() / DivideMicroseconds\n}", "func to_ms(nano int64) int64 {\n\treturn nano / int64(time.Millisecond)\n}", "func NanoTime() int64", "func (d Duration) Microseconds() int64 {\n\treturn int64(d)\n}", "func TimespecToNsec(ts Timespec) int64 { return ts.Nano() }", "func (dt *DateTime) GetNanosecond() *Number {\n\treturn dt.Nanosecond()\n}", "func (ts Timespec) ToNsec() int64 {\n\treturn int64(ts.Sec)*1e9 + int64(ts.Nsec)\n}", "func (dt *DateTime) Nanosecond() *Number 
{\n\topChain := dt.chain.enter(\"Nanosecond()\")\n\tdefer opChain.leave()\n\n\tif opChain.failed() {\n\t\treturn newNumber(opChain, float64(0))\n\t}\n\n\treturn newNumber(opChain, float64(dt.value.Nanosecond()))\n}", "func tickspersecond() int64", "func NsSeconds(count int64) int64 { return NsMilliseconds(count * 1e3) }", "func durationInSeconds(d time.Duration) int64 {\n\t// converting a floating-point number to an integer discards\n\t// the fraction (truncation towards zero)\n\treturn int64(d.Seconds())\n}", "func Ms(duration time.Duration) float64 {\n\treturn float64(duration / time.Millisecond)\n}", "func Nanotime() int64 {\n\treturn nanotime()\n}", "func Nanotime() int64 {\n\treturn nanotime()\n}", "func (t ntpTime) Duration() time.Duration {\n\tsec := (t >> 32) * nanoPerSec\n\tfrac := (t & 0xffffffff) * nanoPerSec\n\tnsec := frac >> 32\n\tif uint32(frac) >= 0x80000000 {\n\t\tnsec++\n\t}\n\treturn time.Duration(sec + nsec)\n}", "func NsMilliseconds(count int64) int64 { return NsMicroseconds(count * 1e3) }", "func (sxts StatxTimestamp) ToNsec() int64 {\n\treturn int64(sxts.Sec)*1e9 + int64(sxts.Nsec)\n}", "func humanToNanoTime(value []byte) ([]byte) {\n\tdura, err := time.ParseDuration(string(value))\n\tif err != nil {\n\t\treturn value\n\t}\n\treturn []byte(strconv.FormatInt(dura.Nanoseconds(), 10))\n}", "func (o DurationOutput) Nanos() pulumi.IntPtrOutput {\n\treturn o.ApplyT(func(v Duration) *int { return v.Nanos }).(pulumi.IntPtrOutput)\n}", "func durationToMilliseconds(d time.Duration) (uint64, error) {\n\tif d < 0 {\n\t\treturn 0, fmt.Errorf(\"report period cannot be negative: %v\", d)\n\t}\n\n\treturn uint64(d / time.Millisecond), nil\n}", "func currentTimeMillis() int64 {\n\tresult := time.Nanoseconds()\n\treturn result / 1e6\n}", "func micros(d time.Duration) int {\n\treturn int(d.Seconds() * 1000000)\n}", "func Nanosec() int64 {\n\treturn internal.Syscall0r64(NANOSEC)\n}", "func MeasureNanosecondsSince(name string, field string, t time.Time) 
Measurement {\n\treturn NewMeasurement(name).AddNanosecondsSince(field, t)\n}", "func (t ntpTime) Duration() time.Duration {\n\tsec := (t >> 32) * nanoPerSec\n\tfrac := (t & 0xffffffff) * nanoPerSec >> 32\n\treturn time.Duration(sec + frac)\n}", "func eps(n int, d time.Duration) float64 {\n\treturn float64(n) / d.Seconds()\n}", "func (t Time) Milliseconds() int64 {\n\treturn time.Time(t).UnixNano() / DivideMilliseconds\n}", "func ToUsec(t time.Time) int64 {\n\treturn t.UnixNano() / 1e3\n}", "func CurrentNanosecond() int64 {\n\treturn CurrentMicrosecond() * 1e3\n}", "func ToMillis(t time.Time) int64 {\n\treturn t.UnixNano() / 1e6\n}", "func toMilliseconds(duration time.Duration) float64 {\n\tif duration < time.Microsecond*10 {\n\t\treturn 0\n\t}\n\n\tms := float64(duration) / float64(time.Millisecond)\n\t// Round time to 0.02 precision\n\treturn math.Round(ms*100) / 100\n}", "func (o DurationResponseOutput) Nanos() pulumi.IntOutput {\n\treturn o.ApplyT(func(v DurationResponse) int { return v.Nanos }).(pulumi.IntOutput)\n}", "func (t ntpTimeShort) Duration() time.Duration {\n\tsec := uint64(t>>16) * nanoPerSec\n\tfrac := uint64(t&0xffff) * nanoPerSec\n\tnsec := frac >> 16\n\tif uint16(frac) >= 0x8000 {\n\t\tnsec++\n\t}\n\treturn time.Duration(sec + nsec)\n}", "func (o DurationPtrOutput) Nanos() pulumi.IntPtrOutput {\n\treturn o.ApplyT(func(v *Duration) *int {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn v.Nanos\n\t}).(pulumi.IntPtrOutput)\n}", "func NsMinutes(count int64) int64 { return NsSeconds(count * 60) }", "func (dt DateTime) ShiftNanoseconds(nanosecond int) DateTime {\n\tduration, _ := time.ParseDuration(fmt.Sprintf(\"%dns\", nanosecond))\n\treturn DateTime(dt.Time().Add(duration))\n}", "func ToMilliSec(date time.Time) int64 {\n\treturn date.UnixNano() / 1000000\n}", "func DurationInMilliseconds(d time.Duration) string {\n\treturn fmt.Sprintf(\"%.0fms\", d.Seconds()*1e3)\n}", "func Snotime() uint64 {\n\t// Note: Division is left here instead of 
being impl in asm since the compiler optimizes this\n\t// into mul+shift, which is easier to read when left in as simple division.\n\t// This doesn't affect performance. The asm won't get inlined anyway while this function\n\t// will.\n\t//\n\t// 4e4 instead of TimeUnit (4e6) because the time we get from the OS is in units of 100ns.\n\treturn ostime() / 4e4\n}", "func GetMonoTime() int64 {\n\tsec, nsec := getRawMonoTime()\n\n\t// to milliseconds\n\treturn sec * 1000 + (nsec / (1 * 1000 * 1000))\n}", "func (t *Track) Duration() float64 {\n\treturn float64(t.duration) / float64(t.globalTimescale)\n}", "func fseconds(d time.Duration) float64 { return float64(d) / float64(time.Second) }", "func (d Dispatcher) ExecDurationNanoseconds(id string, hash string) (int64, error) {\n\te, err := d.GetBC().FindExec(id, hash)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\treturn e.GetDuration().Nanoseconds(), nil\n}", "func Time(t time.Time) int64 {\n\treturn t.UnixNano() / 1000000\n}", "func (tv Timeval) ToNsecCapped() int64 {\n\tif tv.Sec > maxSecInDuration {\n\t\treturn math.MaxInt64\n\t}\n\treturn int64(tv.Sec)*1e9 + int64(tv.Usec)*1e3\n}", "func TimeElapsed() int64 {\n\telapsed := time.Since(start)\n\treturn elapsed.Nanoseconds() / 1000\n}", "func (d Duration) Seconds() float64 {\n\tsec := d / Second\n\tusec := d % Second\n\treturn float64(sec) + float64(usec)/1e6\n}", "func UnixMilliseconds(t time.Time) float64 {\n\tnanosPerSecond := float64(time.Second) / float64(time.Nanosecond)\n\treturn float64(t.UnixNano()) / nanosPerSecond\n}", "func (i ISODuration) GetMilliSeconds() int {\r\n\treturn i.duration.MilliSeconds\r\n}", "func toUnixMsec(t time.Time) int64 {\n\treturn t.UnixNano() / 1e6\n}", "func (v Value) Duration() uint64 {\n\tstart := big.NewInt(v.StartSeconds)\n\tend := big.NewInt(v.EndSeconds)\n\n\tduration := (&big.Int{}).Sub(end, start)\n\n\treturn duration.Uint64()\n}", "func DiffNano(startTime time.Time) (diff int64) {\n\n\tstartTimeStamp := 
startTime.UnixNano()\n\tendTimeStamp := time.Now().UnixNano()\n\n\tdiff = endTimeStamp - startTimeStamp\n\n\treturn\n}", "func (d *Duration) Duration() time.Duration {\n\tif d == nil {\n\t\treturn 0\n\t}\n\treturn (time.Duration(d.Seconds) * time.Second) + (time.Duration(d.Nanos) * time.Nanosecond)\n}", "func TestNanoTime(t *testing.T) {\n\tt1 := time.Now().UnixNano()\n\tt2 := time.Now().UnixNano()\n\tinterval := t2 - t1\n\tfmt.Println(interval)\n}", "func (v TimestampNano) Int() int {\n\treturn int(v.Int64())\n}", "func millisI(nanos int64) float64 {\n\treturn millisF(float64(nanos))\n}", "func (dt DateTime) UnixNano() int64 {\n\treturn dt.src.UnixNano()\n}", "func ToVNCTime(t time.Time) string {\n\tvar format string\n\tif t.Nanosecond() < 1000 {\n\t\tformat = vncFormatWithoutNanoseconds\n\t} else {\n\t\tformat = vncFormatWithNanoseconds\n\t}\n\treturn t.UTC().Format(format)\n}", "func (p Packet) TimeUnixNano() int64 {\n\t// 1.0737... is 2^30 (collectds' subsecond interval) / 10^-9 (nanoseconds)\n\treturn int64(float64(p.CdTime) / 1.073741824)\n}", "func (tm *CompilationTelemetry) CompilationDurationNS() int64 {\n\treturn tm.compilationDuration.Nanoseconds()\n}", "func (v TimestampNano) Int64() int64 {\n\tif !v.Valid() || v.time.UnixNano() == 0 {\n\t\treturn 0\n\t}\n\treturn v.time.UnixNano()\n}", "func Nanosec() int64 {\n\treturn syscall.Nanosec()\n}", "func ToUnixMillis(t time.Time) int64 {\n\treturn t.UnixNano() / (int64(time.Millisecond) / int64(time.Nanosecond))\n}", "func durtoTV(d time.Duration) (int64, int64) {\n\tsec := int64(d / nanoPerSec)\n\tmicro := int64((int64(d) - sec*nanoPerSec) / 1000)\n\n\treturn sec, micro\n}", "func (sw Stopwatch) ElapsedMilliseconds() float64 {\n\tduration := time.Since(sw.startTime)\n\treturn duration.Seconds() * 1000\n}", "func timeToUnixMS(t time.Time) int64 {\n\treturn t.UnixNano() / int64(time.Millisecond)\n}", "func (p *parser) duration() Node {\n\ttoken := p.expect(TokenDuration)\n\tnum, err := newDur(token.pos, 
token.val)\n\tif err != nil {\n\t\tp.error(err)\n\t}\n\treturn num\n}", "func (t Time) UnixMilli() int64 {\n\treturn (time.Time)(t).UnixNano() / int64(time.Millisecond)\n}", "func (e PrecisionTiming) durationToMs(x time.Duration) float64 {\n\treturn float64(x) / float64(time.Millisecond)\n}", "func (d *Decoder) TotalTimeMs() int64 {\n\tif d.start.IsZero() {\n\t\treturn 0\n\t}\n\tdur := time.Since(d.start)\n\treturn int64(dur / time.Millisecond)\n}", "func TicksToUnixNano(ticks int64) int64 {\n\treturn TicksToTime(ticks).UnixNano()\n}", "func average(data []int64) string {\n var total int64\n for _, n := range data {\n total += n\n }\n duration, _ := time.ParseDuration(fmt.Sprintf(\"%dns\", (total / int64(len(data)))))\n return fmt.Sprintf(\"%.3f\", duration.Seconds())\n}", "func milliseconds(ms int64) time.Duration {\n\treturn time.Duration(ms * 1000 * 1000)\n}", "func ToHuman(nano int64) string {\n\tvar base int64 = 1\n\tif nano < 1000*base {\n\t\treturn strconv.Itoa(int(nano/base)) + \"ns\"\n\t}\n\n\tbase *= 1000\n\tif nano < 1000*base {\n\t\tvar us = int(nano / base)\n\t\tif nano%base >= base/2 {\n\t\t\tus++\n\t\t}\n\n\t\treturn strconv.Itoa(us) + \"us\"\n\t}\n\n\tbase *= 1000\n\tif nano < 1000*base {\n\t\tvar ms = int(nano / base)\n\t\tif nano%base >= base/2 {\n\t\t\tms++\n\t\t}\n\t\treturn strconv.Itoa(ms) + \"ms\"\n\t}\n\n\tbase *= 1000\n\tvar s = int(nano / base)\n\tif nano%base >= base/2 {\n\t\ts++\n\t}\n\treturn strconv.Itoa(s) + \"s\"\n}", "func seconds(ttl time.Duration) int64 {\n\ti := int64(ttl / time.Second)\n\tif i <= 0 {\n\t\ti = 1\n\t}\n\treturn i\n}", "func TimeTrack(start time.Time) int64 {\n\telapsed := time.Since(start)\n\treturn elapsed.Nanoseconds() / 1000\n}", "func (o KubernetesClusterMaintenanceWindowNodeOsOutput) Duration() pulumi.IntOutput {\n\treturn o.ApplyT(func(v KubernetesClusterMaintenanceWindowNodeOs) int { return v.Duration }).(pulumi.IntOutput)\n}", "func ExampleTime_TimestampNano() {\n\tt := 
gtime.TimestampNano()\n\n\tfmt.Println(t)\n\n\t// May output:\n\t// 1533686888000000\n}", "func (t HighresTimestamp) Duration() time.Duration {\n\treturn time.Duration(uint64(t) * uint64(tbinfo.numer) / uint64(tbinfo.denom)))\n}", "func (s *Stopwatch) ElapsedMilliSeconds() float64 {\n\ts.RLock()\n\tdefer s.RUnlock()\n\n\treturn float64(s.Elapsed() / time.Millisecond)\n}", "func formatNano(nanosec uint, n int, trim bool) []byte {\n\tu := nanosec\n\tvar buf [9]byte\n\tfor start := len(buf); start > 0; {\n\t\tstart--\n\t\tbuf[start] = byte(u%10 + '0')\n\t\tu /= 10\n\t}\n\n\tif n > 9 {\n\t\tn = 9\n\t}\n\tif trim {\n\t\tfor n > 0 && buf[n-1] == '0' {\n\t\t\tn--\n\t\t}\n\t\tif n == 0 {\n\t\t\treturn buf[:0]\n\t\t}\n\t}\n\treturn buf[:n]\n}", "func unixMilli(t time.Time) int64 {\n\treturn t.UnixNano() / int64(time.Millisecond)\n}", "func ConvertNanosecondsToHz(val float64) float64 {\n\treturn val / 1e7\n}", "func (d Duration) Seconds() float64 {\n\treturn time.Duration(d).Seconds()\n}", "func (ti *TimeInterval) EndUnixNano() int64 {\n\treturn ti.End.UTC().UnixNano()\n}" ]
[ "0.7365707", "0.71677697", "0.71562576", "0.7091952", "0.68987876", "0.68987876", "0.68987876", "0.68987876", "0.6809148", "0.66780186", "0.66759145", "0.6439207", "0.64132607", "0.62661654", "0.62491643", "0.62183124", "0.6110412", "0.61070853", "0.60488445", "0.6042173", "0.59795725", "0.5977492", "0.5929279", "0.5921957", "0.59147143", "0.5858022", "0.58271694", "0.5811278", "0.5811278", "0.5794857", "0.5784236", "0.5782882", "0.57552207", "0.57082695", "0.5685207", "0.56718236", "0.5663621", "0.5662295", "0.56595993", "0.5636959", "0.55587834", "0.55228716", "0.55118525", "0.54740924", "0.54739565", "0.54455686", "0.5443789", "0.53865063", "0.5360199", "0.53380257", "0.5314501", "0.5299935", "0.5292967", "0.52471864", "0.5241303", "0.52387875", "0.52368534", "0.5227604", "0.5224608", "0.52212805", "0.52173704", "0.5211857", "0.51795745", "0.5176492", "0.51642466", "0.5157037", "0.5147364", "0.51464397", "0.5145096", "0.513736", "0.5123099", "0.51183486", "0.50997543", "0.5084079", "0.508348", "0.5066079", "0.5065343", "0.5060721", "0.50313085", "0.50312907", "0.5024725", "0.501503", "0.49933064", "0.4970519", "0.49637896", "0.4928636", "0.4924234", "0.49240726", "0.49154773", "0.49148816", "0.49087816", "0.49059728", "0.49030614", "0.48966634", "0.48881462", "0.48871264", "0.48761582", "0.48709825", "0.48655438", "0.48613238" ]
0.81052667
0
Seconds returns the duration as a floating point number of seconds.
Возвращает продолжительность в виде числа секунд с плавающей точкой.
func (d Duration) Seconds() float64 { return time.Duration(d).Seconds() }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (d Duration) Seconds() float64 {\n\tsec := d / Second\n\tusec := d % Second\n\treturn float64(sec) + float64(usec)/1e6\n}", "func fseconds(d time.Duration) float64 { return float64(d) / float64(time.Second) }", "func (c *ClockVal) Seconds(d time.Duration) float64 {\n\treturn d.Seconds()\n}", "func (s Stopwatch) Seconds() float64 {\n\treturn s.acc.Seconds()\n}", "func ToFloat(d time.Duration) (seconds float64) {\n\treturn float64(d) / float64(time.Second)\n}", "func (f *Formatter) Seconds() string {\n\tvar format string\n\tif f.withoutUnit {\n\t\tformat = \"%d\\n\"\n\t} else {\n\t\tformat = \"%d seconds\\n\"\n\t}\n\treturn fmt.Sprintf(format, int(f.duration.Seconds()))\n}", "func durationInSeconds(d time.Duration) int64 {\n\t// converting a floating-point number to an integer discards\n\t// the fraction (truncation towards zero)\n\treturn int64(d.Seconds())\n}", "func (o DurationOutput) Seconds() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v Duration) *string { return v.Seconds }).(pulumi.StringPtrOutput)\n}", "func getSeconds(data *speedTestData) float64 {\n\treturn float64(data.Milliseconds) / 1000\n}", "func (i ISODuration) GetSeconds() int {\r\n\treturn i.duration.Seconds\r\n}", "func (sw Stopwatch) ElapsedSeconds() float64 {\n\tduration := time.Since(sw.startTime)\n\treturn duration.Seconds()\n}", "func (s *Stopwatch) ElapsedSeconds() float64 {\n\ts.RLock()\n\tdefer s.RUnlock()\n\n\treturn s.Elapsed().Seconds()\n}", "func ConvertSeconds(s string) float64 {\n\tnum, err := strconv.ParseFloat(s, 64)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\treturn num\n}", "func (time CMTime) Seconds() uint64 {\n\t//prevent division by 0\n\tif time.CMTimeValue == 0 {\n\t\treturn 0\n\t}\n\treturn time.CMTimeValue / uint64(time.CMTimeScale)\n}", "func (o DurationPtrOutput) Seconds() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v *Duration) *string {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn v.Seconds\n\t}).(pulumi.StringPtrOutput)\n}", "func (o 
DurationResponseOutput) Seconds() pulumi.StringOutput {\n\treturn o.ApplyT(func(v DurationResponse) string { return v.Seconds }).(pulumi.StringOutput)\n}", "func NsSeconds(count int64) int64 { return NsMilliseconds(count * 1e3) }", "func (o TransferJobScheduleStartTimeOfDayOutput) Seconds() pulumi.IntOutput {\n\treturn o.ApplyT(func(v TransferJobScheduleStartTimeOfDay) int { return v.Seconds }).(pulumi.IntOutput)\n}", "func ParseSeconds(d string) (time.Duration, error) {\n\tn, err := strconv.ParseInt(d, 10, 64)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\treturn time.Duration(n) * time.Second, nil\n}", "func (c *Job) Seconds() *Job {\n\tif c.delayUnit == delayNone {\n\t\tc.unit = seconds\n\t} else {\n\t\tc.delayUnit = delaySeconds\n\t}\n\treturn c\n}", "func Ms(duration time.Duration) float64 {\n\treturn float64(duration / time.Millisecond)\n}", "func getDuration(seconds int) time.Duration {\n\treturn time.Duration(seconds) * time.Second\n}", "func seconds(s string) int64 {\n\tt, err := time.Parse(gitime, s)\n\tif err != nil {\n\t\treturn -1\n\t}\n\treturn t.Unix()\n}", "func (o SecurityProfileBehaviorCriteriaOutput) DurationSeconds() pulumi.IntPtrOutput {\n\treturn o.ApplyT(func(v SecurityProfileBehaviorCriteria) *int { return v.DurationSeconds }).(pulumi.IntPtrOutput)\n}", "func toTimeSeconds(value string) (int64, error) {\n\t//is serial format?\n\tserial, err := strconv.ParseFloat(value, 64)\n\tif err != nil {\n\t\treturn -1, err\n\t}\n\treturn int64(serial * 86400), nil\n}", "func secondsToDuration(seconds float64) time.Duration {\n\tttl := seconds * float64(time.Second)\n\treturn time.Duration(ttl)\n}", "func secondsToDuration(seconds float64) time.Duration {\n\tttl := seconds * float64(time.Second)\n\treturn time.Duration(ttl)\n}", "func StringToSeconds(s string) (r time.Duration) {\n\t_sec := StringToInteger(s)\n\tsec := time.Duration(_sec * 1000 * 1000 * 1000)\n\treturn sec\n}", "func (cvr Converter) MillisecondsToSeconds(msecs Milliseconds) Seconds 
{\n\treturn Seconds(msecs / 60)\n}", "func DurationToFloat(dur time.Duration) float64 {\n\treturn float64(dur) / float64(time.Second)\n}", "func seconds(ttl time.Duration) int64 {\n\ti := int64(ttl / time.Second)\n\tif i <= 0 {\n\t\ti = 1\n\t}\n\treturn i\n}", "func (s *GetFederationTokenInput) SetDurationSeconds(v int64) *GetFederationTokenInput {\n\ts.DurationSeconds = &v\n\treturn s\n}", "func (s *GetCredentialsInput) SetDurationSeconds(v int64) *GetCredentialsInput {\n\ts.DurationSeconds = &v\n\treturn s\n}", "func (o SecurityProfileBehaviorCriteriaPtrOutput) DurationSeconds() pulumi.IntPtrOutput {\n\treturn o.ApplyT(func(v *SecurityProfileBehaviorCriteria) *int {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn v.DurationSeconds\n\t}).(pulumi.IntPtrOutput)\n}", "func (d Dispatcher) ExecDurationSeconds(id string, hash string) (float64, error) {\n\te, err := d.GetBC().FindExec(id, hash)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\treturn e.GetDuration().Seconds(), nil\n}", "func durationTo8601Seconds(duration time.Duration) string {\n\treturn fmt.Sprintf(\"PT%dS\", duration/time.Second)\n}", "func (o DurationResponsePtrOutput) Seconds() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v *DurationResponse) *string {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn &v.Seconds\n\t}).(pulumi.StringPtrOutput)\n}", "func (o InstanceDenyMaintenancePeriodTimeOutput) Seconds() pulumi.IntPtrOutput {\n\treturn o.ApplyT(func(v InstanceDenyMaintenancePeriodTime) *int { return v.Seconds }).(pulumi.IntPtrOutput)\n}", "func (o InstanceMaintenanceWindowStartTimeOutput) Seconds() pulumi.IntPtrOutput {\n\treturn o.ApplyT(func(v InstanceMaintenanceWindowStartTime) *int { return v.Seconds }).(pulumi.IntPtrOutput)\n}", "func (j *Job) Seconds() (job *Job) {\n\tj.unit = JOB_UNIT_TYPE_SECOND\n\treturn j\n}", "func (o TransferJobScheduleStartTimeOfDayPtrOutput) Seconds() pulumi.IntPtrOutput {\n\treturn o.ApplyT(func(v *TransferJobScheduleStartTimeOfDay) *int {\n\t\tif v == 
nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn &v.Seconds\n\t}).(pulumi.IntPtrOutput)\n}", "func (o *FeedSyncResult) GetTotalTimeSeconds() float32 {\n\tif o == nil || o.TotalTimeSeconds == nil {\n\t\tvar ret float32\n\t\treturn ret\n\t}\n\treturn *o.TotalTimeSeconds\n}", "func SecondsSince(ts TimeSource, t time.Time) float64 {\n\treturn ts.Now().Sub(t).Seconds()\n}", "func TimestampToSeconds(timestamp int64) float64 {\n\tfloatTime := float64(timestamp)\n\treturn floatTime * 0.000000001\n}", "func getSecondsFromDurationString(s string) (int, error) {\n\tduration, err := time.ParseDuration(s)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\n\treturn int(duration.Seconds()), nil\n}", "func (s *AssumeRoleWithWebIdentityInput) SetDurationSeconds(v int64) *AssumeRoleWithWebIdentityInput {\n\ts.DurationSeconds = &v\n\treturn s\n}", "func (s *AssumeRoleInput) SetDurationSeconds(v int64) *AssumeRoleInput {\n\ts.DurationSeconds = &v\n\treturn s\n}", "func DurationValue(s string, step int64) (int64, error) {\n\tif len(s) == 0 {\n\t\treturn 0, fmt.Errorf(\"duration cannot be empty\")\n\t}\n\tlastChar := s[len(s)-1]\n\tif lastChar >= '0' && lastChar <= '9' || lastChar == '.' 
{\n\t\t// Try parsing floating-point duration\n\t\td, err := strconv.ParseFloat(s, 64)\n\t\tif err == nil {\n\t\t\t// Convert the duration to milliseconds.\n\t\t\treturn int64(d * 1000), nil\n\t\t}\n\t}\n\tisMinus := false\n\td := float64(0)\n\tfor len(s) > 0 {\n\t\tn := scanSingleDuration(s, true)\n\t\tif n <= 0 {\n\t\t\treturn 0, fmt.Errorf(\"cannot parse duration %q\", s)\n\t\t}\n\t\tds := s[:n]\n\t\ts = s[n:]\n\t\tdLocal, err := parseSingleDuration(ds, step)\n\t\tif err != nil {\n\t\t\treturn 0, err\n\t\t}\n\t\tif isMinus && dLocal > 0 {\n\t\t\tdLocal = -dLocal\n\t\t}\n\t\td += dLocal\n\t\tif dLocal < 0 {\n\t\t\tisMinus = true\n\t\t}\n\t}\n\tif math.Abs(d) > 1<<63-1 {\n\t\treturn 0, fmt.Errorf(\"too big duration %.0fms\", d)\n\t}\n\treturn int64(d), nil\n}", "func SinceInSeconds(start time.Time) float64 {\n\treturn time.Since(start).Seconds()\n}", "func SinceInSeconds(start time.Time) float64 {\n\treturn time.Since(start).Seconds()\n}", "func SinceInSeconds(start time.Time) float64 {\n\treturn time.Since(start).Seconds()\n}", "func formatSeconds(d uint64) string {\n\tif d == 0 {\n\t\treturn \"\"\n\t}\n\n\tdays := d / 86400\n\thours := (d - days*86400) / 3600\n\tminutes := (d - days*86400 - hours*3600) / 60\n\tseconds := d - days*86400 - hours*3600 - minutes*60\n\n\tif days > 0 {\n\t\treturn fmt.Sprintf(\"%dd %dh %dm\", days, hours, minutes)\n\t}\n\tif hours > 0 {\n\t\treturn fmt.Sprintf(\"%dh %dm %ds\", hours, minutes, seconds)\n\t}\n\tif minutes > 0 {\n\t\treturn fmt.Sprintf(\"%dm %ds\", minutes, seconds)\n\t}\n\n\treturn fmt.Sprintf(\"%ds\", seconds)\n}", "func (d Duration) Microseconds() int64 {\n\treturn int64(d)\n}", "func (fs *FlowStats) Duration() float64 {\n\tendTime := fs.ReadTime(EndTime)\n\tif endTime.Equal(time.Time{}) {\n\t\tendTime = time.Now()\n\t}\n\tduration := endTime.Sub(fs.ReadTime(StartTime))\n\treturn float64(duration) / float64(time.Second)\n}", "func FormatDeltaSeconds(delta int) string {\n\treturn time.SecondsToUTC(time.Seconds() + 
int64(delta)).Format(TimeLayout)\n}", "func (s *AssumeRoleWithSAMLInput) SetDurationSeconds(v int64) *AssumeRoleWithSAMLInput {\n\ts.DurationSeconds = &v\n\treturn s\n}", "func (t *Timespan) unmarshalSeconds(s string) (time.Duration, error) {\n\t// \"03\" = 3 * time.Second\n\t// \"00.099\" = 99 * time.Millisecond\n\t// \"03.0123\" == 3 * time.Second + 12300 * time.Microsecond\n\tsp := strings.Split(s, \".\")\n\tswitch len(sp) {\n\tcase 1:\n\t\tseconds, err := strconv.Atoi(s)\n\t\tif err != nil {\n\t\t\treturn 0, fmt.Errorf(\"timespan's seconds field was incorrect, was %s\", s)\n\t\t}\n\t\treturn time.Duration(seconds) * time.Second, nil\n\tcase 2:\n\t\tseconds, err := strconv.Atoi(sp[0])\n\t\tif err != nil {\n\t\t\treturn 0, fmt.Errorf(\"timespan's seconds field was incorrect, was %s\", s)\n\t\t}\n\t\tn, err := strconv.Atoi(sp[1])\n\t\tif err != nil {\n\t\t\treturn 0, fmt.Errorf(\"timespan's seconds field was incorrect, was %s\", s)\n\t\t}\n\t\tvar prec time.Duration\n\t\tswitch len(sp[1]) {\n\t\tcase 1:\n\t\t\tprec = time.Duration(n) * (100 * time.Millisecond)\n\t\tcase 2:\n\t\t\tprec = time.Duration(n) * (10 * time.Millisecond)\n\t\tcase 3:\n\t\t\tprec = time.Duration(n) * time.Millisecond\n\t\tcase 4:\n\t\t\tprec = time.Duration(n) * 100 * time.Microsecond\n\t\tcase 5:\n\t\t\tprec = time.Duration(n) * 10 * time.Microsecond\n\t\tcase 6:\n\t\t\tprec = time.Duration(n) * time.Microsecond\n\t\tcase 7:\n\t\t\tprec = time.Duration(n) * tick\n\t\tcase 8:\n\t\t\tprec = time.Duration(n) * (10 * time.Nanosecond)\n\t\tcase 9:\n\t\t\tprec = time.Duration(n) * time.Nanosecond\n\t\tdefault:\n\t\t\treturn 0, fmt.Errorf(\"timespan's seconds field did not have 1-9 numbers after the decimal, had %v\", s)\n\t\t}\n\n\t\treturn time.Duration(seconds)*time.Second + prec, nil\n\t}\n\treturn 0, fmt.Errorf(\"timespan's seconds field did not have the requisite '.'s, was %s\", s)\n}", "func (o InstanceMaintenancePolicyWeeklyMaintenanceWindowStartTimeOutput) Seconds() pulumi.IntPtrOutput 
{\n\treturn o.ApplyT(func(v InstanceMaintenancePolicyWeeklyMaintenanceWindowStartTime) *int { return v.Seconds }).(pulumi.IntPtrOutput)\n}", "func (o InstanceMaintenancePolicyWeeklyMaintenanceWindowStartTimeOutput) Seconds() pulumi.IntPtrOutput {\n\treturn o.ApplyT(func(v InstanceMaintenancePolicyWeeklyMaintenanceWindowStartTime) *int { return v.Seconds }).(pulumi.IntPtrOutput)\n}", "func (cvr Converter) MinutesToSeconds(m Minutes) Seconds {\n\treturn Seconds(m) * Seconds(60)\n}", "func (s *GetSessionTokenInput) SetDurationSeconds(v int64) *GetSessionTokenInput {\n\ts.DurationSeconds = &v\n\treturn s\n}", "func GetTimeInSeconds() float64 {\n\treturn float64(C.ovr_GetTimeInSeconds())\n}", "func (i ISODuration) SetSeconds(seconds int) {\r\n\ti.duration.Seconds = seconds\r\n}", "func (o GroupContainerLivenessProbeOutput) PeriodSeconds() pulumi.IntPtrOutput {\n\treturn o.ApplyT(func(v GroupContainerLivenessProbe) *int { return v.PeriodSeconds }).(pulumi.IntPtrOutput)\n}", "func (v Value) Duration() uint64 {\n\tstart := big.NewInt(v.StartSeconds)\n\tend := big.NewInt(v.EndSeconds)\n\n\tduration := (&big.Int{}).Sub(end, start)\n\n\treturn duration.Uint64()\n}", "func (e2 *PicoSecondTimeStamp) Duration(e1 *PicoSecondTimeStamp) *PicoSecondDuration {\n\tresult := &PicoSecondDuration{\n\t\tEpoch: int32(e2.Epoch - e1.Epoch),\n\t\tPicoSeconds: int64(e2.PicoSeconds - e1.PicoSeconds),\n\t}\n\n\tif result.PicoSeconds < 0 && result.Epoch > 0 {\n\t\tresult.Epoch = result.Epoch - 1\n\t\tresult.PicoSeconds = result.PicoSeconds + 1000000000000\n\t}\n\treturn result\n}", "func TimeInSec(period string) int {\n\tif strings.HasSuffix(period, \"sec\") {\n\t\ti, _ := strconv.Atoi(strings.Replace(period, \"sec\", \"\", -1))\n\t\treturn i\n\t} else if strings.HasSuffix(period, \"min\") {\n\t\ti, _ := strconv.Atoi(strings.Replace(period, \"min\", \"\", -1))\n\t\treturn i * 60\n\t} else if strings.HasSuffix(period, \"hours\") {\n\t\ti, _ := strconv.Atoi(strings.Replace(period, \"hours\", \"\", 
-1))\n\t\treturn i * 60 * 60\n\t} else if strings.HasSuffix(period, \"days\") {\n\t\ti, _ := strconv.Atoi(strings.Replace(period, \"days\", \"\", -1))\n\t\treturn i * 60 * 60 * 24\n\t} else {\n\t\treturn 0\n\t}\n}", "func (d *Duration) Duration() time.Duration {\n\tif d == nil {\n\t\treturn 0\n\t}\n\treturn (time.Duration(d.Seconds) * time.Second) + (time.Duration(d.Nanos) * time.Nanosecond)\n}", "func ToUsec(t time.Time) int64 {\n\treturn t.UnixNano() / 1e3\n}", "func (i ISODuration) GetMilliSeconds() int {\r\n\treturn i.duration.MilliSeconds\r\n}", "func (t *Track) Duration() float64 {\n\treturn float64(t.duration) / float64(t.globalTimescale)\n}", "func MinutesToSeconds(minutes int) int {\n\treturn minutes * 60\n}", "func getDurationStringFromSeconds(seconds int) string {\n\treturn (time.Duration(seconds) * time.Second).String()\n}", "func (o GroupContainerLivenessProbePtrOutput) PeriodSeconds() pulumi.IntPtrOutput {\n\treturn o.ApplyT(func(v *GroupContainerLivenessProbe) *int {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn v.PeriodSeconds\n\t}).(pulumi.IntPtrOutput)\n}", "func (o InstanceMaintenanceWindowStartTimePtrOutput) Seconds() pulumi.IntPtrOutput {\n\treturn o.ApplyT(func(v *InstanceMaintenanceWindowStartTime) *int {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn v.Seconds\n\t}).(pulumi.IntPtrOutput)\n}", "func convertToSeconds(hours, minutes, seconds, microseconds string) {\n\thoursInSeconds, _ := strconv.Atoi(hours)\n\tminutesInSeconds, _ := strconv.Atoi(minutes)\n\tformattedSeconds, _ := strconv.Atoi(seconds)\n\tformattedSeconds = formattedSeconds + (hoursInSeconds * 3600) + (minutesInSeconds * 60)\n\n\tvar buffer bytes.Buffer\n\n\tbuffer.WriteString(strconv.Itoa(formattedSeconds))\n\tbuffer.WriteString(\".\")\n\tbuffer.WriteString(microseconds)\n\n\tfmt.Println(\"BarDuration: \" + buffer.String())\n}", "func (t Time) Microseconds() int64 {\n\treturn time.Time(t).UnixNano() / DivideMicroseconds\n}", "func (o 
InstanceDenyMaintenancePeriodTimePtrOutput) Seconds() pulumi.IntPtrOutput {\n\treturn o.ApplyT(func(v *InstanceDenyMaintenancePeriodTime) *int {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn v.Seconds\n\t}).(pulumi.IntPtrOutput)\n}", "func (cvr Converter) MinutesToSeconds(m Minutes) Seconds {\n\treturn Seconds(m * 60)\n}", "func daysToSeconds(inDays int64) int64 {\n\treturn int64(inDays * 24 * 60 * 60 )\n}", "func (o BuildStrategySpecBuildStepsLivenessProbeOutput) PeriodSeconds() pulumi.IntPtrOutput {\n\treturn o.ApplyT(func(v BuildStrategySpecBuildStepsLivenessProbe) *int { return v.PeriodSeconds }).(pulumi.IntPtrOutput)\n}", "func (m *sdt) Duration() int32 {\n\treturn m.durationField\n}", "func GetTrackedSeconds(ctx context.Context, opts FindTrackedTimesOptions) (trackedSeconds int64, err error) {\n\treturn opts.toSession(db.GetEngine(ctx)).SumInt(&TrackedTime{}, \"time\")\n}", "func (o HPAScalingPolicyOutput) PeriodSeconds() pulumi.IntOutput {\n\treturn o.ApplyT(func(v HPAScalingPolicy) int { return v.PeriodSeconds }).(pulumi.IntOutput)\n}", "func (s *Stopwatch) ElapsedMilliSeconds() float64 {\n\ts.RLock()\n\tdefer s.RUnlock()\n\n\treturn float64(s.Elapsed() / time.Millisecond)\n}", "func FromFloat(seconds float64) time.Duration {\n\treturn time.Duration(seconds*float64(time.Second) + 0.5)\n}", "func (o ClusterBuildStrategySpecBuildStepsLivenessProbeOutput) PeriodSeconds() pulumi.IntPtrOutput {\n\treturn o.ApplyT(func(v ClusterBuildStrategySpecBuildStepsLivenessProbe) *int { return v.PeriodSeconds }).(pulumi.IntPtrOutput)\n}", "func (d UnixDuration) Duration() time.Duration {\n\treturn time.Duration(d) * time.Second\n}", "func (c *Client) Duration() (float64, error) {\n\treturn c.GetFloatProperty(\"duration\")\n}", "func fmtDuration(d time.Duration) string {\n\treturn fmt.Sprintf(\"%.2fs\", d.Seconds())\n}", "func (o BuildStrategySpecBuildStepsLivenessProbePtrOutput) PeriodSeconds() pulumi.IntPtrOutput {\n\treturn o.ApplyT(func(v 
*BuildStrategySpecBuildStepsLivenessProbe) *int {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn v.PeriodSeconds\n\t}).(pulumi.IntPtrOutput)\n}", "func FormatSeconds(seconds float64) string {\n\t// Make sure localised strings are fetched\n\tlocOnce.Do(func() {\n\t\tlocDay = glib.Local(\"one day\")\n\t\tlocDays = glib.Local(\"days\")\n\t})\n\n\tminutes, secs := int(seconds)/60, int(seconds)%60\n\thours, mins := minutes/60, minutes%60\n\tdays, hrs := hours/24, hours%24\n\tswitch {\n\tcase days > 1:\n\t\treturn fmt.Sprintf(\"%d %s %d:%02d:%02d\", days, locDays, hrs, mins, secs)\n\tcase days == 1:\n\t\treturn fmt.Sprintf(\"%s %d:%02d:%02d\", locDay, hrs, mins, secs)\n\tcase hours >= 1:\n\t\treturn fmt.Sprintf(\"%d:%02d:%02d\", hrs, mins, secs)\n\tdefault:\n\t\treturn fmt.Sprintf(\"%d:%02d\", mins, secs)\n\t}\n}", "func (s Segment) Duration() time.Duration {\n\treturn s.EndsBefore.Sub(s.Start)\n}", "func MeasureSecondsSince(name string, field string, t time.Time) Measurement {\n\treturn NewMeasurement(name).AddSecondsSince(field, t)\n}", "func getSeconds(time *int) int {\n\treturn *time\n}", "func (m *TimerMutation) ElapsedSeconds() (r int, exists bool) {\n\tv := m.elapsedSeconds\n\tif v == nil {\n\t\treturn\n\t}\n\treturn *v, true\n}", "func micros(d time.Duration) int {\n\treturn int(d.Seconds() * 1000000)\n}", "func (o ClusterBuildStrategySpecBuildStepsLivenessProbePtrOutput) PeriodSeconds() pulumi.IntPtrOutput {\n\treturn o.ApplyT(func(v *ClusterBuildStrategySpecBuildStepsLivenessProbe) *int {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn v.PeriodSeconds\n\t}).(pulumi.IntPtrOutput)\n}", "func (t Time) Second() int {\n\treturn time.Time(t).Second()\n}", "func (dt *DateTime) GetSecond() *Number {\n\treturn dt.Second()\n}" ]
[ "0.8146225", "0.7864046", "0.7570394", "0.72681606", "0.7212462", "0.7174782", "0.7154755", "0.71513623", "0.6977348", "0.6971889", "0.6894677", "0.6776812", "0.6714895", "0.67132235", "0.66712093", "0.65651536", "0.6538294", "0.65263283", "0.65176785", "0.6466086", "0.6434017", "0.6346442", "0.6335964", "0.6320832", "0.6305017", "0.6227518", "0.6227518", "0.6188115", "0.6179615", "0.6160989", "0.6151172", "0.61360115", "0.6134382", "0.6124866", "0.6116353", "0.60917294", "0.60746425", "0.60684717", "0.60326755", "0.60280186", "0.59919035", "0.5981728", "0.5965073", "0.59522265", "0.59358466", "0.5933968", "0.5911186", "0.5909971", "0.5904017", "0.5904017", "0.5904017", "0.59037054", "0.5853884", "0.58501333", "0.5846143", "0.5842225", "0.5826644", "0.58200824", "0.58200824", "0.5814505", "0.58078283", "0.58017075", "0.57867104", "0.5777276", "0.57679474", "0.5767262", "0.57671547", "0.5743368", "0.57286924", "0.5720256", "0.5709292", "0.5700157", "0.5690397", "0.5683481", "0.56823915", "0.56571186", "0.5638545", "0.56276274", "0.5623667", "0.56083286", "0.5604272", "0.5583957", "0.55779094", "0.55776834", "0.5528202", "0.55278444", "0.5521058", "0.5519935", "0.55159116", "0.5504168", "0.5501442", "0.5497751", "0.5496833", "0.5489368", "0.54639864", "0.5434759", "0.5431013", "0.54273176", "0.54169023", "0.54133046" ]
0.81206095
1
GetIssueLabels gets the current labels on the specified PR or issue
GetIssueLabels получает текущие метки на указанном PR или issue
func (fc *fakeClient) GetIssueLabels(owner, repo string, number int) ([]github.Label, error) { var la []github.Label for _, l := range fc.labels { la = append(la, github.Label{Name: l}) } return la, nil }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (c *client) GetIssueLabels(org, repo string, number int) ([]Label, error) {\n\tdurationLogger := c.log(\"GetIssueLabels\", org, repo, number)\n\tdefer durationLogger()\n\n\treturn c.getLabels(fmt.Sprintf(\"/repos/%s/%s/issues/%d/labels\", org, repo, number), org)\n}", "func (issue *Issue) GetLabels() []string {\n\treturn issue.Fields.Labels\n}", "func (m *MockRerunClient) GetIssueLabels(org, repo string, number int) ([]github.Label, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"GetIssueLabels\", org, repo, number)\n\tret0, _ := ret[0].([]github.Label)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func (m *MockClient) GetIssueLabels(org, repo string, number int) ([]github.Label, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"GetIssueLabels\", org, repo, number)\n\tret0, _ := ret[0].([]github.Label)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func (i *IssueRequest) GetLabels() []string {\n\tif i == nil || i.Labels == nil {\n\t\treturn nil\n\t}\n\treturn *i.Labels\n}", "func (m *MockIssueClient) GetIssueLabels(org, repo string, number int) ([]github.Label, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"GetIssueLabels\", org, repo, number)\n\tret0, _ := ret[0].([]github.Label)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func (a ProblemAdapter) GetLabels() map[string]string {\n\treturn nil\n}", "func GetLabels(repositoryURL string, token string) ([]Label, error) {\n\tURL := fmt.Sprintf(\"%v/labels\", repositoryURL)\n\n\trequest, err := http.NewRequest(\"GET\", URL, nil)\n\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"Couldn't make a new request in GetLabel: %v\", err)\n\t}\n\n\trequest.Header.Add(\"Authorization\", token)\n\trequest.Header.Add(\"Accept\", \"application/vnd.github.v3+json\")\n\n\tresponse, err := http.DefaultClient.Do(request)\n\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"Response error in GetLabel: %v\", err)\n\t}\n\n\tif response.Body != nil {\n\t\tdefer 
response.Body.Close()\n\t}\n\n\tbody, err := ioutil.ReadAll(response.Body)\n\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"couldn't convert response body to []byte: %v\", err)\n\t}\n\n\tvar labels []Label\n\n\terr = json.Unmarshal(body, &labels)\n\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"problem unmarshalling the response body: %v\", err)\n\t}\n\n\treturn labels, nil\n}", "func NewIssueGetLabelParams() *IssueGetLabelParams {\n\tvar ()\n\treturn &IssueGetLabelParams{\n\n\t\ttimeout: cr.DefaultTimeout,\n\t}\n}", "func Labels(ctx context.Context, client *github.Client, settings *Settings) ([]string, error) {\n\tlabels, _, err := client.Issues.ListLabels(ctx, settings.BaseAccount, settings.BaseRepo, &github.ListOptions{PerPage: 100})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tsort.Slice(labels, func(i, j int) bool {\n\t\tswitch {\n\t\tcase labels[i] == nil:\n\t\t\treturn true\n\t\tcase labels[j] == nil:\n\t\t\treturn false\n\t\tdefault:\n\t\t\treturn *labels[i].Name < *labels[j].Name\n\t\t}\n\t})\n\tvar o []string\n\tfor _, l := range labels {\n\t\tif l.Name != nil {\n\t\t\to = append(o, *l.Name)\n\t\t}\n\t}\n\treturn o, nil\n}", "func (c *client) GetRepoLabels(org, repo string) ([]Label, error) {\n\tdurationLogger := c.log(\"GetRepoLabels\", org, repo)\n\tdefer durationLogger()\n\n\treturn c.getLabels(fmt.Sprintf(\"/repos/%s/%s/labels\", org, repo), org)\n}", "func jiraLabels(j *v1alpha1.Jira) map[string]string {\n\tlabels := defaultLabels(j)\n\tfor key, val := range j.ObjectMeta.Labels {\n\t\tlabels[key] = val\n\t}\n\treturn labels\n}", "func (c *client) getLabels(path, org string) ([]Label, error) {\n\tvar labels []Label\n\tif c.fake {\n\t\treturn labels, nil\n\t}\n\terr := c.readPaginatedResults(\n\t\tpath,\n\t\t\"application/vnd.github.symmetra-preview+json\", // allow the description field -- https://developer.github.com/changes/2018-02-22-label-description-search-preview/\n\t\torg,\n\t\tfunc() interface{} {\n\t\t\treturn 
&[]Label{}\n\t\t},\n\t\tfunc(obj interface{}) {\n\t\t\tlabels = append(labels, *(obj.(*[]Label))...)\n\t\t},\n\t)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn labels, nil\n}", "func GetLabels(component, name, identifier string) map[string]string {\n\t// see https://kubernetes.io/docs/concepts/overview/working-with-objects/common-labels\n\treturn map[string]string{\n\t\t\"app.kubernetes.io/managed-by\": \"splunk-operator\",\n\t\t\"app.kubernetes.io/component\": component,\n\t\t\"app.kubernetes.io/name\": name,\n\t\t\"app.kubernetes.io/part-of\": fmt.Sprintf(\"splunk-%s-%s\", identifier, component),\n\t\t\"app.kubernetes.io/instance\": fmt.Sprintf(\"splunk-%s-%s\", identifier, name),\n\t}\n}", "func (mr *MockRerunClientMockRecorder) GetIssueLabels(org, repo, number interface{}) *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"GetIssueLabels\", reflect.TypeOf((*MockRerunClient)(nil).GetIssueLabels), org, repo, number)\n}", "func (mr *MockIssueClientMockRecorder) GetIssueLabels(org, repo, number interface{}) *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"GetIssueLabels\", reflect.TypeOf((*MockIssueClient)(nil).GetIssueLabels), org, repo, number)\n}", "func getSigLabelsForIssue(issue Issue) []string {\n\tvar sigs []string = nil\n\n\tvar sizeFactor float64 = 400\n\tissueSize := float64(len(issue.Title) + len(issue.Body))\n\tsizeScaling := 0.75 * issueSize / sizeFactor\n\tif sizeScaling < 1 { // Don't weirdly scale tiny issues\n\t\tsizeScaling = 1\n\t}\n\tfmt.Println(\"size scaling\", sizeScaling)\n\n\tfor sigName, scoreData := range getScoresForSigs(issue) {\n\t\tfmt.Println(\"Debug\", sigName, scoreData.scoreItems)\n\t\tif float64(scoreData.scoreTotal) >= scoreThreshhold*sizeScaling {\n\t\t\tsigs = append(sigs, sigName)\n\t\t}\n\t}\n\n\treturn sigs\n}", "func (mr *MockClientMockRecorder) GetIssueLabels(org, repo, number interface{}) *gomock.Call 
{\n\tmr.mock.ctrl.T.Helper()\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"GetIssueLabels\", reflect.TypeOf((*MockClient)(nil).GetIssueLabels), org, repo, number)\n}", "func GetLabels(component constants.ComponentName, cr_name string) map[string]string {\n\treturn generateComponentLabels(component, cr_name)\n}", "func (a *Awaitility) GetMetricLabels(t *testing.T, family string) []map[string]*string {\n\tlabels, err := metrics.GetMetricLabels(a.RestConfig, a.MetricsURL, family)\n\trequire.NoError(t, err)\n\treturn labels\n}", "func (pc *PodCache) GetLabels(key types.UID) labels.Set {\n\treturn pc.cachedPods[key].LabelSet\n}", "func (sdc *SDConfig) GetLabels(baseDir string) ([]*promutils.Labels, error) {\n\tcfg, err := getAPIConfig(sdc, baseDir)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"cannot get API config: %w\", err)\n\t}\n\tms := getServiceLabels(cfg)\n\treturn ms, nil\n}", "func GetLabels() []string {\n\tvar res []string\n\tlabelsURL := \"https://raw.githubusercontent.com/googlecreativelab/quickdraw-dataset/master/categories.txt\"\n\tresp, err := http.Get(labelsURL)\n\tbody, err := ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\tfmt.Printf(\"Unable to fetch labels\")\n\t}\n\tres = strings.Split(string(body), \"\\n\")\n\treturn res\n}", "func NewIssueGetLabelParamsWithHTTPClient(client *http.Client) *IssueGetLabelParams {\n\tvar ()\n\treturn &IssueGetLabelParams{\n\t\tHTTPClient: client,\n\t}\n}", "func (m *Group) GetAssignedLabels()([]AssignedLabelable) {\n return m.assignedLabels\n}", "func (sm SchedulerModel) getLabels(group string, instance InstanceID) map[string]string {\n\tlabels := map[string]string{\n\t\t\"group\": group,\n\t\t\"instance\": string(instance),\n\t}\n\n\treturn labels\n}", "func (m *MockClient) GetRepoLabels(org, repo string) ([]github.Label, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"GetRepoLabels\", org, repo)\n\tret0, _ := ret[0].([]github.Label)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", 
"func (i *Issue) GetLabelsURL() string {\n\tif i == nil || i.LabelsURL == nil {\n\t\treturn \"\"\n\t}\n\treturn *i.LabelsURL\n}", "func (o *VirtualizationIweVirtualMachine) GetLabels() []InfraMetaData {\n\tif o == nil {\n\t\tvar ret []InfraMetaData\n\t\treturn ret\n\t}\n\treturn o.Labels\n}", "func (i *IssueEvent) GetLabel() *Label {\n\tif i == nil {\n\t\treturn nil\n\t}\n\treturn i.Label\n}", "func (d *RetryDownloader) GetLabels() ([]*Label, error) {\n\tvar (\n\t\tlabels []*Label\n\t\terr error\n\t)\n\n\terr = d.retry(func() error {\n\t\tlabels, err = d.Downloader.GetLabels()\n\t\treturn err\n\t})\n\n\treturn labels, err\n}", "func GetIssueType(issue *github.Issue) string {\n\tfor _, l := range issue.Labels {\n\t\tswitch l.GetName() {\n\t\tcase enhancementTag:\n\t\t\treturn enhancementDisplayName\n\t\tcase bugTag:\n\t\t\treturn bugDisplayName\n\t\tdefault:\n\t\t\treturn closedDisplayName\n\t\t}\n\t}\n\treturn closedDisplayName\n}", "func (wt *WorkspaceTemplateFilter) GetLabels() []*Label {\n\treturn wt.Labels\n}", "func (m *HistogramDataPoint) GetLabels() []v11.StringKeyValue {\n\tif m != nil {\n\t\treturn m.Labels\n\t}\n\treturn nil\n}", "func (a GetSLITriggeredAdapter) GetLabels() map[string]string {\n\treturn a.event.Labels\n}", "func (a *Client) GetLabels(params *GetLabelsParams, opts ...ClientOption) (*GetLabelsOK, error) {\n\t// TODO: Validate the params before sending\n\tif params == nil {\n\t\tparams = NewGetLabelsParams()\n\t}\n\top := &runtime.ClientOperation{\n\t\tID: \"GetLabels\",\n\t\tMethod: \"POST\",\n\t\tPathPattern: \"/get-labels\",\n\t\tProducesMediaTypes: []string{\"application/json\"},\n\t\tConsumesMediaTypes: []string{\"application/json\"},\n\t\tSchemes: []string{\"http\"},\n\t\tParams: params,\n\t\tReader: &GetLabelsReader{formats: a.formats},\n\t\tContext: params.Context,\n\t\tClient: params.HTTPClient,\n\t}\n\tfor _, opt := range opts {\n\t\topt(op)\n\t}\n\n\tresult, err := a.transport.Submit(op)\n\tif err != nil {\n\t\treturn nil, 
err\n\t}\n\tsuccess, ok := result.(*GetLabelsOK)\n\tif ok {\n\t\treturn success, nil\n\t}\n\t// unexpected success response\n\t// safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue\n\tmsg := fmt.Sprintf(\"unexpected success response for GetLabels: API contract not enforced by server. Client expected to get an error, but got: %T\", result)\n\tpanic(msg)\n}", "func (o *TemplateSummaryResources) GetLabels() []TemplateSummaryLabel {\n\tif o == nil {\n\t\tvar ret []TemplateSummaryLabel\n\t\treturn ret\n\t}\n\n\treturn o.Labels\n}", "func fetchAllIssuesByLabel(client *github.Client, owner, name, state string, labels []string) []*github.Issue {\n\tpageIndex := 1\n\trepoOptions := github.IssueListByRepoOptions{\n\t\tListOptions: github.ListOptions{\n\t\t\tPage: pageIndex,\n\t\t\tPerPage: 100,\n\t\t},\n\t\tState: state,\n\t\tLabels: labels,\n\t}\n\tvar allIssues []*github.Issue\n\tfor {\n\t\tissues, _, err :=\n\t\t\tclient.Issues.ListByRepo(context.Background(), owner, name, &repoOptions)\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\t\tallIssues = append(allIssues, issues...)\n\t\trepoOptions.Page++\n\t\tif len(issues) != 0 {\n\t\t\tbreak\n\t\t}\n\t}\n\treturn allIssues\n}", "func (lbl *LabelService) GetLabelList() (labels []*types.Label, err error) {\n\tlog.Debug(\"GetLabelList\")\n\n\tdata, status, err := lbl.concertoService.Get(\"/labels\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif err = utils.CheckStandardStatus(status, data); err != nil {\n\t\treturn nil, err\n\t}\n\n\tif err = json.Unmarshal(data, &labels); err != nil {\n\t\treturn nil, err\n\t}\n\n\t// exclude internal labels (with a Namespace defined)\n\tvar filteredLabels []*types.Label\n\tfor _, label := range labels {\n\t\tif label.Namespace == \"\" {\n\t\t\tfilteredLabels = append(filteredLabels, label)\n\t\t}\n\t}\n\n\treturn filteredLabels, nil\n}", "func (m *MockRepositoryClient) GetRepoLabels(org, repo string) 
([]github.Label, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"GetRepoLabels\", org, repo)\n\tret0, _ := ret[0].([]github.Label)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func (i *IssuesEvent) GetLabel() *Label {\n\tif i == nil {\n\t\treturn nil\n\t}\n\treturn i.Label\n}", "func (o *Channel) GetLabels() []Label {\n\tif o == nil || o.Labels == nil {\n\t\tvar ret []Label\n\t\treturn ret\n\t}\n\treturn *o.Labels\n}", "func GetLabels() ([]byte, error) {\n\tlog.Trace.Printf(\"Getting all the labels.\")\n\tvar ret []byte\n\tvar err error\n\n\tlabels := make([]types.Label, 0)\n\tif err = store.DB.Find(&labels).Error; err == nil {\n\t\tlog.Trace.Printf(\"Successfully got the labels: %+v\", labels)\n\t\tret, err = json.Marshal(labels)\n\t}\n\n\tif err != nil {\n\t\tlog.Warning.Printf(err.Error())\n\t}\n\n\treturn ret, err\n}", "func getTagLabels() ([]string, []string) {\n\tdefer trace()()\n\tvar ntags []string\n\tvar vtags []string\n\tif conf.UCMConfig.AwsTagsToLabels.Enabled {\n\t\ttags := processTagLabelMap(labels, conf.UCMConfig.MetadataReporting.Attributes)\n\t\tfor k, v := range tags {\n\t\t\tntags = append(ntags, strings.ToLower(k))\n\t\t\tvtags = append(vtags, v)\n\t\t}\n\t}\n\treturn ntags, vtags\n}", "func (o ProjectOutput) Labels() pulumi.StringMapOutput {\n\treturn o.ApplyT(func(v *Project) pulumi.StringMapOutput { return v.Labels }).(pulumi.StringMapOutput)\n}", "func (o *IssueGetLabelParams) WithHTTPClient(client *http.Client) *IssueGetLabelParams {\n\to.SetHTTPClient(client)\n\treturn o\n}", "func GetLabelsForComputeInstance(t *testing.T, projectID string, zone string, instanceID string) map[string]string {\n\tlabels, err := GetLabelsForComputeInstanceE(t, projectID, zone, instanceID)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\treturn labels\n}", "func filterOutLabels(issues []*gitlab.Issue, exLabels string) []*gitlab.Issue {\n\tif exLabels == \"\" {\n\t\treturn issues\n\t}\n\tlabels := map[string]struct{}{}\n\tfor _, l := range 
strings.Split(exLabels, \",\") {\n\t\tlabels[l] = struct{}{}\n\t}\n\tfor i := 0; i < len(issues); {\n\t\tissue := issues[i]\n\t\tskip := false\n\t\tfor _, l := range issue.Labels {\n\t\t\tif _, ok := labels[l]; ok {\n\t\t\t\tskip = true\n\t\t\t}\n\t\t}\n\t\tif skip {\n\t\t\tissues[i] = issues[len(issues)-1]\n\t\t\tissues = issues[0 : len(issues)-1]\n\t\t} else {\n\t\t\ti++\n\t\t}\n\t}\n\treturn issues\n}", "func (node *Node) GetLabels() *[]string {\n\n\tres := make([]string, 0)\n\tlabel := &en.ELabel{ID: node.NextLabelID}\n\tfor engine.GetObject(label) {\n\t\tlabelStr := &en.ELabelString{ID: label.LabelStringID}\n\t\tengine.GetObject(labelStr)\n\t\tres = append(res, labelStr.String)\n\t\tif label.NextLabelID != -1 {\n\t\t\tlabel = &en.ELabel{ID: label.NextLabelID}\n\t\t} else {\n\t\t\tbreak\n\t\t}\n\t}\n\n\treturn &res\n}", "func getLabels(\n docker *client.Client,\n containerId string) (labels map[string]string, err error) {\n\n inspect, err := docker.ContainerInspect(context.Background(), containerId)\n if err != nil {\n return\n }\n\n labels = inspect.Config.Labels\n return\n}", "func (m *EnvoyFilter) GetWorkloadLabels() map[string]string {\n\tif m != nil {\n\t\treturn m.WorkloadLabels\n\t}\n\treturn nil\n}", "func (m *CertManagerConfig) GetPodLabels() map[string]interface{} {\n\tif m != nil {\n\t\treturn m.PodLabels\n\t}\n\treturn nil\n}", "func (m *CertManagerConfig) GetPodLabels() map[string]interface{} {\n\tif m != nil {\n\t\treturn m.PodLabels\n\t}\n\treturn nil\n}", "func (m *CertManagerConfig) GetPodLabels() map[string]interface{} {\n\tif m != nil {\n\t\treturn m.PodLabels\n\t}\n\treturn nil\n}", "func (m *CertManagerConfig) GetPodLabels() map[string]interface{} {\n\tif m != nil {\n\t\treturn m.PodLabels\n\t}\n\treturn nil\n}", "func getIssues(user, repo, label string) []Datos {\n\t// Format the http link\n\turl := fmt.Sprintf(\"https://api.github.com/repos/%s/%s/issues?labels=%s&page=1&per_page=100\", user, repo, label)\n\n\t// Get response\n\tbody := 
connectHTML(url)\n\n\t// Filter data\n\tdata := getData(body)\n\n\treturn data\n\n}", "func (manager *Manager) getIdentityLabels(securityIdentity uint32) (labels.Labels, error) {\n\tidentityCtx, cancel := context.WithTimeout(context.Background(), option.Config.KVstoreConnectivityTimeout)\n\tdefer cancel()\n\tif err := manager.identityAllocator.WaitForInitialGlobalIdentities(identityCtx); err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to wait for initial global identities: %v\", err)\n\t}\n\n\tidentity := manager.identityAllocator.LookupIdentityByID(identityCtx, identity.NumericIdentity(securityIdentity))\n\tif identity == nil {\n\t\treturn nil, fmt.Errorf(\"identity %d not found\", securityIdentity)\n\t}\n\treturn identity.Labels, nil\n}", "func (r *RedisFailoverHandler) getLabels(rf *redisfailoverv1.RedisFailover) map[string]string {\n\tdynLabels := map[string]string{\n\t\trfLabelNameKey: rf.Name,\n\t}\n\n\t// Filter the labels based on the whitelist\n\tfilteredCustomLabels := make(map[string]string)\n\tif rf.Spec.LabelWhitelist != nil && len(rf.Spec.LabelWhitelist) != 0 {\n\t\tfor _, regex := range rf.Spec.LabelWhitelist {\n\t\t\tcompiledRegexp, err := regexp.Compile(regex)\n\t\t\tif err != nil {\n\t\t\t\tr.logger.Errorf(\"Unable to compile label whitelist regex '%s', ignoring it.\", regex)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tfor labelKey, labelValue := range rf.Labels {\n\t\t\t\tif match := compiledRegexp.MatchString(labelKey); match {\n\t\t\t\t\tfilteredCustomLabels[labelKey] = labelValue\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t} else {\n\t\t// If no whitelist is specified then don't filter the labels.\n\t\tfilteredCustomLabels = rf.Labels\n\t}\n\treturn util.MergeLabels(defaultLabels, dynLabels, filteredCustomLabels)\n}", "func (c *Client) GetCronWorkflowLabels(namespace, name, prefix string) (labels map[string]string, err error) {\n\tcwf, err := c.ArgoprojV1alpha1().CronWorkflows(namespace).Get(name, metav1.GetOptions{})\n\tif err != nil 
{\n\t\tlog.WithFields(log.Fields{\n\t\t\t\"Namespace\": namespace,\n\t\t\t\"Name\": name,\n\t\t\t\"Error\": err.Error(),\n\t\t}).Error(\"CronWorkflow not found.\")\n\t\treturn nil, util.NewUserError(codes.NotFound, \"CronWorkflow not found.\")\n\t}\n\n\tlabels = label.FilterByPrefix(prefix, cwf.Labels)\n\tlabels = label.RemovePrefix(prefix, labels)\n\n\treturn\n}", "func (o Iperf3SpecClientConfigurationOutput) PodLabels() pulumi.StringMapOutput {\n\treturn o.ApplyT(func(v Iperf3SpecClientConfiguration) map[string]string { return v.PodLabels }).(pulumi.StringMapOutput)\n}", "func (o Iperf3SpecClientConfigurationPtrOutput) PodLabels() pulumi.StringMapOutput {\n\treturn o.ApplyT(func(v *Iperf3SpecClientConfiguration) map[string]string {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn v.PodLabels\n\t}).(pulumi.StringMapOutput)\n}", "func (m *NumberDataPoint) GetLabels() []v11.StringKeyValue {\n\tif m != nil {\n\t\treturn m.Labels\n\t}\n\treturn nil\n}", "func HasLabel(i *github.Issue, label string) bool {\n\tfor _, l := range i.Labels {\n\t\tif *l.Name == label {\n\t\t\treturn true\n\t\t}\n\t}\n\n\treturn false\n}", "func (o *DashboardAllOfLinks) GetLabels() string {\n\tif o == nil || o.Labels == nil {\n\t\tvar ret string\n\t\treturn ret\n\t}\n\treturn *o.Labels\n}", "func getPendingPRs(g *u.GithubClient, f *os.File, owner, repo, branch string) error {\n\tlog.Print(\"Getting pending PR status...\")\n\tf.WriteString(\"-------\\n\")\n\tf.WriteString(fmt.Sprintf(\"## PENDING PRs on the %s branch\\n\", branch))\n\n\tif *htmlizeMD {\n\t\tf.WriteString(\"PR | Milestone | User | Date | Commit Message\\n\")\n\t\tf.WriteString(\"-- | --------- | ---- | ---- | --------------\\n\")\n\t}\n\n\tvar query []string\n\tquery = u.AddQuery(query, \"repo\", owner, \"/\", repo)\n\tquery = u.AddQuery(query, \"is\", \"open\")\n\tquery = u.AddQuery(query, \"type\", \"pr\")\n\tquery = u.AddQuery(query, \"base\", branch)\n\tpendingPRs, err := g.SearchIssues(strings.Join(query, \" 
\"))\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to search pending PRs: %v\", err)\n\t}\n\n\tfor _, pr := range pendingPRs {\n\t\tvar str string\n\t\t// escape '*' in commit messages so they don't mess up formatting\n\t\tmsg := strings.Replace(*pr.Title, \"*\", \"\", -1)\n\t\tmilestone := \"null\"\n\t\tif pr.Milestone != nil {\n\t\t\tmilestone = *pr.Milestone.Title\n\t\t}\n\t\tif *htmlizeMD {\n\t\t\tstr = fmt.Sprintf(\"#%-8d | %-4s | @%-10s| %s | %s\\n\", *pr.Number, milestone, *pr.User.Login, pr.UpdatedAt.Format(\"Mon Jan 2 15:04:05 MST 2006\"), msg)\n\t\t} else {\n\t\t\tstr = fmt.Sprintf(\"#%-8d %-4s @%-10s %s %s\\n\", *pr.Number, milestone, *pr.User.Login, pr.UpdatedAt.Format(\"Mon Jan 2 15:04:05 MST 2006\"), msg)\n\t\t}\n\t\tf.WriteString(str)\n\t}\n\tf.WriteString(\"\\n\\n\")\n\treturn nil\n}", "func (o *IssueGetLabelParams) WithTimeout(timeout time.Duration) *IssueGetLabelParams {\n\to.SetTimeout(timeout)\n\treturn o\n}", "func (drc *DummyRegistryClient) LabelsForImageName(in string) (labels map[string]string, err error) {\n\tres := drc.Called(in)\n\treturn res.Get(0).(map[string]string), res.Error(1)\n}", "func (p *Plex) GetLibraryLabels(sectionKey, sectionIndex string) (libraryLabels, error) {\n\trequestInfo.headers.Token = p.token\n\n\tif sectionIndex == \"\" {\n\t\tsectionIndex = \"1\"\n\t}\n\n\tquery := fmt.Sprintf(\"%s/library/sections/%s/labels?type=%s\", p.URL, sectionKey, sectionIndex)\n\n\tresp, respErr := requestInfo.get(query)\n\n\tif respErr != nil {\n\t\treturn libraryLabels{}, respErr\n\t}\n\n\tdefer resp.Body.Close()\n\n\tvar result libraryLabels\n\n\tif err := json.NewDecoder(resp.Body).Decode(result); err != nil {\n\t\tfmt.Println(err.Error())\n\n\t\treturn libraryLabels{}, err\n\t}\n\n\treturn result, nil\n}", "func (p *PullRequestEvent) GetLabel() *Label {\n\tif p == nil {\n\t\treturn nil\n\t}\n\treturn p.Label\n}", "func (m *SummaryDataPoint) GetLabels() []v11.StringKeyValue {\n\tif m != nil {\n\t\treturn m.Labels\n\t}\n\treturn 
nil\n}", "func (p *PullRequestBranch) GetLabel() string {\n\tif p == nil || p.Label == nil {\n\t\treturn \"\"\n\t}\n\treturn *p.Label\n}", "func (r *Repo) ListLabels() github.Labels {\n\treturn r.cli.ListLabels(r.path)\n}", "func (o Iperf3SpecServerConfigurationOutput) PodLabels() pulumi.StringMapOutput {\n\treturn o.ApplyT(func(v Iperf3SpecServerConfiguration) map[string]string { return v.PodLabels }).(pulumi.StringMapOutput)\n}", "func (r *Registry) Labels(ctx context.Context, ref image.Reference) (map[string]string, error) {\n\t// Set the default namespace if unset\n\tctx = ensureNamespace(ctx)\n\n\tmanifest, err := r.getManifest(ctx, ref)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\timageConfig, err := r.getImage(ctx, *manifest)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn imageConfig.Config.Labels, nil\n}", "func (r *InformationProtectionPolicyLabelsCollectionRequest) Get(ctx context.Context) ([]InformationProtectionLabel, error) {\n\treturn r.GetN(ctx, 0)\n}", "func (o Iperf3SpecServerConfigurationPtrOutput) PodLabels() pulumi.StringMapOutput {\n\treturn o.ApplyT(func(v *Iperf3SpecServerConfiguration) map[string]string {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn v.PodLabels\n\t}).(pulumi.StringMapOutput)\n}", "func addLabelsToPullRequest(prInfo *PullRequestInfo, labels []string) error {\n\tif prInfo == nil {\n\t\treturn errors.New(\"pull request to label cannot be nil\")\n\t}\n\tpr := prInfo.PullRequest\n\tprovider := prInfo.GitProvider\n\n\tif len(labels) > 0 {\n\t\tnumber := *pr.Number\n\t\tvar err error\n\t\terr = provider.AddLabelsToIssue(pr.Owner, pr.Repo, number, labels)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tlog.Logger().Infof(\"Added label %s to Pull Request %s\", util.ColorInfo(strings.Join(labels, \", \")), pr.URL)\n\t}\n\treturn nil\n}", "func (o PgbenchSpecPodConfigOutput) PodLabels() pulumi.StringMapOutput {\n\treturn o.ApplyT(func(v PgbenchSpecPodConfig) map[string]string { return v.PodLabels 
}).(pulumi.StringMapOutput)\n}", "func (o DrillSpecPodConfigPtrOutput) PodLabels() pulumi.StringMapOutput {\n\treturn o.ApplyT(func(v *DrillSpecPodConfig) map[string]string {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn v.PodLabels\n\t}).(pulumi.StringMapOutput)\n}", "func (o FioSpecPodConfigOutput) PodLabels() pulumi.StringMapOutput {\n\treturn o.ApplyT(func(v FioSpecPodConfig) map[string]string { return v.PodLabels }).(pulumi.StringMapOutput)\n}", "func (m *Milestone) GetLabelsURL() string {\n\tif m == nil || m.LabelsURL == nil {\n\t\treturn \"\"\n\t}\n\treturn *m.LabelsURL\n}", "func GetLabelsForComputeInstanceE(t *testing.T, projectID string, zone string, instanceID string) (map[string]string, error) {\n\tlogger.Logf(t, \"Getting Labels for Compute Instance %s\", instanceID)\n\n\tctx := context.Background()\n\n\tservice, err := NewComputeServiceE(t)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tinstance, err := service.Instances.Get(projectID, zone, instanceID).Context(ctx).Do()\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"Instances.Get(%s) got error: %v\", instanceID, err)\n\t}\n\n\treturn instance.Labels, nil\n}", "func GetClanLabels(qparms rest.QParms) ([]Label, error) {\n\tvar sb strings.Builder\n\tsb.Grow(100)\n\tsb.WriteString(config.Data.BaseURL)\n\tsb.WriteString(\"/labels/clans/\")\n\n\tbody, err := get(sb.String(), nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Parse into an array of clans\n\ttype respType struct {\n\t\tLabels []Label `json:\"items\"`\n\t}\n\tvar resp respType\n\terr = json.Unmarshal(body, &resp)\n\tif err != nil {\n\t\tlog.Debug(\"failed to parse the json response\")\n\t\treturn nil, err\n\t}\n\n\treturn resp.Labels, nil\n}", "func (r *resultImpl) Labels() []Label {\n\treturn r.labels\n}", "func (o *Channel) GetLabelsOk() (*[]Label, bool) {\n\tif o == nil || o.Labels == nil {\n\t\treturn nil, false\n\t}\n\treturn o.Labels, true\n}", "func (o SysbenchSpecOutput) PodLabels() pulumi.StringMapOutput 
{\n\treturn o.ApplyT(func(v SysbenchSpec) map[string]string { return v.PodLabels }).(pulumi.StringMapOutput)\n}", "func GetMetricLabels(restConfig *rest.Config, url string, family string) ([]map[string]*string, error) {\n\turi := fmt.Sprintf(\"https://%s/metrics\", url)\n\tvar metrics []byte\n\n\tclient := http.Client{\n\t\tTimeout: time.Duration(30 * time.Second),\n\t\tTransport: &http.Transport{\n\t\t\tTLSClientConfig: &tls.Config{InsecureSkipVerify: true}, //nolint:gosec\n\t\t},\n\t}\n\trequest, err := http.NewRequest(\"Get\", uri, nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\trequest.Header.Add(\"Authorization\", fmt.Sprintf(\"Bearer %s\", restConfig.BearerToken))\n\tresp, err := client.Do(request)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer func() {\n\t\t_ = resp.Body.Close()\n\t}()\n\tmetrics, err = io.ReadAll(resp.Body)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// parse the metrics\n\tparser := expfmt.TextParser{}\n\tfamilies, err := parser.TextToMetricFamilies(bytes.NewReader(metrics))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tlabels := make([]map[string]*string, 0, len(families))\n\tfor _, f := range families {\n\t\tif f.GetName() == family {\n\t\t\tlbls := map[string]*string{}\n\t\t\tlabels = append(labels, lbls)\n\t\t\tfor _, m := range f.GetMetric() {\n\t\t\t\tfor _, kv := range m.Label {\n\t\t\t\t\tif kv.Name != nil {\n\t\t\t\t\t\tlbls[*kv.Name] = kv.Value\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\t// here we can return `0` is the metric does not exist, which may be valid if the expected value is `0`, too.\n\treturn labels, nil\n}", "func (o FioSpecPodConfigPtrOutput) PodLabels() pulumi.StringMapOutput {\n\treturn o.ApplyT(func(v *FioSpecPodConfig) map[string]string {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn v.PodLabels\n\t}).(pulumi.StringMapOutput)\n}", "func (o LookupClientTlsPolicyResultOutput) Labels() pulumi.StringMapOutput {\n\treturn o.ApplyT(func(v LookupClientTlsPolicyResult) 
map[string]string { return v.Labels }).(pulumi.StringMapOutput)\n}", "func HasLabel(label string, issueLabels []*github.Label) bool {\n\tfor _, l := range issueLabels {\n\t\tif strings.ToLower(l.GetName()) == strings.ToLower(label) {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func (o LookupFeatureResultOutput) Labels() pulumi.StringMapOutput {\n\treturn o.ApplyT(func(v LookupFeatureResult) map[string]string { return v.Labels }).(pulumi.StringMapOutput)\n}", "func (o QperfSpecClientConfigurationOutput) PodLabels() pulumi.StringMapOutput {\n\treturn o.ApplyT(func(v QperfSpecClientConfiguration) map[string]string { return v.PodLabels }).(pulumi.StringMapOutput)\n}", "func (o QperfSpecClientConfigurationPtrOutput) PodLabels() pulumi.StringMapOutput {\n\treturn o.ApplyT(func(v *QperfSpecClientConfiguration) map[string]string {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn v.PodLabels\n\t}).(pulumi.StringMapOutput)\n}", "func (o LookupApiResultOutput) Labels() pulumi.StringMapOutput {\n\treturn o.ApplyT(func(v LookupApiResult) map[string]string { return v.Labels }).(pulumi.StringMapOutput)\n}", "func (b *Bot) labels(ctx context.Context, files []github.PullRequestFile) ([]string, error) {\n\tvar labels []string\n\n\t// The branch name is unsafe, but here we are simply adding a label.\n\tif isReleaseBranch(b.c.Environment.UnsafeBase) {\n\t\tlog.Println(\"Label: Found backport branch.\")\n\t\tlabels = append(labels, \"backport\")\n\t}\n\n\tfor _, file := range files {\n\t\tif strings.HasPrefix(file.Name, \"vendor/\") {\n\t\t\tcontinue\n\t\t}\n\n\t\tfor k, v := range prefixes {\n\t\t\tif strings.HasPrefix(file.Name, k) {\n\t\t\t\tlog.Printf(\"Label: Found prefix %v, attaching labels: %v.\", k, v)\n\t\t\t\tlabels = append(labels, v...)\n\t\t\t}\n\t\t}\n\t}\n\n\treturn deduplicate(labels), nil\n}", "func (l CurrentLabels) Labels() []string {\n\treturn []string{\"type\"}\n}", "func (o PgbenchSpecPodConfigPtrOutput) PodLabels() pulumi.StringMapOutput 
{\n\treturn o.ApplyT(func(v *PgbenchSpecPodConfig) map[string]string {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn v.PodLabels\n\t}).(pulumi.StringMapOutput)\n}", "func (o DrillSpecPodConfigOutput) PodLabels() pulumi.StringMapOutput {\n\treturn o.ApplyT(func(v DrillSpecPodConfig) map[string]string { return v.PodLabels }).(pulumi.StringMapOutput)\n}", "func (o *CreateOptions) GetLabels() map[string]string {\n\tif o.Labels == nil {\n\t\tvar z map[string]string\n\t\treturn z\n\t}\n\treturn o.Labels\n}", "func GetXWso2Labels(vendorExtensionsMap map[string]interface{}) []string {\n\tvar labelArray []string\n\tif y, found := vendorExtensionsMap[\"x-wso2-label\"]; found {\n\t\tif val, ok := y.([]interface{}); ok {\n\t\t\tfor _, label := range val {\n\t\t\t\tlabelArray = append(labelArray, label.(string))\n\t\t\t}\n\t\t\treturn labelArray\n\t\t}\n\t\tlogger.LoggerOasparser.Errorln(\"Error while parsing the x-wso2-label\")\n\t}\n\treturn []string{\"default\"}\n}" ]
[ "0.8113356", "0.7227975", "0.70755315", "0.7042123", "0.70022243", "0.6956072", "0.69555527", "0.6602383", "0.65491056", "0.65308553", "0.6387276", "0.63280463", "0.63201314", "0.6253154", "0.6241437", "0.62240124", "0.62148833", "0.6124567", "0.6104237", "0.5956916", "0.59501666", "0.59391576", "0.5933868", "0.58997226", "0.5864033", "0.58363646", "0.5805237", "0.5791458", "0.5790756", "0.57645184", "0.57587945", "0.57539576", "0.57486945", "0.5737803", "0.5726392", "0.57115436", "0.5709977", "0.56985253", "0.56836843", "0.5673481", "0.56612855", "0.5648707", "0.5623989", "0.5605312", "0.5588475", "0.55833757", "0.55811244", "0.55766886", "0.5570993", "0.5566398", "0.55643225", "0.5552313", "0.5552313", "0.5552313", "0.5552313", "0.55305886", "0.5522722", "0.5512849", "0.5499617", "0.5499104", "0.5490772", "0.5475641", "0.54685646", "0.54657024", "0.54631054", "0.54460514", "0.5437185", "0.54368925", "0.5432543", "0.5431588", "0.5419353", "0.5414626", "0.5412325", "0.5401767", "0.5399571", "0.5389586", "0.5379682", "0.53742796", "0.53735936", "0.53693944", "0.53693295", "0.5368887", "0.5351958", "0.53516346", "0.5349291", "0.53460735", "0.5343084", "0.5324835", "0.5322699", "0.53184247", "0.53156734", "0.53151214", "0.53145564", "0.53144497", "0.5314368", "0.53136873", "0.5303117", "0.5301994", "0.5290026", "0.5288933" ]
0.8116904
0
CreateComment adds and tracks a comment in the client
CreateComment добавляет и отслеживает комментарий в клиенте
func (fc *fakeClient) CreateComment(owner, repo string, number int, comment string) error { fc.commentsAdded[number] = append(fc.commentsAdded[number], comment) return nil }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (s *Server) CreateComment(w http.ResponseWriter, r *http.Request) {\n\tctx := r.Context()\n\n\tb, err := ioutil.ReadAll(r.Body)\n\tdefer r.Body.Close()\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), 500)\n\t\treturn\n\t}\n\n\tvar comment Comment\n\tif err = json.Unmarshal(b, &comment); err != nil {\n\t\thttp.Error(w, err.Error(), 500)\n\t\treturn\n\t}\n\n\tcommentUUID, err := uuid.NewUUID()\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), 500)\n\t\treturn\n\t}\n\tcommentID := commentUUID.String()\n\n\tcomment.ID = commentID\n\n\terr1 := s.database.CreateComment(ctx, &comment)\n\tif httperr.HandleError(w, err, http.StatusInternalServerError) {\n\t\ts.logger.For(ctx).Error(\"request failed\", zap.Error(err1))\n\t\treturn\n\t}\n\n\tresponseBody := CreatePostResponse{\"success\"}\n\tjsonResponse, err := json.Marshal(responseBody)\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), 500)\n\t\treturn\n\t}\n\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\tw.Write(jsonResponse)\n}", "func (b *Service) CommentCreate(ctx context.Context, TeamID string, UserID string, EventValue string) ([]byte, error, bool) {\n\tvar c struct {\n\t\tCheckinId string `json:\"checkinId\"`\n\t\tUserID string `json:\"userId\"`\n\t\tComment string `json:\"comment\"`\n\t}\n\terr := json.Unmarshal([]byte(EventValue), &c)\n\tif err != nil {\n\t\treturn nil, err, false\n\t}\n\n\terr = b.CheckinService.CheckinComment(ctx, TeamID, c.CheckinId, c.UserID, c.Comment)\n\tif err != nil {\n\t\treturn nil, err, false\n\t}\n\n\tmsg := createSocketEvent(\"comment_added\", \"\", \"\")\n\n\treturn msg, nil, false\n}", "func (s *APIClientService) CreateComment(ctx context.Context, id string, new CommentRequest) (Comment, *http.Response, error) {\n\tresource := Comment{} // new(APIClient)\n\n\treq, err := s.client.NewRequest(ctx, http.MethodPost, \"comments/\"+apiClientBasePath+\"/\"+id, new)\n\tif err != nil {\n\t\treturn resource, nil, err\n\t}\n\n\tresp, _, err := s.client.Do(ctx, req, 
&resource, false)\n\tif err != nil {\n\t\treturn resource, nil, err\n\t}\n\n\treturn resource, resp, nil\n}", "func (c *Client) CreateComment(owner, repo string, number int, comment string) error {\n\tif c.dry {\n\t\treturn nil\n\t}\n\n\tic := IssueComment{\n\t\tBody: comment,\n\t}\n\tresp, err := c.request(http.MethodPost, fmt.Sprintf(\"%s/repos/%s/%s/issues/%d/comments\", c.base, owner, repo, number), ic)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer resp.Body.Close()\n\tif resp.StatusCode != 201 {\n\t\treturn fmt.Errorf(\"response not 201: %s\", resp.Status)\n\t}\n\treturn nil\n}", "func (b *Client) CreateComment(repo models.Repo, pullNum int, comment string, command string) error {\n\t// NOTE: I tried to find the maximum size of a comment for bitbucket.org but\n\t// I got up to 200k chars without issue so for now I'm not going to bother\n\t// to detect this.\n\tbodyBytes, err := json.Marshal(map[string]map[string]string{\"content\": {\n\t\t\"raw\": comment,\n\t}})\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"json encoding\")\n\t}\n\tpath := fmt.Sprintf(\"%s/2.0/repositories/%s/pullrequests/%d/comments\", b.BaseURL, repo.FullName, pullNum)\n\t_, err = b.makeRequest(\"POST\", path, bytes.NewBuffer(bodyBytes))\n\treturn err\n}", "func (c *client) CreateComment(org, repo string, number int, comment string) error {\n\treturn c.CreateCommentWithContext(context.Background(), org, repo, number, comment)\n}", "func CreateComment(w http.ResponseWriter, r *http.Request) {\n\tsessionID := r.Header.Get(\"sessionID\")\n\tuser, err := getUserFromSession(sessionID)\n\n\tif err != nil {\n\t\tmsg := map[string]string{\"error\": \"Sorry there was an internal server error\"}\n\t\tw.Header().Set(\"Content-type\", \"application/json\")\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\tjson.NewEncoder(w).Encode(msg)\n\n\t\treturn\n\t}\n\n\tif !user.Active {\n\t\tmsg := map[string]string{\"error\": \"Sorry your account isn't activated 
yet\"}\n\t\tw.Header().Set(\"Content-type\", \"application/json\")\n\t\tw.WriteHeader(http.StatusUnauthorized)\n\t\tjson.NewEncoder(w).Encode(msg)\n\n\t\treturn\n\t}\n\n\tif r.Body == nil {\n\t\tmsg := map[string]string{\"error\": \"Sorry you need to supply an item id and a comment text\"}\n\t\tw.Header().Set(\"Content-type\", \"application/json\")\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\tjson.NewEncoder(w).Encode(msg)\n\n\t\treturn\n\t}\n\n\tvar comment comments.Comment\n\n\terr = json.NewDecoder(r.Body).Decode(&comment)\n\n\tif err != nil {\n\t\tlog.Println(err)\n\t\tmsg := map[string]string{\"error\": \"Please supply a valid email and password\"}\n\t\tw.Header().Set(\"Content-type\", \"application/json\")\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\tjson.NewEncoder(w).Encode(msg)\n\n\t\treturn\n\t}\n\n\tcomment.Username = user.DisplayName\n\n\terr = comment.Create()\n\n\tif err != nil {\n\t\tmsg := map[string]string{\"error\": \"Sorry there was an internal server error\"}\n\t\tw.Header().Set(\"Content-type\", \"application/json\")\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\tjson.NewEncoder(w).Encode(msg)\n\n\t\treturn\n\t}\n\n\tmsg := map[string]string{\"message\": \"Success!\"}\n\tw.Header().Set(\"Content-type\", \"application/json\")\n\tw.WriteHeader(http.StatusOK)\n\tjson.NewEncoder(w).Encode(msg)\n\n\treturn\n}", "func (c *CommentApiController) CreateComment(w http.ResponseWriter, r *http.Request) {\n\tcommentParam := Comment{}\n\td := json.NewDecoder(r.Body)\n\td.DisallowUnknownFields()\n\tif err := d.Decode(&commentParam); err != nil {\n\t\tc.errorHandler(w, r, &ParsingError{Err: err}, nil)\n\t\treturn\n\t}\n\tif err := AssertCommentRequired(commentParam); err != nil {\n\t\tc.errorHandler(w, r, err, nil)\n\t\treturn\n\t}\n\tresult, err := c.service.CreateComment(r.Context(), commentParam)\n\t// If an error occurred, encode the error with the status code\n\tif err != nil {\n\t\tc.errorHandler(w, r, err, &result)\n\t\treturn\n\t}\n\t// If 
no error, encode the body and the result code\n\tEncodeJSONResponse(result.Body, &result.Code, result.Headers, w)\n\n}", "func (v Notes) CreateComment(params NotesCreateCommentParams) (NotesCreateCommentResponse, error) {\n\tr, err := v.API.Request(\"notes.createComment\", params)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\n\tvar resp NotesCreateCommentResponse\n\n\tvar cnv int\n\tcnv, err = strconv.Atoi(string(r))\n\tresp = NotesCreateCommentResponse(cnv)\n\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\treturn resp, nil\n}", "func (a *ProblemsApiService) CreateComment(ctx _context.Context, problemId string) ApiCreateCommentRequest {\n\treturn ApiCreateCommentRequest{\n\t\tApiService: a,\n\t\tctx: ctx,\n\t\tproblemId: problemId,\n\t}\n}", "func (dbHandler *Handler) CreateComment(userID uint, entryID uint, text string, ts time.Time) (api.Comment, error) {\n\tcomment := api.Comment{UserID: userID, EntryID: entryID, Text: text}\n\tif !ts.IsZero() {\n\t\tcomment.CreatedAt = ts\n\t\tcomment.UpdatedAt = ts\n\t}\n\n\tdb := dbHandler.DB.Create(&comment)\n\tif db.Error != nil {\n\t\treturn comment, errors.WrapWithDetails(db.Error, \"cannot create comment\", \"userID\", userID, \"entryID\", entryID)\n\t}\n\n\treturn comment, nil\n}", "func (_article *Article) CommentsCreate(am map[string]interface{}) error {\n\t\t\tam[\"article_id\"] = _article.Id\n\t\t_, err := CreateComment(am)\n\treturn err\n}", "func CreateComment(dbp zesty.DBProvider, t *Task, user, content string) (c *Comment, err error) {\n\tdefer errors.DeferredAnnotatef(&err, \"Failed to create comment\")\n\n\tc = &Comment{\n\t\tPublicID: uuid.Must(uuid.NewV4()).String(),\n\t\tTaskID: t.ID,\n\t\tUsername: user,\n\t\tCreated: now.Get(),\n\t\tUpdated: now.Get(),\n\t\tContent: content,\n\t}\n\n\terr = c.Valid()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\terr = dbp.DB().Insert(c)\n\tif err != nil {\n\t\treturn nil, pgjuju.Interpret(err)\n\t}\n\n\treturn c, nil\n}", "func (s *commentService) CreateComment(input 
dto.CreateComment) (entity.Comment, error) {\n\tcomment := entity.Comment{}\n\n\tcomment.Author = input.Author\n\tcomment.Comments = input.Comments\n\tcomment.BlogID = input.BlogID\n\n\t//proceed to the save method in the package repository, which returns the data and error values\n\tnewComment, err := s.commentRepository.Save(comment)\n\tif err != nil {\n\t\treturn newComment, err\n\t}\n\n\treturn newComment, nil\n\n}", "func (s *Server) createComment() http.HandlerFunc {\n\ttype request struct {\n\t\tBody string `json:\"body\"`\n\t}\n\treturn func(w http.ResponseWriter, r *http.Request) {\n\t\tuserToken := getAuthorizationToken(r)\n\t\tif userToken == \"\" {\n\t\t\tw.WriteHeader(http.StatusUnauthorized)\n\t\t\treturn\n\t\t}\n\n\t\tctx := r.Context()\n\n\t\tuser, err := s.Accounts.GetUserByToken(ctx, userToken)\n\t\tif err != nil {\n\t\t\thandleRPCErrors(w, err)\n\t\t\treturn\n\t\t}\n\n\t\tvar req request\n\t\tb, err := ioutil.ReadAll(r.Body)\n\t\tif err != nil {\n\t\t\thandleRPCErrors(w, err)\n\t\t\treturn\n\t\t}\n\t\terr = json.Unmarshal(b, &req)\n\t\tif err != nil {\n\t\t\thttp.Error(w, err.Error(), http.StatusUnprocessableEntity)\n\t\t\treturn\n\t\t}\n\n\t\tvars := mux.Vars(r)\n\t\tnewsUUID := vars[\"newsuuid\"]\n\n\t\tcomment, err := s.Comments.AddComment(ctx, req.Body, user.UID, newsUUID)\n\t\tif err != nil {\n\t\t\thandleRPCErrors(w, err)\n\t\t\treturn\n\t\t}\n\t\tjson, err := json.Marshal(*comment)\n\t\tif err != nil {\n\t\t\thandleRPCErrors(w, err)\n\t\t\treturn\n\t\t}\n\t\tw.WriteHeader(http.StatusAccepted)\n\t\tw.Write(json)\n\t}\n}", "func CreateComment(comment *Comment) error {\n\tvar err error\n\tcomment.CreatedAt = time.Now()\n\terr = db.Debug().Create(comment).Error\n\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}", "func CreateComment(c *gin.Context, in *createCommentIn) (*task.Comment, error) {\n\tmetadata.AddActionMetadata(c, metadata.TaskID, in.TaskID)\n\n\tdbp, err := zesty.NewDBProvider(utask.DBName)\n\tif err != nil {\n\t\treturn 
nil, err\n\t}\n\n\tt, err := task.LoadFromPublicID(dbp, in.TaskID)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\ttt, err := tasktemplate.LoadFromID(dbp, t.TemplateID)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tmetadata.AddActionMetadata(c, metadata.TemplateName, tt.Name)\n\n\tvar res *resolution.Resolution\n\tif t.Resolution != nil {\n\t\tres, err = resolution.LoadFromPublicID(dbp, *t.Resolution)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tmetadata.AddActionMetadata(c, metadata.ResolutionID, res.PublicID)\n\t}\n\n\tadmin := auth.IsAdmin(c) == nil\n\trequester := auth.IsRequester(c, t) == nil\n\twatcher := auth.IsWatcher(c, t) == nil\n\tresolutionManager := auth.IsResolutionManager(c, tt, t, res) == nil\n\n\tif !requester && !watcher && !resolutionManager && !admin {\n\t\treturn nil, errors.Forbiddenf(\"Can't create comment\")\n\t} else if !requester && !watcher && !resolutionManager {\n\t\tmetadata.SetSUDO(c)\n\t}\n\n\treqUsername := auth.GetIdentity(c)\n\n\tcomment, err := task.CreateComment(dbp, t, reqUsername, in.Content)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn comment, nil\n}", "func (m *GormCommentRepository) Create(ctx context.Context, u *Comment) error {\n\tdefer goa.MeasureSince([]string{\"goa\", \"db\", \"comment\", \"create\"}, time.Now())\n\n\tu.ID = uuid.NewV4()\n\n\terr := m.db.Create(u).Error\n\tif err != nil {\n\t\tgoa.LogError(ctx, \"error adding Comment\", \"error\", err.Error())\n\t\treturn err\n\t}\n\n\treturn nil\n}", "func (c Comment) Create(commentUid, articleId, content string, parentId int) error {\n\tcomment := Comment{\n\t\tContent: strings.TrimSpace(content),\n\t\tArticleId: articleId,\n\t\tCommentUid: commentUid,\n\t\tParentId: parentId,\n\t}\n\tif err := load.Conn.Create(&comment).Error; err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}", "func (r *Resolver) CreateComment(ctx context.Context, args struct {\n\tInput createCommentInput\n}) (Comment, error) {\n\tresult := Comment{}\n\tm := 
dbmodel.Comment{}\n\n\t// Role-based Access Control\n\tif _, err := AssertPermissions(ctx, \"create\", \"Comment\", args, &args.Input); err != nil {\n\t\treturn result, errors.Wrapf(err, \"permission denied\")\n\t}\n\n\tdata, err := json.Marshal(args.Input)\n\tif err != nil {\n\t\treturn result, errors.Wrapf(err, \"json.Marshal(%#v)\", args.Input)\n\t}\n\tif err = json.Unmarshal(data, &m); err != nil {\n\t\treturn result, errors.Wrapf(err, \"json.Unmarshal(%s)\", data)\n\t}\n\n\tif err := m.Insert(r.db(ctx)); err != nil {\n\t\treturn result, errors.Wrapf(err, \"createComment(%#v)\", m)\n\t}\n\treturn Comment{model: m, db: r.db(ctx)}, nil\n}", "func createComment(w http.ResponseWriter, r *http.Request) {\n\n\tsession := sessions.Start(w, r)\n\n\tvars := mux.Vars(r)\n\n\tpost_id := vars[\"id\"]\n\tuser_id := session.GetString(\"user_id\")\n\tbody := r.FormValue(\"body\")\n\timage := r.FormValue(\"image\")\n\turl := r.FormValue(\"url\")\n\n\tif len(body) > 2000 {\n\t\thttp.Error(w, \"Your comment is too long. (2000 characters maximum)\", http.StatusBadRequest)\n\t\treturn\n\t}\n\tif len(body) == 0 && len(image) == 0 {\n\t\thttp.Error(w, \"Your comment is empty.\", http.StatusBadRequest)\n\t\treturn\n\t}\n\n\tstmt, err := db.Prepare(\"INSERT comments SET created_by=?, post=?, body=?, image=?, url=?\")\n\tif err == nil {\n\n\t\t// If there's no errors, we can go ahead and execute the statement.\n\t\t_, err := stmt.Exec(&user_id, &post_id, &body, &image, &url)\n\t\tif err != nil {\n\n\t\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\t\treturn\n\n\t\t}\n\n\t\tvar comments = comment{}\n\t\tvar timestamp time.Time\n\n\t\tdb.QueryRow(\"SELECT comments.id, created_by, created_at, body, image, username, nickname, avatar FROM comments LEFT JOIN users ON users.id = created_by WHERE created_by = ? 
ORDER BY created_at DESC LIMIT 1\", user_id).\n\t\t\tScan(&comments.ID, &comments.CreatedBy, &timestamp, &comments.Body, &comments.Image, &comments.CommenterUsername, &comments.CommenterNickname, &comments.CommenterIcon)\n\t\tcomments.CreatedAt = humanTiming(timestamp)\n\n\t\tvar data = map[string]interface{}{\n\t\t\t// This is sent to the user who created the comment so they can't yeah it.\n\t\t\t\"CanYeah\": false,\n\t\t\t\"Comment\": comments,\n\t\t}\n\n\t\terr = templates.ExecuteTemplate(w, \"create_comment.html\", data)\n\n\t\tif err != nil {\n\n\t\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\n\t\t}\n\n\t\tvar commentTpl bytes.Buffer\n\t\tvar commentPreviewTpl bytes.Buffer\n\n\t\t// This will be sent other users so they can yeah it.\n\t\tdata[\"CanYeah\"] = true\n\n\t\ttemplates.ExecuteTemplate(&commentTpl, \"create_comment.html\", data)\n\t\ttemplates.ExecuteTemplate(&commentPreviewTpl, \"comment_preview.html\", data)\n\n\t\tvar msg wsMessage\n\t\tvar community_id string\n\n\t\tdb.QueryRow(\"SELECT community_id FROM posts WHERE id = ?\", post_id).Scan(&community_id)\n\n\t\tfor client := range clients {\n\t\t\tif clients[client].OnPage == \"/posts/\"+post_id && clients[client].UserID != strconv.Itoa(comments.CreatedBy) {\n\t\t\t\tmsg.Type = \"comment\"\n\t\t\t\tmsg.Content = commentTpl.String()\n\t\t\t\terr := client.WriteJSON(msg)\n\t\t\t\tif err != nil {\n\t\t\t\t\tfmt.Println(err)\n\t\t\t\t\tclient.Close()\n\t\t\t\t\tdelete(clients, client)\n\t\t\t\t}\n\t\t\t} else if clients[client].OnPage == \"/communities/\"+community_id {\n\t\t\t\tmsg.Type = \"commentPreview\"\n\t\t\t\tmsg.ID = post_id\n\t\t\t\tmsg.Content = commentPreviewTpl.String()\n\t\t\t\terr := client.WriteJSON(msg)\n\t\t\t\tif err != nil {\n\t\t\t\t\tfmt.Println(err)\n\t\t\t\t\tclient.Close()\n\t\t\t\t\tdelete(clients, client)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\treturn\n\n\t}\n\n}", "func (c *client) CreateCommentReaction(org, repo string, id int, reaction string) error 
{\n\tc.log(\"CreateCommentReaction\", org, repo, id, reaction)\n\tr := Reaction{Content: reaction}\n\t_, err := c.request(&request{\n\t\tmethod: http.MethodPost,\n\t\tpath: fmt.Sprintf(\"/repos/%s/%s/issues/comments/%d/reactions\", org, repo, id),\n\t\taccept: \"application/vnd.github.squirrel-girl-preview\",\n\t\torg: org,\n\t\texitCodes: []int{201},\n\t\trequestBody: &r,\n\t}, nil)\n\treturn err\n}", "func CreateNewComment(newComment Comment) {\n\tconfig := LoadConfigurationFile(\"config.json\")\n\n\tdb, err := sql.Open(\"postgres\", config.DatabaseURI)\n\tif err != nil {\n\t\tlog.Fatal(\"[!] Error while running sql.Open(): \", err)\n\t}\n\tdefer db.Close()\n\n\t// Begin a database transaction\n\ttx, err := db.Begin()\n\tif err != nil {\n\t\tlog.Fatal(\"[!] Error in db.Being(): \", err)\n\t}\n\n\t// Prepare the statement\n\tstmt, err := tx.Prepare(\"INSERT INTO go_comments (post_id, user_id, comment, likes) VALUES ($1, $2, $3, $4)\")\n\tif err != nil {\n\t\tlog.Fatal(\"[!] Error preparing statement: \", err)\n\t}\n\tdefer stmt.Close()\n\n\t// Execute the statement\n\t_, err = stmt.Exec(&newComment.PostID, &newComment.UserID, &newComment.Comment, &newComment.Likes)\n\tif err != nil {\n\t\tlog.Fatal(\"[!] Error executing statement: \", err)\n\t}\n\n\t// Commit the transaction\n\ttx.Commit()\n\n\t// Tell the User\n\tfmt.Println(\"[!] 
New user added to database....\")\n\n\t// Update the Comments slice\n\tGetComments(db)\n}", "func CreateComment(cmt Comment) (Comment, error) {\n\t// call existing create comments func\n\tcreatedComments, createCommentsErr := CreateComments([]Comment{\n\t\tcmt,\n\t})\n\tif createCommentsErr != nil {\n\t\treturn Comment{}, createCommentsErr\n\t}\n\n\t//Return success without any error.\n\treturn createdComments[0], nil\n}", "func resourceCommentCreate(d *schema.ResourceData, m interface{}) error {\n\tconfig := m.(*Config)\n\tbody := d.Get(\"body\").(string)\n\tissueKey := d.Get(\"issue_key\").(string)\n\n\tc := jira.Comment{Body: body}\n\n\tcomment, res, err := config.jiraClient.Issue.AddComment(issueKey, &c)\n\n\tif err != nil {\n\t\tbody, _ := ioutil.ReadAll(res.Body)\n\t\treturn errors.Wrapf(err, \"creating jira issue failed: %s\", body)\n\t}\n\n\td.SetId(comment.ID)\n\n\treturn resourceCommentRead(d, m)\n}", "func (db *ConcreteDatastore) CreateComment(Comment model.Comment) (int64, error) {\n\tvar (\n\t\ttx *sql.Tx\n\t\terr error\n\t\tres sql.Result\n\t\tcommentId int64\n\t)\n\n\t// Preparing to request\n\tif tx, err = db.Begin(); err != nil {\n\t\treturn -1, err\n\t}\n\n\t// Setting up the request and executing it\n\trequest := `INSERT INTO Comment(schedule_id, comment, is_important) VALUES (?, ?, ?)`\n\tif res, err = tx.Exec(request, Comment.ScheduleId, Comment.Comment, Comment.IsImportant); err != nil {\n\t\tif errr := tx.Rollback(); errr != nil {\n\t\t\treturn -1, errr\n\t\t}\n\t\treturn -1, err\n\t}\n\n\t// Getting the id of the last item inserted\n\tif commentId, err = res.LastInsertId(); err != nil {\n\t\tif errr := tx.Rollback(); errr != nil {\n\t\t\treturn -1, errr\n\t\t}\n\t\treturn -1, err\n\t}\n\n\t// Saving\n\tif err = tx.Commit(); err != nil {\n\t\tif errr := tx.Rollback(); errr != nil {\n\t\t\treturn -1, errr\n\t\t}\n\t\treturn -1, err\n\t}\n\n\treturn commentId, nil\n}", "func (u *commentUsecase) Create(org, comment string) (*domain.Comment, 
error) {\n\texists, err := u.ghCli.OrgExists(org)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif !exists {\n\t\treturn nil, domain.NewErrorNotFound(fmt.Sprintf(\"Org %s not found\", org))\n\t}\n\n\tnewComment := &domain.Comment{Org: org, Comment: comment}\n\tID, err := u.dbRepo.InsertComment(newComment)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tnewComment.ID = ID\n\n\treturn newComment, nil\n}", "func CreateComment(db *sql.DB, c *Comment) (int, error) {\n\tvar id int\n\terr := db.QueryRow(`\n\t\tinsert into comments (\n\t\t\tcomment, created_at\n\t\t) values (\n\t\t\t$1, $2\n\t\t) returning id\n\t`, c.Comment, c.CreatedAt).Scan(&id)\n\tif err != nil {\n\t\treturn 0, nil\n\t}\n\treturn id, nil\n}", "func CreateNewCommentHandler(w http.ResponseWriter, r *http.Request) {\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\tnewComment := Comment{}\n\n\terr := json.NewDecoder(r.Body).Decode(&newComment)\n\tif err != nil {\n\t\tlog.Fatal(\"[!] Error decoding data in request body (CreateNewCommentHandler): \", err)\n\t}\n\n\tCreateNewComment(newComment)\n}", "func (s *Rest) createCommentCtrl(w http.ResponseWriter, r *http.Request) {\n\n\tcomment := store.Comment{}\n\tif err := render.DecodeJSON(http.MaxBytesReader(w, r.Body, hardBodyLimit), &comment); err != nil {\n\t\trest.SendErrorJSON(w, r, http.StatusBadRequest, err, \"can't bind comment\")\n\t\treturn\n\t}\n\n\tuser, err := rest.GetUserInfo(r)\n\tif err != nil { // this not suppose to happen (handled by Auth), just dbl-check\n\t\trest.SendErrorJSON(w, r, http.StatusUnauthorized, err, \"can't get user info\")\n\t\treturn\n\t}\n\tlog.Printf(\"[DEBUG] create comment %+v\", comment)\n\n\tcomment.PrepareUntrusted() // clean all fields user not supposed to set\n\tcomment.User = user\n\tcomment.User.IP = strings.Split(r.RemoteAddr, \":\")[0]\n\n\tcomment.Orig = comment.Text // original comment text, prior to md render\n\tif err = s.DataService.ValidateComment(&comment); err != nil 
{\n\t\trest.SendErrorJSON(w, r, http.StatusBadRequest, err, \"invalid comment\")\n\t\treturn\n\t}\n\tcomment.Text = string(blackfriday.Run([]byte(comment.Text), blackfriday.WithExtensions(mdExt)))\n\tcomment.Text = s.ImageProxy.Convert(comment.Text)\n\t// check if user blocked\n\tif s.adminService.checkBlocked(comment.Locator.SiteID, comment.User) {\n\t\trest.SendErrorJSON(w, r, http.StatusForbidden, errors.New(\"rejected\"), \"user blocked\")\n\t\treturn\n\t}\n\n\tif s.ReadOnlyAge > 0 {\n\t\tif info, e := s.DataService.Info(comment.Locator, s.ReadOnlyAge); e == nil && info.ReadOnly {\n\t\t\trest.SendErrorJSON(w, r, http.StatusForbidden, errors.New(\"rejected\"), \"old post, read-only\")\n\t\t\treturn\n\t\t}\n\t}\n\n\tid, err := s.DataService.Create(comment)\n\tif err != nil {\n\t\trest.SendErrorJSON(w, r, http.StatusInternalServerError, err, \"can't save comment\")\n\t\treturn\n\t}\n\n\t// DataService modifies comment\n\tfinalComment, err := s.DataService.Get(comment.Locator, id)\n\tif err != nil {\n\t\trest.SendErrorJSON(w, r, http.StatusInternalServerError, err, \"can't load created comment\")\n\t\treturn\n\t}\n\ts.Cache.Flush(comment.Locator.URL, \"last\", comment.User.ID)\n\n\trender.Status(r, http.StatusCreated)\n\trender.JSON(w, r, &finalComment)\n}", "func (c CommentRepo) Create(context context.Context, comment model.CommentDTO) (string, error) {\n\tcommentEntity, err := comment.Entity()\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tres, err := c.collection.InsertOne(context, commentEntity)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\treturn res.InsertedID.(primitive.ObjectID).Hex(), nil\n}", "func (client *Client) Comment(refType string, refId int64, text string, params map[string]interface{}) (*Comment, error) {\n\tpath := fmt.Sprintf(\"/comment/%s/%d/\", refType, refId)\n\tif params == nil {\n\t\tparams = map[string]interface{}{}\n\t}\n\tparams[\"value\"] = text\n\n\tcomment := &Comment{}\n\terr := client.RequestWithParams(\"POST\", path, nil, 
params, comment)\n\treturn comment, err\n}", "func (*XMLDocument) CreateComment(data string) (w *window.Comment) {\n\tmacro.Rewrite(\"$_.createComment($1)\", data)\n\treturn w\n}", "func (c PostCommentDetailController) Create(ctx *fasthttp.RequestCtx) {\n\tvar e []bool\n\tpostID, notExists := utils.ParseInt(phi.URLParam(ctx, \"postID\"), 10, 64)\n\te = append(e, notExists)\n\tcommentID, notExists := utils.ParseInt(phi.URLParam(ctx, \"commentID\"), 10, 64)\n\te = append(e, notExists)\n\n\tif exists, _ := utils.InArray(true, e); exists {\n\t\tc.JSONResponse(ctx, model2.ResponseError{\n\t\t\tDetail: fasthttp.StatusMessage(fasthttp.StatusBadRequest),\n\t\t}, fasthttp.StatusBadRequest)\n\t\treturn\n\t}\n\n\tcommentDetail := new(model.PostCommentDetail)\n\tc.JSONBody(ctx, &commentDetail)\n\tcommentDetail.PostID = postID\n\tcommentDetail.CommentID = commentID\n\n\tif errs, err := database.ValidateStruct(commentDetail); err != nil {\n\t\tc.JSONResponse(ctx, model2.ResponseError{\n\t\t\tErrors: errs,\n\t\t\tDetail: fasthttp.StatusMessage(fasthttp.StatusUnprocessableEntity),\n\t\t}, fasthttp.StatusUnprocessableEntity)\n\t\treturn\n\t}\n\n\tcommentDetail.Comment = c.App.TextPolicy.Sanitize(commentDetail.Comment)\n\n\terr := c.GetDB().Insert(new(model.PostCommentDetail), commentDetail, \"id\", \"inserted_at\")\n\tif errs, err := database.ValidateConstraint(err, commentDetail); err != nil {\n\t\tc.JSONResponse(ctx, model2.ResponseError{\n\t\t\tErrors: errs,\n\t\t\tDetail: fasthttp.StatusMessage(fasthttp.StatusUnprocessableEntity),\n\t\t}, fasthttp.StatusUnprocessableEntity)\n\t\treturn\n\t}\n\n\tc.JSONResponse(ctx, model2.ResponseSuccessOne{\n\t\tData: commentDetail,\n\t}, fasthttp.StatusCreated)\n}", "func (r *Rietveld) AddComment(issue int64, message string) error {\n\tdata := url.Values{}\n\tdata.Add(\"message\", message)\n\tdata.Add(\"message_only\", \"True\")\n\tdata.Add(\"add_as_reviewer\", \"False\")\n\tdata.Add(\"send_mail\", \"True\")\n\tdata.Add(\"no_redirect\", 
\"True\")\n\treturn r.post(fmt.Sprintf(\"/%d/publish\", issue), data)\n}", "func CreateCommentReaction(doerID, issueID, commentID int64, content string) (*Reaction, error) {\n\treturn CreateReaction(&ReactionOptions{\n\t\tType: content,\n\t\tDoerID: doerID,\n\t\tIssueID: issueID,\n\t\tCommentID: commentID,\n\t})\n}", "func (z *Client) CreateTicketComment(ctx context.Context, ticketID int64, ticketComment TicketComment) (TicketComment, error) {\n\ttype comment struct {\n\t\tTicket struct {\n\t\t\tTicketComment TicketComment `json:\"comment\"`\n\t\t} `json:\"ticket\"`\n\t}\n\n\tdata := &comment{}\n\tdata.Ticket.TicketComment = ticketComment\n\n\tbody, err := z.put(ctx, fmt.Sprintf(\"/tickets/%d.json\", ticketID), data)\n\tif err != nil {\n\t\treturn TicketComment{}, err\n\t}\n\n\tresult := TicketComment{}\n\terr = json.Unmarshal(body, &result)\n\tif err != nil {\n\t\treturn TicketComment{}, err\n\t}\n\n\treturn result, err\n}", "func (m *Client) CreateTicketComment(arg0 context.Context, arg1 int64, arg2 zendesk.TicketComment) (zendesk.TicketComment, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"CreateTicketComment\", arg0, arg1, arg2)\n\tret0, _ := ret[0].(zendesk.TicketComment)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func (db *Database) CreateComment(body string, author string, path string, confirmed bool, replyTo *uuid.UUID) (*uuid.UUID, error) {\n\tthread, err := db.GetThread(path)\n\tif err != nil {\n\t\tif err == global.ErrThreadNotFound {\n\t\t\t_, err := db.CreateThread(path)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\treturn db.CreateComment(body, author, path, confirmed, replyTo)\n\t\t}\n\t\treturn nil, err\n\t}\n\tif replyTo != nil {\n\t\tcomment, err := db.GetComment(*replyTo)\n\t\tif err != nil {\n\t\t\tif err == global.ErrCommentNotFound {\n\t\t\t\treturn nil, global.ErrWrongReplyTo\n\t\t\t}\n\t\t\treturn nil, err\n\t\t}\n\t\t// Check if the comment you're replying to actually is a part of the thread\n\t\tif 
!bytes.Equal(comment.ThreadId.Bytes(), thread.Id.Bytes()) {\n\t\t\treturn nil, global.ErrWrongReplyTo\n\t\t}\n\t\t// We allow for only a single layer of nesting. (Maybe just for now? who knows.)\n\t\tif comment.ReplyTo != nil && replyTo != nil {\n\t\t\treplyTo = comment.ReplyTo\n\t\t}\n\t}\n\tuid := global.GetUUID()\n\tvar toReplyTo *string\n\tif replyTo != nil {\n\t\ttrt := replyTo.String()\n\t\ttoReplyTo = &trt\n\t}\n\terr = db.DB.Table(db.TablePrefix + global.DefaultDynamoDbCommentTableName).Put(dynamoModel.Comment{\n\t\tId: uid,\n\t\tThreadId: thread.Id,\n\t\tBody: body,\n\t\tAuthor: author,\n\t\tConfirmed: confirmed,\n\t\tCreatedAt: time.Now().UTC(),\n\t\tReplyTo: toReplyTo,\n\t}).Run()\n\treturn &uid, err\n}", "func (c *Client) NewComment(nc *www.NewComment) (*www.NewCommentReply, error) {\n\tresponseBody, err := c.makeRequest(http.MethodPost,\n\t\twww.PoliteiaWWWAPIRoute, www.RouteNewComment, nc)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar ncr www.NewCommentReply\n\terr = json.Unmarshal(responseBody, &ncr)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"unmarshal NewCommentReply: %v\", err)\n\t}\n\n\tif c.cfg.Verbose {\n\t\terr := prettyPrintJSON(ncr)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\treturn &ncr, nil\n}", "func (w *ServerInterfaceWrapper) NewComment(ctx echo.Context) error {\n\tvar err error\n\t// ------------- Path parameter \"project_id\" -------------\n\tvar projectId string\n\n\terr = runtime.BindStyledParameter(\"simple\", false, \"project_id\", ctx.Param(\"project_id\"), &projectId)\n\tif err != nil {\n\t\treturn echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf(\"Invalid format for parameter project_id: %s\", err))\n\t}\n\n\t// ------------- Path parameter \"issue_id\" -------------\n\tvar issueId string\n\n\terr = runtime.BindStyledParameter(\"simple\", false, \"issue_id\", ctx.Param(\"issue_id\"), &issueId)\n\tif err != nil {\n\t\treturn echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf(\"Invalid format 
for parameter issue_id: %s\", err))\n\t}\n\n\t// HasSecurity is set\n\n\tctx.Set(\"OpenId.Scopes\", []string{\"exitus/comment.write\"})\n\n\t// Invoke the callback with all the unmarshalled arguments\n\terr = w.Handler.NewComment(ctx, projectId, issueId)\n\treturn err\n}", "func (mr *MockClientMockRecorder) CreateComment(org, repo, number, comment interface{}) *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"CreateComment\", reflect.TypeOf((*MockClient)(nil).CreateComment), org, repo, number, comment)\n}", "func CreateIssueComment(ctx *context.APIContext) {\n\t// swagger:operation POST /repos/{owner}/{repo}/issues/{index}/comments issue issueCreateComment\n\t// ---\n\t// summary: Add a comment to an issue\n\t// consumes:\n\t// - application/json\n\t// produces:\n\t// - application/json\n\t// parameters:\n\t// - name: owner\n\t// in: path\n\t// description: owner of the repo\n\t// type: string\n\t// required: true\n\t// - name: repo\n\t// in: path\n\t// description: name of the repo\n\t// type: string\n\t// required: true\n\t// - name: index\n\t// in: path\n\t// description: index of the issue\n\t// type: integer\n\t// format: int64\n\t// required: true\n\t// - name: body\n\t// in: body\n\t// schema:\n\t// \"$ref\": \"#/definitions/CreateIssueCommentOption\"\n\t// responses:\n\t// \"201\":\n\t// \"$ref\": \"#/responses/Comment\"\n\t// \"403\":\n\t// \"$ref\": \"#/responses/forbidden\"\n\tform := web.GetForm(ctx).(*api.CreateIssueCommentOption)\n\tissue, err := issues_model.GetIssueByIndex(ctx, ctx.Repo.Repository.ID, ctx.ParamsInt64(\":index\"))\n\tif err != nil {\n\t\tctx.Error(http.StatusInternalServerError, \"GetIssueByIndex\", err)\n\t\treturn\n\t}\n\n\tif issue.IsLocked && !ctx.Repo.CanWriteIssuesOrPulls(issue.IsPull) && !ctx.Doer.IsAdmin {\n\t\tctx.Error(http.StatusForbidden, \"CreateIssueComment\", errors.New(ctx.Tr(\"repo.issues.comment_on_locked\")))\n\t\treturn\n\t}\n\n\tcomment, err := 
issue_service.CreateIssueComment(ctx, ctx.Doer, ctx.Repo.Repository, issue, form.Body, nil)\n\tif err != nil {\n\t\tctx.Error(http.StatusInternalServerError, \"CreateIssueComment\", err)\n\t\treturn\n\t}\n\n\tctx.JSON(http.StatusCreated, convert.ToAPIComment(ctx, ctx.Repo.Repository, comment))\n}", "func (pr *PrMock) CreateCommentEvent(userCreator SenderCreator, content, action string) *gogh.IssueCommentEvent {\n\treturn &gogh.IssueCommentEvent{\n\t\tAction: utils.String(action),\n\t\tIssue: &gogh.Issue{\n\t\t\tNumber: pr.PullRequest.Number,\n\t\t},\n\t\tComment: &gogh.IssueComment{\n\t\t\tBody: utils.String(content),\n\t\t},\n\t\tRepo: pr.PullRequest.Base.Repo,\n\t\tSender: userCreator(pr.PullRequest),\n\t}\n}", "func (a *ProblemsApiService) CreateCommentExecute(r ApiCreateCommentRequest) (*_nethttp.Response, error) {\n\tvar (\n\t\tlocalVarHTTPMethod = _nethttp.MethodPost\n\t\tlocalVarPostBody interface{}\n\t\tlocalVarFormFileName string\n\t\tlocalVarFileName string\n\t\tlocalVarFileBytes []byte\n\t)\n\n\tlocalBasePath, err := a.client.cfg.ServerURLWithContext(r.ctx, \"ProblemsApiService.CreateComment\")\n\tif err != nil {\n\t\treturn nil, GenericOpenAPIError{error: err.Error()}\n\t}\n\n\tlocalVarPath := localBasePath + \"/problems/{problemId}/comments\"\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"problemId\"+\"}\", _neturl.PathEscape(parameterToString(r.problemId, \"\")), -1)\n\n\tlocalVarHeaderParams := make(map[string]string)\n\tlocalVarQueryParams := _neturl.Values{}\n\tlocalVarFormParams := _neturl.Values{}\n\n\t// to determine the Content-Type header\n\tlocalVarHTTPContentTypes := []string{\"application/json; charset=utf-8\"}\n\n\t// set Content-Type header\n\tlocalVarHTTPContentType := selectHeaderContentType(localVarHTTPContentTypes)\n\tif localVarHTTPContentType != \"\" {\n\t\tlocalVarHeaderParams[\"Content-Type\"] = localVarHTTPContentType\n\t}\n\n\t// to determine the Accept header\n\tlocalVarHTTPHeaderAccepts := []string{}\n\n\t// set 
Accept header\n\tlocalVarHTTPHeaderAccept := selectHeaderAccept(localVarHTTPHeaderAccepts)\n\tif localVarHTTPHeaderAccept != \"\" {\n\t\tlocalVarHeaderParams[\"Accept\"] = localVarHTTPHeaderAccept\n\t}\n\t// body params\n\tlocalVarPostBody = r.commentRequestDtoImpl\n\tif r.ctx != nil {\n\t\t// API Key Authentication\n\t\tif auth, ok := r.ctx.Value(ContextAPIKeys).(map[string]APIKey); ok {\n\t\t\tif apiKey, ok := auth[\"Api-Token\"]; ok {\n\t\t\t\tvar key string\n\t\t\t\tif apiKey.Prefix != \"\" {\n\t\t\t\t\tkey = apiKey.Prefix + \" \" + apiKey.Key\n\t\t\t\t} else {\n\t\t\t\t\tkey = apiKey.Key\n\t\t\t\t}\n\t\t\t\tlocalVarHeaderParams[\"Authorization\"] = key\n\t\t\t}\n\t\t}\n\t}\n\treq, err := a.client.prepareRequest(r.ctx, localVarPath, localVarHTTPMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFormFileName, localVarFileName, localVarFileBytes)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tlocalVarHTTPResponse, err := a.client.callAPI(req)\n\tif err != nil || localVarHTTPResponse == nil {\n\t\treturn localVarHTTPResponse, err\n\t}\n\n\tlocalVarBody, err := _ioutil.ReadAll(localVarHTTPResponse.Body)\n\tlocalVarHTTPResponse.Body.Close()\n\tlocalVarHTTPResponse.Body = _ioutil.NopCloser(bytes.NewBuffer(localVarBody))\n\tif err != nil {\n\t\treturn localVarHTTPResponse, err\n\t}\n\n\tif localVarHTTPResponse.StatusCode >= 300 {\n\t\tnewErr := GenericOpenAPIError{\n\t\t\tbody: localVarBody,\n\t\t\terror: localVarHTTPResponse.Status,\n\t\t}\n\t\treturn localVarHTTPResponse, newErr\n\t}\n\n\treturn localVarHTTPResponse, nil\n}", "func CreateIssueComment(id int64, login, owner, repo string) error {\n\tmessage := fmt.Sprintf(\"Thank you for opening an issue @%s. 
Your contributions are welcome.\", login)\n\n\tissueComment := github.IssueComment{\n\t\tID: &id,\n\t\tBody: &message,\n\t}\n\n\tgithubClient := New()\n\tcomment, _, err := githubClient.Issues.CreateComment(context.Background(), owner, repo, int(id), &issueComment)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tlog.SetOutput(os.Stdout)\n\tlog.Print(comment)\n\n\treturn nil\n}", "func (self *CourseService)AddComment(content, courseId, userId string) (commentVo *course.CourseCommentVo, error *err.HttpError) {\n\tcommentVo = new(course.CourseCommentVo)\n\tcommentTable := new(table.CourseCommentTable)\n\tcommentTable.UUID = uuid.New()\n\tcommentTable.Content = content\n\tcommentTable.CourseId = courseId\n\tcommentTable.CreateUser = userId\n\tcommentTable.CreateTime = time.Now()\n\tcommentTable.FrozenStatus = value.STATUS_ENABLED\n\tinsertNum, insertErr := self.Session.InsertOne(commentTable)\n\tif insertNum == 0 {\n\t\tif insertErr != nil {\n\t\t\tself.Log.Println(insertErr)\n\t\t}\n\t\terror = err.COURSE_COMMENT_INSERT_ERR\n\t\treturn\n\t}\n\tcommentVo = course.NewCommentVo(commentTable, self.Session, self.Log)\n\terror = nil\n\treturn\n}", "func createIssueComment(\n\tctx context.Context,\n\tpr *github.PullRequest,\n\tclient *github.Client,\n\tmessage string,\n) error {\n\tcomment := &github.IssueComment{Body: &message}\n\t_, _, err := client.Issues.CreateComment(\n\t\tctx,\n\t\tpr.Base.Repo.Owner.GetLogin(),\n\t\tpr.Base.Repo.GetName(),\n\t\tpr.GetNumber(),\n\t\tcomment,\n\t)\n\treturn err\n}", "func CreatePRReviewComment(username string, owner string, repo string, id int64) error {\n\tmessage := fmt.Sprintf(\"Thank you for opening an PR @%s. Your contributions are welcomed ! 
:)\", username)\n\n\tpullReqComment := github.PullRequestComment{\n\t\tID: &id,\n\t\tBody: &message,\n\t}\n\n\tgithubClient := New()\n\tcomment, _, err := githubClient.PullRequests.CreateComment(context.Background(), owner, repo, int(id), &pullReqComment)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tlog.SetOutput(os.Stdout)\n\tlog.Print(comment)\n\n\treturn nil\n}", "func TestCommentCreateOnline(t *testing.T) {\n\tvar slideIndex int32 = 3\n\tcommentText := \"Comment text\"\n\tauthor := \"Test author\"\n\tchildCommentText := \"Child comment text\"\n\n\tc := slidescloud.GetTestApiClient()\n\t_, e := c.SlidesApi.CopyFile(\"TempTests/\"+fileName, folderName+\"/\"+fileName, \"\", \"\", \"\")\n\tif e != nil {\n\t\tt.Errorf(\"Error: %v.\", e)\n\t\treturn\n\t}\n\n\tdto := slidescloud.NewSlideComment()\n\tdto.Text = commentText\n\tdto.Author = author\n\n\tchildCommentDto := slidescloud.NewSlideComment()\n\tchildCommentDto.Text = childCommentText\n\tchildCommentDto.Author = author\n\tdto.ChildComments = []slidescloud.ISlideCommentBase{childCommentDto}\n\n\tsource, e := ioutil.ReadFile(localTestFile)\n\t_, _, e = c.SlidesApi.CreateCommentOnline(source, slideIndex, dto, nil, password)\n\n\tif e != nil {\n\t\tt.Errorf(\"Error: %v.\", e)\n\t\treturn\n\t}\n}", "func (c Client) AddComment(ctx context.Context, ID it.IssueID, comment it.Comment) (it.CommentID, error) {\n\tissueID, err := strconv.Atoi(string(ID))\n\tif err != nil {\n\t\treturn it.CommentID(\"\"), err\n\t}\n\tid, err := c.Client.IssueNoteAdd(ctx, issueID, mantis.IssueNoteData{\n\t\t//Reporter:\n\t\tDateSubmitted: mantis.Time(comment.CreatedAt),\n\t\tText: comment.Body,\n\t})\n\treturn it.CommentID(strconv.Itoa(id)), err\n}", "func NewComment(text string) Comment {\n\treturn Comment{\n\t\tID: time.Now().Unix(),\n\t\tText: \"Hello\",\n\t}\n}", "func NewComment(createdAtTimestamp int64) *Comment {\n\tthis := Comment{}\n\tthis.CreatedAtTimestamp = createdAtTimestamp\n\treturn &this\n}", "func NewCreateCommentContext(ctx 
context.Context, r *http.Request, service *goa.Service) (*CreateCommentContext, error) {\n\tvar err error\n\tresp := goa.ContextResponse(ctx)\n\tresp.Service = service\n\treq := goa.ContextRequest(ctx)\n\treq.Request = r\n\trctx := CreateCommentContext{Context: ctx, ResponseData: resp, RequestData: req}\n\treturn &rctx, err\n}", "func NewCreateCommentContext(ctx context.Context, r *http.Request, service *goa.Service) (*CreateCommentContext, error) {\n\tvar err error\n\tresp := goa.ContextResponse(ctx)\n\tresp.Service = service\n\treq := goa.ContextRequest(ctx)\n\treq.Request = r\n\trctx := CreateCommentContext{Context: ctx, ResponseData: resp, RequestData: req}\n\treturn &rctx, err\n}", "func (mr *MockCommentClientMockRecorder) CreateComment(org, repo, number, comment interface{}) *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"CreateComment\", reflect.TypeOf((*MockCommentClient)(nil).CreateComment), org, repo, number, comment)\n}", "func TestCommentCreate(t *testing.T) {\n\tvar slideIndex int32 = 3\n\tcommentText := \"Comment text\"\n\tauthor := \"Test author\"\n\tchildCommentText := \"Child comment text\"\n\n\tc := slidescloud.GetTestApiClient()\n\t_, e := c.SlidesApi.CopyFile(\"TempTests/\"+fileName, folderName+\"/\"+fileName, \"\", \"\", \"\")\n\tif e != nil {\n\t\tt.Errorf(\"Error: %v.\", e)\n\t\treturn\n\t}\n\n\tdto := slidescloud.NewSlideComment()\n\tdto.Text = commentText\n\tdto.Author = author\n\n\tchildCommentDto := slidescloud.NewSlideComment()\n\tchildCommentDto.Text = childCommentText\n\tchildCommentDto.Author = author\n\tdto.ChildComments = []slidescloud.ISlideCommentBase{childCommentDto}\n\n\tresponse, _, e := c.SlidesApi.CreateComment(fileName, slideIndex, dto, nil, password, folderName, \"\")\n\n\tif e != nil {\n\t\tt.Errorf(\"Error: %v.\", e)\n\t\treturn\n\t}\n\n\tif len(response.GetList()) != 1 {\n\t\tt.Errorf(\"Expected %v, but was %v\", 1, len(response.GetList()))\n\t\treturn\n\t}\n\n\tif 
response.GetList()[0].GetText() != commentText {\n\t\tt.Errorf(\"Expected %v, but was %v\", commentText, response.GetList()[0].GetText())\n\t\treturn\n\t}\n\n\tif response.GetList()[0].GetAuthor() != author {\n\t\tt.Errorf(\"Expected %v, but was %v\", author, response.GetList()[0].GetAuthor())\n\t\treturn\n\t}\n\n\tchildComment := response.GetList()[0].GetChildComments()[0]\n\tif childComment.GetText() != childCommentText {\n\t\tt.Errorf(\"Expected %v, but was %v\", childCommentText, childComment.GetText())\n\t\treturn\n\t}\n\n\tif childComment.GetAuthor() != author {\n\t\tt.Errorf(\"Expected %v, but was %v\", childCommentText, childComment.GetAuthor())\n\t\treturn\n\t}\n}", "func (c *client) CreatePullRequestReviewComment(org, repo string, number int, rc ReviewComment) error {\n\tc.log(\"CreatePullRequestReviewComment\", org, repo, number, rc)\n\n\t// TODO: remove custom Accept headers when their respective API fully launches.\n\tacceptHeaders := []string{\n\t\t// https://developer.github.com/changes/2016-05-12-reactions-api-preview/\n\t\t\"application/vnd.github.squirrel-girl-preview\",\n\t\t// https://developer.github.com/changes/2019-10-03-multi-line-comments/\n\t\t\"application/vnd.github.comfort-fade-preview+json\",\n\t}\n\n\t_, err := c.request(&request{\n\t\tmethod: http.MethodPost,\n\t\taccept: strings.Join(acceptHeaders, \", \"),\n\t\tpath: fmt.Sprintf(\"/repos/%s/%s/pulls/%d/comments\", org, repo, number),\n\t\torg: org,\n\t\trequestBody: &rc,\n\t\texitCodes: []int{201},\n\t}, nil)\n\treturn err\n}", "func (ctx *CreateCommentContext) Created() error {\n\tctx.ResponseData.WriteHeader(201)\n\treturn nil\n}", "func (ctx *CreateCommentContext) Created() error {\n\tctx.ResponseData.WriteHeader(201)\n\treturn nil\n}", "func (m *MockClient) CreateComment(org, repo string, number int, comment string) error {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"CreateComment\", org, repo, number, comment)\n\tret0, _ := ret[0].(error)\n\treturn ret0\n}", "func 
DefaultCreateComment(ctx context.Context, in *Comment, db *gorm1.DB) (*Comment, error) {\n\tif in == nil {\n\t\treturn nil, errors1.NilArgumentError\n\t}\n\tormObj, err := in.ToORM(ctx)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif hook, ok := interface{}(&ormObj).(CommentORMWithBeforeCreate_); ok {\n\t\tif db, err = hook.BeforeCreate_(ctx, db); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tif err = db.Create(&ormObj).Error; err != nil {\n\t\treturn nil, err\n\t}\n\tif hook, ok := interface{}(&ormObj).(CommentORMWithAfterCreate_); ok {\n\t\tif err = hook.AfterCreate_(ctx, db); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tpbResponse, err := ormObj.ToPB(ctx)\n\treturn &pbResponse, err\n}", "func TestModernCommentCreate(t *testing.T) {\n\tvar slideIndex int32 = 3\n\tvar textSelectionStartIndex int32 = 1\n\tvar textSelectionLength int32 = 5\n\tcommentText := \"Comment text\"\n\tauthor := \"Test author\"\n\tchildCommentText := \"Child comment text\"\n\n\tc := slidescloud.GetTestApiClient()\n\t_, e := c.SlidesApi.CopyFile(\"TempTests/\"+fileName, folderName+\"/\"+fileName, \"\", \"\", \"\")\n\tif e != nil {\n\t\tt.Errorf(\"Error: %v.\", e)\n\t\treturn\n\t}\n\n\tchildCommentDto := slidescloud.NewSlideModernComment()\n\tchildCommentDto.Text = childCommentText\n\tchildCommentDto.Author = author\n\tchildCommentDto.Status = \"Resolved\"\n\n\tdto := slidescloud.NewSlideModernComment()\n\tdto.Text = commentText\n\tdto.Author = author\n\tdto.Status = \"Active\"\n\tdto.TextSelectionStart = textSelectionStartIndex\n\tdto.TextSelectionLength = textSelectionLength\n\tdto.ChildComments = []slidescloud.ISlideCommentBase{childCommentDto}\n\n\tresponse, _, e := c.SlidesApi.CreateComment(fileName, slideIndex, dto, nil, password, folderName, \"\")\n\n\tif e != nil {\n\t\tt.Errorf(\"Error: %v.\", e)\n\t\treturn\n\t}\n\n\tif len(response.GetList()) != 1 {\n\t\tt.Errorf(\"Expected %v, but was %v\", 1, len(response.GetList()))\n\t\treturn\n\t}\n\n\tchildComment := 
response.GetList()[0].GetChildComments()[0]\n\tif childComment.GetText() != childCommentText {\n\t\tt.Errorf(\"Expected %v, but was %v\", childCommentText, childComment.GetText())\n\t\treturn\n\t}\n}", "func Comment(c *fiber.Ctx) {\n\tShopID := c.Params(\"shop_id\")\n\tUserID := userIDF(c.Get(\"token\"))\n\n\tvar Data CommentStruct\n\n\tif errorParse := c.BodyParser(&Data); errorParse != nil {\n\t\tfmt.Println(\"Error parsing data\", errorParse)\n\t\tc.JSON(ErrorResponse{MESSAGE: \"Error al parsear información\"})\n\t\tc.Status(400)\n\t\treturn\n\t}\n\n\tid, errorInsert := sq.Insert(\"shop_comments\").\n\t\tColumns(\n\t\t\t\"user_id\",\n\t\t\t\"shop_id\",\n\t\t\t\"comment\",\n\t\t).\n\t\tValues(\n\t\t\tUserID,\n\t\t\tShopID,\n\t\t\tData.Comment,\n\t\t).\n\t\tRunWith(database).\n\t\tExec()\n\n\tif errorInsert != nil {\n\t\tfmt.Println(\"Error to save shop\", errorInsert)\n\t}\n\n\tIDLast, _ := id.LastInsertId()\n\tIDS := strconv.FormatInt(IDLast, 10)\n\n\tc.JSON(SuccessResponse{MESSAGE: IDS})\n}", "func TestShapeModernCommentCreate(t *testing.T) {\n\tvar slideIndex int32 = 3\n\tvar shapeIndex int32 = 1\n\tvar textSelectionStartIndex int32 = 1\n\tvar textSelectionLength int32 = 5\n\tcommentText := \"Comment text\"\n\tauthor := \"Test author\"\n\tchildCommentText := \"Child comment text\"\n\n\tc := slidescloud.GetTestApiClient()\n\t_, e := c.SlidesApi.CopyFile(\"TempTests/\"+fileName, folderName+\"/\"+fileName, \"\", \"\", \"\")\n\tif e != nil {\n\t\tt.Errorf(\"Error: %v.\", e)\n\t\treturn\n\t}\n\n\tchildCommentDto := slidescloud.NewSlideModernComment()\n\tchildCommentDto.Text = childCommentText\n\tchildCommentDto.Author = author\n\tchildCommentDto.Status = \"Resolved\"\n\n\tdto := slidescloud.NewSlideModernComment()\n\tdto.Text = commentText\n\tdto.Author = author\n\tdto.Status = \"Active\"\n\tdto.TextSelectionStart = textSelectionStartIndex\n\tdto.TextSelectionLength = textSelectionLength\n\tdto.ChildComments = 
[]slidescloud.ISlideCommentBase{childCommentDto}\n\n\tresponse, _, e := c.SlidesApi.CreateComment(fileName, slideIndex, dto, &shapeIndex, password, folderName, \"\")\n\n\tif e != nil {\n\t\tt.Errorf(\"Error: %v.\", e)\n\t\treturn\n\t}\n\n\tif len(response.GetList()) != 1 {\n\t\tt.Errorf(\"Expected %v, but was %v\", 1, len(response.GetList()))\n\t\treturn\n\t}\n}", "func (m *MockCommentClient) CreateComment(org, repo string, number int, comment string) error {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"CreateComment\", org, repo, number, comment)\n\tret0, _ := ret[0].(error)\n\treturn ret0\n}", "func (j *Jira) AddComment(issue *Issue, comment string) error {\n\tvar cMap = make(map[string]string)\n\tcMap[\"body\"] = comment\n\n\tcJson, err := json.Marshal(cMap)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\turi := j.BaseUrl + j.ApiPath + \"/issue/\" + issue.Key + \"/comment\"\n\tbody := bytes.NewBuffer(cJson)\n\n\t_, err = j.postJson(uri, body)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}", "func (s *IdeaStorage) AddComment(number int, content string, userID int) (int, error) {\n\treturn 0, nil\n}", "func (s *TeamsService) CreateCommentByID(ctx context.Context, orgID, teamID int64, discsusionNumber int, comment DiscussionComment) (*DiscussionComment, *Response, error) {\n\tu := fmt.Sprintf(\"organizations/%v/team/%v/discussions/%v/comments\", orgID, teamID, discsusionNumber)\n\treq, err := s.client.NewRequest(\"POST\", u, comment)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tdiscussionComment := &DiscussionComment{}\n\tresp, err := s.client.Do(ctx, req, discussionComment)\n\tif err != nil {\n\t\treturn nil, resp, err\n\t}\n\n\treturn discussionComment, resp, nil\n}", "func (cs *CommentService) Post(ctx context.Context, diagramID string, opt *CommentOption) (*Comment, *Response, error) {\n\tu := fmt.Sprintf(\"diagrams/%s/comments/post.json\", diagramID)\n\n\tc := new(Comment)\n\n\tresp, err := cs.client.Post(ctx, u, opt, &c)\n\tif err != nil 
{\n\t\treturn nil, resp, err\n\t}\n\n\treturn c, resp, nil\n}", "func NewComment(db boil.Executor, model dbmodel.Comment) Comment {\n\treturn Comment{\n\t\tmodel: model,\n\t\tdb: db,\n\t}\n}", "func (mr *MockFeedUseCaseMockRecorder) CreateComment(arg0, arg1 interface{}) *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"CreateComment\", reflect.TypeOf((*MockFeedUseCase)(nil).CreateComment), arg0, arg1)\n}", "func (mr *MockServiceBoardMockRecorder) CreateComment(arg0 interface{}) *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"CreateComment\", reflect.TypeOf((*MockServiceBoard)(nil).CreateComment), arg0)\n}", "func NewComment(commentid, userid, postid uint64, commenttext string) *Comment {\n\tcomment := new(Comment)\n\tcomment.CommentID = commentid\n\tcomment.UserID = userid\n\tcomment.PostID = postid\n\tcomment.CommentText = commenttext\n\n\treturn comment\n}", "func NewCommentEvent(comment *models.Comment, torrent *models.Torrent) {\n\tcomment.Torrent = torrent\n\turl := \"/view/\" + strconv.FormatUint(uint64(torrent.ID), 10)\n\tif torrent.UploaderID > 0 {\n\t\ttorrent.Uploader.ParseSettings()\n\t\tif torrent.Uploader.Settings.Get(\"new_comment\") {\n\t\t\tT, _, _ := publicSettings.TfuncAndLanguageWithFallback(torrent.Uploader.Language, torrent.Uploader.Language) // We need to send the notification to every user in their language\n\t\t\tnotifications.NotifyUser(torrent.Uploader, comment.Identifier(), fmt.Sprintf(T(\"new_comment_on_torrent\"), torrent.Name), url, torrent.Uploader.Settings.Get(\"new_comment_email\"))\n\t\t}\n\t}\n}", "func (issue *Issue) SetComment(comment io.Reader) (*Comment, error) {\n\turl := fmt.Sprintf(\"%s/issue/%s/comment\", BaseUrl, issue.Key)\n\tcode, body := execRequest(\"POST\", url, comment)\n\tif code == http.StatusCreated {\n\t\tvar jiraComment Comment\n\t\terr := json.Unmarshal(body, &jiraComment)\n\t\tif err != nil {\n\t\t\treturn 
nil, err\n\t\t}\n\t\treturn &jiraComment, nil\n\t} else {\n\t\treturn nil, handleJiraError(body)\n\t}\n}", "func AddComment(tid, nickname, content string) error {\n\ttidNum, err := strconv.ParseInt(tid, 10, 64)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tcomment := new(Comment)\n\tcomment.Tid = tidNum\n\tcomment.Name = nickname\n\tcomment.Content = content\n\tcomment.Created = time.Now()\n\n\to := orm.NewOrm()\n\t/*insert a reply*/\n\t_, err = o.Insert(comment)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t/* update topic reply count */\n\ttopic := new(Topic)\n\tqs := o.QueryTable(\"topic\")\n\terr = qs.Filter(\"Id\", tid).One(topic)\n\tif err != nil {\n\t\treturn err\n\t}\n\ttopic.ReplyCount++\n\ttopic.ReplyTime = time.Now()\n\n\t_, err = o.Update(topic)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}", "func (s *TeamsService) CreateCommentBySlug(ctx context.Context, org, slug string, discsusionNumber int, comment DiscussionComment) (*DiscussionComment, *Response, error) {\n\tu := fmt.Sprintf(\"orgs/%v/teams/%v/discussions/%v/comments\", org, slug, discsusionNumber)\n\treq, err := s.client.NewRequest(\"POST\", u, comment)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tdiscussionComment := &DiscussionComment{}\n\tresp, err := s.client.Do(ctx, req, discussionComment)\n\tif err != nil {\n\t\treturn nil, resp, err\n\t}\n\n\treturn discussionComment, resp, nil\n}", "func (mr *MockFeedRepositoryMockRecorder) CreateComment(arg0, arg1 interface{}) *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"CreateComment\", reflect.TypeOf((*MockFeedRepository)(nil).CreateComment), arg0, arg1)\n}", "func createRobotComment(c context.Context, runID int64, comment tricium.Data_Comment) *robotCommentInput {\n\troco := &robotCommentInput{\n\t\tMessage: comment.Message,\n\t\tRobotID: comment.Category,\n\t\tRobotRunID: strconv.FormatInt(runID, 10),\n\t\tURL: composeRunURL(c, runID),\n\t\tPath: 
pathForGerrit(comment.Path),\n\t\tProperties: map[string]string{\"tricium_comment_uuid\": comment.Id},\n\t\tFixSuggestions: createFixSuggestions(comment.Suggestions),\n\t}\n\t// If no StartLine is given, the comment is assumed to be a file-level comment,\n\t// and the line field will not be populated so it will be set to zero.\n\tif comment.StartLine > 0 {\n\t\tif comment.EndLine > 0 {\n\t\t\t// If range is set, [the line field] equals the end line of the range.\n\t\t\troco.Line = int(comment.EndLine)\n\t\t\troco.Range = &commentRange{\n\t\t\t\tStartLine: int(comment.StartLine),\n\t\t\t\tEndLine: int(comment.EndLine),\n\t\t\t\tStartCharacter: int(comment.StartChar),\n\t\t\t\tEndCharacter: int(comment.EndChar),\n\t\t\t}\n\t\t} else {\n\t\t\troco.Line = int(comment.StartLine)\n\t\t}\n\t}\n\treturn roco\n}", "func (h *Handler) PostComment(w http.ResponseWriter, r *http.Request) {\n\tvar comment comment.Comment\n\tif err := json.NewDecoder(r.Body).Decode(&comment); err != nil {\n\t\tsendErrorResponse(w, \"Failed to decodde JSON body\", err)\n\t\treturn\n\t}\n\n\tcomment, err := h.Service.PostComment(comment)\n\tif err != nil {\n\t\tsendErrorResponse(w, \"Error posting a new comment\", err)\n\t\treturn\n\t}\n\n\tif err := json.NewEncoder(w).Encode(comment); err != nil {\n\t\tlog.Error(err)\n\t\treturn\n\t}\n}", "func (m *MaintainerManager) AddComment(number, comment string) (gh.Comment, error) {\n\treturn m.client.AddComment(m.repo, number, comment)\n}", "func (issue *Issue) SetComment(comment *Comment) (*Comment, error) {\n\turl := fmt.Sprintf(\"%s/issue/%s/comment\", BaseURL, issue.Key)\n\tencodedParams, err := json.Marshal(comment)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tcode, body := execRequest(\"POST\", url, bytes.NewBuffer(encodedParams))\n\tif code == http.StatusCreated {\n\t\tvar jiraComment Comment\n\t\terr := json.Unmarshal(body, &jiraComment)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn &jiraComment, nil\n\t}\n\treturn nil, 
handleJiraError(body)\n}", "func AddComment(commentValue, threadId, collectionName, projectName, repositoryId, pullRequestId string) error {\n\tusername, pass, _, _, _, domain := GetConfigDatas()\n\turl := domain + collectionName + `/` + projectName + `/_apis/git/repositories/` + repositoryId + `/pullRequests/` + pullRequestId + `/threads/` + threadId + `/comments?api-version=4.1`\n\n\tfmt.Println(\"url: \", url)\n\tbody := `{\n\t\t\t \"content\": \"` + commentValue + `\",\n\t\t\t \"parentCommentId\": 1,\n\t\t\t \"commentType\": 1\n\t\t\t}`\n\n\tfmt.Println(\"body: \", body)\n\n\tclient := &http.Client{}\n\treq, _ := http.NewRequest(\"POST\", url, bytes.NewBuffer([]byte(body)))\n\n\treq.SetBasicAuth(username, pass)\n\treq.Header.Set(\"Content-Type\", \"application/json\")\n\tresp, err := client.Do(req)\n\n\tif err != nil {\n\t\tCreateLogJson(\"Error\", \"AzureOps/AddComment\", \"Error while requesting to update tasks on Azure DevOps for adding comment on thread.\", err.Error())\n\t\treturn err\n\t}\n\n\tioutil.ReadAll(resp.Body)\n\n\t//CreateLogJson(\"Info\",\"AddComment\",\"Adding comment on pull request thread.\",\"AzureDevops thread comment is added. 
=>\"+bodyString)\n\treturn nil\n}", "func (mr *ClientMockRecorder) CreateTicketComment(arg0, arg1, arg2 interface{}) *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"CreateTicketComment\", reflect.TypeOf((*Client)(nil).CreateTicketComment), arg0, arg1, arg2)\n}", "func (m *MockFeedUseCase) CreateComment(arg0 int, arg1 models.Comment) error {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"CreateComment\", arg0, arg1)\n\tret0, _ := ret[0].(error)\n\treturn ret0\n}", "func (c *Client) CreateIssueCommentContext(ctx context.Context, issueIDOrKey string, input *CreateIssueCommentInput) (*IssueComment, error) {\n\tu := fmt.Sprintf(\"/api/v2/issues/%v/comments\", issueIDOrKey)\n\n\treq, err := c.NewRequest(\"POST\", u, input)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tissueComment := new(IssueComment)\n\tif err := c.Do(ctx, req, &issueComment); err != nil {\n\t\treturn nil, err\n\t}\n\treturn issueComment, nil\n}", "func (m *MockServiceBoard) CreateComment(arg0 models.CommentInput) (models.CommentOutside, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"CreateComment\", arg0)\n\tret0, _ := ret[0].(models.CommentOutside)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func (o *WatchlistScreeningIndividualReviewCreateResponse) SetComment(v string) {\n\to.Comment.Set(&v)\n}", "func (r *AutoRoller) AddComment(ctx context.Context, issueNum int64, message, user string, timestamp time.Time) error {\n\troll, err := r.recent.Get(ctx, issueNum)\n\tif err != nil {\n\t\treturn skerr.Fmt(\"No such issue %d\", issueNum)\n\t}\n\tid := fmt.Sprintf(\"%d_%d\", issueNum, len(roll.Comments))\n\troll.Comments = append(roll.Comments, comment.New(id, message, user))\n\treturn r.recent.Update(ctx, roll)\n}", "func Comment(ctx context.Context, cfg *v1.Config, pr int, contents []byte) error {\n\tc := newClient(ctx, cfg.Github)\n\treturn c.CommentOnPR(pr, string(contents))\n}", "func StoreComment(dbOwner, dbFolder, dbName, 
commenter string, discID int, comText string, discClose bool, mrState MergeRequestState) error {\n\t// Begin a transaction\n\ttx, err := pdb.Begin()\n\tif err != nil {\n\t\treturn err\n\t}\n\t// Set up an automatic transaction roll back if the function exits without committing\n\tdefer tx.Rollback()\n\n\t// Get the current details for the discussion or MR\n\tvar discCreator string\n\tvar discState bool\n\tvar discType int64\n\tvar discTitle string\n\tdbQuery := `\n\t\tSELECT disc.open, u.user_name, disc.discussion_type, disc.title\n\t\tFROM discussions AS disc, users AS u\n\t\tWHERE disc.db_id = (\n\t\t\t\tSELECT db.db_id\n\t\t\t\tFROM sqlite_databases AS db\n\t\t\t\tWHERE db.user_id = (\n\t\t\t\t\t\tSELECT user_id\n\t\t\t\t\t\tFROM users\n\t\t\t\t\t\tWHERE lower(user_name) = lower($1)\n\t\t\t\t\t)\n\t\t\t\t\tAND folder = $2\n\t\t\t\t\tAND db_name = $3\n\t\t\t)\n\t\t\tAND disc.disc_id = $4\n\t\t\tAND disc.creator = u.user_id`\n\terr = tx.QueryRow(dbQuery, dbOwner, dbFolder, dbName, discID).Scan(&discState, &discCreator, &discType, &discTitle)\n\tif err != nil {\n\t\tlog.Printf(\"Error retrieving current open state for '%s%s%s', discussion '%d': %v\\n\", dbOwner,\n\t\t\tdbFolder, dbName, discID, err)\n\t\treturn err\n\t}\n\n\t// If the discussion is to be closed or reopened, ensure the person doing so is either the database owner or the\n\t// person who started the discussion\n\tif discClose == true {\n\t\tif (strings.ToLower(commenter) != strings.ToLower(dbOwner)) && (strings.ToLower(commenter) != strings.ToLower(discCreator)) {\n\t\t\treturn errors.New(\"Not authorised\")\n\t\t}\n\t}\n\n\t// If comment text was provided, insert it into the database\n\tvar commandTag pgx.CommandTag\n\tvar comID int64\n\tif comText != \"\" {\n\t\tdbQuery = `\n\t\t\tWITH d AS (\n\t\t\t\tSELECT db.db_id\n\t\t\t\tFROM sqlite_databases AS db\n\t\t\t\tWHERE db.user_id = (\n\t\t\t\t\t\tSELECT user_id\n\t\t\t\t\t\tFROM users\n\t\t\t\t\t\tWHERE lower(user_name) = 
lower($1)\n\t\t\t\t\t)\n\t\t\t\t\tAND folder = $2\n\t\t\t\t\tAND db_name = $3\n\t\t\t), int AS (\n\t\t\t\tSELECT internal_id AS int_id\n\t\t\t\tFROM discussions\n\t\t\t\tWHERE db_id = (SELECT db_id FROM d)\n\t\t\t\tAND disc_id = $5\n\t\t\t)\n\t\t\tINSERT INTO discussion_comments (db_id, disc_id, commenter, body, entry_type)\n\t\t\tSELECT (SELECT db_id FROM d), (SELECT int_id FROM int), (SELECT user_id FROM users WHERE lower(user_name) = lower($4)), $6, 'txt'\n\t\t\tRETURNING com_id`\n\t\terr = tx.QueryRow(dbQuery, dbOwner, dbFolder, dbName, commenter, discID, comText).Scan(&comID)\n\t\tif err != nil {\n\t\t\tlog.Printf(\"Adding comment for database '%s%s%s', discussion '%d' failed: %v\\n\", dbOwner, dbFolder,\n\t\t\t\tdbName, discID, err)\n\t\t\treturn err\n\t\t}\n\t}\n\n\t// If the discussion is to be closed or reopened, insert a close or reopen record as appropriate\n\tif discClose == true {\n\t\tvar eventTxt, eventType string\n\t\tif discState {\n\t\t\t// Discussion is open, so a close event should be inserted\n\t\t\teventTxt = \"close\"\n\t\t\teventType = \"cls\"\n\t\t} else {\n\t\t\t// Discussion is closed, so a re-open event should be inserted\n\t\t\teventTxt = \"reopen\"\n\t\t\teventType = \"rop\"\n\t\t}\n\n\t\t// Insert the appropriate close or reopen record\n\t\tdbQuery = `\n\t\t\tWITH d AS (\n\t\t\t\tSELECT db.db_id\n\t\t\t\tFROM sqlite_databases AS db\n\t\t\t\tWHERE db.user_id = (\n\t\t\t\t\t\tSELECT user_id\n\t\t\t\t\t\tFROM users\n\t\t\t\t\t\tWHERE lower(user_name) = lower($1)\n\t\t\t\t\t)\n\t\t\t\t\tAND folder = $2\n\t\t\t\t\tAND db_name = $3\n\t\t\t), int AS (\n\t\t\t\tSELECT internal_id AS int_id\n\t\t\t\tFROM discussions\n\t\t\t\tWHERE db_id = (SELECT db_id FROM d)\n\t\t\t\tAND disc_id = $5\n\t\t\t)\n\t\t\tINSERT INTO discussion_comments (db_id, disc_id, commenter, body, entry_type)\n\t\t\tSELECT (SELECT db_id FROM d), (SELECT int_id FROM int), (SELECT user_id FROM users WHERE lower(user_name) = lower($4)), $6, $7`\n\t\tcommandTag, err = 
tx.Exec(dbQuery, dbOwner, dbFolder, dbName, commenter, discID, eventTxt, eventType)\n\t\tif err != nil {\n\t\t\tlog.Printf(\"Adding comment for database '%s%s%s', discussion '%d' failed: %v\\n\", dbOwner, dbFolder,\n\t\t\t\tdbName, discID, err)\n\t\t\treturn err\n\t\t}\n\t\tif numRows := commandTag.RowsAffected(); numRows != 1 {\n\t\t\tlog.Printf(\n\t\t\t\t\"Wrong number of rows (%v) affected when adding a comment to database '%s%s%s', discussion '%d'\\n\",\n\t\t\t\tnumRows, dbOwner, dbFolder, dbName, discID)\n\t\t}\n\t}\n\n\t// Update the merge request state for MR's being closed\n\tif discClose == true && discType == MERGE_REQUEST {\n\t\tdbQuery = `\n\t\t\tUPDATE discussions\n\t\t\tSET mr_state = $5\n\t\t\tWHERE db_id = (\n\t\t\t\t\tSELECT db.db_id\n\t\t\t\t\tFROM sqlite_databases AS db\n\t\t\t\t\tWHERE db.user_id = (\n\t\t\t\t\t\t\tSELECT user_id\n\t\t\t\t\t\t\tFROM users\n\t\t\t\t\t\t\tWHERE lower(user_name) = lower($1)\n\t\t\t\t\t\t)\n\t\t\t\t\t\tAND folder = $2\n\t\t\t\t\t\tAND db_name = $3\n\t\t\t\t)\n\t\t\t\tAND disc_id = $4`\n\t\tcommandTag, err = tx.Exec(dbQuery, dbOwner, dbFolder, dbName, discID, mrState)\n\t\tif err != nil {\n\t\t\tlog.Printf(\"Updating MR state for database '%s%s%s', discussion '%d' failed: %v\\n\", dbOwner,\n\t\t\t\tdbFolder, dbName, discID, err)\n\t\t\treturn err\n\t\t}\n\t\tif numRows := commandTag.RowsAffected(); numRows != 1 {\n\t\t\tlog.Printf(\n\t\t\t\t\"Wrong number of rows (%v) affected when updating MR state for database '%s%s%s', discussion '%d'\\n\",\n\t\t\t\tnumRows, dbOwner, dbFolder, dbName, discID)\n\t\t}\n\t}\n\n\t// Update the last_modified date for the parent discussion\n\tdbQuery = `\n\t\tUPDATE discussions\n\t\tSET last_modified = now()`\n\tif discClose == true {\n\t\tif discState {\n\t\t\t// Discussion is open, so set it to closed\n\t\t\tdbQuery += `, open = false`\n\t\t} else {\n\t\t\t// Discussion is closed, so set it to open\n\t\t\tdbQuery += `, open = true`\n\t\t}\n\t}\n\tif comText != \"\" {\n\t\tdbQuery += 
`, comment_count = comment_count + 1`\n\t}\n\tdbQuery += `\n\t\tWHERE db_id = (\n\t\t\t\tSELECT db.db_id\n\t\t\t\tFROM sqlite_databases AS db\n\t\t\t\tWHERE db.user_id = (\n\t\t\t\t\t\tSELECT user_id\n\t\t\t\t\t\tFROM users\n\t\t\t\t\t\tWHERE lower(user_name) = lower($1)\n\t\t\t\t\t)\n\t\t\t\t\tAND folder = $2\n\t\t\t\t\tAND db_name = $3\n\t\t\t)\n\t\t\tAND disc_id = $4`\n\tcommandTag, err = tx.Exec(dbQuery, dbOwner, dbFolder, dbName, discID)\n\tif err != nil {\n\t\tlog.Printf(\"Updating last modified date for database '%s%s%s', discussion '%d' failed: %v\\n\", dbOwner,\n\t\t\tdbFolder, dbName, discID, err)\n\t\treturn err\n\t}\n\tif numRows := commandTag.RowsAffected(); numRows != 1 {\n\t\tlog.Printf(\n\t\t\t\"Wrong number of rows (%v) affected when updating last_modified date for database '%s%s%s', discussion '%d'\\n\",\n\t\t\tnumRows, dbOwner, dbFolder, dbName, discID)\n\t}\n\n\t// Update the open discussion and MR counters for the database\n\tdbQuery = `\n\t\tWITH d AS (\n\t\t\tSELECT db.db_id\n\t\t\tFROM sqlite_databases AS db\n\t\t\tWHERE db.user_id = (\n\t\t\t\t\tSELECT user_id\n\t\t\t\t\tFROM users\n\t\t\t\t\tWHERE lower(user_name) = lower($1)\n\t\t\t\t)\n\t\t\t\tAND folder = $2\n\t\t\t\tAND db_name = $3\n\t\t)\n\t\tUPDATE sqlite_databases\n\t\tSET discussions = (\n\t\t\t\tSELECT count(disc.*)\n\t\t\t\tFROM discussions AS disc, d\n\t\t\t\tWHERE disc.db_id = d.db_id\n\t\t\t\t\tAND open = true\n\t\t\t\t\tAND discussion_type = 0\n\t\t\t),\n\t\t\tmerge_requests = (\n\t\t\t\tSELECT count(disc.*)\n\t\t\t\tFROM discussions AS disc, d\n\t\t\t\tWHERE disc.db_id = d.db_id\n\t\t\t\t\tAND open = true\n\t\t\t\t\tAND discussion_type = 1\n\t\t\t)\n\t\tWHERE db_id = (SELECT db_id FROM d)`\n\tcommandTag, err = tx.Exec(dbQuery, dbOwner, dbFolder, dbName)\n\tif err != nil {\n\t\tlog.Printf(\"Updating discussion count for database '%s%s%s' failed: %v\\n\", dbOwner, dbFolder, dbName,\n\t\t\terr)\n\t\treturn err\n\t}\n\tif numRows := commandTag.RowsAffected(); numRows != 1 
{\n\t\tlog.Printf(\n\t\t\t\"Wrong number of rows (%v) affected when updating discussion count for database '%s%s%s'\\n\",\n\t\t\tnumRows, dbOwner, dbFolder, dbName)\n\t}\n\n\t// If comment text was provided, generate an event about the new comment\n\tif comText != \"\" {\n\t\tvar commentURL string\n\t\tif discType == MERGE_REQUEST {\n\t\t\tcommentURL = fmt.Sprintf(\"/merge/%s%s%s?id=%d#c%d\", url.PathEscape(dbOwner), dbFolder,\n\t\t\t\turl.PathEscape(dbName), discID, comID)\n\t\t} else {\n\t\t\tcommentURL = fmt.Sprintf(\"/discuss/%s%s%s?id=%d#c%d\", url.PathEscape(dbOwner), dbFolder,\n\t\t\t\turl.PathEscape(dbName), discID, comID)\n\t\t}\n\t\tdetails := EventDetails{\n\t\t\tDBName: dbName,\n\t\t\tDiscID: discID,\n\t\t\tFolder: dbFolder,\n\t\t\tOwner: dbOwner,\n\t\t\tType: EVENT_NEW_COMMENT,\n\t\t\tTitle: discTitle,\n\t\t\tURL: commentURL,\n\t\t\tUserName: commenter,\n\t\t}\n\t\terr = NewEvent(details)\n\t\tif err != nil {\n\t\t\tlog.Printf(\"Error when creating a new event: %s\\n\", err.Error())\n\t\t\treturn err\n\t\t}\n\t}\n\n\t// Commit the transaction\n\terr = tx.Commit()\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}", "func (p PRMirror) AddComment(id int, comment string) bool {\n\tissueComment := github.IssueComment{}\n\tissueComment.Body = &comment\n\n\t_, _, err := p.GitHubClient.Issues.CreateComment(*p.Context, p.Configuration.DownstreamOwner, p.Configuration.DownstreamRepo, id, &issueComment)\n\tif err != nil {\n\t\tlog.Errorf(\"Error while adding a comment to issue#:%d - %s\", id, err.Error())\n\t\treturn false\n\t}\n\n\treturn true\n}", "func (r *AutoRoller) AddComment(issueNum int64, message, user string, timestamp time.Time) error {\n\troll, err := r.recent.Get(issueNum)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"No such issue %d\", issueNum)\n\t}\n\tid := fmt.Sprintf(\"%d_%d\", issueNum, len(roll.Comments))\n\troll.Comments = append(roll.Comments, comment.New(id, message, user))\n\treturn r.recent.Update(roll)\n}", "func addComment(gh 
*octokat.Client, repo octokat.Repo, prNum, comment, commentType string) error {\n\t// get the comments\n\tcomments, err := gh.Comments(repo, prNum, &octokat.Options{})\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// check if we already made the comment\n\tfor _, c := range comments {\n\t\t// if we already made the comment return nil\n\t\tif strings.ToLower(c.User.Login) == \"gordontheturtle\" && strings.Contains(c.Body, commentType) {\n\t\t\tlogrus.Debugf(\"Already made comment about %q on PR %s\", commentType, prNum)\n\t\t\treturn nil\n\t\t}\n\t}\n\n\t// add the comment because we must not have already made it\n\tif _, err := gh.AddComment(repo, prNum, comment); err != nil {\n\t\treturn err\n\t}\n\n\tlogrus.Infof(\"Would have added comment about %q PR %s\", commentType, prNum)\n\treturn nil\n}", "func ajaxCreateComment(w http.ResponseWriter, r *http.Request) {\n\tpr(\"ajaxCreateComment\")\n\tprVal(\"r.Method\", r.Method)\n\n\tif r.Method != \"POST\" {\n\t\thttp.NotFound(w, r)\n\t\treturn\n\t}\n\n\tuserId := GetSession(w, r)\n\tif userId == -1 { // Secure cookie not found. 
Either session expired, or someone is hacking.\n\t\t// So go to the register page.\n\t\tpr(\"Must be logged in to create a comment.\")\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n\tprVal(\"userId\", userId)\n\n\t//parse request to struct\n\tvar newComment struct {\n\t\tId int64\n\t\tPostId int64\n\t\tParentId int64\n\t\tText string\n\t}\n\n\terr := json.NewDecoder(r.Body).Decode(&newComment)\n\tif err != nil {\n\t\tprVal(\"Failed to decode json body\", r.Body)\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\tprVal(\"=======>>>>> newComment\", newComment)\n\n\t// Get the postId and path from the parent's info, in the database.\n\tnewPath := []int64{} // New path = append(parent's path, num children).\n\t{\n\t\t// Have the database determine what the new path should be.\n\t\t// e.g\tParent path:\t1, 2, 3\n\t\t// Child0 path: \t1, 2, 3, 0\n\t\t// Child1 path: \t1, 2, 3, 1\n\t\t// New Child path: [1, 2, 3] + (NumChildren)\n\t\trows := DbQuery(\"SELECT ARRAY_APPEND(Path, NumChildren) FROM $$Comment WHERE Id = $1::bigint\", newComment.ParentId)\n\t\tdefer rows.Close()\n\t\tif rows.Next() {\n\t\t\tarr := pq.Int64Array{} // This weirdness is required for scanning into []int64\n\n\t\t\terr := rows.Scan(&arr)\n\t\t\tcheck(err)\n\n\t\t\tnewPath = []int64(arr) // This weirdness is required for scanning into []int64\n\t\t} else {\n\t\t\t// If it's not in the database, it must be because it has Id = -1 (the top-level post)...\n\t\t\tassert(newComment.ParentId == -1)\n\n\t\t\t// The head comment of the tree, must be added!\n\t\t\t// This allows us to maintain a count of top-level posts, in this head record's NumChildren.\n\t\t\tDbExec(`INSERT INTO $$Comment (Id, PostId, UserId, ParentId, Text, Path, NumChildren)\n\t\t\t\t\tVALUES (-1, $1::bigint, -1, -1, '', '{}'::bigint[], 0);`,\n\t\t\t\tnewComment.PostId)\n\t\t}\n\t\tcheck(rows.Err())\n\t}\n\n\t// TODO: add a database transaction here.\n\t// See: 
http://go-database-sql.org/prepared.html\n\n\t// Send the new comment to the database.\n\tnewComment.Id = DbInsert(\n\t\t`INSERT INTO $$Comment (PostId, UserId, ParentId, Text, Path)\n\t VALUES ($1::bigint, $2::bigint, $3::bigint, $4, $5::bigint[])\n\t returning Id;`,\n\t\tnewComment.PostId,\n\t\tuserId,\n\t\tnewComment.ParentId,\n\t\tnewComment.Text,\n\t\tpq.Array(newPath))\n\n\t// Increment the parent's number of children.\n\tDbExec(`UPDATE $$Comment SET NumChildren = NumChildren + 1 WHERE Id = $1::bigint`, newComment.ParentId)\n\n\t// Increment the Post's NumComments field here.\n\tDbExec(`UPDATE $$Post SET NumComments = NumComments + 1 WHERE Id = $1::bigint`, newComment.PostId)\n\n\t// Have user like their own comments by default.\n\tvoteUpDown(newComment.Id, userId, true, true, true)\n\n\t// Convert newlines to be HTML-friendly. (Do it here so the JSON response gets it and also it will get reapplied\n\t// in ReadCommentTagsFromDB.)\n\tnewComment.Text = strings.Replace(newComment.Text, \"\\n\", \"<br>\", -1)\n\n\t// create json response from struct. 
It needs to know newCommentId so it knows where to put the focus after the window reload.\n\ta, err := json.Marshal(newComment)\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n\tw.Write(a)\n}", "func (db *Database) AddComment(userID types.UserID, objectID types.ObjectID, content string) (err error) {\n\tif err = userID.Validate(); err != nil {\n\t\treturn\n\t}\n\tif err = objectID.Validate(); err != nil {\n\t\treturn\n\t}\n\tif len(content) > 1024 {\n\t\treturn errors.New(\"content too large\")\n\t}\n\n\terr = db.comments.Insert(types.Comment{\n\t\tUserID: userID,\n\t\tObjectID: objectID,\n\t\tContent: content,\n\t\tDate: time.Now(),\n\t})\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"failed to insert new comment\")\n\t}\n\n\treturn\n}", "func (cmntRepo *CommentGormRepo) StoreComment(comment *entity.Comment) (*entity.Comment, []error) {\n\tcmnt := comment\n\terrs := cmntRepo.conn.Create(cmnt).GetErrors()\n\tif len(errs) > 0 {\n\t\treturn nil, errs\n\t}\n\treturn cmnt, errs\n}", "func (cri *CommentRepositoryImpl) StoreComment(c entity.Comment) error {\r\n\r\n\t_, err := cri.conn.Exec(\"INSERT INTO comments (username,email,messages,placedat) values($1, $2, $3,$4)\", c.UserName, c.Email, c.Message, c.PlacedAt)\r\n\tif err != nil {\r\n\t\treturn errors.New(\"Insertion has failed\")\r\n\t}\r\n\r\n\treturn nil\r\n}" ]
[ "0.78270876", "0.77825135", "0.7703459", "0.76506597", "0.7603772", "0.7488165", "0.74705166", "0.74413574", "0.7431552", "0.7339268", "0.71487004", "0.71214473", "0.7113855", "0.71087986", "0.71036273", "0.70275646", "0.70132875", "0.6990566", "0.6989565", "0.6970975", "0.69571495", "0.693099", "0.6873804", "0.6854227", "0.6839919", "0.6750548", "0.6731324", "0.67124367", "0.6707782", "0.6682779", "0.6676664", "0.6660153", "0.6586898", "0.65529215", "0.6493634", "0.6485769", "0.6478456", "0.64337534", "0.6401435", "0.6382773", "0.6368067", "0.6359626", "0.63572377", "0.6348645", "0.6344735", "0.6332879", "0.6310622", "0.62951976", "0.62923384", "0.62855774", "0.6246597", "0.6217452", "0.62148035", "0.62025076", "0.62025076", "0.61854476", "0.61592567", "0.61534005", "0.61310476", "0.61310476", "0.6087621", "0.60648316", "0.60149616", "0.6002502", "0.5983041", "0.59769154", "0.59700954", "0.59700763", "0.5969006", "0.59686804", "0.59634846", "0.5961855", "0.5955311", "0.5940309", "0.59070957", "0.58910996", "0.58876497", "0.58825576", "0.5873124", "0.58730346", "0.58572865", "0.58410996", "0.5806708", "0.5788734", "0.5772476", "0.5754941", "0.5715491", "0.57022077", "0.56979644", "0.5696392", "0.5677787", "0.56648225", "0.5657109", "0.5652912", "0.5650021", "0.56496716", "0.5641635", "0.5634016", "0.5621224" ]
0.8281089
1
NumComments counts the number of tracked comments
NumComments считает количество отслеживаемых комментариев
func (fc *fakeClient) NumComments() int { n := 0 for _, comments := range fc.commentsAdded { n += len(comments) } return n }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (c *commentsQueryBuilder) Count() (int64, error) {\n\tif c.err != nil {\n\t\treturn 0, c.err\n\t}\n\treturn c.builder.Count()\n}", "func (o *ViewMilestone) GetCommentsCount() int32 {\n\tif o == nil || o.CommentsCount == nil {\n\t\tvar ret int32\n\t\treturn ret\n\t}\n\treturn *o.CommentsCount\n}", "func (q commentQuery) Count() (int64, error) {\n\tvar count int64\n\n\tqueries.SetSelect(q.Query, nil)\n\tqueries.SetCount(q.Query)\n\n\terr := q.Query.QueryRow().Scan(&count)\n\tif err != nil {\n\t\treturn 0, errors.Wrap(err, \"models: failed to count comment rows\")\n\t}\n\n\treturn count, nil\n}", "func (o *InlineResponse20033Milestones) GetCommentsCount() string {\n\tif o == nil || o.CommentsCount == nil {\n\t\tvar ret string\n\t\treturn ret\n\t}\n\treturn *o.CommentsCount\n}", "func (o *InlineResponse20034Milestone) GetCommentsCount() string {\n\tif o == nil || o.CommentsCount == nil {\n\t\tvar ret string\n\t\treturn ret\n\t}\n\treturn *o.CommentsCount\n}", "func (o *InlineResponse200115) GetCommentsCount() string {\n\tif o == nil || o.CommentsCount == nil {\n\t\tvar ret string\n\t\treturn ret\n\t}\n\treturn *o.CommentsCount\n}", "func (o *InlineResponse20033Milestones) SetCommentsCount(v string) {\n\to.CommentsCount = &v\n}", "func (o *InlineResponse200115) SetCommentsCount(v string) {\n\to.CommentsCount = &v\n}", "func CountAllCommentsPerPost(postID uint64) uint64 {\n\n\tvar result uint64\n\tDB, err := database.NewOpen()\n\n\tcountedCommentsResult, err := DB.Query(\"SELECT * FROM comment WHERE PostID=?\", postID)\n\tif err != nil {\n\t\tfmt.Println(err)\n\t}\n\n\tfor countedCommentsResult.Next() {\n\t\tresult = result + 1\n\t}\n\n\tDB.Close()\n\n\tfmt.Println(\"Number of comments for u:\", result)\n\n\treturn result\n}", "func (o *InlineResponse20051TodoItems) GetCommentsCount() string {\n\tif o == nil || o.CommentsCount == nil {\n\t\tvar ret string\n\t\treturn ret\n\t}\n\treturn *o.CommentsCount\n}", "func (o *InlineResponse20034Milestone) 
SetCommentsCount(v string) {\n\to.CommentsCount = &v\n}", "func (o *ViewMilestone) SetCommentsCount(v int32) {\n\to.CommentsCount = &v\n}", "func (t *TeamDiscussion) GetCommentsCount() int {\n\tif t == nil || t.CommentsCount == nil {\n\t\treturn 0\n\t}\n\treturn *t.CommentsCount\n}", "func (o *InlineResponse20049Post) GetCommentsCount() string {\n\tif o == nil || o.CommentsCount == nil {\n\t\tvar ret string\n\t\treturn ret\n\t}\n\treturn *o.CommentsCount\n}", "func (o *InlineResponse200115) HasCommentsCount() bool {\n\tif o != nil && o.CommentsCount != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func (o *InlineResponse20034Milestone) HasCommentsCount() bool {\n\tif o != nil && o.CommentsCount != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func (o *InlineResponse20049Post) SetCommentsCount(v string) {\n\to.CommentsCount = &v\n}", "func (s *TattooStorage) GetArticleCommentCount(name string) int {\n\tlst_buff, err := s.CommentIndexDB.GetJSON(name)\n\tif err != nil {\n\t\tlog.Printf(\"load comment index failed (%v)!\\n\", err)\n\t\treturn 0\n\t}\n\treturn len(lst_buff.([]interface{}))\n}", "func (o *InlineResponse20033Milestones) HasCommentsCount() bool {\n\tif o != nil && o.CommentsCount != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func (s *userService) IncrCommentCount(userId int64) int {\n\tt := dao.UserDao.Get(userId)\n\tif t == nil {\n\t\treturn 0\n\t}\n\tcommentCount := t.CommentCount + 1\n\tif err := dao.UserDao.UpdateColumn(userId, \"comment_count\", commentCount); err != nil {\n\t\tlog.Error(err.Error())\n\t} else {\n\t\tcache.UserCache.Invalidate(userId)\n\t}\n\treturn commentCount\n}", "func (o *ViewMilestone) HasCommentsCount() bool {\n\tif o != nil && o.CommentsCount != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func (o *InlineResponse20051TodoItems) HasCommentsCount() bool {\n\tif o != nil && o.CommentsCount != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func (o *ViewMilestone) GetCommentsCountOk() (*int32, bool) 
{\n\tif o == nil || o.CommentsCount == nil {\n\t\treturn nil, false\n\t}\n\treturn o.CommentsCount, true\n}", "func (cs commentsByTimestamp) Len() int { return len(cs) }", "func (o *InlineResponse20051TodoItems) SetCommentsCount(v string) {\n\to.CommentsCount = &v\n}", "func (c *Commit) GetCommentCount() int {\n\tif c == nil || c.CommentCount == nil {\n\t\treturn 0\n\t}\n\treturn *c.CommentCount\n}", "func (b *PhotosGetCommentsBuilder) Count(v int) *PhotosGetCommentsBuilder {\n\tb.Params[\"count\"] = v\n\treturn b\n}", "func (o *InlineResponse20049Post) HasCommentsCount() bool {\n\tif o != nil && o.CommentsCount != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func (o *InlineResponse20034Milestone) GetCommentsCountOk() (*string, bool) {\n\tif o == nil || o.CommentsCount == nil {\n\t\treturn nil, false\n\t}\n\treturn o.CommentsCount, true\n}", "func (q cmfPaidprogramCommentQuery) Count(ctx context.Context, exec boil.ContextExecutor) (int64, error) {\n\tvar count int64\n\n\tqueries.SetSelect(q.Query, nil)\n\tqueries.SetCount(q.Query)\n\n\terr := q.Query.QueryRowContext(ctx, exec).Scan(&count)\n\tif err != nil {\n\t\treturn 0, errors.Wrap(err, \"models: failed to count cmf_paidprogram_comment rows\")\n\t}\n\n\treturn count, nil\n}", "func (o *InlineResponse20051TodoItems) GetCommentsCountOk() (*string, bool) {\n\tif o == nil || o.CommentsCount == nil {\n\t\treturn nil, false\n\t}\n\treturn o.CommentsCount, true\n}", "func (o *InlineResponse20049Post) GetCommentsCountOk() (*string, bool) {\n\tif o == nil || o.CommentsCount == nil {\n\t\treturn nil, false\n\t}\n\treturn o.CommentsCount, true\n}", "func (b *PhotosGetAllCommentsBuilder) Count(v int) *PhotosGetAllCommentsBuilder {\n\tb.Params[\"count\"] = v\n\treturn b\n}", "func (o *InlineResponse200115) GetCommentsCountOk() (*string, bool) {\n\tif o == nil || o.CommentsCount == nil {\n\t\treturn nil, false\n\t}\n\treturn o.CommentsCount, true\n}", "func (o *InlineResponse20033Milestones) GetCommentsCountOk() 
(*string, bool) {\n\tif o == nil || o.CommentsCount == nil {\n\t\treturn nil, false\n\t}\n\treturn o.CommentsCount, true\n}", "func (g *Gist) GetComments() int {\n\tif g == nil || g.Comments == nil {\n\t\treturn 0\n\t}\n\treturn *g.Comments\n}", "func (o *ViewMilestone) GetNumCommentsRead() int32 {\n\tif o == nil || o.NumCommentsRead == nil {\n\t\tvar ret int32\n\t\treturn ret\n\t}\n\treturn *o.NumCommentsRead\n}", "func ProcessComments(comments []Comment) map[string]int {\n\twords := map[string]int{}\n\tfor _, comment := range comments {\n\t\tfor _, word := range strings.Fields(comment.Body) {\n\t\t\t_, ok := words[word]\n\t\t\tif ok {\n\t\t\t\twords[word]++\n\n\t\t\t} else {\n\t\t\t\twords[word] = 1\n\t\t\t}\n\t\t}\n\t}\n\treturn words\n}", "func CountNbLines(filename string) int {\n\treader, file := ReturnReader(filename, 0)\n\tdefer CloseFile(file)\n\n\tnbLines := 0\n\n\ttStart := time.Now()\n\n\tfor reader.Scan() {\n\t\tnbLines++\n\t}\n\n\ttDiff := time.Since(tStart)\n\tfmt.Printf(\"Count nb lines done in time: %f s \\n\", tDiff.Seconds())\n\n\treturn nbLines\n}", "func (env *Env) NumCon() int {\n\tenv.RLock()\n\tn := env.openCons.len()\n\tenv.RUnlock()\n\treturn n\n}", "func GetAllComments(c *gin.Context) {\n\tcontent, err := comments.GetAllComments()\n\n\tif err != nil {\n\t\tc.JSON(400, gin.H{\"success\": false, \"msg\": \"Unable to fetch translations\", \"errCode\": 38})\n\t\treturn\n\t}\n\n\ttotal :=len(content)\n\tc.Header(\"X-Total-Count\", strconv.Itoa(total))\n\tc.Header(\"Access-Control-Expose-Headers\",\"X-Total-Count\")\n\tc.JSON(http.StatusOK, gin.H{\"success\": true, \"msg\": \"ok\", \"data\": content})\n}", "func (l *ChannelList) Count() int {\n\tc := 0\n\tfor i := 0; i < Conf.ChannelBucketCount; i++ {\n\t\tc += len(l.channels[i].data)\n\t}\n\treturn c\n}", "func (c *Client) GetIssueCommentsCount(issueIDOrKey string) (int, error) {\n\treturn c.GetIssueCommentsCountContext(context.Background(), issueIDOrKey)\n}", "func (node *Node) 
CountBreakpoints() int {\n\tnumBreakpoints := 0\n\n\t// If we find any breakpoints return false\n\t// A breakpoint in a permutation X is a position j such that X(j) + 1 ≠ X(j+1)\n\tfor i, element := range node.contents[:len(node.contents)-1] {\n\t\tif math.Abs(float64(element-node.contents[i+1])) > 1 {\n\t\t\tnumBreakpoints++\n\t\t}\n\t}\n\n\treturn numBreakpoints\n}", "func (c *CommentStats) GetTotalPullRequestComments() int {\n\tif c == nil || c.TotalPullRequestComments == nil {\n\t\treturn 0\n\t}\n\treturn *c.TotalPullRequestComments\n}", "func (c *CommentStats) GetTotalGistComments() int {\n\tif c == nil || c.TotalGistComments == nil {\n\t\treturn 0\n\t}\n\treturn *c.TotalGistComments\n}", "func (s Scope) Count() int {\n\treturn s.m.Count()\n}", "func (c *Counter) Count() int64 { return c.count }", "func (b *Buffer) LinesNum() int {\n\treturn len(b.lines)\n}", "func (h *clientHub) NumClients() int {\n\th.RLock()\n\tdefer h.RUnlock()\n\ttotal := 0\n\tfor _, clientConnections := range h.users {\n\t\ttotal += len(clientConnections)\n\t}\n\treturn total\n}", "func (g *Grid) Count() int32 {\n\treturn int32(len(g.set))\n}", "func (h HMSketch) Count(kvs map[string]string) float64 {\n\thist := h.Sketch(kvs)\n\treturn hist.Total()\n}", "func (i *Issue) GetComments() int {\n\tif i == nil || i.Comments == nil {\n\t\treturn 0\n\t}\n\treturn *i.Comments\n}", "func CountOpenFiles() int {\n\tt.Lock()\n\tdefer t.Unlock()\n\treturn len(t.entries)\n}", "func (p *PostingsList) Count() uint64 {\n\tvar n, e uint64\n\tif p.normBits1Hit != 0 {\n\t\tn = 1\n\t\tif p.except != nil && p.except.Contains(uint32(p.docNum1Hit)) {\n\t\t\te = 1\n\t\t}\n\t} else if p.postings != nil {\n\t\tn = p.postings.GetCardinality()\n\t\tif p.except != nil {\n\t\t\te = p.postings.AndCardinality(p.except)\n\t\t}\n\t}\n\treturn n - e\n}", "func (s *plannerStats) NumFacts() int {\n\tr := s.impl.NumFacts()\n\treturn s.track(r, \"NumFacts\")\n}", "func openDiscussionsCount(discussions []*gitlab.Discussion) int 
{\n\t// check if any of the discussions are unresolved\n\tcount := 0\n\tfor _, d := range discussions {\n\t\tfor _, n := range d.Notes {\n\t\t\tif !n.Resolved && n.Resolvable {\n\t\t\t\tcount++\n\t\t\t}\n\t\t}\n\t}\n\treturn count\n}", "func (cgCache *consumerGroupCache) getNumOpenConns() int32 {\n\treturn int32(cgCache.cgMetrics.Get(load.CGMetricNumOpenConns))\n}", "func (r *SlidingWindow) Count() int {return r.count}", "func (q commentQuery) CountP() int64 {\n\tc, err := q.Count()\n\tif err != nil {\n\t\tpanic(boil.WrapErr(err))\n\t}\n\n\treturn c\n}", "func (c *countHashWriter) Count() int {\n\treturn c.n\n}", "func (e *Editor) NumLines() int {\n\te.makeValid()\n\treturn len(e.lines)\n}", "func (tb *TextBuf) NumLines() int {\n\ttb.LinesMu.RLock()\n\tdefer tb.LinesMu.RUnlock()\n\treturn tb.NLines\n}", "func (fc *fakeClient) ClearComments() {\n\tfc.commentsAdded = map[int][]string{}\n}", "func (m NMap) Count() int {\n\tcount := 0\n\tfor _, inMap := range m {\n\t\tinMap.RLock()\n\t\tcount += len(inMap.objs)\n\t\tinMap.RUnlock()\n\t}\n\treturn count\n}", "func (q oauthClientQuery) Count(exec boil.Executor) (int64, error) {\n\tvar count int64\n\n\tqueries.SetSelect(q.Query, nil)\n\tqueries.SetCount(q.Query)\n\n\terr := q.Query.QueryRow(exec).Scan(&count)\n\tif err != nil {\n\t\treturn 0, errors.Wrap(err, \"models: failed to count oauth_clients rows\")\n\t}\n\n\treturn count, nil\n}", "func (c *Client) GetIssueCommentsCountContext(ctx context.Context, issueIDOrKey string) (int, error) {\n\tu := fmt.Sprintf(\"/api/v2/issues/%v/comments/count\", issueIDOrKey)\n\n\treq, err := c.NewRequest(\"GET\", u, nil)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\n\tr := new(p)\n\tif err := c.Do(ctx, req, &r); err != nil {\n\t\treturn 0, err\n\t}\n\treturn r.Count, nil\n}", "func (t *CountTracker) IncrementCount() {\n\tt.m.Lock()\n\tdefer t.m.Unlock()\n\tt.tokenCount++\n}", "func (t *Todo) Count() int {\n\treturn len(t.todos)\n}", "func ObserveCount(mType, provider string, success, 
timeCritical bool) {\n\tmessageCounter.WithLabelValues(mType, provider, strconv.FormatBool(success), strconv.FormatBool(timeCritical)).Inc()\n}", "func (o *ViewMilestone) HasNumCommentsRead() bool {\n\tif o != nil && o.NumCommentsRead != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func (h *StmtHistory) Count() int {\n\treturn len(h.history)\n}", "func (s *Streaming) NumObservations() float64 {\n\treturn s.n\n}", "func (strg *inMemoryStorage) NumConnected() int {\n\tstrg.lock.RLock()\n\tdefer strg.lock.RUnlock()\n\treturn len(strg.connected)\n}", "func (bq *BrowserQuery) Count(ctx context.Context) (int, error) {\n\tif err := bq.prepareQuery(ctx); err != nil {\n\t\treturn 0, err\n\t}\n\treturn bq.sqlCount(ctx)\n}", "func (o *VersionedConnection) HasComments() bool {\n\tif o != nil && o.Comments != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func (b *CompactableBuffer) Count() int {\n\treturn int(atomic.LoadInt64(&b.count))\n}", "func CountTrackedTimes(ctx context.Context, opts *FindTrackedTimesOptions) (int64, error) {\n\tsess := db.GetEngine(ctx).Where(opts.toCond())\n\tif opts.RepositoryID > 0 || opts.MilestoneID > 0 {\n\t\tsess = sess.Join(\"INNER\", \"issue\", \"issue.id = tracked_time.issue_id\")\n\t}\n\treturn sess.Count(&TrackedTime{})\n}", "func (o DebugSessionOutput) Count() pulumi.IntOutput {\n\treturn o.ApplyT(func(v *DebugSession) pulumi.IntOutput { return v.Count }).(pulumi.IntOutput)\n}", "func (o *VersionedControllerService) HasComments() bool {\n\tif o != nil && o.Comments != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func (s *Store) NumLines() int {\n\treturn len(s.lines)\n}", "func (p *MongodbProvider) Count() (total int) {\n\tvar err error\n\ttotal, err = p.c.Count()\n\tif err != nil {\n\t\tpanic(\"session/mgoSession: error counting records: \" + err.Error())\n\t}\n\treturn total\n}", "func (c *countHashReader) Count() int {\n\treturn c.n\n}", "func (s *Server) NumConns() int64 {\n\treturn 
atomic.LoadInt64(&s.activeConns)\n}", "func FlushCommentSum(courseId string, ds *db.DataSource, log *log.Logger) error {\n\tsession := ds.NewSession()\n\tdefer session.Close()\n\tcomment := new(table.CourseCommentTable)\n\tcountSql := `SELECT COUNT(\"UUID\") FROM \"COURSE_COMMENT\" WHERE \"COURSE_ID\" = ?\n\t\tAND \"FROZEN_STATUS\" = ?`\n\tcount, countErr := session.Sql(countSql, courseId, value.STATUS_ENABLED).Count(comment)\n\tif countErr != nil {\n\t\tlog.Println(count)\n\t}\n\tcourse := new(table.CourseTable)\n\tcourse.CommentSum = count\n\tupdateNum, updateErr := session.Id(courseId).Update(course)\n\tif updateNum == 0 {\n\t\tif updateErr != nil {\n\t\t\tlog.Println(updateNum)\n\t\t}\n\t\treturn COURSE_FLUSH_COMMENT_NUM_ERR\n\t}\n\tcommitErr := session.Commit()\n\tif commitErr != nil {\n\t\tlog.Println(commitErr)\n\t\tsession.Rollback()\n\t\treturn COURSE_FLUSH_COMMENT_NUM_ERR\n\t}\n\treturn nil\n}", "func (c connectInfo) numConnections(grid rect) int {\n\tn := 0\n\tif c.up && grid.y > 0 {\n\t\tn++\n\t}\n\tif c.right && grid.x < grid.w-1 {\n\t\tn++\n\t}\n\tif c.down && grid.y < grid.h-1 {\n\t\tn++\n\t}\n\tif c.left && grid.x > 0 {\n\t\tn++\n\t}\n\treturn n\n}", "func (m *Cmap) Count() int {\n\treturn int(atomic.LoadInt64(&m.count))\n}", "func (r *postCommentResolver) NumChildren(ctx context.Context, post *posts.Comment) (int, error) {\n\treturn r.postService.GetNumChildrenOfPost(post.ID)\n}", "func (s MemoryStorage) Count(q Query) (int, error) {\n\tfmt.Println(\"LEN\", len(s.bookmarks))\n\treturn len(s.bookmarks), nil\n}", "func (t *TrafficClones) GetCount() int {\n\tif t == nil || t.Count == nil {\n\t\treturn 0\n\t}\n\treturn *t.Count\n}", "func (s *Server) Count() int {\n\ts.cond.L.Lock()\n\tdefer s.cond.L.Unlock()\n\treturn len(s.points)\n}", "func (state *StateConditions) Count() int {\n\tcount := 0\n\tif state.Exit != nil {\n\t\tcount++\n\t}\n\tif state.Timeout != nil {\n\t\tcount++\n\t}\n\tcount += len(state.FileMonitors)\n\tcount += 
len(state.Outputs)\n\treturn count\n}", "func (o *ViewProjectActivePages) HasComments() bool {\n\tif o != nil && o.Comments != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func (c CounterSnapshot) Count() int64 { return int64(c) }", "func (g *Game)Count()(map[string]int,map[string]*DetailScore,map[string]int){\n\troundScore,detailScore := g.CurrentRound.countScore(g.playersById,g.Players[g.CurrentDicoPlayer],true,g.TypeGameNormal)\n\treturn roundScore,detailScore,g.GetTotalScore()\n}", "func (n *NodeServiceImpl) Count(namespace string) (map[string]int, error) {\n\tlist, err := n.List(namespace, &models.ListOptions{})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn map[string]int{\n\t\tplugin.QuotaNode: len(list.Items),\n\t}, nil\n}", "func (p *printer) commentSizeBefore(next token.Position) int {\n\t// save/restore current p.commentInfo (p.nextComment() modifies it)\n\tdefer func(info commentInfo) {\n\t\tp.commentInfo = info\n\t}(p.commentInfo)\n\n\tsize := 0\n\tfor p.commentBefore(next) {\n\t\tfor _, c := range p.comment.List {\n\t\t\tsize += len(c.Text)\n\t\t}\n\t\tp.nextComment()\n\t}\n\treturn size\n}", "func (s *gcBlobTaskStore) Count(ctx context.Context) (int, error) {\n\tdefer metrics.InstrumentQuery(\"gc_blob_task_count\")()\n\n\tq := \"SELECT COUNT(*) FROM gc_blob_review_queue\"\n\tvar count int\n\n\tif err := s.db.QueryRowContext(ctx, q).Scan(&count); err != nil {\n\t\treturn count, fmt.Errorf(\"counting GC blob tasks: %w\", err)\n\t}\n\n\treturn count, nil\n}", "func (game *Game) NumDistinctActions() int {\n\treturn int(C.GameNumDistinctActions(game.game))\n}" ]
[ "0.68079066", "0.6796185", "0.6678622", "0.6601007", "0.65994287", "0.6564124", "0.65543514", "0.6539888", "0.6526384", "0.6513839", "0.64962953", "0.6486291", "0.6461657", "0.638486", "0.63353264", "0.63241607", "0.6318741", "0.63061756", "0.6243758", "0.62380064", "0.62378746", "0.6220585", "0.620633", "0.61968046", "0.6171061", "0.6121614", "0.6091763", "0.60822076", "0.6018143", "0.5950989", "0.59445137", "0.59093654", "0.59075516", "0.5896424", "0.5881786", "0.58195764", "0.5753269", "0.5720177", "0.5645601", "0.56334037", "0.5594379", "0.55807394", "0.5565681", "0.55521196", "0.5543793", "0.55103266", "0.55067986", "0.5446873", "0.54441005", "0.5425608", "0.54179865", "0.5397907", "0.5384524", "0.5375621", "0.53722537", "0.53547305", "0.53526103", "0.5343816", "0.5325659", "0.53189856", "0.5295645", "0.5287216", "0.5283478", "0.5274075", "0.52619296", "0.5253266", "0.52498114", "0.5247458", "0.5243047", "0.52423835", "0.52373093", "0.52228785", "0.5214331", "0.5213373", "0.52103764", "0.5208144", "0.52052206", "0.5199832", "0.51956034", "0.51952195", "0.51885915", "0.5183358", "0.5183189", "0.51724726", "0.5167646", "0.5165896", "0.5162264", "0.51577485", "0.51540226", "0.51532954", "0.51527226", "0.5151885", "0.5148952", "0.51458836", "0.5142926", "0.51400924", "0.5138832", "0.5137771", "0.51300544" ]
0.8412568
1
NewOutput instantiates a new output plugin instance publishing to elasticsearch.
NewOutput создает новый экземпляр плагина вывода, публикующего данные в elasticsearch.
func (f elasticsearchOutputPlugin) NewOutput( config *outputs.MothershipConfig, topologyExpire int, ) (outputs.Outputer, error) { // configure bulk size in config in case it is not set if config.BulkMaxSize == nil { bulkSize := defaultBulkSize config.BulkMaxSize = &bulkSize } output := &elasticsearchOutput{} err := output.init(*config, topologyExpire) if err != nil { return nil, err } return output, nil }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (m *Manager) NewOutput(conf loutput.Config, pipelines ...processor.PipelineConstructorFunc) (output.Streamed, error) {\n\treturn bundle.AllOutputs.Init(conf, m, pipelines...)\n}", "func NewOutput() *Output {\n\treturn &Output{}\n}", "func newOutput(node rpcClient, txHash *chainhash.Hash, vout uint32, value uint64, redeem dex.Bytes) *output {\n\treturn &output{\n\t\ttxHash: *txHash,\n\t\tvout: vout,\n\t\tvalue: value,\n\t\tredeem: redeem,\n\t\tnode: node,\n\t}\n}", "func (c *Config) NewOutput(ctx context.Context) (output.Output, error) {\n\tvar e exporter.Exporter\n\tswitch c.Format {\n\tcase exporter.FormatJSON:\n\t\te = exporter.NewJSONExporter()\n\tcase exporter.FormatRaw:\n\t\te = exporter.NewRawExporter()\n\tcase exporter.FormatMD:\n\t\te = exporter.NewMarkdownExporter()\n\tcase exporter.FormatHTML:\n\t\te = exporter.NewHTMLExporter()\n\tcase exporter.FormatIntermediate:\n\t\te = exporter.NewIntermediateExporter()\n\tdefault:\n\t\treturn nil, fmt.Errorf(\"format %q not supported\", c.Format)\n\t}\n\n\to := &Output{\n\t\tpath: c.Path,\n\t\texporter: e,\n\t}\n\treturn o, nil\n}", "func newOutput(txHash *chainhash.Hash, vout uint32, value uint64, tree int8) *output {\n\treturn &output{\n\t\tpt: outPoint{\n\t\t\ttxHash: *txHash,\n\t\t\tvout: vout,\n\t\t},\n\t\tvalue: value,\n\t\ttree: tree,\n\t}\n}", "func newOutput(txHash *chainhash.Hash, vout uint32, value uint64, tree int8) *output {\n\treturn &output{\n\t\tpt: outPoint{\n\t\t\ttxHash: *txHash,\n\t\t\tvout: vout,\n\t\t},\n\t\tvalue: value,\n\t\ttree: tree,\n\t}\n}", "func NewOutput() *Output {\n\treturn &Output{\n\t\tConnections: make(map[Connection]bool),\n\t}\n}", "func NewOutput(source *ValueSource, controlProgram *Program, ordinal uint64) *Output {\n\treturn &Output{\n\t\tSource: source,\n\t\tControlProgram: controlProgram,\n\t\tOrdinal: ordinal,\n\t}\n}", "func NewOutput(value *Thunk) *Thunk {\n\treturn Normal(OutputType{value})\n}", "func NewOutput(t mockConstructorTestingTNewOutput) *Output 
{\n\tmock := &Output{}\n\tmock.Mock.Test(t)\n\n\tt.Cleanup(func() { mock.AssertExpectations(t) })\n\n\treturn mock\n}", "func NewOutput(conf output.MongoDBConfig, mgr bundle.NewManagement) (output.Streamed, error) {\n\tm, err := NewWriter(mgr, conf, mgr.Logger(), mgr.Metrics())\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tvar w output.Streamed\n\tif w, err = output.NewAsyncWriter(\"mongodb\", conf.MaxInFlight, m, mgr); err != nil {\n\t\treturn w, err\n\t}\n\treturn batcher.NewFromConfig(conf.Batching, w, mgr)\n}", "func NewOutputter(outputFormat string) (Outputter, error) {\n\tif _, exists := registry.Outputs[outputFormat]; !exists {\n\t\treturn nil, ErrorUnknownOutputter\n\t}\n\tfactory, ok := registry.Outputs[outputFormat]\n\tif !ok {\n\t\treturn nil, ErrorInvalidOutputter\n\t}\n\to := factory()\n\treturn o, nil\n}", "func (a *Agent) StartOutput(ctx context.Context, pluginName string) (string, error) {\n\toutputConfig := models.OutputConfig{\n\t\tName: pluginName,\n\t}\n\n\toutput, err := a.CreateOutput(pluginName)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tuniqueId, err := uuid.NewUUID()\n\tif err != nil {\n\t\treturn \"\", errors.New(\"errored while generating UUID for new INPUT\")\n\t}\n\n\tro := models.NewRunningOutput(pluginName, output, &outputConfig,\n\t\ta.Config.Agent.MetricBatchSize, a.Config.Agent.MetricBufferLimit, uniqueId.String())\n\n\terr = ro.Init()\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\terr = a.connectOutput(ctx, ro)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\terr = a.RunSingleOutput(ro, ctx)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\t// add new output to outputunit\n\ta.ou.outputs = append(a.ou.outputs, ro)\n\n\terr = a.Config.UpdateConfig(map[string]interface{}{\"unique_id\": uniqueId.String(), \"name\": pluginName}, uniqueId.String(), \"outputs\", \"START_PLUGIN\")\n\tif err != nil {\n\t\tlog.Printf(\"W! 
[agent] Unable to save configuration for output %s\", uniqueId.String())\n\t}\n\treturn uniqueId.String(), nil\n}", "func NewOutputController(service *goa.Service, om *output.Manager) *OutputController {\n\treturn &OutputController{\n\t\tController: service.NewController(\"OutputController\"),\n\t\tom: om,\n\t}\n}", "func NewOutput(output *Synapse) *Neuron {\n\treturn &Neuron{\n\t\tInputs: []*Synapse{},\n\t\tOutputs: []*Synapse{output},\n\t\tFunction: func(inputs, outputs []*Synapse) {\n\t\t\tvar sum float64\n\t\t\tfor _, s := range inputs {\n\t\t\t\tsum += (*s.Value * *s.Weight)\n\t\t\t}\n\t\t\toutputs[0].Value = &sum\n\t\t},\n\t}\n}", "func (out *elasticsearchOutput) Init(beat string, config outputs.MothershipConfig, topology_expire int) error {\n\n\tif len(config.Protocol) == 0 {\n\t\tconfig.Protocol = \"http\"\n\t}\n\n\tvar urls []string\n\n\tif len(config.Hosts) > 0 {\n\t\t// use hosts setting\n\t\tfor _, host := range config.Hosts {\n\t\t\turl := fmt.Sprintf(\"%s://%s%s\", config.Protocol, host, config.Path)\n\t\t\turls = append(urls, url)\n\t\t}\n\t} else {\n\t\t// use host and port settings\n\t\turl := fmt.Sprintf(\"%s://%s:%d%s\", config.Protocol, config.Host, config.Port, config.Path)\n\t\turls = append(urls, url)\n\t}\n\n\tes := NewElasticsearch(urls, config.Username, config.Password)\n\tout.Conn = es\n\n\tif config.Index != \"\" {\n\t\tout.Index = config.Index\n\t} else {\n\t\tout.Index = beat\n\t}\n\n\tout.TopologyExpire = 15000\n\tif topology_expire != 0 {\n\t\tout.TopologyExpire = topology_expire /*sec*/ * 1000 // millisec\n\t}\n\n\tout.FlushInterval = 1000 * time.Millisecond\n\tif config.Flush_interval != nil {\n\t\tout.FlushInterval = time.Duration(*config.Flush_interval) * time.Millisecond\n\t}\n\tout.BulkMaxSize = 10000\n\tif config.Bulk_size != nil {\n\t\tout.BulkMaxSize = *config.Bulk_size\n\t}\n\n\tif config.Max_retries != nil {\n\t\tout.Conn.SetMaxRetries(*config.Max_retries)\n\t}\n\n\tlogp.Info(\"[ElasticsearchOutput] Using Elasticsearch 
%s\", urls)\n\tlogp.Info(\"[ElasticsearchOutput] Using index pattern [%s-]YYYY.MM.DD\", out.Index)\n\tlogp.Info(\"[ElasticsearchOutput] Topology expires after %ds\", out.TopologyExpire/1000)\n\tif out.FlushInterval > 0 {\n\t\tlogp.Info(\"[ElasticsearchOutput] Insert events in batches. Flush interval is %s. Bulk size is %d.\", out.FlushInterval, out.BulkMaxSize)\n\t} else {\n\t\tlogp.Info(\"[ElasticsearchOutput] Insert events one by one. This might affect the performance of the shipper.\")\n\t}\n\n\tif config.Save_topology {\n\t\terr := out.EnableTTL()\n\t\tif err != nil {\n\t\t\tlogp.Err(\"Fail to set _ttl mapping: %s\", err)\n\t\t\t// keep trying in the background\n\t\t\tgo func() {\n\t\t\t\tfor {\n\t\t\t\t\terr := out.EnableTTL()\n\t\t\t\t\tif err == nil {\n\t\t\t\t\t\tbreak\n\t\t\t\t\t}\n\t\t\t\t\tlogp.Err(\"Fail to set _ttl mapping: %s\", err)\n\t\t\t\t\ttime.Sleep(5 * time.Second)\n\t\t\t\t}\n\t\t\t}()\n\t\t}\n\t}\n\n\tout.sendingQueue = make(chan EventMsg, 1000)\n\tgo out.SendMessagesGoroutine()\n\n\treturn nil\n}", "func (pub *Publisher) CreateOutput(nodeHWID string, outputType types.OutputType,\n\tinstance string) *types.OutputDiscoveryMessage {\n\toutput := pub.registeredOutputs.CreateOutput(nodeHWID, outputType, instance)\n\treturn output\n}", "func New(\n\tconf Config,\n\tmgr interop.Manager,\n\tlog log.Modular,\n\tstats metrics.Type,\n\tpipelines ...iprocessor.PipelineConstructorFunc,\n) (output.Streamed, error) {\n\tif mgrV2, ok := mgr.(interface {\n\t\tNewOutput(Config, ...iprocessor.PipelineConstructorFunc) (output.Streamed, error)\n\t}); ok {\n\t\treturn mgrV2.NewOutput(conf, pipelines...)\n\t}\n\tif c, ok := Constructors[conf.Type]; ok {\n\t\treturn c.constructor(conf, mgr, log, stats, pipelines...)\n\t}\n\treturn nil, component.ErrInvalidType(\"output\", conf.Type)\n}", "func newOutput(\n\tnames []Name,\n\tgenera map[string]struct{},\n\tts []token.TokenSN,\n\tversion string,\n\tcfg config.Config,\n) Output {\n\tfor i := range names {\n\t\tlg := 
math.Log10(names[i].Odds)\n\t\tif math.IsInf(lg, 0) {\n\t\t\tlg = 0\n\t\t}\n\t\tnames[i].OddsLog10 = lg\n\t}\n\tmeta := Meta{\n\t\tDate: time.Now(),\n\t\tFinderVersion: version,\n\t\tWithAllMatches: cfg.WithAllMatches,\n\t\tWithAmbiguousNames: cfg.WithAmbiguousNames,\n\t\tWithUniqueNames: cfg.WithUniqueNames,\n\t\tWithBayes: cfg.WithBayes,\n\t\tWithOddsAdjustment: cfg.WithOddsAdjustment,\n\t\tWithVerification: cfg.WithVerification,\n\t\tWordsAround: cfg.TokensAround,\n\t\tLanguage: cfg.Language.String(),\n\t\tLanguageDetected: cfg.LanguageDetected,\n\t\tTotalWords: len(ts),\n\t\tTotalNameCandidates: candidatesNum(ts),\n\t\tTotalNames: len(names),\n\t}\n\tif !cfg.WithAmbiguousNames {\n\t\tnames = FilterNames(names, genera)\n\t}\n\n\tif !cfg.WithBayesOddsDetails || cfg.WithOddsAdjustment {\n\t\tpostprocessNames(names, meta.TotalNameCandidates, cfg)\n\t}\n\to := Output{Meta: meta, Names: names}\n\to.WithLanguageDetection = o.LanguageDetected != \"\"\n\n\treturn o\n}", "func (a *Agent) CreateOutput(name string) (telegraf.Output, error) {\n\tp, exists := outputs.Outputs[name]\n\tif exists {\n\t\treturn p(), nil\n\t}\n\treturn nil, fmt.Errorf(\"could not find output plugin with name: %s\", name)\n}", "func NewOutputField() *OutputField {\n\tthis := OutputField{}\n\treturn &this\n}", "func NewOutputs(outputsCfg config.Outputs) Outputs {\n\toutputs := make(Outputs)\n\tfor _, o := range outputsCfg {\n\t\tName := o.IO.Name\n\t\tType := o.IO.Type\n\t\tRepr := msgs.Representation(o.IO.Representation)\n\t\tChan := o.IO.Channel\n\t\tif !msgs.IsMessageTypeRegistered(Type) {\n\t\t\terrorString := fmt.Sprintf(\"The '%s' message type has not been registered!\", Type)\n\t\t\tpanic(errorString)\n\t\t}\n\t\tif !msgs.DoesMessageTypeImplementsRepresentation(Type, Repr) {\n\t\t\terrorString := fmt.Sprintf(\"'%s' message-type does not implement codec for '%s' representation format\", Type, Repr)\n\t\t\tpanic(errorString)\n\t\t}\n\t\toutputs[Name] = Output{IO{Name: Name, Type: Type, 
Representation: Repr, Channel: Chan}}\n\t}\n\treturn outputs\n}", "func (x *fastReflection_Output) New() protoreflect.Message {\n\treturn new(fastReflection_Output)\n}", "func (p *TwitterOutputPlugin) Build(output *model.OutputDef) (model.OutputProvider, error) {\n\tconsumerKey := output.Props.Get(\"consumerKey\")\n\tif consumerKey == \"\" {\n\t\treturn nil, fmt.Errorf(\"missing consumer key property\")\n\t}\n\tconsumerSecret := output.Props.Get(\"consumerSecret\")\n\tif consumerSecret == \"\" {\n\t\treturn nil, fmt.Errorf(\"missing consumer secret property\")\n\t}\n\taccessToken := output.Props.Get(\"accessToken\")\n\tif accessToken == \"\" {\n\t\treturn nil, fmt.Errorf(\"missing access token property\")\n\t}\n\taccessTokenSecret := output.Props.Get(\"accessTokenSecret\")\n\tif accessTokenSecret == \"\" {\n\t\treturn nil, fmt.Errorf(\"missing access token secret property\")\n\t}\n\tanaconda.SetConsumerKey(consumerKey)\n\tanaconda.SetConsumerSecret(consumerSecret)\n\tapi := anaconda.NewTwitterApi(accessToken, accessTokenSecret)\n\n\treturn &TwitterOutputProvider{\n\t\tid: output.ID,\n\t\talias: output.Alias,\n\t\tspec: spec,\n\t\ttags: output.Tags,\n\t\tenabled: output.Enabled,\n\t\tapi: api,\n\t\tconsumerKey: consumerKey,\n\t\tconsumerSecret: consumerSecret,\n\t}, nil\n}", "func NewExporter(uri string, timeout time.Duration) *Exporter {\n\tcounters := make(map[string]*prometheus.CounterVec)\n\tgauges := make(map[string]*prometheus.GaugeVec)\n\n\tfor name, info := range counterVecMetrics {\n\t\tlog.Printf(\"Registering %s\", name)\n\t\tcounters[name] = prometheus.NewCounterVec(prometheus.CounterOpts{\n\t\t\tNamespace: namespace,\n\t\t\tName: name,\n\t\t\tHelp: info.help,\n\t\t}, append([]string{\"cluster\", \"node\"}, info.labels...))\n\t}\n\n\tfor name, info := range gaugeVecMetrics {\n\t\tlog.Printf(\"Registering %s\", name)\n\t\tgauges[name] = prometheus.NewGaugeVec(prometheus.GaugeOpts{\n\t\t\tNamespace: namespace,\n\t\t\tName: name,\n\t\t\tHelp: 
info.help,\n\t\t}, append([]string{\"cluster\", \"node\"}, info.labels...))\n\t}\n\n\tfor name, help := range counterMetrics {\n\t\tcounters[name] = prometheus.NewCounterVec(prometheus.CounterOpts{\n\t\t\tNamespace: namespace,\n\t\t\tName: name,\n\t\t\tHelp: help,\n\t\t}, []string{\"cluster\", \"node\"})\n\t}\n\n\tfor name, help := range gaugeMetrics {\n\t\tgauges[name] = prometheus.NewGaugeVec(prometheus.GaugeOpts{\n\t\t\tNamespace: namespace,\n\t\t\tName: name,\n\t\t\tHelp: help,\n\t\t}, []string{\"cluster\", \"node\"})\n\t}\n\n\t// Init our exporter.\n\treturn &Exporter{\n\t\tURI: uri,\n\n\t\tup: prometheus.NewGauge(prometheus.GaugeOpts{\n\t\t\tNamespace: namespace,\n\t\t\tName: \"up\",\n\t\t\tHelp: \"Was the Elasticsearch instance query successful?\",\n\t\t}),\n\n\t\tcounters: counters,\n\t\tgauges: gauges,\n\n\t\tclient: &http.Client{\n\t\t\tTransport: &http.Transport{\n\t\t\t\tDial: func(netw, addr string) (net.Conn, error) {\n\t\t\t\t\tc, err := net.DialTimeout(netw, addr, timeout)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\treturn nil, err\n\t\t\t\t\t}\n\t\t\t\t\tif err := c.SetDeadline(time.Now().Add(timeout)); err != nil {\n\t\t\t\t\t\treturn nil, err\n\t\t\t\t\t}\n\t\t\t\t\treturn c, nil\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n}", "func NewOutput(output string) io.Writer {\n\tswitch output {\n\tcase \"-\":\n\t\treturn os.Stdout\n\tdefault:\n\t\tf, err := os.Create(output)\n\t\tcheck(err)\n\t\treturn bufio.NewWriter(f)\n\t}\n}", "func NewOutput(addr sdk.CUAddress, coins sdk.Coins) Output {\n\treturn Output{\n\t\tAddress: addr,\n\t\tCoins: coins,\n\t}\n}", "func (a *Agent) UpdateOutputPlugin(uid string, config map[string]interface{}) (telegraf.Output, error) {\n\ta.pluginLock.Lock()\n\tplugin, ok := a.runningPlugins[uid]\n\ta.pluginLock.Unlock()\n\n\tif !ok {\n\t\tlog.Printf(\"E! 
[agent] You are trying to update an output that does not exist: %s \\n\", uid)\n\t\treturn nil, errors.New(\"you are trying to update an output that does not exist\")\n\t}\n\n\toutput := plugin.(*models.RunningOutput)\n\n\t// This code creates a copy of the struct and see if JSON Unmarshal works without errors\n\tconfigJSON, err := validateStructConfig(reflect.ValueOf(output.Output), config)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"could not update output plugin %s with error: %s\", uid, err)\n\t}\n\n\ttomlMap, err := generateTomlKeysMap(reflect.ValueOf(output.Output), config)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"could not update output plugin %s with error: %s\", uid, err)\n\t}\n\n\tif len(a.Config.Outputs) == 1 {\n\t\ta.incrementOutputCount(1)\n\t}\n\n\terr = a.StopOutputPlugin(uid, false)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\terr = json.Unmarshal(configJSON, &output.Output)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tro := models.NewRunningOutput(output.Config.Name, output.Output, output.Config,\n\t\ta.Config.Agent.MetricBatchSize, a.Config.Agent.MetricBufferLimit, output.UniqueId)\n\n\terr = a.RunSingleOutput(ro, a.Context)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\terr = a.Config.UpdateConfig(tomlMap, output.UniqueId, \"outputs\", \"UPDATE_PLUGIN\")\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"could not update output plugin %s with error: %s\", uid, err)\n\t}\n\n\tif len(a.Config.Outputs) == 1 {\n\t\ta.incrementOutputCount(-1)\n\t}\n\n\treturn output.Output, nil\n}", "func NewOutputter(dt, dtOut, tmax float64, nu int, outFcn OutFcnType) (o *Outputter) {\n\tif dtOut < dt {\n\t\tdtOut = dt\n\t}\n\to = new(Outputter)\n\to.Dt = dt\n\to.DtOut = dtOut\n\to.Nsteps = int(math.Ceil(tmax / o.Dt))\n\to.Tmax = float64(o.Nsteps) * o.Dt // fix tmax\n\to.Every = int(o.DtOut / o.Dt)\n\to.Nmax = int(math.Ceil(float64(o.Nsteps)/float64(o.Every))) + 1\n\to.T = make([]float64, o.Nmax)\n\to.U = Alloc(o.Nmax, nu)\n\tif outFcn != nil 
{\n\t\to.Fcn = outFcn\n\t\to.Fcn(o.U[o.Idx], 0)\n\t\tif o.Every > 1 {\n\t\t\to.Tidx = o.Every - 1 // use -1 here only for the first output\n\t\t}\n\t\to.Idx++\n\t}\n\treturn\n}", "func NewOutput(sink Sink, opts ...OutputOption) Output {\n\tvar config OutputConfig\n\tfor _, opt := range opts {\n\t\topt(&config)\n\t}\n\treturn newOutput(\"\", sink, config)\n}", "func NewOutputWidget(name string, x0, y0, x1, y1 float32, label string, initMsg string) *OutputWidget {\n\treturn &OutputWidget{name: name, x0: x0, y0: y0, x1: x1, y1: y1, label: label, initMsg: initMsg}\n}", "func NewJsonOutput(w http.ResponseWriter) (out JsonOutput) {\n\tout.ResponseWriter = w\n\treturn\n}", "func (s TestingSingleton) Output(file string) TestingBuildParams {\n\treturn buildParamsFromOutput(s.provider, file)\n}", "func (c *SearchCall) Output(output string) *SearchCall {\n\tc.urlParams_.Set(\"output\", output)\n\treturn c\n}", "func (a *Agent) storePluginOutput(plugin PluginOutput) error {\n\n\tif plugin.Data == nil {\n\t\tplugin.Data = make(PluginInventoryDataset, 0)\n\t}\n\n\tsort.Sort(plugin.Data)\n\n\t// Filter out ignored inventory data before writing the file out\n\tvar sortKey string\n\tignore := a.Context.Config().IgnoredInventoryPathsMap\n\tsimplifiedPluginData := make(map[string]interface{})\nDataLoop:\n\tfor _, data := range plugin.Data {\n\t\tif data == nil {\n\t\t\tcontinue\n\t\t}\n\t\tsortKey = data.SortKey()\n\t\tpluginSource := fmt.Sprintf(\"%s/%s\", plugin.Id, sortKey)\n\t\tif _, ok := ignore[strings.ToLower(pluginSource)]; ok {\n\t\t\tcontinue DataLoop\n\t\t}\n\t\tsimplifiedPluginData[sortKey] = data\n\t}\n\n\treturn a.store.SavePluginSource(\n\t\tplugin.Entity.Key.String(),\n\t\tplugin.Id.Category,\n\t\tplugin.Id.Term,\n\t\tsimplifiedPluginData,\n\t)\n}", "func newExporter(cfg component.Config, set exporter.CreateSettings) (*baseExporter, error) {\n\toCfg := cfg.(*Config)\n\n\tif oCfg.Endpoint == \"\" {\n\t\treturn nil, errors.New(\"OTLP exporter config requires an 
Endpoint\")\n\t}\n\n\tuserAgent := fmt.Sprintf(\"%s/%s (%s/%s)\",\n\t\tset.BuildInfo.Description, set.BuildInfo.Version, runtime.GOOS, runtime.GOARCH)\n\n\treturn &baseExporter{config: oCfg, settings: set.TelemetrySettings, userAgent: userAgent}, nil\n}", "func New() *Module {\n\tconstruct()\n\tm := new(Module)\n\tl.Register(m, \"outputFunc\")\n\tm.Output(defaultOutput)\n\treturn m\n}", "func (pub *Publisher) PublishOutputEvent(node *types.NodeDiscoveryMessage) error {\n\treturn PublishOutputEvent(node, pub.registeredOutputs, pub.registeredOutputValues, pub.messageSigner)\n}", "func NewPgoutputEventHandler(s RowSink) *PgoutputEventHandler {\n\treturn &PgoutputEventHandler{\n\t\tsink: s,\n\t\tlog: pkglog.NewLogger(\"replication-event-handler\"),\n\t\trelations: &relationSet{},\n\t}\n}", "func (act *PublishAction) Output() error {\n\t// do nothing.\n\treturn nil\n}", "func (c *StdOutputConfig) CreateOutput() (plugin.Output, error) {\n\treturn &StdOutput{\n\t\tdata: make(chan []byte),\n\t\terrs: make(chan error),\n\t\tstopChan: make(chan struct{}),\n\t\twg: sync.WaitGroup{},\n\t}, nil\n}", "func NewPlugin(plugins func() discovery.Plugins, choices selector.Options) instance.Plugin {\n\tbase := &internal.Base{\n\t\tPlugins: plugins,\n\t\tChoices: choices,\n\t\tSelectFunc: SelectOne,\n\t}\n\treturn &impl{\n\t\tPlugin: base.Init(),\n\t}\n}", "func NewOutput(path string, batchSize int) (*Path, error) {\n\n\tp := &Path{\n\t\tname: path,\n\t\tbatchSize: batchSize,\n\t}\n\n\tif err := p.create(); err != nil {\n\t\treturn p, err\n\t}\n\n\treturn p, nil\n}", "func newExporter(w io.Writer) (trace.SpanExporter, error) {\n\treturn stdouttrace.New(\n\t\tstdouttrace.WithWriter(w),\n\t\t// Use human-readable output.\n\t\tstdouttrace.WithPrettyPrint(),\n\t\t// Do not print timestamps for the demo.\n\t\tstdouttrace.WithoutTimestamps(),\n\t)\n}", "func newTestPublisherWithBulk(response OutputResponse) *testPublisher {\n\treturn newTestPublisher(defaultBulkSize, response)\n}", "func (md 
*MassDns) SetOutput(oc chan dns.RR) {\n\tmd.output = oc\n}", "func (r *Request) NewResult(plugin string) *Result {\n\treturn &Result{\n\t\tPlugin: plugin,\n\t\tVersion: r.Version,\n\t\tMetadata: make(map[string]string),\n\t}\n}", "func NewMockedOutput(txID utxo.TransactionID, index uint16, balance uint64) (out *MockedOutput) {\n\tout = model.NewStorable[utxo.OutputID, MockedOutput](&mockedOutput{\n\t\tTxID: txID,\n\t\tIndex: index,\n\t\tBalance: balance,\n\t})\n\tout.SetID(utxo.OutputID{TransactionID: txID, Index: index})\n\treturn out\n}", "func newTXOutput(value int, address string) TXOutput {\n\ttxo := TXOutput{value, address}\n\treturn txo\n}", "func NewOutputsClient(subscriptionID string, credential azcore.TokenCredential, options *arm.ClientOptions) *OutputsClient {\n\tcp := arm.ClientOptions{}\n\tif options != nil {\n\t\tcp = *options\n\t}\n\tif len(cp.Host) == 0 {\n\t\tcp.Host = arm.AzurePublicCloud\n\t}\n\treturn &OutputsClient{subscriptionID: subscriptionID, ep: string(cp.Host), pl: armruntime.NewPipeline(module, version, credential, &cp)}\n}", "func (p *jsonOutputNode) New(attr string) outputNode {\n\treturn &jsonOutputNode{make(map[string]interface{})}\n}", "func NewOutputter(name string, measurements <-chan Measurement, config Config) (Outputter, error) {\n\tswitch name {\n\tcase \"stdoutl2metraw\":\n\t\t{\n\t\t\treturn NewStdOutL2MetRaw(measurements, config), nil\n\t\t}\n\tcase \"stdoutl2metder\":\n\t\t{\n\t\t\treturn NewStdOutL2MetDer(measurements, config), nil\n\t\t}\n\tcase \"librato\":\n\t\t{\n\t\t\treturn NewLibratoOutputter(measurements, config), nil\n\t\t}\n\tcase \"carbon\":\n\t\t{\n\t\t\treturn NewCarbonOutputter(measurements, config), nil\n\t\t}\n\tcase \"statsd\":\n\t\t{\n\t\t\treturn NewStatsdOutputter(measurements, config), nil\n\t\t}\n\t}\n\n\treturn nil, errors.New(\"unknown outputter\")\n}", "func newTestPublisherNoBulk(response OutputResponse) *testPublisher {\n\treturn newTestPublisher(-1, response)\n}", "func NewPlugin(opts 
...Option) *Plugin {\n\tp := &Plugin{}\n\n\tp.SetName(\"generator\")\n\tp.KVStore = &etcd.DefaultPlugin\n\tp.KVScheduler = &kvscheduler.DefaultPlugin\n\n\tfor _, o := range opts {\n\t\to(p)\n\t}\n\n\tp.Setup()\n\n\treturn p\n}", "func NewEs(componentId ...string) *Es {\n id := defaultEsId\n if len(componentId) > 0 {\n id = componentId[0]\n }\n e := &Es{}\n e.client = pgo2.App().Component(id, es.New).(*es.Client)\n\n return e\n}", "func (z *ZapPlugin) New(results []index.Document) (\n\tsegment.Segment, uint64, error) {\n\treturn z.newWithChunkFactor(results, defaultChunkFactor)\n}", "func NewExporter(cfg *Configuration) *Exporter {\n\te := Exporter{}\n\te.Client = ovsdb.NewOvnClient()\n\te.initParas(cfg)\n\treturn &e\n}", "func New(s *lmsensors.Scanner) *Exporter {\n\treturn &Exporter{\n\t\ts: s,\n\t}\n}", "func (sdkLogger SdkLogger) Output(calldepth int, s string) error {\n\tlog.WithField(\"type\", \"nsq driver\").Info(s)\n\treturn nil\n}", "func New(api api.API, inputCommand []string) (p *Publisher, err error) {\n\tp = new(Publisher)\n\n\tp.sdAPI = api\n\tp.specPath, p.tag, err = parsePublishCommand(inputCommand)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"Failed to parse command:%v\", err)\n\t}\n\n\tp.commandSpec, err = util.LoadYaml(p.specPath)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"Yaml load failed:%v\", err)\n\t}\n\tp.commandSpec.SpecYamlPath = p.specPath\n\n\treturn\n}", "func (pub *Publisher) CreateInputFromOutput(\n\tnodeHWID string, inputType types.InputType, instance string, outputAddress string,\n\thandler func(input *types.InputDiscoveryMessage, sender string, value string)) {\n\n\tinput := pub.inputFromOutputs.CreateInput(nodeHWID, inputType, instance, outputAddress, handler)\n\n\t_ = input\n}", "func NewPlugin(namespace string, dfn plugin.Definition, cfg *plugin.WorkerConfig) *Plugin {\n\treturn &Plugin{\n\t\tName: dfn.Name,\n\t\tUUID: gouuid.NewV4(),\n\t\tResultType: dfn.ResultType,\n\t\tPodSpec: &dfn.PodSpec,\n\t\tNamespace: 
namespace,\n\t\tConfig: cfg,\n\t}\n}", "func newSinkMetaHandler(w http.ResponseWriter, r *http.Request) {\n\tdefer r.Body.Close()\n\tvars := mux.Vars(r)\n\tpluginName := vars[\"name\"]\n\n\tlanguage := getLanguage(r)\n\tptrMetadata, err := meta.GetSinkMeta(pluginName, language)\n\tif err != nil {\n\t\thandleError(w, err, \"\", logger)\n\t\treturn\n\t}\n\tjsonResponse(ptrMetadata, w, logger)\n}", "func NewElasticsearchOutboundOp(opts ...Option) *Schema {\n\treturn NewDBOutboundOp(\"elasticsearch\", opts...)\n}", "func Output(props *OutputProps, children ...Element) *OutputElem {\n\trProps := &_OutputProps{\n\t\tBasicHTMLElement: newBasicHTMLElement(),\n\t}\n\n\tif props != nil {\n\t\tprops.assign(rProps)\n\t}\n\n\treturn &OutputElem{\n\t\tElement: createElement(\"output\", rProps, children...),\n\t}\n}", "func GetOutputPlugin() (op model.OutputPlugin, err error) {\n\treturn &TwitterOutputPlugin{}, nil\n}", "func newPlugin() (p *slackscot.Plugin) {\n\tp = new(slackscot.Plugin)\n\tp.Name = \"tester\"\n\tp.Commands = []slackscot.ActionDefinition{{\n\t\tMatch: func(m *slackscot.IncomingMessage) bool {\n\t\t\treturn strings.HasPrefix(m.NormalizedText, \"make\")\n\t\t},\n\t\tUsage: \"make `<something>`\",\n\t\tDescription: \"Have the test bot make something for you\",\n\t\tAnswer: func(m *slackscot.IncomingMessage) *slackscot.Answer {\n\t\t\treturn &slackscot.Answer{Text: \"Ready\"}\n\t\t},\n\t}}\n\n\treturn p\n}", "func New(options ...Option) (metric.Exporter, error) {\n\tcfg := newConfig(options...)\n\texp := &exporter{\n\t\ttemporalitySelector: cfg.temporalitySelector,\n\t\taggregationSelector: cfg.aggregationSelector,\n\t}\n\texp.encVal.Store(*cfg.encoder)\n\treturn exp, nil\n}", "func NewCliOutput(color bool) OutputWriter {\n\tau := aurora.NewAurora(color)\n\tif run.GOOS == \"windows\" {\n\t\tau = aurora.NewAurora(false)\n\t}\n\n\tt := newCliTemplate()\n\n\treturn OutputWriter{\n\t\tout: os.Stdout,\n\t\tau: au,\n\t\ttemplate: t,\n\t}\n}", "func (m *Module) 
Output(outputFunc func(Info) bar.Output) *Module {\n\tm.outputFunc.Set(outputFunc)\n\treturn m\n}", "func New() *Action {\n\treturn &Action{w: os.Stdout}\n}", "func NewOutputStrategy(root string, g g.Generator) (OutputStrategy, error) {\n\toutput := outputStrategies[g.Spec.Build.Output.Strategy]\n\tif output == nil {\n\t\treturn nil, fmt.Errorf(\"Output Strategy %s unknown\", g.Spec.Build.Output.Strategy)\n\t}\n\n\treturn output(root, g), nil\n}", "func New(cfg *Config, logger logger.Logger, registerer prometheus.Registerer) (*Plugin, error) {\n\tservice := &Plugin{\n\t\tcfg: cfg,\n\t\tregisterer: registerer,\n\t\tLogger: logger.NewLogger(\"simplePlugin\"),\n\t}\n\treturn service, nil\n}", "func NewElasticsearch(conf Config, mgr interop.Manager, log log.Modular, stats metrics.Type) (output.Streamed, error) {\n\telasticWriter, err := writer.NewElasticsearchV2(conf.Elasticsearch, mgr, log, stats)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tw, err := NewAsyncWriter(\n\t\tTypeElasticsearch, conf.Elasticsearch.MaxInFlight, elasticWriter, log, stats,\n\t)\n\tif err != nil {\n\t\treturn w, err\n\t}\n\treturn NewBatcherFromConfig(conf.Elasticsearch.Batching, w, mgr, log, stats)\n}", "func (h *MemHome) Output(p, name string) io.WriteCloser {\n\tpkg := h.pkgs[p]\n\tif pkg == nil {\n\t\tpanic(\"pkg not exists\")\n\t}\n\tret := newMemFile()\n\tpkg.outs[name] = ret\n\treturn ret\n}", "func New(config *Config, log *zap.Logger) (exporter.TraceExporter, error) {\n\thttpClient := &http.Client{}\n\toptions := []elastic.ClientOptionFunc{\n\t\telastic.SetURL(config.Servers...),\n\t\telastic.SetBasicAuth(config.Username, config.Password),\n\t\telastic.SetSniff(config.Sniffer),\n\t\telastic.SetHttpClient(httpClient),\n\t}\n\tif config.TokenFile != \"\" {\n\t\ttoken, err := loadToken(config.TokenFile)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\thttpClient.Transport = &tokenAuthTransport{\n\t\t\ttoken: token,\n\t\t\twrapped: &http.Transport{},\n\t\t}\n\t}\n\n\tesRawClient, 
err := elastic.NewClient(options...)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to create Elasticsearch client for %s, %v\", config.Servers, err)\n\t}\n\tbulk, err := esRawClient.BulkProcessor().\n\t\tBulkActions(config.bulkActions).\n\t\tBulkSize(config.bulkSize).\n\t\tWorkers(config.bulkWorkers).\n\t\tFlushInterval(config.bulkFlushInterval).\n\t\tDo(context.Background())\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tversion := config.Version\n\tif version == 0 {\n\t\tversion, err = getVersion(esRawClient, config.Servers[0])\n\t}\n\tvar tags []string\n\tif config.TagsAsFields.AllAsFields && config.TagsAsFields.File != \"\" {\n\t\ttags, err = loadTagsFromFile(config.TagsAsFields.File)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"failed to load tags file: %v\", err)\n\t\t}\n\t}\n\n\tw := esSpanStore.NewSpanWriter(esSpanStore.SpanWriterParams{\n\t\tLogger: log,\n\t\tMetricsFactory: metrics.NullFactory,\n\t\tClient: eswrapper.WrapESClient(esRawClient, bulk, version),\n\t\tIndexPrefix: config.IndexPrefix,\n\t\tUseReadWriteAliases: config.UseWriteAlias,\n\t\tAllTagsAsFields: config.TagsAsFields.AllAsFields,\n\t\tTagKeysAsFields: tags,\n\t\tTagDotReplacement: config.TagsAsFields.DotReplacement,\n\t})\n\n\tif config.CreateTemplates {\n\t\tspanMapping, serviceMapping := es.GetMappings(int64(config.Shards), int64(config.Shards), version)\n\t\terr := w.CreateTemplates(spanMapping, serviceMapping)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\tstorage := jexporter.Storage{\n\t\tWriter: w,\n\t}\n\treturn exporterhelper.NewTraceExporter(\n\t\tconfig,\n\t\tstorage.Store,\n\t\texporterhelper.WithShutdown(func() error {\n\t\t\treturn w.Close()\n\t\t}))\n}", "func NewOut(name string, f []float64) *Out {\n\treturn &Out{\n\t\tname: name,\n\t\tframe: f,\n\t}\n}", "func codeintelUploadOutput() (out *output.Output) {\n\tif codeintelUploadFlags.json || codeintelUploadFlags.noProgress || codeintelUploadFlags.verbosity > 0 {\n\t\treturn 
nil\n\t}\n\n\treturn output.NewOutput(flag.CommandLine.Output(), output.OutputOpts{\n\t\tVerbose: true,\n\t})\n}", "func NewLogger(output *os.File, component string) (Logger, error) {\n\tlog := zerolog.New(output).With().\n\t\tStr(\"component\", component).\n\t\tLogger()\n\n\tswitch viper.GetString(\"log-level\") {\n\tcase \"debug\":\n\t\tzerolog.SetGlobalLevel(zerolog.DebugLevel)\n\tcase \"warning\":\n\t\tzerolog.SetGlobalLevel(zerolog.WarnLevel)\n\tcase \"fatal\":\n\t\tzerolog.SetGlobalLevel(zerolog.FatalLevel)\n\tcase \"info\":\n\t\tzerolog.SetGlobalLevel(zerolog.InfoLevel)\n\tdefault:\n\t\tzerolog.SetGlobalLevel(zerolog.InfoLevel)\n\t\tlog.Info().Msgf(\"Unknown log-level %s, using info.\", viper.GetString(\"log-level\"))\n\t}\n\n\treturn logger{\n\t\tLogger: log,\n\t}, nil\n}", "func New(out string, truncate bool) *Engine {\n\tif (len(_extractors) < 1) || (len(_loaders) < 1) {\n\t\t// we need at least 1 extractor and 1 loader for work\n\t\treturn nil\n\t}\n\te := &Engine{\n\t\textractors: _extractors,\n\t\tloaders: _loaders,\n\t\toutputFolder: out,\n\t}\n\tif truncate {\n\t\te.Clean()\n\t}\n\treturn e\n}", "func (p *protoOutputNode) New(attr string) outputNode {\n\tuc := nodePool.Get().(*graph.Node)\n\tuc.Attribute = attr\n\treturn &protoOutputNode{uc}\n}", "func NewExporter(dsIP string, interval time.Duration) (*Exporter, error) {\n\tlog.Infof(\"Setup Syno client using diskstation: %s and interval %s\\n\", dsIP, interval)\n\tclient, err := syno.NewClient(dsIP, interval)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"Can't create the Syno client: %s\", err)\n\t}\n\n\tlog.Debugln(\"Init exporter\")\n\treturn &Exporter{\n\t\tClient: client,\n\t}, nil\n}", "func NewExporter(o Options) (*Exporter, error) {\n\tif o.Host == \"\" {\n\t\t// default Host\n\t\to.Host = \"127.0.0.1\"\n\t}\n\n\tif o.Port == 0 {\n\t\t// default Port\n\t\to.Port = 2003\n\t}\n\n\te := &Exporter{\n\t\topts: o,\n\t}\n\n\tfor _, val := range o.Tags {\n\t\te.tags += \";\" + val\n\t}\n\n\tb := 
bundler.NewBundler((*view.Data)(nil), func(items interface{}) {\n\t\tvds := items.([]*view.Data)\n\t\te.sendBundle(vds)\n\t})\n\te.bundler = b\n\n\te.bundler.BufferedByteLimit = defaultBufferedViewDataLimit\n\te.bundler.BundleCountThreshold = defaultBundleCountThreshold\n\te.bundler.DelayThreshold = defaultDelayThreshold\n\n\te.connectGraphite = func() (*client.Graphite, error) {\n\t\treturn client.NewGraphite(o.Host, o.Port)\n\t}\n\n\treturn e, nil\n}", "func NewOutputCodec() *codec.MsgpackHandle {\n\t_codec := &codec.MsgpackHandle{}\n\t_codec.MapType = reflect.TypeOf(map[string]interface{}(nil))\n\t_codec.RawToString = false\n\t// _codec.DecodeOptions.MapValueReset = true\n\t_codec.StructToArray = true\n\treturn _codec\n}", "func NewMySQLOutput(cfg *config.Config, outCfg *config.Output) (outputs.Output, error) {\n\tif outCfg == nil {\n\t\toutCfg = &config.Output{\n\t\t\tMySQL: &config.MySQL{},\n\t\t}\n\t}\n\tm := MySQL{\n\t\tlogger: log.WithFields(logrus.Fields{\"output\": NameMySQL}),\n\t\tconfig: outCfg.MySQL,\n\t\tdbCons: map[string]*sqlx.DB{},\n\t}\n\tif m.config.DSN == \"\" {\n\t\treturn nil, fmt.Errorf(\"no DSN for mysql connection given\")\n\t}\n\tif m.config.TableNamePattern == \"\" {\n\t\tm.config.TableNamePattern = defaultTableNamePattern\n\t}\n\n\treturn m, nil\n}", "func NewExport(info *ExportInfo) *ExportData {\n\tif len(agent.Version) > 0 {\n\t\tinfo.AgentVersion = agent.Version\n\t} else {\n\t\tinfo.AgentVersion = \"debug\"\n\t}\n\n\tinfo.ExportVersion = \"1.0\"\n\tinfo.CreationDate = time.Now()\n\treturn &ExportData{\n\t\tInfo: info,\n\t}\n}", "func RegisterOutput(name string, factory OutputCtr) {\n\tif _, exists := registry.Outputs[name]; !exists {\n\t\tregistry.Outputs[name] = factory\n\t}\n}", "func (all *Widgets) Output() *OutputWidget { return all.widgets[OutputWidgetName].(*OutputWidget) }", "func New(outputToFile bool, outputFile string, pageSize uint, allowOutputOverwrite bool) (*Writer, error) {\n\tif allowOutputOverwrite == false && 
core.IsFileExisting(outputFile) {\n\t\treturn nil, errors.New(\"Output file already existing. You can use `-o` to allow overwrite\")\n\t}\n\n\tfilepool := file.New(outputFile, pageSize)\n\n\treturn &Writer{outputToFile, outputFile, filepool}, nil\n}", "func NewOTExporter(conf *envvar.Configuration) (*prometheus.Exporter, error) {\n\tif err := runtime.Start(runtime.WithMinimumReadMemStatsInterval(time.Second)); err != nil {\n\t\treturn nil, fmt.Errorf(\"runtime.Start %w\", err)\n\t}\n\n\tpromExporter, err := prometheus.NewExportPipeline(prometheus.Config{})\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"prometheus.NewExportPipeline %w\", err)\n\t}\n\n\tglobal.SetMeterProvider(promExporter.MeterProvider())\n\n\t//-\n\n\tjaegerEndpoint, _ := conf.Get(\"JAEGER_ENDPOINT\")\n\n\tjaegerExporter, err := jaeger.NewRawExporter(\n\t\tjaeger.WithCollectorEndpoint(jaegerEndpoint),\n\t\tjaeger.WithSDKOptions(sdktrace.WithSampler(sdktrace.AlwaysSample())),\n\t\tjaeger.WithProcessFromEnv(),\n\t)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"jaeger.NewRawExporter %w\", err)\n\t}\n\n\ttp := sdktrace.NewTracerProvider(\n\t\tsdktrace.WithSampler(sdktrace.AlwaysSample()),\n\t\tsdktrace.WithSyncer(jaegerExporter),\n\t)\n\n\totel.SetTracerProvider(tp)\n\totel.SetTextMapPropagator(propagation.NewCompositeTextMapPropagator(propagation.TraceContext{}, propagation.Baggage{}))\n\n\treturn promExporter, nil\n}", "func NewExporter(uri string, timeout time.Duration, logger log.Logger) (*Exporter, error) {\n\treturn &Exporter{\n\t\tURI: uri,\n\t\ttotalScrapes: prometheus.NewCounter(prometheus.CounterOpts{\n\t\t\tNamespace: namespace,\n\t\t\tName: \"exporter_scrapes_total\",\n\t\t\tHelp: \"Current total iqAir scrapes.\",\n\t\t}),\n\t\tjsonParseFailures: prometheus.NewCounter(prometheus.CounterOpts{\n\t\t\tNamespace: namespace,\n\t\t\tName: \"exporter_json_parse_failures_total\",\n\t\t\tHelp: \"Number of errors while parsing JSON.\",\n\t\t}),\n\t\tlogger: logger,\n\t}, nil\n}", "func 
NewPluginCommand(cmd *cobra.Command, dockerCli *client.DockerCli) {\n}", "func NewFakeOutput(t testing.TB) *FakeOutput {\n\treturn &FakeOutput{\n\t\tReceived: make(chan *entry.Entry, 100),\n\t\tSugaredLogger: zaptest.NewLogger(t).Sugar(),\n\t}\n}", "func NewHTTPPublisher(endpoint string) *HTTPPublisher { return &HTTPPublisher{endpoint} }", "func newBgMetadataElasticSearchConnector(elasticSearchClient ElasticSearchClient, registry prometheus.Registerer, bulkSize, maxRetry uint, indexName, IndexDateFmt string) *BgMetadataElasticSearchConnector {\n\tvar esc = BgMetadataElasticSearchConnector{\n\t\tclient: elasticSearchClient,\n\t\tBulkSize: bulkSize,\n\t\tbulkBuffer: make([]ElasticSearchDocument, 0, bulkSize),\n\t\tMaxRetry: maxRetry,\n\t\tIndexName: indexName,\n\t\tIndexDateFmt: IndexDateFmt,\n\n\t\tUpdatedDocuments: prometheus.NewCounterVec(prometheus.CounterOpts{\n\t\t\tNamespace: namespace,\n\t\t\tName: \"updated_documents\",\n\t\t\tHelp: \"total number of documents updated in ElasticSearch splited between metrics and directories\",\n\t\t}, []string{\"status\", \"type\"}),\n\n\t\tHTTPErrors: prometheus.NewCounterVec(prometheus.CounterOpts{\n\t\t\tNamespace: namespace,\n\t\t\tName: \"http_errors\",\n\t\t\tHelp: \"total number of http errors encountered partitionned by status code\",\n\t\t}, []string{\"code\"}),\n\n\t\tWriteDurationMs: prometheus.NewHistogram(prometheus.HistogramOpts{\n\t\t\tNamespace: namespace,\n\t\t\tName: \"write_duration_ms\",\n\t\t\tHelp: \"time spent writing to ElasticSearch based on `took` field of response \",\n\t\t\tBuckets: []float64{250, 500, 750, 1000, 1500, 2000, 5000, 10000}}),\n\n\t\tRequestSize: prometheus.NewHistogram(prometheus.HistogramOpts{\n\t\t\tNamespace: namespace,\n\t\t\tName: \"write_request_size_bytes\",\n\t\t\tHelp: \"Size of batch create requests performed on elasticsearch\",\n\t\t\tBuckets: []float64{10000, 100000, 1000000, 5000000, 10000000, 20000000, 50000000}}),\n\n\t\tDocumentBuildDurationMs: 
prometheus.NewHistogram(prometheus.HistogramOpts{\n\t\t\tNamespace: namespace,\n\t\t\tName: \"document_build_duration_ms\",\n\t\t\tHelp: \"time spent building an ElasticSearch document\",\n\t\t\tBuckets: []float64{1, 5, 10, 50, 100, 250, 500, 750, 1000, 2000}}),\n\t\tlogger: zap.L(),\n\t}\n\t_ = registry.Register(esc.UpdatedDocuments)\n\t_ = registry.Register(esc.WriteDurationMs)\n\t_ = registry.Register(esc.DocumentBuildDurationMs)\n\t_ = registry.Register(esc.HTTPErrors)\n\t_ = registry.Register(esc.RequestSize)\n\tif esc.IndexName == \"\" {\n\t\tesc.IndexName = default_metrics_metadata_index\n\t}\n\tif esc.IndexDateFmt == \"\" {\n\t\tesc.DirectoriesIndexAlias = fmt.Sprintf(\"%s_%s\", esc.IndexName, directories_index_suffix)\n\t\tesc.MetricsIndexAlias = fmt.Sprintf(\"%s_%s\", esc.IndexName, metrics_index_suffix)\n\t}\n\n\tesc.KnownIndices = map[string]bool{}\n\treturn &esc\n}", "func NewPlugin(name string, path string, args []string, config skyconfig.Configuration) Plugin {\n\tfactory := transportFactories[name]\n\tif factory == nil {\n\t\tpanic(fmt.Errorf(\"unable to find plugin transport '%v'\", name))\n\t}\n\tp := Plugin{\n\t\ttransport: factory.Open(path, args, config),\n\t\tgatewayMap: map[string]*router.Gateway{},\n\t}\n\treturn p\n}", "func New() *AttestorPlugin {\n\treturn &AttestorPlugin{}\n}", "func New() (*Plugin, error) {\n\treturn &Plugin{\n\t\tHandler: admission.NewHandler(admission.Create, admission.Update),\n\t}, nil\n}", "func NewUsecase(outputPort Outport) Inport {\n\treturn &showPostBySlugInteractor{\n\t\toutport: outputPort,\n\t}\n}", "func NewExporter(endpoint string) (*Exporter, error) {\n\tlog.Infof(\"Setup Pihole exporter using URL: %s\", endpoint)\n\tpihole, err := pihole.NewClient(endpoint, auth)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &Exporter{\n\t\tPihole: pihole,\n\t}, nil\n}" ]
[ "0.6398827", "0.6216201", "0.61832756", "0.6142493", "0.600292", "0.600292", "0.58812755", "0.5859912", "0.57929546", "0.57894796", "0.578846", "0.5770203", "0.5722518", "0.5554305", "0.5553186", "0.55329394", "0.55323535", "0.55209005", "0.54609996", "0.5429639", "0.54219884", "0.5405621", "0.53437126", "0.53307337", "0.53292", "0.52711636", "0.5236616", "0.52067727", "0.5201567", "0.51732534", "0.51714903", "0.505765", "0.5002947", "0.5000211", "0.49892643", "0.4986545", "0.4972501", "0.49460304", "0.4929581", "0.49263844", "0.49176493", "0.4913875", "0.4887396", "0.48831442", "0.4876739", "0.48689485", "0.48527572", "0.4850293", "0.48355407", "0.48313758", "0.4824092", "0.48172522", "0.47862184", "0.47666293", "0.4765697", "0.4761946", "0.47535133", "0.47486657", "0.47359955", "0.4728658", "0.47230285", "0.47140732", "0.47137624", "0.47006634", "0.47002882", "0.46996567", "0.4697176", "0.46963033", "0.46652922", "0.46609977", "0.4654663", "0.464867", "0.46448267", "0.46338913", "0.4631898", "0.46292064", "0.46277216", "0.46134928", "0.46100476", "0.46051022", "0.4591692", "0.45904315", "0.45849147", "0.45820555", "0.45789778", "0.45755494", "0.4558131", "0.4555649", "0.4549276", "0.45422187", "0.45321774", "0.45217153", "0.45151034", "0.45108235", "0.4493657", "0.4492377", "0.4489348", "0.44855615", "0.44854528", "0.4485322" ]
0.8399119
0
New returns a new PagerDuty notifier.
New возвращает новый уведомитель PagerDuty.
func New(c *config.PagerdutyConfig, t *template.Template, l log.Logger, httpOpts ...commoncfg.HTTPClientOption) (*Notifier, error) { client, err := commoncfg.NewClientFromConfig(*c.HTTPConfig, "pagerduty", httpOpts...) if err != nil { return nil, err } n := &Notifier{conf: c, tmpl: t, logger: l, client: client} if c.ServiceKey != "" || c.ServiceKeyFile != "" { n.apiV1 = "https://events.pagerduty.com/generic/2010-04-15/create_event.json" // Retrying can solve the issue on 403 (rate limiting) and 5xx response codes. // https://v2.developer.pagerduty.com/docs/trigger-events n.retrier = &notify.Retrier{RetryCodes: []int{http.StatusForbidden}, CustomDetailsFunc: errDetails} } else { // Retrying can solve the issue on 429 (rate limiting) and 5xx response codes. // https://v2.developer.pagerduty.com/docs/events-api-v2#api-response-codes--retry-logic n.retrier = &notify.Retrier{RetryCodes: []int{http.StatusTooManyRequests}, CustomDetailsFunc: errDetails} } return n, nil }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func New(c *config.DingTalkConfig, t *template.Template, l log.Logger) (*Notifier, error) {\n\tclient, err := commoncfg.NewClientFromConfig(*c.HTTPConfig, \"dingtalk\", false, false)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &Notifier{conf: c, tmpl: t, logger: l, client: client}, nil\n}", "func New(done <-chan bool) *Notifier {\n\tnotifier := Notifier{\n\t\tnotificationMessages: make(chan string),\n\t\tobservers: make(map[chan *model.Notification]bool),\n\t\tdone: done,\n\t}\n\n\tgo notifier.dispatch()\n\n\treturn &notifier\n}", "func New(conf *config.YachConfig, t *template.Template) (*Notifier, error) {\n\tclient, err := commoncfg.NewClientFromConfig(*conf.HTTPConfig, \"yach\", false)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &Notifier{\n\t\tconf: conf,\n\t\ttmpl: t,\n\t\tclient: client,\n\t\tlogger: logging.DefaultLogger.WithField(\"notify\", \"yach\"),\n\t\tretrier: &notify.Retrier{},\n\t}, nil\n}", "func New(config *model.NotifMail, meta model.Meta) notifier.Notifier {\n\treturn notifier.Notifier{\n\t\tHandler: &Client{\n\t\t\tcfg: config,\n\t\t\tmeta: meta,\n\t\t},\n\t}\n}", "func New(config *model.NotifAmqp, app model.App) notifier.Notifier {\n\treturn notifier.Notifier{\n\t\tHandler: &Client{\n\t\t\tcfg: config,\n\t\t\tapp: app,\n\t\t},\n\t}\n}", "func NewNotifier(cfg Config) (forward.Notifier, error) {\n\terr := cfg.defaults()\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"%s: %w\", err, internalerrors.ErrInvalidConfiguration)\n\t}\n\n\treturn &notifier{\n\t\tcfg: cfg,\n\t\ttplRenderer: cfg.TemplateRenderer,\n\t\tclient: cfg.Client,\n\t\tlogger: cfg.Logger.WithValues(log.KV{\"notifier\": \"telegram\"}),\n\t}, nil\n}", "func NewNotifier(slack *chat.Slack) (*Notifier, error) {\n\tnotifier := &Notifier{s: slack, db: slack.DB, conf: slack.Conf}\n\treturn notifier, nil\n}", "func NewNotifier(configs []*pb.NotificationConfig, amURL string) *notifier {\n\tnotifier := &notifier{\n\t\tpendingNotifications: make(chan 
*notificationReq, *notificationBufferSize),\n\t\talertmanagerURL: amURL,\n\t}\n\tnotifier.SetNotificationConfigs(configs)\n\treturn notifier\n}", "func New(d *dut.DUT) *Reporter {\n\treturn &Reporter{d}\n}", "func NewNotifier(config *config.Config) Notifier {\n\t// webhook URL and template are required\n\tif len(config.WebHookURL) > 0 && len(config.WebHookTemplate) > 0 {\n\t\treturn &baseNotifier{config}\n\t}\n\t// otherwise return noop\n\treturn &noopNotifier{baseNotifier{config}}\n}", "func New(url string) *SlackNotify {\n\treturn &SlackNotify{\n\t\tURL: url,\n\t\tc: http.Client{\n\t\t\tTimeout: 10 * time.Second,\n\t\t},\n\t}\n}", "func New(cfg Config) (*Notifier, error) {\n\tparsedProjectID, err := strconv.ParseInt(cfg.ProjectID, 10, 64)\n\tif err != nil {\n\t\treturn nil, ex.New(err)\n\t}\n\t// create a new reporter\n\tclient := gobrake.NewNotifierWithOptions(&gobrake.NotifierOptions{\n\t\tProjectId: parsedProjectID,\n\t\tProjectKey: cfg.ProjectKey,\n\t\tEnvironment: cfg.Environment,\n\t})\n\n\t// filter airbrakes from `dev`, `ci`, and `test`.\n\tclient.AddFilter(func(notice *gobrake.Notice) *gobrake.Notice {\n\t\tif noticeEnv := notice.Context[\"environment\"]; noticeEnv == env.ServiceEnvDev ||\n\t\t\tnoticeEnv == env.ServiceEnvCI ||\n\t\t\tnoticeEnv == env.ServiceEnvTest {\n\t\t\treturn nil\n\t\t}\n\t\treturn notice\n\t})\n\n\treturn &Notifier{\n\t\tClient: client,\n\t}, nil\n}", "func NewNotifier() *Notifier {\n\tnotifier := &Notifier{\n\t\tnotifierMap: new(sync.Map),\n\t\treceiveCh: make(chan Message, 65536),\n\t}\n\treturn notifier\n}", "func New(name, summary, body, icon string, timeout time.Duration, urgency NotificationUrgency) *Notification {\n\treturn &Notification{name, summary, body, icon, timeout, urgency}\n}", "func NewNotifier(c *cobra.Command) *Notifier {\n\tn := &Notifier{}\n\n\tf := c.PersistentFlags()\n\n\tlevel, _ := f.GetString(\"notifications-level\")\n\tlogLevel, err := log.ParseLevel(level)\n\tif err != nil 
{\n\t\tlog.Fatalf(\"Notifications invalid log level: %s\", err.Error())\n\t}\n\n\tacceptedLogLevels := slackrus.LevelThreshold(logLevel)\n\t// slackrus does not allow log level TRACE, even though it's an accepted log level for logrus\n\tif len(acceptedLogLevels) == 0 {\n\t\tlog.Fatalf(\"Unsupported notification log level provided: %s\", level)\n\t}\n\n\t// Parse types and create notifiers.\n\ttypes, err := f.GetStringSlice(\"notifications\")\n\tif err != nil {\n\t\tlog.WithField(\"could not read notifications argument\", log.Fields{\"Error\": err}).Fatal()\n\t}\n\n\tn.types = n.getNotificationTypes(c, acceptedLogLevels, types)\n\n\treturn n\n}", "func NewNotifier(token string) *Notifier {\n\treturn &Notifier{\n\t\tToken: token,\n\t\tClient: nil,\n\t}\n}", "func New(c *config.AliyunSmsConfig, t *template.Template, l log.Logger) (*Notifier, error) {\n\tclient, err := dysmsapi.NewClientWithAccessKey(\"cn-hangzhou\", c.AccessKeyId, c.AccessSecret)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &Notifier{conf: c, tmpl: t, logger: l, client: client}, nil\n}", "func New(patterns []string) (*Notify, error) {\n\twatcher, err := fsnotify.NewWatcher()\n\tif err != nil {\n\t\tlogger.ErrorObject(err)\n\t\treturn nil, err\n\t}\n\n\twatchDirs := findDirs(patterns)\n\n\tfor _, t := range watchDirs {\n\t\terr = watcher.Add(t)\n\t\tif err != nil {\n\t\t\tlogger.Error(\"%s: %v\", t, err)\n\t\t} else {\n\t\t\tlogger.Info(\"gazing at: %s\", t)\n\t\t}\n\t}\n\n\tnotify := &Notify{\n\t\tEvents: make(chan Event),\n\t\twatcher: watcher,\n\t\tisClosed: false,\n\t\ttimes: make(map[string]int64),\n\t\tpendingPeriod: 100,\n\t\tregardRenameAsModPeriod: 1000,\n\t\tdetectCreate: false,\n\t}\n\n\tgo notify.wait()\n\n\treturn notify, nil\n}", "func NewNotifier(c config.Config, chat chat.Chat) (*Notifier, error) {\n\tconn, err := storage.NewMySQL(c)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tnotifier := &Notifier{Chat: chat, DB: conn, Config: c}\n\treturn notifier, nil\n}", "func 
NewNotifier() WakeSleepNotifier {\n\treturn new(notifier)\n}", "func NewNotifier(site database.Site, message string, subject string,\n\tsendEmail EmailSender, sendSms SmsSender) *Notifier {\n\tn := Notifier{Site: site, Message: message, Subject: subject,\n\t\tSendEmail: sendEmail, SendSms: sendSms}\n\treturn &n\n}", "func New(dependencies Dependencies) {\n\twriter = dependencies.Writer\n\treader = dependencies.Reader\n\thost = dependencies.Host\n\tnotifierService = dependencies.NotifierService\n}", "func New(cfg config.Queue, n notifier) *Queue {\n\tq := &Queue{\n\t\taddCh: make(chan struct{}, cfg.QueueSize),\n\t\tpopCh: make(chan struct{}, cfg.GoRoutinesSize),\n\t\taddMessage: make(chan entity.NotifierMessage, 1),\n\t\tpopMessage: make(chan entity.NotifierMessage, 1),\n\t\tnotifier: n,\n\t}\n\n\tgo q.pop()\n\tgo q.add()\n\n\treturn q\n}", "func New() *Prober {\n\treturn newForTest(time.Now, newRealTicker)\n}", "func newNotifier() (*notifier, error) {\n\tepfd, err := unix.EpollCreate1(0)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tw := &notifier{\n\t\tepFD: epfd,\n\t\tfdMap: make(map[int32]*fdInfo),\n\t}\n\n\tgo w.waitAndNotify() // S/R-SAFE: no waiter exists during save / load.\n\n\treturn w, nil\n}", "func NewNotifier(ec *EngineConfig) (*Notifier, error) {\n\toutgoing := make(chan Notification, 1)\n\tn := &Notifier{\n\t\tC: outgoing,\n\t\toutgoing: outgoing,\n\t\treload: make(chan bool, 1),\n\t\tshutdown: make(chan bool, 1),\n\t\tengineCfg: ec,\n\t\tsource: SourceServer,\n\t}\n\n\tnote, err := n.bootstrap()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tnote.Cluster.Vservers = rateLimitVS(note.Cluster.Vservers, nil)\n\n\tn.outgoing <- *note\n\tn.last = note\n\n\t// If the on disk configuration is different, update it.\n\tif note.Source != SourceDisk {\n\t\tdNote, _ := n.pullConfig(SourceDisk)\n\t\tif dNote == nil || !dNote.Cluster.Equal(note.Cluster) {\n\t\t\tif err := saveConfig(note.protobuf, n.engineCfg.ClusterFile, true); err != nil 
{\n\t\t\t\tlog.Warningf(\"Failed to save config to %s: %v\", n.engineCfg.ClusterFile, err)\n\t\t\t}\n\t\t}\n\t}\n\n\tgo n.run()\n\treturn n, nil\n}", "func New(notifier *bugsnag.Notifier) *NegroniBugsnag {\n\treturn &NegroniBugsnag{\n\t\tnotifier: notifier,\n\t}\n}", "func MustNew(cfg Config) *Notifier {\n\tnotifier, err := New(cfg)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn notifier\n}", "func New(reporter metrics.Reporter) *Wrapper {\n\treturn &Wrapper{\n\t\treporter: reporter,\n\t}\n}", "func New(pushURL, owner string) {\n\tSave(pushURL, config.Tart{\n\t\tName: pushURL,\n\t\tPushURL: pushURL,\n\t\tIsRunning: false,\n\t\tOwners: []string{owner},\n\t\tPID: -1,\n\t\tRestartDelaySecs: 30,\n\t\tRestartOnStop: false,\n\t\tLogStdout: true,\n\t})\n}", "func New() *DelayCaller {\n\tvar c DelayCaller\n\tc.p.init(runtime.NumCPU())\n\tc.queue = nil\n\tc.queueIn = make(chan call)\n\tgo c.runner()\n\treturn &c\n}", "func New(reporter services.UsageReporter, log logrus.FieldLogger, inner apievents.Emitter) (*UsageLogger, error) {\n\tif log == nil {\n\t\tlog = logrus.StandardLogger()\n\t}\n\n\treturn &UsageLogger{\n\t\tEntry: log.WithField(\n\t\t\ttrace.Component,\n\t\t\tteleport.Component(teleport.ComponentUsageReporting),\n\t\t),\n\t\treporter: reporter,\n\t\tinner: inner,\n\t}, nil\n}", "func New(label, message string) *Badge {\n\treturn &Badge{\n\t\tVersion: 1,\n\t\tLabel: label,\n\t\tMessage: message,\n\t}\n}", "func New() Email {\n\treturn Email{}\n}", "func NewPagerDuty(apiKey string) *PagerDuty {\n\treturn &PagerDuty{client: pagerduty.NewClient(apiKey)}\n}", "func New(dnsList, domainList []string, checkInterval time.Duration) *Checker {\n\tc := &Checker{}\n\tif len(dnsList) == 0 {\n\t\tc.DNSList = DefaultDNSList\n\t} else {\n\t\tc.DNSList = dnsList\n\t}\n\n\tif len(domainList) == 0 {\n\t\tc.DomainList = DefaultDomainList\n\t} else {\n\t\tc.DomainList = domainList\n\t}\n\n\tif checkInterval == 0 {\n\t\tc.CheckInterval = DefaultCheckInterval\n\t} else 
{\n\t\tc.CheckInterval = checkInterval\n\t}\n\n\tgo c.Monitor()\n\treturn c\n}", "func New(configfile string) *Poloniex {\r\n\treturn NewWithConfig(configfile)\r\n}", "func New(id uint, observerChannel chan common.ObserverMessage, resultChannel chan common.RoundResult) *Diner {\n\treturn &Diner{\n\t\tnil,\n\t\tmake(chan bool, 1),\n\t\tnil,\n\t\tobserverChannel,\n\t\tid,\n\t\tresultChannel}\n}", "func NewPinger(opts *Options) Pinger {\n\topts.setDefaults()\n\treturn &pinger{\n\t\tid: rand.Intn(maxID),\n\t\topts: opts,\n\t\treportChan: make(chan Ping), // TODO: use buffer?\n\t\terrChan: make(chan error, 1),\n\t\tstop: make(chan struct{}, 1),\n\t\tstats: &Stats{},\n\t\tclock: defaultClock{},\n\t}\n}", "func New(wsPath string, webhookURLs []string) *WebNotifier {\n\twebhook := NewHTTPNotifier(webhookURLs)\n\tws := NewWSNotifier(wsPath)\n\n\tn := WebNotifier{\n\t\tnotifiers: []command.Notifier{webhook, ws},\n\t\thandlers: ws.GetRESTHandlers(),\n\t}\n\n\treturn &n\n}", "func New() *Email {\r\n\treturn &Email{\r\n\t\tCreated: time.Now(),\r\n\t\tUpdated: time.Now(),\r\n\t}\r\n}", "func New(apiKey, apiSecret string) *Poloniex {\n\tclient := NewClient(apiKey, apiSecret)\n\treturn &Poloniex{client}\n}", "func New() *PubSub {\n\treturn &PubSub{\n\t\tMaxSubs: 20,\n\t\tregistry: make(map[string]*topic),\n\t}\n}", "func New(host string) *PubSub {\n\tps := PubSub{\n\t\thost: host,\n\t}\n\n\t// PRETEND THERE IS A SPECIFIC IMPLEMENTATION.\n\n\treturn &ps\n}", "func New(logger log.Logger) *Prober {\n\treturn &Prober{logger: logger}\n}", "func New(cfg *Config) *Tailer {\n\tif cfg.Log == nil {\n\t\tcfg.Log = &log.Logger{Out: ioutil.Discard}\n\t}\n\n\treturn &Tailer{\n\t\tcfg: cfg,\n\t\tstripeAuthClient: stripeauth.NewClient(cfg.Key, &stripeauth.Config{\n\t\t\tLog: cfg.Log,\n\t\t\tAPIBaseURL: cfg.APIBaseURL,\n\t\t}),\n\t\tinterruptCh: make(chan os.Signal, 1),\n\t}\n}", "func NewNotifiee(code uint32, messageCh chan Message) *Notifiee {\n\treturn &Notifiee{code: code, messageCh: 
messageCh}\n}", "func New(counter metrics.Counter, latency metrics.Histogram, logger log.Logger) Logger {\n\treturn Logger{\n\t\tcallUpdate: make(chan interface{}),\n\t\tcallError: make(chan error),\n\t\trequestCount: counter,\n\t\trequestLatency: latency,\n\t\tlogger: logger,\n\t}\n}", "func NewNotifier(v *viper.Viper) (Notifier, error) {\n\treturn NewSNSServer(v)\n}", "func New(params Params) (*presignerT, error) {\n\treturn newPresigner(params)\n}", "func New(b *beat.Beat, cfg *common.Config) (beat.Beater, error) {\n\tc := config.DefaultConfig\n\tif err := cfg.Unpack(&c); err != nil {\n\t\treturn nil, fmt.Errorf(\"Error reading config file: %v\", err)\n\t}\n\n\tbt := &Polutbeat{\n\t\tdone: make(chan struct{}),\n\t\tconfig: c,\n\t}\n\treturn bt, nil\n}", "func New() *Updater {\n\treturn &Updater{}\n}", "func New(notifier *gobrake.Notifier) *Handler {\n\th := Handler{notifier}\n\treturn &h\n}", "func NewNotification(title, message string) Notification {\n\tif title == \"\" {\n\t\ttitle = \"notification\"\n\t}\n\treturn Notification{\n\t\tTitle: title,\n\t\tMessage: message,\n\t}\n}", "func NewNotificator(st storage.Storage, settings *storage.Settings) (*Notificator, error) {\n\tns, err := NewNotificationStorage(st)\n\tconnectionCreator := &notificationConnectionCreatorImpl{\n\t\tstorageURI: settings.URI,\n\t\tminReconnectInterval: settings.Notification.MinReconnectInterval,\n\t\tmaxReconnectInterval: settings.Notification.MaxReconnectInterval,\n\t}\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &Notificator{\n\t\tqueueSize: settings.Notification.QueuesSize,\n\t\tconnectionMutex: &sync.Mutex{},\n\t\tconsumersMutex: &sync.Mutex{},\n\t\tconsumers: make(consumers),\n\t\tstorage: ns,\n\t\tconnectionCreator: connectionCreator,\n\t\tlastKnownRevision: invalidRevisionNumber,\n\t}, nil\n}", "func NewNotification(userID, title, subtitle, urlLink, body string) (notification Notification) {\n\tid := uuid.NewV4().String()\n\n\treturn Notification{\n\t\tID: 
id,\n\t\tUserID: userID,\n\t\tTitle: title,\n\t\tSubtitle: subtitle,\n\t\tURLLink: urlLink,\n\t\tBody: body,\n\t}\n}", "func New(numNodes int, outgoing chan packet.Message, timeout time.Duration, numRetries int) *Repeater {\n\tr := Repeater{\n\t\toutgoing: outgoing,\n\t\ttimeout: timeout,\n\t\tnumRetries: numRetries,\n\t\tlock: sync.Mutex{},\n\t\tunackedReqs: make(map[int]map[int]map[packet.Messagetype]bool),\n\t}\n\n\tfor i := 0; i < numNodes; i++ {\n\t\tr.unackedReqs[i] = make(map[int]map[packet.Messagetype]bool)\n\t}\n\n\treturn &r\n}", "func New(config *rpcclient.ConnConfig, chainParams *chaincfg.Params,\n\tspendHintCache chainntnfs.SpendHintCache,\n\tconfirmHintCache chainntnfs.ConfirmHintCache) (*DcrdNotifier, error) {\n\n\tnotifier := &DcrdNotifier{\n\t\tchainParams: chainParams,\n\n\t\tnotificationCancels: make(chan interface{}),\n\t\tnotificationRegistry: make(chan interface{}),\n\n\t\tblockEpochClients: make(map[uint64]*blockEpochRegistration),\n\n\t\tchainUpdates: queue.NewConcurrentQueue(10),\n\n\t\tspendHintCache: spendHintCache,\n\t\tconfirmHintCache: confirmHintCache,\n\n\t\tquit: make(chan struct{}),\n\t}\n\n\tntfnCallbacks := &rpcclient.NotificationHandlers{\n\t\tOnBlockConnected: notifier.onBlockConnected,\n\t\tOnBlockDisconnected: notifier.onBlockDisconnected,\n\t}\n\n\t// Disable connecting to dcrd within the rpcclient.New method. 
We defer\n\t// establishing the connection to our .Start() method.\n\tconfig.DisableConnectOnNew = true\n\tconfig.DisableAutoReconnect = false\n\tchainConn, err := rpcclient.New(config, ntfnCallbacks)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tnotifier.chainConn = chainConn\n\tnotifier.cca = &chainConnAdaptor{c: chainConn, ctx: context.TODO()}\n\n\treturn notifier, nil\n}", "func NewNotification(Type string, Body string) *Notification {\n\treturn &Notification{\"notification\", Type, Body}\n}", "func New() *PerfcPublisher {\n\treturn &PerfcPublisher{}\n}", "func New() (*T) {\n\n\tme := T{\n\t\tcount: 0,\n\t\tdatum: \"\",\n\t}\n\n\treturn &me\n}", "func newNotification(format NotificationFormat, payload []byte) (*Notification, error) {\n\tif !format.IsValid() {\n\t\treturn nil, fmt.Errorf(\"unknown format '%s'\", format)\n\t}\n\n\treturn &Notification{format, payload}, nil\n}", "func New() *Beeper { return &Beeper{} }", "func New(conf Config, client *http.Client) (*Deliverer, error) {\n\tvar c Config\n\tvar err error\n\tif c, err = conf.Validate(); err != nil {\n\t\treturn nil, err\n\t}\n\tif client == nil {\n\t\tclient = http.DefaultClient\n\t}\n\treturn &Deliverer{\n\t\tconf: c,\n\t\tc: client,\n\t}, nil\n}", "func setUpNotifier(t *testing.T, h *rpctest.Harness) *DcrdNotifier {\n\thintCache := initHintCache(t)\n\n\trpcConfig := h.RPCConfig()\n\tnotifier, err := New(&rpcConfig, netParams, hintCache, hintCache)\n\tif err != nil {\n\t\tt.Fatalf(\"unable to create notifier: %v\", err)\n\t}\n\tif err := notifier.Start(); err != nil {\n\t\tt.Fatalf(\"unable to start notifier: %v\", err)\n\t}\n\n\treturn notifier\n}", "func NewNotification(c NotificationConfig) Elem {\n\treturn driver.NewNotification(c)\n}", "func New(opts ...metrics.Option) *Reporter {\n\treturn &Reporter{\n\t\toptions: metrics.NewOptions(opts...),\n\t}\n}", "func NewMockNotifier(ctrl *gomock.Controller) *MockNotifier {\n\tmock := &MockNotifier{ctrl: ctrl}\n\tmock.recorder = 
&MockNotifierMockRecorder{mock}\n\treturn mock\n}", "func NewMockNotifier(ctrl *gomock.Controller) *MockNotifier {\n\tmock := &MockNotifier{ctrl: ctrl}\n\tmock.recorder = &MockNotifierMockRecorder{mock}\n\treturn mock\n}", "func NewMockNotifier(ctrl *gomock.Controller) *MockNotifier {\n\tmock := &MockNotifier{ctrl: ctrl}\n\tmock.recorder = &MockNotifierMockRecorder{mock}\n\treturn mock\n}", "func New(config *Config) (*Reporter, error) {\n\tvar (\n\t\treporter Reporter\n\t\terr error\n\t)\n\n\tif config == nil {\n\t\treturn nil, nil\n\t}\n\n\tif err := config.validate(); err != nil {\n\t\treturn nil, err\n\t}\n\treporter.config = config\n\n\treporter.D = debug.New(\"reporter/errors\")\n\tif config.Debug {\n\t\treporter.D.On()\n\t}\n\n\treporter.Debug(\"initializing Sentry client\")\n\n\tsentryOpts := sentry.ClientOptions{\n\t\tDsn: config.DSN,\n\t\tAttachStacktrace: true,\n\t}\n\n\tif config.Wait {\n\t\tsentryOpts.Transport = &sentry.HTTPSyncTransport{Timeout: sentryFlushTimeout}\n\t}\n\n\tif config.Debug {\n\t\tsentryOpts.Debug = true\n\t\tsentryOpts.DebugWriter = &sentryDebugWriter{d: reporter.D}\n\t}\n\n\treporter.sentry, err = sentry.NewClient(sentryOpts)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &reporter, nil\n}", "func New(\n\tspyInterval, publishInterval time.Duration,\n\tpublisher ReportPublisher,\n\tticksPerFullReport int,\n\tnoControls bool,\n) *Probe {\n\tresult := &Probe{\n\t\tspyInterval: spyInterval,\n\t\tpublishInterval: publishInterval,\n\t\tpublisher: publisher,\n\t\trateLimiter: rate.NewLimiter(rate.Every(publishInterval/100), 1),\n\t\tticksPerFullReport: ticksPerFullReport,\n\t\tnoControls: noControls,\n\t\tquit: make(chan struct{}),\n\t\tspiedReports: make(chan report.Report, spiedReportBufferSize),\n\t\tshortcutReports: make(chan report.Report, shortcutReportBufferSize),\n\t}\n\treturn result\n}", "func NewPinger(p *lt.Port, target uint8) *Pinger {\n\treturn &Pinger{\n\t\tp: p,\n\t\taddress: target,\n\t}\n}", "func New(cfg 
*worker.Config, taskid string, devices []int) *Worker {\n\treturn &Worker{\n\t\ttaskid: taskid,\n\t\tdevices: devices,\n\t\tcfg: cfg,\n\t}\n}", "func NewPinger(addr, network, protocol string, id int) (*Pinger, error) {\n\tr := rand.New(rand.NewSource(time.Now().UnixNano()))\n\tp := &Pinger{\n\t\tRecordRtts: true,\n\t\tSize: timeSliceLength,\n\t\tTracker: r.Int63n(math.MaxInt64),\n\n\t\taddr: addr,\n\t\tdone: make(chan bool),\n\t\tid: id,\n\t\tipaddr: nil,\n\t\tipv4: false,\n\t\tnetwork: network,\n\t\tprotocol: protocol,\n\t}\n\treturn p, p.Resolve()\n}", "func NewNotifierService(c *Config) *NotifierService {\n\tclient := client.NewHTTP(\n\t\tc.Endpoint,\n\t\t\"\",\n\t\t\"\",\n\t\t\"\",\n\t\t\"\",\n\t\ttrue,\n\t\tnil,\n\t)\n\tclient.Init()\n\n\treturn &NotifierService{\n\t\tinTransactionDoer: c.InTransactionDoer,\n\t\tclient: client,\n\t\tlog: pkglog.NewLogger(\"vnc-api-notifier\"),\n\t}\n}", "func New(w io.Writer, template string) (*Logger, error) {\n\tformatters, isTimeRequired, err := compileFormat(template)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\t// Create a dummy event to see how long the log line is with the provided\n\t// template.\n\tbuf := make([]byte, 0, 64)\n\tvar e event\n\tfor _, formatter := range formatters {\n\t\tformatter(&e, &buf)\n\t}\n\tmin := len(buf) + 64\n\tif min < 128 {\n\t\tmin = 128\n\t}\n\tparent := &base{\n\t\tc: min,\n\t\tformatters: formatters,\n\t\tisTimeRequired: isTimeRequired,\n\t\tw: w,\n\t}\n\treturn &Logger{parent: parent, level: Warning}, nil\n}", "func New(progressAction string, logger slog.Logger) *Logger {\n\treturn &Logger{\n\t\tlastLogTime: time.Now(),\n\t\tprogressAction: progressAction,\n\t\tsubsystemLogger: logger,\n\t}\n}", "func New(auth aws.Auth, region aws.Region, name string) *SQSNotify {\n\treturn &SQSNotify{\n\t\tauth: auth,\n\t\tregion: region,\n\t\tname: name,\n\t\tqueue: nil,\n\t\trunning: false,\n\t}\n}", "func NewReporter(webhook string, d time.Duration) *Reporter {\n\tc := make(chan string, 
1)\n\tdone := make(chan bool)\n\tticker := time.NewTicker(d)\n\tr := &Reporter{c: c, done: done, ticker: ticker}\n\tgo runUpdater(webhook, c, done, ticker, &r.E)\n\treturn r\n}", "func Notification(title, message string) GNotifier {\n\tconfig := &Config{title, message, 5000, \"\"}\n\tn := &notifier{Config: config}\n\treturn n\n}", "func New(fireAfter time.Duration, fireFunc func() ()) *Ticker {\n\treturn &Ticker{\n\t\tlastRestart: time.Now(),\n\t\tfireAfter: fireAfter,\n\t\tfireFunc: fireFunc,\n\t\tactive: false,\n\t}\n}", "func New(owner string, repo string, event string, id int, data *string) (*Labeler, error) {\n\tif data == nil {\n\t\treturn nil, errors.New(\"a JSON string of event data is required\")\n\t}\n\treturn NewWithOptions(\n\t\tWithOwner(owner),\n\t\tWithRepo(repo),\n\t\tWithEvent(event),\n\t\tWithID(id),\n\t\tWithData(*data),\n\t\tWithContext(context.Background()),\n\t\tWithConfigPath(\".github/labeler.yml\"),\n\t)\n}", "func New(l log.Logger, taskInterval, taskDelay time.Duration) Timer {\n\treturn &timer{\n\t\twg: sync.WaitGroup{},\n\t\tl: l.WithModule(\"timer\"),\n\t\ttaskInterval: taskInterval,\n\t\ttaskDelay: taskDelay,\n\t}\n}", "func New(c Config) (*Prober, error) {\n\tpr := &Prober{\n\t\tcfg: c,\n\t\tclock: realClock{},\n\t\tmtu: mtuMax,\n\t\ttransitProbes: newTransitProbes(),\n\t\tmeasurements: measurement.NewDB(),\n\t\tstop: make(chan struct{}),\n\t\tpayload: make(gopacket.Payload, c.PayloadSizeBytes),\n\t}\n\n\treturn pr, nil\n}", "func New() *Mediator {\n\tconfig := cfg.New()\n\taddress := fmt.Sprintf(\"%s:%s\", config.RPCHost, config.RPCPort)\n\tpool := pools.NewResourcePool(func() (pools.Resource, error) {\n\t\tconn, err := grpc.Dial(address, grpc.WithInsecure())\n\t\tclient := pb.NewDailyBonusClient(conn)\n\t\treturn &ResourceConn{\n\t\t\tconn,\n\t\t\tclient,\n\t\t}, err\n\t}, config.RPCConnectionPool.InitialCapacity, config.RPCConnectionPool.MaxCapacity, config.RPCConnectionPool.IdleTimeout)\n\treturn &Mediator{\n\t\tclientPool: 
pool,\n\t\tconfig: &config,\n\t\tpoolMutex: &sync.Mutex{},\n\t}\n}", "func New(w io.Writer) *PLog {\n\tp := &PLog{\n\t\twriter: w,\n\t}\n\n\tdp := &defaultPrinter{now: time.Now}\n\tdp.setOutput(w)\n\tp.printer = dp\n\n\treturn p\n}", "func New(\n\tprotocolID string,\n\tbroadcastChannel net.BroadcastChannel,\n\tmembershipValidator *group.MembershipValidator,\n) *Announcer {\n\tbroadcastChannel.SetUnmarshaler(func() net.TaggedUnmarshaler {\n\t\treturn &announcementMessage{}\n\t})\n\n\treturn &Announcer{\n\t\tprotocolID: protocolID,\n\t\tbroadcastChannel: broadcastChannel,\n\t\tmembershipValidator: membershipValidator,\n\t}\n}", "func New() broker.Broker {\n\treturn &natsBroker{\n\t\tsubscriptionMap: make(map[string]*natsSubscriber),\n\t}\n}", "func NewTracker(timeout time.Duration) *Tracker {\n\treturn &Tracker{\n\t\tlast: time.Now(),\n\t\ttimeout: timeout,\n\t}\n}", "func NewPinger(ds Datastorer) Pinger {\n\treturn Pinger{ds}\n}", "func New(timeout time.Duration, timeoutFunc func()) *Heartbeat {\n\thb := &Heartbeat{\n\t\ttimeout: int64(timeout),\n\t\ttimer: time.AfterFunc(timeout, timeoutFunc),\n\t}\n\treturn hb\n}", "func New() Publisher {\n\treturn &publisher{\n\t\ttopics: make(map[string]*topic),\n\t}\n}", "func New() (*PolicyChecker, error) {\n\treturn NewWithConfig(Config{})\n}", "func NewDutyManager() *DutyManager {\n\treturn &DutyManager{\n\t\tFailures: make(map[string]error),\n\t}\n}", "func New(filename string) *Logger {\n\tl := lumberjack.Logger{\n\t\tFilename: filename,\n\t\tMaxSize: 500,\n\t\tMaxBackups: 3,\n\t\tMaxAge: 30,\n\t\tCompress: true,\n\t}\n\n\treturn &Logger{\n\t\tLogger: l,\n\t}\n}", "func NewInformer(workqueue workqueue.RateLimitingInterface) (Informer, error) {\n\ttOpts := []http.Option{cloudevents.WithBinaryEncoding()}\n\n\t// Make an http transport for the CloudEvents client.\n\tt, err := cloudevents.NewHTTPTransport(tOpts...)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"Error creating HTTP transport: %v\", err)\n\t}\n\n\t// Use the 
transport to make a new CloudEvents client.\n\tc, err := cloudevents.NewClient(t,\n\t\tcloudevents.WithUUIDs(),\n\t\tcloudevents.WithTimeNow(),\n\t)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"Error creating CloudEvent client: %v\", err)\n\t}\n\n\treturn &informer{\n\t\tworkqueue: workqueue,\n\t\tclient: c,\n\t}, nil\n}", "func New(provider Provider) *Module {\n\tm := &Module{\n\t\tprovider: provider,\n\t\tscheduler: timing.NewScheduler(),\n\t}\n\n\tm.notifyFn, m.notifyCh = notifier.New()\n\tm.outputFunc.Set(func(info Info) bar.Output {\n\t\tif info.Updates == 1 {\n\t\t\treturn outputs.Text(\"1 update\")\n\t\t}\n\t\treturn outputs.Textf(\"%d updates\", info.Updates)\n\t})\n\n\tm.Every(time.Hour)\n\n\treturn m\n}", "func New(t testing.TB) lg.Log {\n\treturn NewWith(t, FactoryFn)\n}", "func NewPusher(g prometheus.Gatherer) *Pusher {\n\treturn &Pusher{\n\t\tURL: \"https://telemetry.influxdata.com/metrics/job/influxdb\",\n\t\tGather: &pr.Filter{\n\t\t\tGatherer: g,\n\t\t\tMatcher: telemetryMatcher,\n\t\t},\n\t\tClient: &http.Client{\n\t\t\tTransport: http.DefaultTransport,\n\t\t\tTimeout: 10 * time.Second,\n\t\t},\n\t\tPushFormat: expfmt.FmtText,\n\t}\n}" ]
[ "0.7220423", "0.7142861", "0.6967976", "0.68429434", "0.6688869", "0.6591405", "0.65628475", "0.65613985", "0.6546102", "0.6531209", "0.6517486", "0.6479263", "0.6462606", "0.64456606", "0.64222896", "0.6400352", "0.63652164", "0.6297083", "0.62604415", "0.6237658", "0.6207808", "0.6120669", "0.6021293", "0.60014665", "0.5964776", "0.5932267", "0.5877909", "0.585091", "0.5838484", "0.58356595", "0.5832543", "0.57886803", "0.5757485", "0.5756792", "0.5742825", "0.5726592", "0.5718845", "0.57083595", "0.56761247", "0.5647791", "0.5644625", "0.562607", "0.56080735", "0.55862504", "0.55794656", "0.5563436", "0.556205", "0.55570954", "0.55479115", "0.55475366", "0.55418116", "0.55318344", "0.5523775", "0.5513814", "0.551201", "0.5511859", "0.5509789", "0.5505789", "0.5498581", "0.54880685", "0.5484692", "0.5468045", "0.54561573", "0.54505193", "0.5449757", "0.5446721", "0.5444769", "0.5442755", "0.5442755", "0.5442755", "0.54379106", "0.5431306", "0.54274577", "0.5423401", "0.54212636", "0.54204047", "0.538821", "0.53812236", "0.5379022", "0.53789043", "0.53750265", "0.53599304", "0.5355527", "0.53552276", "0.53541315", "0.53483486", "0.53451526", "0.5344224", "0.5341943", "0.53323084", "0.5331325", "0.5314864", "0.53116214", "0.5311331", "0.53105783", "0.5308818", "0.53070015", "0.5303048", "0.53025985", "0.52994835" ]
0.77650785
0
NewEndpoint creates a new endpoint. To keep things simple, the endpoint listens on a fixed port number.
NewEndpoint создает новый эндпоинт. Чтобы упростить вещи, эндпоинт слушает на фиксированном порте.
func NewEndpoint() *Endpoint { // Create a new Endpoint with an empty list of handler funcs. return &Endpoint{ handler: map[string]HandleFunc{}, } }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (*protocol) NewEndpoint(stack *stack.Stack, netProto tcpip.NetworkProtocolNumber,\n\twaiterQueue *waiter.Queue) (tcpip.Endpoint, *tcpip.Error) {\n\treturn newEndpoint(stack, netProto, waiterQueue), nil\n}", "func NewEndpoint(service health.Service) *Endpoint {\n\treturn &Endpoint{\n\t\tservice: service,\n\t}\n}", "func NewEndpoint(resource, httpMethod, route string) *Endpoint {\n\treturn &Endpoint{\n\t\tResource: resource,\n\t\tHTTPMethod: httpMethod,\n\t\tRoute: route,\n\t\tBodyParameters: []*Parameter{},\n\t\tRequests: []*Request{},\n\t}\n}", "func newEndpoint() *testSocket {\n\tp := fmt.Sprintf(\"@%s.sock\", uuid.NewUUID())\n\n\treturn &testSocket{\n\t\tpath: p,\n\t\tendpoint: fmt.Sprintf(\"unix:///%s\", p),\n\t}\n}", "func NewEndpoint(config *config.Configs, result *config.ReturnResult) Endpoint {\n\treturn &endpoint{\n\t\tconfig: config,\n\t\tresult: result,\n\t\tservice: NewService(config, result),\n\t}\n}", "func NewEndpoint(ctx *pulumi.Context,\n\tname string, args *EndpointArgs, opts ...pulumi.ResourceOption) (*Endpoint, error) {\n\tif args == nil || args.EndpointId == nil {\n\t\treturn nil, errors.New(\"missing required argument 'EndpointId'\")\n\t}\n\tif args == nil || args.Service == nil {\n\t\treturn nil, errors.New(\"missing required argument 'Service'\")\n\t}\n\tif args == nil {\n\t\targs = &EndpointArgs{}\n\t}\n\tvar resource Endpoint\n\terr := ctx.RegisterResource(\"gcp:servicedirectory/endpoint:Endpoint\", name, args, &resource, opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &resource, nil\n}", "func NewEndpoint(conn *websocket.Conn, registry *Registry) *Endpoint {\n\tif registry == nil {\n\t\tregistry = dummyRegistry\n\t}\n\te := &Endpoint{}\n\te.conn = conn\n\te.server.registry = registry\n\te.client.pending = make(map[uint64]*rpc.Call)\n\treturn e\n}", "func NewEndpoint(network, address string, options ...Option) Endpoint {\n\treturn &endpoint{\n\t\tnetwork: network,\n\t\taddress: address,\n\t\toptions: 
options,\n\t}\n}", "func NewEndpoint(ws *websocket.Conn) *Endpoint {\n\tep := &Endpoint{WebSocket: ws, MessageChannel: make(chan EndpointMessage)}\n\tep.State = \"INITIAL\"\n\treturn ep\n}", "func NewEndpoint(dnsName, recordType string, targets ...string) *Endpoint {\n\treturn NewEndpointWithTTL(dnsName, recordType, TTL(0), targets...)\n}", "func New(bc component.Core) *Endpoint {\n\treturn &Endpoint{\n\t\tCore: bc,\n\t}\n}", "func newEndpoints() *Endpoints {\n\treturn &Endpoints{\n\t\tBackends: map[string]service.PortConfiguration{},\n\t}\n}", "func newServerEndpoint(impl *implementation, role role, args []string) (*endpoint, error) {\n\tusock, err := net.ListenUDP(\"udp\", nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t_, port, err := net.SplitHostPort(usock.LocalAddr().String())\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\targs = append(args, []string{\"-addr\", fmt.Sprintf(\"localhost:%s\", port)}...)\n\n\tcmd := exec.Command(impl.Path, append(impl.Args, args...)...)\n\n\tep := &endpoint{\n\t\t\"server\",\n\t\trole,\n\t\tusock,\n\t\tnil,\n\t\tcmd,\n\t\tnil,\n\t\tnil,\n\t\tnil,\n\t}\n\n\terr = ep.getOutputs()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\terr = cmd.Start()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tsport, err := ep.out.ReadString('\\n')\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdebug(\"Read server port=%v\", sport)\n\tsport = strings.TrimSpace(sport)\n\tep.addr, err = net.ResolveUDPAddr(\"udp\", fmt.Sprintf(\"localhost:%s\", sport))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn ep, nil\n}", "func NewEndpoint(uuid, key string) *Endpoint {\n\treturn &Endpoint{Uuid: uuid, Key: key}\n}", "func NewEndpoint(name, url string, config EndpointConfig) *Endpoint {\n\tvar endpoint Endpoint\n\tendpoint.name = name\n\tendpoint.url = url\n\tendpoint.EndpointConfig = config\n\tendpoint.defaults()\n\tendpoint.metrics = newSafeMetrics(name)\n\n\t// Configures the inmemory queue, retry, http pipeline.\n\tendpoint.Sink = 
newHTTPSink(\n\t\tendpoint.url, endpoint.Timeout, endpoint.Headers,\n\t\tendpoint.Transport, endpoint.metrics.httpStatusListener())\n\tendpoint.Sink = events.NewRetryingSink(endpoint.Sink, events.NewBreaker(endpoint.Threshold, endpoint.Backoff))\n\tendpoint.Sink = newEventQueue(endpoint.Sink, endpoint.metrics.eventQueueListener())\n\tmediaTypes := append(config.Ignore.MediaTypes, config.IgnoredMediaTypes...)\n\tendpoint.Sink = newIgnoredSink(endpoint.Sink, mediaTypes, config.Ignore.Actions)\n\n\tregister(&endpoint)\n\treturn &endpoint\n}", "func newRESTEndpointService(hostPortStr string) endpointService {\n\treturn endpointService(\n\t\tnewRESTDiscoveryService(fmt.Sprintf(edsRestEndpointTemplate, hostPortStr)),\n\t)\n}", "func NewVirtualEndpoint()(*VirtualEndpoint) {\n m := &VirtualEndpoint{\n Entity: *NewEntity(),\n }\n return m\n}", "func New(lower tcpip.LinkEndpointID) tcpip.LinkEndpointID {\n\treturn stack.RegisterLinkEndpoint(&endpoint{\n\t\tlower: stack.FindLinkEndpoint(lower),\n\t})\n}", "func NewAddEndpoint(s Service) goa.Endpoint {\n\treturn func(ctx context.Context, req interface{}) (interface{}, error) {\n\t\tp := req.(*AddPayload)\n\t\treturn s.Add(ctx, p)\n\t}\n}", "func NewLocalEndpoint() (*Endpoint, error) {\n\taddrs, err := net.InterfaceAddrs()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar ip []string\n\tfor _, addr := range addrs {\n\t\tipnet, ok := addr.(*net.IPNet)\n\t\tif !ok {\n\t\t\tcontinue\n\t\t}\n\t\tif ipnet.IP.IsLoopback() {\n\t\t\tcontinue\n\t\t}\n\t\tif ipnet.IP.To4() != nil {\n\t\t\tip = append(ip, ipnet.IP.String())\n\t\t}\n\t}\n\n\treturn &Endpoint{\n\t\tIP: ip,\n\t\tPort: make(map[string]int),\n\t}, nil\n}", "func NewAddEndpoint(s Service) goa.Endpoint {\n\treturn func(ctx context.Context, req any) (any, error) {\n\t\tep := req.(*AddEndpointInput)\n\t\treturn nil, s.Add(ctx, ep.Payload, ep.Stream)\n\t}\n}", "func newClientEndpoint(impl *implementation, role role, args []string) (*endpoint, error) {\n\tusock, err := 
net.ListenUDP(\"udp\", nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t_, port, err := net.SplitHostPort(usock.LocalAddr().String())\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\targs = append(args, []string{\"-addr\", fmt.Sprintf(\"localhost:%s\", port)}...)\n\n\tcmd := exec.Command(impl.Path, append(impl.Args, args...)...)\n\tep := &endpoint{\n\t\t\"client\",\n\t\trole,\n\t\tusock,\n\t\tnil,\n\t\tcmd,\n\t\tnil,\n\t\tnil,\n\t\tnil,\n\t}\n\n\terr = ep.getOutputs()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\terr = cmd.Start()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn ep, nil\n}", "func (t *Application_Application_Application) NewEndpoint(Name string) (*Application_Application_Application_Endpoint, error){\n\n\t// Initialise the list within the receiver struct if it has not already been\n\t// created.\n\tif t.Endpoint == nil {\n\t\tt.Endpoint = make(map[string]*Application_Application_Application_Endpoint)\n\t}\n\n\tkey := Name\n\n\t// Ensure that this key has not already been used in the\n\t// list. 
Keyed YANG lists do not allow duplicate keys to\n\t// be created.\n\tif _, ok := t.Endpoint[key]; ok {\n\t\treturn nil, fmt.Errorf(\"duplicate key %v for list Endpoint\", key)\n\t}\n\n\tt.Endpoint[key] = &Application_Application_Application_Endpoint{\n\t\tName: &Name,\n\t}\n\n\treturn t.Endpoint[key], nil\n}", "func NewEndpoints() Endpoints {\n\treturn Endpoints{\n\t\tendpoints: make([]*Endpoint, 0),\n\t\tmapUUID: make(map[string]int),\n\t}\n}", "func NewEndPoint(uid uint32, host string) *pb.EndPoint {\n\treturn &pb.EndPoint{\n\t\tUid: uid,\n\t\tHost: host,\n\t\tPortMap: make(map[string]int32),\n\t}\n}", "func makeEndpoint(hostport, serviceName string) *zipkincore.Endpoint {\n\thost, port, err := net.SplitHostPort(hostport)\n\tif err != nil {\n\t\treturn nil\n\t}\n\n\tportInt, err := strconv.ParseInt(port, 10, 16)\n\tif err != nil {\n\t\treturn nil\n\t}\n\n\taddrs, err := net.LookupIP(host)\n\tif err != nil {\n\t\treturn nil\n\t}\n\n\tvar addr4, addr16 net.IP\n\tfor i := range addrs {\n\t\tif addr := addrs[i].To4(); addr == nil {\n\t\t\tif addr16 == nil {\n\t\t\t\taddr16 = addrs[i].To16() // IPv6 - 16 bytes\n\t\t\t}\n\t\t} else {\n\t\t\tif addr4 == nil {\n\t\t\t\taddr4 = addr // IPv4 - 4 bytes\n\t\t\t}\n\t\t}\n\t\tif addr16 != nil && addr4 != nil {\n\t\t\tbreak\n\t\t}\n\t}\n\tif addr4 == nil {\n\t\tif addr16 == nil {\n\t\t\treturn nil\n\t\t}\n\t\t// we have an IPv6 but no IPv4, code IPv4 as 0 (none found)\n\t\taddr4 = []byte(\"\\x00\\x00\\x00\\x00\")\n\t}\n\n\tendpoint := zipkincore.NewEndpoint()\n\tendpoint.Ipv4 = (int32)(binary.BigEndian.Uint32(addr4))\n\tendpoint.Ipv6 = []byte(addr16)\n\tendpoint.Port = int16(portInt)\n\tendpoint.ServiceName = serviceName\n\n\treturn endpoint\n}", "func New(e *calc.Endpoints, uh goagrpc.UnaryHandler) *Server {\n\treturn &Server{\n\t\tAddH: NewAddHandler(e.Add, uh),\n\t}\n}", "func New(nftOutPath string) Endpoint {\n\treturn config{\n\t\tnftOutPath: nftOutPath,\n\t}\n}", "func NewEndpointFactory(cluster string, nvbs int) 
c.RouterEndpointFactory {\n\n\treturn func(topic, endpointType, addr string, config c.Config) (c.RouterEndpoint, error) {\n\t\tswitch endpointType {\n\t\tcase \"dataport\":\n\t\t\treturn dataport.NewRouterEndpoint(cluster, topic, addr, nvbs, config)\n\t\tdefault:\n\t\t\tlog.Fatal(\"Unknown endpoint type\")\n\t\t}\n\t\treturn nil, nil\n\t}\n}", "func (proxier *Proxier) newEndpointInfo(baseInfo *proxy.BaseEndpointInfo) proxy.Endpoint {\n\n\tportNumber, err := baseInfo.Port()\n\n\tif err != nil {\n\t\tportNumber = 0\n\t}\n\n\tinfo := &endpointsInfo{\n\t\tip: baseInfo.IP(),\n\t\tport: uint16(portNumber),\n\t\tisLocal: baseInfo.GetIsLocal(),\n\t\tmacAddress: conjureMac(\"02-11\", netutils.ParseIPSloppy(baseInfo.IP())),\n\t\trefCount: new(uint16),\n\t\thnsID: \"\",\n\t\thns: proxier.hns,\n\n\t\tready: baseInfo.Ready,\n\t\tserving: baseInfo.Serving,\n\t\tterminating: baseInfo.Terminating,\n\t}\n\n\treturn info\n}", "func New(endpoint api.Endpoint, authenticator Authenticator) api.Endpoint {\n\tif authenticator == nil {\n\t\tauthenticator = &noOpAuthenticator{}\n\t}\n\n\treturn &proxyEndpoint{\n\t\tendpoint: endpoint,\n\t\tauthenticator: authenticator,\n\t\tlogger: logging.GetLogger(\"ias/proxy\"),\n\t}\n}", "func newAgentEndpoint(id string, conn *websocket.Conn, user string) *agentEndpoint {\n\treturn &agentEndpoint{id, conn, &sync.RWMutex{}, user, false}\n}", "func (c *Client) New() goa.Endpoint {\n\tvar (\n\t\tdecodeResponse = DecodeNewResponse(c.decoder, c.RestoreResponseBody)\n\t)\n\treturn func(ctx context.Context, v interface{}) (interface{}, error) {\n\t\treq, err := c.BuildNewRequest(ctx, v)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tresp, err := c.NewDoer.Do(req)\n\t\tif err != nil {\n\t\t\treturn nil, goahttp.ErrRequestError(\"spin-broker\", \"new\", err)\n\t\t}\n\t\treturn decodeResponse(resp)\n\t}\n}", "func CreateEndpoint(ctx iris.Context) {\n\t// Add logic to check if given ports exits\n\turi := ctx.Request().RequestURI\n\tfabricID := 
ctx.Params().Get(\"id\")\n\tfabricData, ok := capdata.FabricDataStore.Data[fabricID]\n\tif !ok {\n\t\terrMsg := fmt.Sprintf(\"Fabric data for uri %s not found\", uri)\n\t\tlog.Error(errMsg)\n\t\tresp := updateErrorResponse(response.ResourceNotFound, errMsg, []interface{}{\"Fabric\", fabricID})\n\t\tctx.StatusCode(http.StatusNotFound)\n\t\tctx.JSON(resp)\n\t\treturn\n\t}\n\n\tvar endpoint model.Endpoint\n\terr := ctx.ReadJSON(&endpoint)\n\tif err != nil {\n\t\terrorMessage := \"error while trying to get JSON body from the request: \" + err.Error()\n\t\tlog.Error(errorMessage)\n\t\tresp := updateErrorResponse(response.MalformedJSON, errorMessage, nil)\n\t\tctx.StatusCode(http.StatusBadRequest)\n\t\tctx.JSON(resp)\n\t\treturn\n\t}\n\tif len(endpoint.Redundancy) < 1 {\n\t\terrMsg := fmt.Sprintf(\"Endpoint cannot be created, Redudancy in the request is missing: \" + err.Error())\n\t\tresp := updateErrorResponse(response.PropertyMissing, errMsg, []interface{}{\"Redundancy\"})\n\t\tctx.StatusCode(http.StatusBadRequest)\n\t\tctx.JSON(resp)\n\t\treturn\n\t}\n\tif len(endpoint.Redundancy[0].RedundancySet) == 0 {\n\t\terrMsg := fmt.Sprintf(\"Endpoint cannot be created, RedudancySet in the request is missing: \" + err.Error())\n\t\tresp := updateErrorResponse(response.PropertyMissing, errMsg, []interface{}{\"RedudancySet\"})\n\t\tctx.StatusCode(http.StatusBadRequest)\n\t\tctx.JSON(resp)\n\t\treturn\n\t}\n\t// get all existing endpoints under fabric check for the name\n\tfor _, endpointData := range capdata.EndpointDataStore {\n\t\tif endpoint.Name == endpointData.Endpoint.Name {\n\t\t\terrMsg := \"Endpoint name is already assigned to other endpoint:\" + endpointData.Endpoint.Name\n\t\t\tresp := updateErrorResponse(response.ResourceAlreadyExists, errMsg, []interface{}{\"Endpoint\", endpointData.Endpoint.Name, endpoint.Name})\n\t\t\tctx.StatusCode(http.StatusConflict)\n\t\t\tctx.JSON(resp)\n\t\t\treturn\n\t\t}\n\t}\n\tvar switchURI = \"\"\n\tvar portPattern = \"\"\n\tportList := 
make(map[string]bool)\n\t// check if given ports are present in plugin database\n\tfor i := 0; i < len(endpoint.Redundancy[0].RedundancySet); i++ {\n\t\tportURI := endpoint.Redundancy[0].RedundancySet[i].Oid\n\t\tif _, ok := portList[endpoint.Redundancy[0].RedundancySet[i].Oid]; ok {\n\t\t\terrMsg := \"Duplicate port passed in the request\"\n\t\t\tresp := updateErrorResponse(response.PropertyValueConflict, errMsg, []interface{}{endpoint.Redundancy[0].RedundancySet[i].Oid, endpoint.Redundancy[0].RedundancySet[i].Oid})\n\t\t\tctx.StatusCode(http.StatusBadRequest)\n\t\t\tctx.JSON(resp)\n\t\t\treturn\n\n\t\t}\n\t\tportList[endpoint.Redundancy[0].RedundancySet[i].Oid] = true\n\n\t\t_, statusCode, resp := getPortData(portURI)\n\t\tif statusCode != http.StatusOK {\n\t\t\tctx.StatusCode(statusCode)\n\t\t\tctx.JSON(resp)\n\t\t\treturn\n\t\t}\n\t\tstatusCode, resp = checkEndpointPortMapping(endpoint.Redundancy[0].RedundancySet[i].Oid)\n\t\tif statusCode != http.StatusOK {\n\t\t\tctx.StatusCode(statusCode)\n\t\t\tctx.JSON(resp)\n\t\t\treturn\n\t\t}\n\t\tportURIData := strings.Split(portURI, \"/\")\n\t\tswitchID := portURIData[6]\n\t\tswitchIDData := strings.Split(switchID, \":\")\n\t\tswitchURI = switchURI + \"-\" + switchIDData[1]\n\t\tportIDData := strings.Split(portURIData[8], \":\")\n\t\ttmpPortPattern := strings.Replace(portIDData[1], \"eth\", \"\", -1)\n\t\ttmpPortPattern = strings.Replace(tmpPortPattern, \"-\", \"-ports-\", -1)\n\t\tportPattern = tmpPortPattern\n\t}\n\n\tportPolicyGroupList, err := caputilities.GetPortPolicyGroup(fabricData.PodID, switchURI)\n\tif err != nil || len(portPolicyGroupList) == 0 {\n\t\terrMsg := \"Port policy group not found for given ports\"\n\t\tlog.Error(errMsg)\n\t\tresp := updateErrorResponse(response.ResourceNotFound, errMsg, []interface{}{\"protpaths\" + switchURI, \"PolicyGroup\"})\n\t\tctx.StatusCode(http.StatusNotFound)\n\t\tctx.JSON(resp)\n\t\treturn\n\n\t}\n\tpolicyGroupDN := \"\"\n\tfor i := 0; i < len(portPolicyGroupList); i++ 
{\n\t\tif strings.Contains(portPolicyGroupList[i].BaseAttributes.DistinguishedName, portPattern) {\n\t\t\tpolicyGroupDN = portPolicyGroupList[i].BaseAttributes.DistinguishedName\n\t\t}\n\t}\n\tif policyGroupDN == \"\" {\n\t\terrMsg := \"Port policy group not found for given ports\"\n\t\tlog.Error(errMsg)\n\t\tresp := updateErrorResponse(response.ResourceNotFound, errMsg, []interface{}{portPattern, \"PolicyGroup\"})\n\t\tctx.StatusCode(http.StatusNotFound)\n\t\tctx.JSON(resp)\n\t\treturn\n\t}\n\tlog.Info(\"Dn of Policy group:\" + policyGroupDN)\n\tsaveEndpointData(uri, fabricID, policyGroupDN, &endpoint)\n\tcommon.SetResponseHeader(ctx, map[string]string{\n\t\t\"Location\": endpoint.ODataID,\n\t})\n\tctx.StatusCode(http.StatusCreated)\n\tctx.JSON(endpoint)\n}", "func NewEndpoint(githubReporter Reporter, intercomReporter Reporter, storage Uploader, rateLimiter *infra.RateLimiter) *Endpoint {\n\treturn &Endpoint{githubReporter: githubReporter, storage: storage, rateLimiter: rateLimiter, intercomReporter: intercomReporter}\n}", "func New(e *goastarter.Endpoints, uh goagrpc.UnaryHandler) *Server {\n\treturn &Server{\n\t\tAddH: NewAddHandler(e.Add, uh),\n\t}\n}", "func NewEndpointCreated() filters.Spec {\n\tvar ec endpointCreated\n\treturn ec\n}", "func NewListenEndpoint(s Service) goa.Endpoint {\n\treturn func(ctx context.Context, req interface{}) (interface{}, error) {\n\t\tep := req.(*ListenEndpointInput)\n\t\treturn nil, s.Listen(ctx, ep.Stream)\n\t}\n}", "func MakeAddNodeEndpoint(s registry.Service) endpoint.Endpoint {\n\treturn func(ctx context.Context, request interface{}) (interface{}, error) {\n\t\treq := request.(AddNodeRequest)\n\t\terr := s.AddNode(ctx, req.Token, req.Node)\n\t\treturn AddNodeResponse{Err: err}, nil\n\t}\n}", "func NewEndpointNode(options ...Option) *EndpointNode {\n\tn := &EndpointNode{\n\t\tBasicNode: &BasicNode{},\n\t}\n\tapply(n, options...)\n\treturn n\n}", "func New(config Config) (*Endpoint, error) {\n\tvar err error\n\n\tvar 
searcherEndpoint *searcher.Endpoint\n\t{\n\t\tsearcherConfig := searcher.Config{\n\t\t\tLogger: config.Logger,\n\t\t\tMiddleware: config.Middleware,\n\t\t\tService: config.Service,\n\t\t}\n\t\tsearcherEndpoint, err = searcher.New(searcherConfig)\n\t\tif err != nil {\n\t\t\treturn nil, microerror.Mask(err)\n\t\t}\n\t}\n\n\tnewEndpoint := &Endpoint{\n\t\tSearcher: searcherEndpoint,\n\t}\n\n\treturn newEndpoint, nil\n}", "func NewEndpointResource(e Endpointer) EndpointResource {\n\treturn EndpointResource{\n\t\tResource: \"endpoint\",\n\t\tName: e.GetName(),\n\t\tPath: e.GetPath(),\n\t\tMethodsList: GetMethodsList(e),\n\t\tMethods: GetMethods(e),\n\t\tMediaTypesList: GetContentTypesList(hAPI, e),\n\t\tMediaTypes: GetContentTypes(hAPI, e),\n\t\tDesc: e.GetDesc(),\n\t\tParams: createEndpointResourceParams(e),\n\t}\n}", "func NewHelloEndpoint(s Service) goa.Endpoint {\n\treturn func(ctx context.Context, req interface{}) (interface{}, error) {\n\t\treturn s.Hello(ctx)\n\t}\n}", "func NewEndpoints() *Endpoints {\n\treturn &Endpoints{}\n}", "func NewEndpoints(s Service) *Endpoints {\n\treturn &Endpoints{\n\t\tHello: NewHelloEndpoint(s),\n\t}\n}", "func (d *Driver) CreateEndpoint(r *sdk.CreateEndpointRequest) (*sdk.CreateEndpointResponse, error) {\n\tendID := r.EndpointID\n\tnetID := r.NetworkID\n\teInfo := r.Interface\n\tlog.Debugf(\"CreateEndpoint called :%v\", r)\n\t// Get the network handler and make sure it exists\n\td.Lock()\n\tnetwork, ok := d.networks[r.NetworkID]\n\td.Unlock()\n\n\tif !ok {\n\t\treturn nil, types.NotFoundErrorf(\"network %s does not exist\", netID)\n\t}\n\n\t// Try to convert the options to endpoint configuration\n\tepConfig, err := parseEndpointOptions(r.Options)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Create and add the endpoint\n\tnetwork.Lock()\n\tendpoint := &bridgeEndpoint{id: endID, nid: netID, config: epConfig}\n\tnetwork.endpoints[endID] = endpoint\n\tnetwork.Unlock()\n\n\t// On failure make sure to remove the endpoint\n\tdefer 
func() {\n\t\tif err != nil {\n\t\t\tnetwork.Lock()\n\t\t\tdelete(network.endpoints, endID)\n\t\t\tnetwork.Unlock()\n\t\t}\n\t}()\n\n\t// Generate a name for what will be the host side pipe interface\n\thostIfName, err := netutils.GenerateIfaceName(d.nlh, vethPrefix, vethLen)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Generate a name for what will be the sandbox side pipe interface\n\tcontainerIfName := network.config.ContainerIfName\n\n\t// Generate and add the interface pipe host <-> sandbox\n\tveth := &netlink.Veth{\n\t\tLinkAttrs: netlink.LinkAttrs{Name: hostIfName, TxQLen: 0},\n\t\tPeerName: containerIfName}\n\tif err = d.nlh.LinkAdd(veth); err != nil {\n\t\treturn nil, types.InternalErrorf(\"failed to add the host (%s) <=> sandbox (%s) pair interfaces: %v\", hostIfName, containerIfName, err)\n\t}\n\n\t// Get the host side pipe interface handler\n\thost, err := d.nlh.LinkByName(hostIfName)\n\tif err != nil {\n\t\treturn nil, types.InternalErrorf(\"failed to find host side interface %s: %v\", hostIfName, err)\n\t}\n\tdefer func() {\n\t\tif err != nil {\n\t\t\td.nlh.LinkDel(host)\n\t\t}\n\t}()\n\n\t// Get the sandbox side pipe interface handler\n\tsbox, err := d.nlh.LinkByName(containerIfName)\n\tif err != nil {\n\t\treturn nil, types.InternalErrorf(\"failed to find sandbox side interface %s: %v\", containerIfName, err)\n\t}\n\tdefer func() {\n\t\tif err != nil {\n\t\t\td.nlh.LinkDel(sbox)\n\t\t}\n\t}()\n\n\tnetwork.Lock()\n\tconfig := network.config\n\tnetwork.Unlock()\n\n\t// Add bridge inherited attributes to pipe interfaces\n\tif config.Mtu != 0 {\n\t\terr = d.nlh.LinkSetMTU(host, config.Mtu)\n\t\tif err != nil {\n\t\t\treturn nil, types.InternalErrorf(\"failed to set MTU on host interface %s: %v\", hostIfName, err)\n\t\t}\n\t\terr = d.nlh.LinkSetMTU(sbox, config.Mtu)\n\t\tif err != nil {\n\t\t\treturn nil, types.InternalErrorf(\"failed to set MTU on sandbox interface %s: %v\", containerIfName, err)\n\t\t}\n\t}\n\n\t// Attach host side pipe 
interface into the bridge\n\tif err = addToBridge(d.nlh, hostIfName, config.BridgeName); err != nil {\n\t\treturn nil, fmt.Errorf(\"adding interface %s to bridge %s failed: %v\", hostIfName, config.BridgeName, err)\n\t}\n\n\t// Store the sandbox side pipe interface parameters\n\tendpoint.srcName = containerIfName\n\tendpoint.macAddress = eInfo.MacAddress\n\tendpoint.addr = eInfo.Address\n\tendpoint.addrv6 = eInfo.AddressIPv6\n\n\t// Up the host interface after finishing all netlink configuration\n\tif err = d.nlh.LinkSetUp(host); err != nil {\n\t\treturn nil, fmt.Errorf(\"could not set link up for host interface %s: %v\", hostIfName, err)\n\t}\n\n\tres := &sdk.CreateEndpointResponse{\n\t\tInterface: &sdk.EndpointInterface{\n\t\t\tAddress: endpoint.addr,\n\t\t\tMacAddress: endpoint.macAddress,\n\t\t},\n\t}\n\n\tlog.Debugf(\"Create endpoint response: %+v\", res)\n\tlog.Debugf(\"Create endpoint %s %+v\", endID, res)\n\treturn res, nil\n}", "func New(port int, version string, requestsCh chan<- []byte) (*Service, error) {\n\tif !(1 <= port && port <= 65535) {\n\t\treturn nil, errors.New(\"ws.new: wrong port\")\n\t}\n\n\tvc, err := versionToConstr(version)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tws := &Service{\n\t\tverConstr: vc,\n\t\tconns: make(map[string]conn, 10),\n\t\tdone: make(chan struct{}),\n\t\trequestsCh: requestsCh,\n\t}\n\n\tws.upgrader.CheckOrigin = func(r *http.Request) bool {\n\t\treturn true // allow all origins\n\t}\n\n\tws.upgrader.Error = func(w http.ResponseWriter, r *http.Request, status int, reason error) {\n\t\terr := writeError(w, reason, status)\n\t\tif err != nil {\n\t\t\tlogger := r.Context().Value(loggerKey).(*log.Entry)\n\t\t\tlogger.WithError(err).Error(\"write error (ws handler)\")\n\t\t}\n\t}\n\n\tsrv := &http.Server{\n\t\tAddr: \"localhost:\" + strconv.Itoa(port),\n\t\tHandler: requestsWrapper(ws.handler),\n\t\tReadTimeout: httpTimeout,\n\t\tWriteTimeout: httpTimeout,\n\t\tMaxHeaderBytes: httpMaxHeaderBytes,\n\t}\n\n\tws.srv = 
srv\n\n\treturn ws, nil\n}", "func NewServiceEndpoint(\n\tname string,\n\tprotocolName ProtoID,\n\te endpoint.Endpoint,\n\tdec DecodeRequestFunc,\n\tenc EncodeResponseFunc,\n) (*ServiceEndpoint, error) {\n\tif name == \"\" {\n\t\treturn nil, ErrNoName\n\t}\n\n\tif len(protocolName) < 3 || (protocolName[0] == 0 && protocolName[1] == 0 && protocolName[2] == 0) {\n\t\treturn nil, ErrInvalidProtoID\n\t}\n\n\tif e == nil {\n\t\treturn nil, ErrNoEndpoint\n\t}\n\n\tif dec == nil {\n\t\tdec = StdDencode\n\t}\n\n\tif enc == nil {\n\t\tenc = StdDencode\n\t}\n\n\treturn &ServiceEndpoint{\n\t\tName: name,\n\t\tProtocolName: protocolName,\n\t\tE: e,\n\t\tDec: dec,\n\t\tEnc: enc,\n\t}, nil\n}", "func New(endpoint string) *Client {\n\treturn &Client{\n\t\tendpoint: endpoint,\n\t}\n}", "func newGRPCEndpointService(hostPortStr string) (clusterService, error) {\n\tconn, err := mkConnection(hostPortStr)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\teds := envoyapi.NewEndpointDiscoveryServiceClient(conn)\n\n\treturn endpointService(\n\t\tfnDiscoveryService{\n\t\t\tfetchFn: func(req *envoyapi.DiscoveryRequest) (*envoyapi.DiscoveryResponse, error) {\n\t\t\t\treturn eds.FetchEndpoints(context.Background(), req)\n\t\t\t},\n\t\t\tcloseFn: conn.Close,\n\t\t},\n\t), nil\n}", "func NewEndpoints(endpoint string) (*Endpoints, error) {\n\tif endpoint == \"\" {\n\t\treturn nil, fmt.Errorf(\"endpoint is required\")\n\t}\n\tep, err := url.Parse(endpoint)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\troles, err := urlJoin(ep, \"roles\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tusers, err := urlJoin(ep, \"users\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tinputs, err := urlJoin(ep, \"system/inputs\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tindexSets, err := urlJoin(ep, \"system/indices/index_sets\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tindexSetStats, err := urlJoin(indexSets, \"stats\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tstreams, err := urlJoin(ep, 
\"streams\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tenabledStreams, err := urlJoin(streams, \"enabled\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\talertConditions, err := urlJoin(ep, \"alerts/conditions\")\n\treturn &Endpoints{\n\t\troles: roles,\n\t\tusers: users,\n\t\tinputs: inputs,\n\t\tindexSets: indexSets,\n\t\tindexSetStats: indexSetStats,\n\t\tstreams: streams,\n\t\tenabledStreams: enabledStreams,\n\t\talertConditions: alertConditions,\n\t}, nil\n}", "func New(addr string, port int) *Server {\n\tctx, cancel := context.WithCancel(context.Background())\n\treturn &Server{\n\t\taddr: addr,\n\t\tport: port,\n\t\tctx: ctx,\n\t\tctxCancel: cancel,\n\t}\n}", "func NewDevEndpoint(ctx *pulumi.Context,\n\tname string, args *DevEndpointArgs, opts ...pulumi.ResourceOption) (*DevEndpoint, error) {\n\tif args == nil {\n\t\treturn nil, errors.New(\"missing one or more required arguments\")\n\t}\n\n\tif args.RoleArn == nil {\n\t\treturn nil, errors.New(\"invalid value for required argument 'RoleArn'\")\n\t}\n\topts = internal.PkgResourceDefaultOpts(opts)\n\tvar resource DevEndpoint\n\terr := ctx.RegisterResource(\"aws-native:glue:DevEndpoint\", name, args, &resource, opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &resource, nil\n}", "func (a *DefaultApiService) CreateEndpoint(ctx _context.Context) ApiCreateEndpointRequest {\n\treturn ApiCreateEndpointRequest{\n\t\tApiService: a,\n\t\tctx: ctx,\n\t}\n}", "func New(endpoint string) *EcomClient {\n\ttr := &http.Transport{\n\t\tMaxIdleConnsPerHost: 10,\n\t}\n\tclient := &http.Client{\n\t\tTransport: tr,\n\t\tTimeout: timeout,\n\t}\n\n\turl, err := url.Parse(endpoint)\n\tif err != nil {\n\t\tfmt.Fprintf(os.Stderr, \"%+v\\n\", err)\n\t\tos.Exit(1)\n\t}\n\n\treturn &EcomClient{\n\t\tendpoint: endpoint,\n\t\tscheme: url.Scheme,\n\t\thostname: url.Host,\n\t\tport: url.Port(),\n\t\tclient: client,\n\t}\n}", "func New(network, endpoint string) (h *Handler, err error) {\n\tconn, err := net.Dial(network, 
endpoint)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &Handler{\n\t\tconn: conn,\n\t\tenc: json.NewEncoder(conn),\n\t}, nil\n}", "func (r *ForwarderRequest) CreateEndpoint(queue *waiter.Queue) (tcpip.Endpoint, tcpip.Error) {\n\tr.mu.Lock()\n\tdefer r.mu.Unlock()\n\n\tif r.segment == nil {\n\t\treturn nil, &tcpip.ErrInvalidEndpointState{}\n\t}\n\n\tf := r.forwarder\n\tep, err := f.listen.performHandshake(r.segment, header.TCPSynOptions{\n\t\tMSS: r.synOptions.MSS,\n\t\tWS: r.synOptions.WS,\n\t\tTS: r.synOptions.TS,\n\t\tTSVal: r.synOptions.TSVal,\n\t\tTSEcr: r.synOptions.TSEcr,\n\t\tSACKPermitted: r.synOptions.SACKPermitted,\n\t}, queue, nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn ep, nil\n}", "func NewEndpointWithTTL(dnsName, recordType string, ttl TTL, targets ...string) *Endpoint {\n\tcleanTargets := make([]string, len(targets))\n\tfor idx, target := range targets {\n\t\tcleanTargets[idx] = strings.TrimSuffix(target, \".\")\n\t}\n\n\treturn &Endpoint{\n\t\tDNSName: strings.TrimSuffix(dnsName, \".\"),\n\t\tTargets: cleanTargets,\n\t\tRecordType: recordType,\n\t\tLabels: NewLabels(),\n\t\tRecordTTL: ttl,\n\t}\n}", "func NewEndpointRegistry(\n\tid wire.Account,\n\tonNewEndpoint func(wire.Address) wire.Consumer,\n\tdialer Dialer,\n\tser wire.EnvelopeSerializer,\n) *EndpointRegistry {\n\treturn &EndpointRegistry{\n\t\tid: id,\n\t\tonNewEndpoint: onNewEndpoint,\n\t\tdialer: dialer,\n\t\tser: ser,\n\n\t\tendpoints: make(map[wire.AddrKey]*fullEndpoint),\n\t\tdialing: make(map[wire.AddrKey]*dialingEndpoint),\n\n\t\tEmbedding: log.MakeEmbedding(log.WithField(\"id\", id.Address())),\n\t}\n}", "func NewEndpoints(s Service) *Endpoints {\n\treturn &Endpoints{\n\t\tAdd: NewAddEndpoint(s),\n\t}\n}", "func New(e *step.Endpoints, uh goagrpc.UnaryHandler) *Server {\n\treturn &Server{\n\t\tListH: NewListHandler(e.List, uh),\n\t\tAddH: NewAddHandler(e.Add, uh),\n\t\tRemoveH: NewRemoveHandler(e.Remove, uh),\n\t\tUpdateH: NewUpdateHandler(e.Update, 
uh),\n\t}\n}", "func New(mtu, bufferSize uint32, addr tcpip.LinkAddress, tx, rx QueueConfig) (stack.LinkEndpoint, error) {\n\te := &endpoint{\n\t\tmtu: mtu,\n\t\tbufferSize: bufferSize,\n\t\taddr: addr,\n\t}\n\n\tif err := e.tx.init(bufferSize, &tx); err != nil {\n\t\treturn nil, err\n\t}\n\n\tif err := e.rx.init(bufferSize, &rx); err != nil {\n\t\te.tx.cleanup()\n\t\treturn nil, err\n\t}\n\n\treturn e, nil\n}", "func NewEndpoints(s Service) Endpoints {\n\treturn endpoints{\n\t\tservice: s,\n\t}\n}", "func CreateEndpoint(projectProvider provider.ProjectProvider, privilegedProjectProvider provider.PrivilegedProjectProvider, serviceAccountProvider provider.ServiceAccountProvider, privilegedServiceAccount provider.PrivilegedServiceAccountProvider, userInfoGetter provider.UserInfoGetter) endpoint.Endpoint {\n\treturn func(ctx context.Context, request interface{}) (interface{}, error) {\n\t\treq := request.(addReq)\n\t\terr := req.Validate()\n\t\tif err != nil {\n\t\t\treturn nil, errors.NewBadRequest(err.Error())\n\t\t}\n\t\tsaFromRequest := req.Body\n\t\tproject, err := common.GetProject(ctx, userInfoGetter, projectProvider, privilegedProjectProvider, req.ProjectID)\n\t\tif err != nil {\n\t\t\treturn nil, common.KubernetesErrorToHTTPError(err)\n\t\t}\n\n\t\t// check if service account name is already reserved in the project\n\t\texistingSAList, err := listSA(ctx, serviceAccountProvider, privilegedServiceAccount, userInfoGetter, project, &saFromRequest)\n\t\tif err != nil {\n\t\t\treturn nil, common.KubernetesErrorToHTTPError(err)\n\t\t}\n\n\t\tif len(existingSAList) > 0 {\n\t\t\treturn nil, errors.NewAlreadyExists(\"service account\", saFromRequest.Name)\n\t\t}\n\n\t\tsa, err := createSA(ctx, serviceAccountProvider, privilegedServiceAccount, userInfoGetter, project, saFromRequest)\n\t\tif err != nil {\n\t\t\treturn nil, common.KubernetesErrorToHTTPError(err)\n\t\t}\n\n\t\treturn convertInternalServiceAccountToExternal(sa), nil\n\t}\n}", "func NewExposedPort(port 
string) (*ExposedPort, error) {\n\tparts := strings.SplitN(port, \"/\", 2)\n\tif len(parts) != 2 {\n\t\treturn nil, errors.New(\"invalid port: \" + port)\n\t}\n\tportInt, err := strconv.Atoi(parts[0])\n\tif err != nil {\n\t\treturn nil, errors.New(\"invalid port: \" + port)\n\t}\n\treturn &ExposedPort{\n\t\tPort: portInt,\n\t\tProtocol: strings.ToUpper(parts[1]),\n\t}, nil\n}", "func New(port int, backend string) (*Proxy, error) {\n\tu, err := url.Parse(backend)\n\n\tif err != nil {\n\t\treturn new(Proxy), err\n\t}\n\n\tl, err := net.Listen(\"tcp\", fmt.Sprintf(\":%d\", port))\n\n\tif err != nil {\n\t\treturn new(Proxy), err\n\t}\n\n\treturn &Proxy{l, u}, nil\n}", "func newService(namespace, name string) *v1.Service {\n\treturn &v1.Service{\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tName: name,\n\t\t\tNamespace: namespace,\n\t\t\tLabels: labelMap(),\n\t\t},\n\t\tSpec: v1.ServiceSpec{\n\t\t\tSelector: labelMap(),\n\t\t\tPorts: []v1.ServicePort{\n\t\t\t\t{Name: \"port-1338\", Port: 1338, Protocol: \"TCP\", TargetPort: intstr.FromInt(1338)},\n\t\t\t\t{Name: \"port-1337\", Port: 1337, Protocol: \"TCP\", TargetPort: intstr.FromInt(1337)},\n\t\t\t},\n\t\t},\n\t}\n\n}", "func CreateEndpoint(w http.ResponseWriter, req *http.Request) {\n\tvar url ShortURL\n\t_ = json.NewDecoder(req.Body).Decode($url)\n\tvar n1q1Params []interface{}\n\tn1q1Params = append(n1q1Params, url.LongURL)\n\tquery := gocb.NewN1qlQuery(\"SELECT `\" + bucketName + \"`.* FROM `\" + bucketName + \"` WHERE longUrl = $1\")\n\trows, err := bucket.ExecuteN1qlQuery(query, n1qlParams)\n\tif err != nil {\n w.WriteHeader(401)\n w.Write([]byte(err.Error()))\n return\n\t}\n\tvar row ShortURL\n rows.One(&row)\n if row == (ShortURL{}) {\n hd := hashids.NewData()\n h := hashids.NewWithData(hd)\n now := time.Now()\n url.ID, _ = h.Encode([]int{int(now.Unix())})\n url.ShortUrl = \"http://localhost:12345/\" + url.ID\n bucket.Insert(url.ID, url, 0)\n } else {\n url = row\n }\n json.NewEncoder(w).Encode(url)\n}", "func 
New() Port {\n\treturn &port{}\n}", "func New() HelloServer {\n\thttp.DefaultServeMux = new(http.ServeMux)\n\treturn HelloServer{\n\t\t&http.Server{\n\t\t\tAddr: \":7100\",\n\t\t},\n\t}\n}", "func NewEndpointMiddleware() endpoint.Middleware {\n\treturn func(next endpoint.Endpoint) endpoint.Endpoint {\n\t\treturn func(ctx context.Context, req interface{}) (resp interface{}, err error) {\n\t\t\tctx = New().WithCtx(ctx)\n\t\t\treturn next(ctx, req)\n\t\t}\n\t}\n}", "func NewEndpoints(s service.Service) Endpoints {\n\treturn Endpoints{\n\t\tGenerateEndpoint: MakeGenerateEndpoint(s),\n\t\tVerifyEndpoint: MakeVerifyEndpoint(s),\n\t}\n}", "func NewEndpoints(c Configuration, alternate func() (Endpoints, error)) (Endpoints, error) {\n\tif endpoints := c.endpoints(); len(endpoints) > 0 {\n\t\treturn ParseURLs(endpoints...)\n\t}\n\n\tif alternate != nil {\n\t\treturn alternate()\n\t}\n\n\treturn nil, errNoConfiguredEndpoints\n}", "func (r *EndpointRegistry) addEndpoint(addr wire.Address, conn Conn, dialer bool) *Endpoint {\n\tr.Log().WithField(\"peer\", addr).Trace(\"EndpointRegistry.addEndpoint\")\n\n\te := newEndpoint(addr, conn)\n\tfe, created := r.fullEndpoint(addr, e)\n\tif !created {\n\t\tif e, closed := fe.replace(e, r.id.Address(), dialer); closed {\n\t\t\treturn e\n\t\t}\n\t}\n\n\tconsumer := r.onNewEndpoint(addr)\n\t// Start receiving messages.\n\tgo func() {\n\t\tif err := e.recvLoop(consumer); err != nil {\n\t\t\tr.Log().WithError(err).Error(\"recvLoop finished unexpectedly\")\n\t\t}\n\t\tfe.delete(e)\n\t}()\n\n\treturn e\n}", "func NewEndpointCore(id string, name string, discoveredBy string, dims map[string]string) *EndpointCore {\n\tif id == \"\" {\n\t\t// Observers must provide an ID or else they are majorly broken\n\t\tpanic(\"EndpointCore cannot be created without an id\")\n\t}\n\n\tec := &EndpointCore{\n\t\tID: ID(id),\n\t\tName: name,\n\t\tDiscoveredBy: discoveredBy,\n\t\textraDimensions: dims,\n\t\textraFields: map[string]interface{}{},\n\t}\n\n\treturn 
ec\n}", "func EndpointFactory(args *endpoint.Arg, stats *stats.Stats, workerCount uint) (endpoint.EndPoint, error) {\n\tif FailSetup {\n\t\treturn nil, errors.New(\"Forced Error\")\n\t}\n\treturn &fakeEndpoint{}, nil\n}", "func New(endpoint string) *Client {\n\treturn &Client{endpoint, &http.Client{}, \"\"}\n}", "func MakeNewSiteEndpoint(svc service.Service) endpoint.Endpoint {\n\treturn func(ctx context.Context, request interface{}) (response interface{}, err error) {\n\t\treq := request.(NewSiteRequest)\n\t\tid, err := svc.NewSite(ctx, req.SiteName)\n\t\treturn NewSiteResponse{SiteID: id, Err: err}, nil\n\t}\n}", "func New(instance string, options ...httptransport.ClientOption) (pb.CustomerServer, error) {\n\n\tif !strings.HasPrefix(instance, \"http\") {\n\t\tinstance = \"http://\" + instance\n\t}\n\tu, err := url.Parse(instance)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\t_ = u\n\n\tpanic(\"No HTTP Endpoints, this client will not work, define bindings in your proto definition\")\n\n\treturn svc.Endpoints{}, nil\n}", "func newHTTPServer(appConfig config.AppConfig, logger services.Logger) services.HTTPServer {\n\treturn services.NewDefaultHTTPServer(appConfig.Port, logger)\n}", "func Create(port string) (net.Listener, *grpc.Server) {\n\t//TODO: Find a better way to pass \"127.0.0.1:\"\n\tlis, err := net.Listen(\"tcp\", \"127.0.0.1:\"+port)\n\tif err != nil {\n\t\tlog.WithFields(log.Fields{\n\t\t\t\"port\": port,\n\t\t}).WithError(err).Fatal(\"Failed to bind port !\")\n\t\tlog.Print(\"Trying to bind onto another port !\")\n\t}\n\treturn lis, grpc.NewServer()\n}", "func NewEndpoints(s Service) *Endpoints {\n\treturn &Endpoints{\n\t\tList: NewListEndpoint(s),\n\t\tGet: NewGetEndpoint(s),\n\t\tRandomFacts: NewRandomFactsEndpoint(s),\n\t}\n}", "func CreateEP(t *testing.T, ns string, Name string, multiPort bool, multiAddress bool, addressPrefix string, multiProtocol ...corev1.Protocol) {\n\tif addressPrefix == \"\" {\n\t\taddressPrefix = \"1.1.1\"\n\t}\n\tvar 
endpointSubsets []corev1.EndpointSubset\n\tnumPorts, numAddresses, addressStart := 1, 1, 0\n\tif multiPort {\n\t\tnumPorts = 3\n\t}\n\tif len(multiProtocol) != 0 {\n\t\tnumPorts = len(multiProtocol)\n\t}\n\tif multiAddress {\n\t\tnumAddresses, addressStart = 3, 0\n\t}\n\n\tfor i := 0; i < numPorts; i++ {\n\t\tprotocol := corev1.ProtocolTCP\n\t\tif len(multiProtocol) != 0 {\n\t\t\tprotocol = multiProtocol[i]\n\t\t}\n\t\tmPort := 8080 + i\n\n\t\tvar addressStartIndex int\n\t\tif !multiPort && !multiAddress {\n\t\t\tnumAddresses, addressStart = 1, 0\n\t\t} else {\n\t\t\taddressStartIndex = addressStart + i\n\t\t}\n\t\tvar epAddresses []corev1.EndpointAddress\n\t\tfor j := 0; j < numAddresses; j++ {\n\t\t\tepAddresses = append(epAddresses, corev1.EndpointAddress{IP: fmt.Sprintf(\"%s.%d\", addressPrefix, addressStartIndex+j+1)})\n\t\t}\n\t\tnumAddresses = numAddresses - 1\n\t\taddressStart = addressStart + numAddresses\n\t\tendpointSubsets = append(endpointSubsets, corev1.EndpointSubset{\n\t\t\tAddresses: epAddresses,\n\t\t\tPorts: []corev1.EndpointPort{{\n\t\t\t\tName: fmt.Sprintf(\"foo%d\", i),\n\t\t\t\tPort: int32(mPort),\n\t\t\t\tProtocol: protocol,\n\t\t\t}},\n\t\t})\n\t}\n\n\tepExample := &corev1.Endpoints{\n\t\tObjectMeta: metav1.ObjectMeta{Namespace: ns, Name: Name},\n\t\tSubsets: endpointSubsets,\n\t}\n\t_, err := KubeClient.CoreV1().Endpoints(ns).Create(context.TODO(), epExample, metav1.CreateOptions{})\n\tif err != nil {\n\t\tt.Fatalf(\"error in creating Endpoint: %v\", err)\n\t}\n\ttime.Sleep(2 * time.Second)\n}", "func New(e *calc.Endpoints, uh goagrpc.UnaryHandler) *Server {\n\treturn &Server{\n\t\tDivideH: NewDivideHandler(e.Divide, uh),\n\t}\n}", "func NewServer(endpoint string) (*Server, error) {\n\n\tret := &Server{}\n\tvar err error\n\tret.Listener, err = net.Listen(\"tcp\", endpoint)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tret.mux = http.NewServeMux()\n\tret.mux.HandleFunc(\"/\", func(w http.ResponseWriter, r *http.Request) 
{\n\t\tw.Write([]byte(\"This is the monitoring endpoint\"))\n\t})\n\tret.mux.Handle(\"/mon/varz\", expvar.Handler())\n\n\tret.mux.HandleFunc(\"/mon/pprof/\", pprof.Index)\n\tret.mux.HandleFunc(\"/mon/pprof/cmdline\", pprof.Cmdline)\n\tret.mux.HandleFunc(\"/mon/pprof/profile\", pprof.Profile)\n\tret.mux.HandleFunc(\"/mon/pprof/symbol\", pprof.Symbol)\n\tEnableTracing()\n\tret.mux.HandleFunc(\"/mon/trace\", TraceHandler())\n\tret.srv = &http.Server{}\n\treturn ret, nil\n}", "func New(opts Options) (stack.LinkEndpoint, error) {\n\te := &endpoint{\n\t\tmtu: opts.MTU,\n\t\tbufferSize: opts.BufferSize,\n\t\taddr: opts.LinkAddress,\n\t\tpeerFD: opts.PeerFD,\n\t\tonClosed: opts.OnClosed,\n\t\tvirtioNetHeaderRequired: opts.VirtioNetHeaderRequired,\n\t\tgsoMaxSize: opts.GSOMaxSize,\n\t}\n\n\tif err := e.tx.init(opts.BufferSize, &opts.TX); err != nil {\n\t\treturn nil, err\n\t}\n\n\tif err := e.rx.init(opts.BufferSize, &opts.RX); err != nil {\n\t\te.tx.cleanup()\n\t\treturn nil, err\n\t}\n\n\te.caps = stack.LinkEndpointCapabilities(0)\n\tif opts.RXChecksumOffload {\n\t\te.caps |= stack.CapabilityRXChecksumOffload\n\t}\n\n\tif opts.TXChecksumOffload {\n\t\te.caps |= stack.CapabilityTXChecksumOffload\n\t}\n\n\tif opts.LinkAddress != \"\" {\n\t\te.hdrSize = header.EthernetMinimumSize\n\t\te.caps |= stack.CapabilityResolutionRequired\n\t}\n\n\tif opts.VirtioNetHeaderRequired {\n\t\te.hdrSize += header.VirtioNetHeaderSize\n\t}\n\n\treturn e, nil\n}", "func NewEndpointDescription(endpointUrl PascalString, server ExtensionObjectDefinition, serverCertificate PascalByteString, securityMode MessageSecurityMode, securityPolicyUri PascalString, noOfUserIdentityTokens int32, userIdentityTokens []ExtensionObjectDefinition, transportProfileUri PascalString, securityLevel uint8) *_EndpointDescription {\n\t_result := &_EndpointDescription{\n\t\tEndpointUrl: endpointUrl,\n\t\tServer: server,\n\t\tServerCertificate: serverCertificate,\n\t\tSecurityMode: securityMode,\n\t\tSecurityPolicyUri: 
securityPolicyUri,\n\t\tNoOfUserIdentityTokens: noOfUserIdentityTokens,\n\t\tUserIdentityTokens: userIdentityTokens,\n\t\tTransportProfileUri: transportProfileUri,\n\t\tSecurityLevel: securityLevel,\n\t\t_ExtensionObjectDefinition: NewExtensionObjectDefinition(),\n\t}\n\t_result._ExtensionObjectDefinition._ExtensionObjectDefinitionChildRequirements = _result\n\treturn _result\n}", "func New(host, port string, h http.Handler) *WebServer {\n\tvar ws WebServer\n\n\tws.Addr = net.JoinHostPort(host, port)\n\tws.Handler = h\n\n\treturn &ws\n}", "func New(addr string, host app.HostService, collector *metrics.Collector) app.Server {\n\treturn &server{\n\t\tsrv: telnet.Server{Addr: addr, Handler: nil},\n\t\thost: host,\n\t\tcollector: collector,\n\t}\n}", "func NewEndpoints(s service.Service) Endpoints {\n\treturn Endpoints{\n\t\tAllEndpoint: MakeAllEndpoint(s),\n\t\tGetEndpoint: MakeGetEndpoint(s),\n\t}\n}", "func MakeEndpointNodeID(hostID, address, port string) string {\n\treturn lookupID(hostID, address, port, func() string {\n\t\treturn MakeAddressNodeID(hostID, address) + ScopeDelim + port\n\t})\n}", "func New(addr string) *Server {\n if addr == \"\" {\n addr = DefaultAddr\n }\n return &Server{\n addr: DefaultAddr,\n ds: newDataStore(),\n done: make(chan struct{}),\n }\n}", "func createEndpoint(paths ...string) string {\n\tendpoint, err := utils.ConstructURL(paths...)\n\tif err != nil {\n\t\tlog.HandleErrorAndExit(\"cannot construct endpoint\", err)\n\t}\n\treturn endpoint\n}", "func NewEndpoints(s Service) *Endpoints {\n\treturn &Endpoints{\n\t\tAdd: NewAddEndpoint(s),\n\t\tResta: NewRestaEndpoint(s),\n\t\tMultiplicacion: NewMultiplicacionEndpoint(s),\n\t\tDivision: NewDivisionEndpoint(s),\n\t}\n}", "func NewEndpoints(s Service) *Endpoints {\n\treturn &Endpoints{\n\t\tCreateSession: NewCreateSessionEndpoint(s),\n\t\tUseSession: NewUseSessionEndpoint(s),\n\t}\n}", "func NewDeleteEndpoint(s Service) goa.Endpoint {\n\treturn func(ctx context.Context, req interface{}) 
(interface{}, error) {\n\t\tp := req.(*DeletePayload)\n\t\treturn nil, s.Delete(ctx, p)\n\t}\n}", "func New(port string) *Server {\n\treturn &Server{\n\t\tport: port,\n\t\tmanager: endly.New(),\n\t}\n}", "func New(e *todo.Endpoints, uh goagrpc.UnaryHandler) *Server {\n\treturn &Server{\n\t\tGetH: NewGetHandler(e.Get, uh),\n\t\tListH: NewListHandler(e.List, uh),\n\t\tAddH: NewAddHandler(e.Add, uh),\n\t\tRemoveH: NewRemoveHandler(e.Remove, uh),\n\t}\n}", "func generateEndpoint(command_name string, args_hash string) string {\n\treturn fmt.Sprintf(\"http://localhost:3001/%s\", path.Join(\"commands\",command_name, args_hash))\n}", "func New(endpoint string) BaseClient {\n\treturn NewWithoutDefaults(endpoint)\n}" ]
[ "0.74292636", "0.72935945", "0.72410816", "0.7215659", "0.71920615", "0.711459", "0.70928013", "0.705746", "0.69879735", "0.6974851", "0.69721305", "0.68303615", "0.6723265", "0.6712752", "0.6605164", "0.6602713", "0.6558774", "0.65187037", "0.64022964", "0.6365016", "0.63610584", "0.6209596", "0.6203358", "0.6123653", "0.6108437", "0.60979474", "0.60764503", "0.6075556", "0.59888357", "0.59879845", "0.5980729", "0.5905845", "0.58581066", "0.5843042", "0.5838468", "0.5834476", "0.5823453", "0.5822517", "0.58159256", "0.5814215", "0.5793094", "0.5780002", "0.57655656", "0.57516503", "0.57405627", "0.5737683", "0.57363343", "0.57352555", "0.57346916", "0.5728066", "0.5726761", "0.57263607", "0.5693105", "0.5684099", "0.56610405", "0.56539375", "0.5641001", "0.5629674", "0.56220806", "0.56123734", "0.5592099", "0.55898917", "0.5583745", "0.55637497", "0.5561949", "0.55585724", "0.55447054", "0.5542989", "0.5535745", "0.552908", "0.5524277", "0.5519087", "0.5475816", "0.5474678", "0.5474238", "0.54732114", "0.5467853", "0.54596967", "0.5459371", "0.54546916", "0.54546463", "0.54491895", "0.5447852", "0.54413813", "0.54342437", "0.54234684", "0.5419775", "0.5418819", "0.5410803", "0.54066557", "0.538818", "0.5377203", "0.5366244", "0.5363872", "0.53628534", "0.535963", "0.5357422", "0.53544366", "0.53506035", "0.53285164" ]
0.74718064
0
AddHandleFunc adds a new function for handling incoming data.
AddHandleFunc добавляет новую функцию для обработки входящих данных.
func (e *Endpoint) AddHandleFunc(name string, f HandleFunc) { e.mutex.Lock() e.handler[name] = f e.mutex.Unlock() }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (l Listener) AddHandler(cmd string, handleFunc func()) {\n\tl[cmd] = handleFunc\n}", "func (l *logPipe) HandleFunc(hf func(string) error) {\n\tl.handleFunc = hf\n}", "func HandleFunc(name string, handlerFunc func(Response)) {\n\thandlers[name] = toFunction(handlerFunc)\n}", "func HandleFunc(h HandlerFunc) {\n\tapex.Handle(h)\n}", "func HandleFunc(h HandlerFunc) {\n\tapex.Handle(h)\n}", "func HandleFunc(h HandlerFunc) {\n\tapex.Handle(h)\n}", "func (h *MxHandler) HandleFunc(pattern *checkSelection, handler func(http.ResponseWriter, *http.Request)) {\n\th.routes = append(h.routes, &route{pattern, http.HandlerFunc(handler)})\n}", "func (h *RegexpHandler) HandleFunc(pattern *regexp.Regexp, handler func(http.ResponseWriter, *http.Request)) {\n\th.routes = append(h.routes, &Route{pattern, http.HandlerFunc(handler)})\n}", "func (c *CmdRunner) HandleFunc(cmdId string, handler func(cmdMessage CmdMessage)) {\n\tc.Handlers[cmdId] = handler\n}", "func (s *Server) HandleFunc(path string, fn http.HandlerFunc) {\n\ts.Handle(path, http.HandlerFunc(fn))\n}", "func (mx *Mux) HandleFunc(pattern string, handler interface{}) {\n\tmx.handle(ALL, pattern, handler)\n}", "func HandleFunc(c Checker, pattern string, h http.HandlerFunc) {\n\thttp.HandleFunc(pattern, HandlerFunc(c, h))\n}", "func (mux *ServeMux) HandleFunc(pattern string, handler func(ResponseWriter, *Request))", "func (mux *Mux) HandleFunc(pattern string, handler HandlerFunc) {\n\tmux.Handle(pattern, handler)\n}", "func (mx *Mux) HandleFunc(pattern string, handlerFn http.HandlerFunc) {\n\tmx.handle(mALL, pattern, handlerFn)\n}", "func (app *App) HandleFunc(pattern string, handlerFunc http.HandlerFunc) {\n\tif app.Server() != nil {\n\t\tapp.Server().Handler.(*http.ServeMux).HandleFunc(pattern, handlerFunc)\n\t}\n}", "func (m *ServeMux) HandleFunc(method string, path string, h interface{}) {\n\tm.Handle(method, path, &handlerContainerImpl{\n\t\thandler: h,\n\t\tContext: background,\n\t})\n}", "func (m *Transport) 
AddHandle(handle interface{}, args ...string) error {\n\th := handle.(gin.HandlerFunc)\n\tif len(args) == 0 {\n\t\tm.engine.Use(h)\n\t} else if len(args) == 2 {\n\t\tm.engine.Handle(args[0], args[1], h)\n\t} else {\n\t\treturn errors.New(\"invalid args\")\n\t}\n\treturn nil\n}", "func (mux *ServeMux) HandleFunc(m Matcher, h HandlerFunc) {\n\tmux.Handle(m, h)\n}", "func (r *Router) HandleFunc(method, path string, handler func(context.Context, taxi.Decoder) (interface{}, error)) {\n\tr.router.HandleFunc(method, path, handler)\n}", "func (r *Mux) HandleFunc(method, path string, handler http.HandlerFunc) {\n\tr.Handle(method, path, http.HandlerFunc(handler))\n}", "func HandleFunc(method string, path string, h interface{}) {\n\tDefaultMux.HandleFunc(method, path, h)\n}", "func (s *server) HandleFunc(path string, handlerFunc http.HandlerFunc) {\n\ts.mux.HandleFunc(path, handlerFunc)\n}", "func (s *Server) HandleFunc(path string, h http.HandlerFunc) {\n\ts.router.HandleFunc(path, h)\n}", "func HandleFunc(pattern string, handler func(ResponseWriter, *Request)) {\n\tDefaultServeMux.HandleFunc(pattern, handler)\n}", "func HandleFunc(pattern string, handler func(ResponseWriter, *Request)) {\n\tDefaultServeMux.HandleFunc(pattern, handler)\n}", "func (e *Exporter) HandleFunc(url string, f func(w http.ResponseWriter, r *http.Request)) {\n\te.mu.Lock()\n\tdefer e.mu.Unlock()\n\tif e.name == \"\" {\n\t\tHTTPHandleFunc(url, f)\n\t\treturn\n\t}\n\n\tif hf, ok := e.handleFuncs[url]; ok {\n\t\thf.Set(f)\n\t\treturn\n\t}\n\thf := &handleFunc{f: f}\n\te.handleFuncs[url] = hf\n\n\tHTTPHandleFunc(e.URLPrefix()+url, func(w http.ResponseWriter, r *http.Request) {\n\t\tif f := hf.Get(); f != nil {\n\t\t\tf(w, r)\n\t\t}\n\t})\n}", "func HandleFunc(pattern string, handler func(ResponseWriter, *Msg)) {\n\tDefaultServeMux.HandleFunc(pattern, handler)\n}", "func (r *Router) HandleFunc(method, pattern string, fn http.HandlerFunc) Route {\n\treturn r.Handle(method, pattern, 
http.HandlerFunc(fn))\n}", "func (mux *ServeMux) HandleFunc(pattern string, handler func(ResponseWriter, *Msg)) {\n\tmux.Handle(pattern, HandlerFunc(handler))\n}", "func (s *Server) HandleFunc(pattern string, handler func(http.ResponseWriter, *http.Request)) {\r\n\ts.router.HandleFunc(pattern, handler)\r\n}", "func (self *ServeMux) HandleFunc(pattern string, handler func(http.ResponseWriter, *http.Request)) {\n\tself.Handle(pattern, http.HandlerFunc(handler))\n}", "func (k *Kite) HandleHTTPFunc(pattern string, handler func(http.ResponseWriter, *http.Request)) {\n\tk.muxer.HandleFunc(pattern, handler)\n}", "func (s *Server) HandleFunc(pattern string, handler func(http.ResponseWriter, *http.Request)) {\n\ts.router.HandleFunc(pattern, handler)\n}", "func (r *Router) HandleFunc(pattern, method string, f func(Context) error) {\n\tr.Handle(pattern, method, HandlerFunc(f))\n}", "func (a *App) HandleFunc(mount string, handler http.HandlerFunc) *route {\n\troute := a.newRoute(mount, handler)\n\troute.buildPatterns(\"\")\n\treturn route\n}", "func (s *Stub) HandleFunc(fn func(http.ResponseWriter, *http.Request)) {\n\ts.response.handler = fn\n}", "func (m *ServeMux) HandleFunc(command string, handler func(conn Conn, cmd Command)) {\n\tif handler == nil {\n\t\tpanic(\"redcon: nil handler\")\n\t}\n\tm.Handle(command, HandlerFunc(handler))\n}", "func (p *spaDevProxy) HandleFunc(w http.ResponseWriter, r *http.Request) {\n\tp.proxy.ServeHTTP(w, r)\n}", "func (mux *ServeMux) HandleFunc(pattern string, handler func(ResponseWriter, *Request)) {\n\tif handler == nil {\n\t\tpanic(\"http: nil handler\")\n\t}\n\tmux.Handle(pattern, HandlerFunc(handler))\n}", "func (a *Asock) AddHandler(name string, argmode string, df DispatchFunc) error {\n\tif _, ok := a.d[name]; ok {\n\t\treturn fmt.Errorf(\"handler '%v' already exists\", name)\n\t}\n\tif argmode != \"split\" && argmode != \"nosplit\" {\n\t\treturn fmt.Errorf(\"invalid argmode '%v'\", argmode)\n\t}\n\ta.d[name] = &dispatchFunc{df, 
argmode}\n\ta.help = \"\"\n\tfor cmd := range a.d {\n\t\ta.help = a.help + cmd + \" \"\n\t}\n\treturn nil\n}", "func (mux *ServeMux) HandleFunc(pattern string, handler func(http.ResponseWriter, *http.Request)) error {\n\treturn mux.Handle(pattern, http.HandlerFunc(handler))\n}", "func (s *Subrouter) HandleFunc(m, p string, hfunc http.HandlerFunc) {\n\tk := s.prefix + resolvedPath(p)\n\n\ts.initEndp(k)\n\n\ts.endps[k][m] = []interface{}{hfunc}\n}", "func HandleFunc(code int, data interface{}) func(w http.ResponseWriter, r *http.Request) {\n\treturn func(w http.ResponseWriter, r *http.Request) {\n\t\tReply(w, code, data)\n\t}\n}", "func (m *ServeMux) HandleFunc(pattern string, handler func(http.ResponseWriter, *http.Request)) {\n\tif handler == nil {\n\t\tpanic(\"http: nil handler\")\n\t}\n\n\tm.Handle(pattern, http.HandlerFunc(handler))\n}", "func (m *RegExpMux) HandleFunc(pattern *regexp.Regexp, handler func(http.ResponseWriter, *http.Request)) {\n\tm.Handle(pattern, http.HandlerFunc(handler))\n}", "func (c *Cluster) HandleFunc(pattern string, handler func(http.ResponseWriter, *http.Request)) {\n\tc.router.HandleFunc(pattern, handler)\n}", "func (mux *XMux) HandleFunc(uriHandler *UriHandler) {\n\tmux.Handle(uriHandler)\n}", "func (r *Router) HandleFunc(path string, f func(context.Context, http.ResponseWriter, *http.Request) error) *Route {\n\treturn r.Handle(path, HandlerFunc(f))\n}", "func (c *RestClient) HandleFunc(h func(http.ResponseWriter, *http.Request)) http.Handler {\n\treturn c.Handle(http.HandlerFunc(h))\n}", "func (fn AddContactHandlerFunc) Handle(params AddContactParams, principal *app.Auth) AddContactResponder {\n\treturn fn(params, principal)\n}", "func HandleFunc(n xml.Name, h xmpp.HandlerFunc) Option {\n\treturn Handle(n, h)\n}", "func HandleFunc(n xml.Name, h xmpp.HandlerFunc) Option {\n\treturn Handle(n, h)\n}", "func (h *Handler) EventHandleFunc(etype string, name string, eventHandler func(Event)) {\n\th.EventHandle(etype, name, 
EventHandlerFunc(eventHandler))\n}", "func (h *Handler) HandleFunc(path string, f func(w http.ResponseWriter, r *http.Request)) *mux.Route {\n\treturn h.Router.HandleFunc(path, f)\n}", "func (a *App) HandleFunc(p string, l UserLevel, f func(w http.ResponseWriter, r *Request)) {\n\ta.Handle(p, l, HandleFunc(f))\n}", "func (i *Client) Handle(fs []Filter, hf HandlerFunc) {\n\th := &Handler{\n\t\tFilters: fs,\n\t\tHandler: hf,\n\t}\n\n\ti.handlers = append(i.handlers, h)\n}", "func (hf HandlerFunc) Handle(metadata Metadata) error {\n\treturn hf(metadata)\n}", "func (mux *TypeMux) HandleFunc(t Type, f HandlerFunc) {\n\tmux.Handle(t, f)\n}", "func (s *Session) AddMessageHandler(fn func(Message, *Session)) {\n\ts.handlers.msgHandler = fn\n}", "func (m *MuxTracer) HandleFunc(router *mux.Router, pattern string, handler http.HandlerFunc) *mux.Route {\n\treturn router.HandleFunc(pattern, m.TraceHandleFunc(handler))\n}", "func (auth *Mux) HandleFunc(pattern string, handler func(http.ResponseWriter, *http.Request), role models.Role) {\n\tauth.Handle(pattern, http.HandlerFunc(handler), role)\n}", "func (h *Handler) Add(pattern string, handler HandlerFunc, opts ...RouteOption) *Route {\n\tfn := func(w http.ResponseWriter, req *http.Request) {\n\t\terr := handler(w, req)\n\t\tif err != nil {\n\t\t\th.Abort(w, req, err)\n\t\t}\n\t}\n\treturn h.Handle(pattern, http.Handler(http.HandlerFunc(fn)), opts...)\n}", "func (rm *RouterMux) HandleFunc(path string, handler HandlerFunction, description ...string) {\n\tif path == RouteRoot {\n\t\trm.root.handler = NewHandler(handler, path, description...)\n\t\treturn\n\t}\n\tparts := strings.Split(path, \"/\")\n\tnode := rm.root\n\tfor _, part := range parts {\n\t\tif part == \"\" {\n\t\t\tcontinue\n\t\t}\n\t\tif node.children[part] == nil {\n\t\t\tnode.children[part] = NewNode(node, part, NewHandler(handler, path, description...))\n\t\t}\n\t\tnode = node.children[part]\n\t}\n}", "func (r *Router) HandleFunc(w http.ResponseWriter, req 
*http.Request) {\n\th, params, err := r.Find(req.Method, req.URL.Path)\n\tif err != nil {\n\t\tif err.Error() == \"no matching patterns\" {\n\t\t\tr.NotFoundHandelr(w, req)\n\t\t\treturn\n\t\t}\n\t\tr.ErrorHandler(w, req)\n\t\treturn\n\t}\n\treq = req.WithContext(setParsedParamsToCtx(req.Context(), params))\n\th(w, req)\n}", "func (m *mDNS) AddHandler(f func(net.Interface, net.Addr, Packet)) {\n\tm.pHandlers = append(m.pHandlers, f)\n}", "func HandleFunc(t Type, f func(ResponseWriter, *Request)) {\n\tDefaultMux.HandleFunc(t, f)\n}", "func (m *mDNS) AddHandler(f func(net.Addr, Packet)) {\n\tm.pHandlers = append(m.pHandlers, f)\n}", "func (rhf HandlerFunc) Handle(req *Request) (resp *Response, err error) {\n\treturn rhf(req)\n}", "func (service *HealthService) HealthCheckHandleFunc() func(w http.ResponseWriter, r *http.Request) {\n\treturn fthealth.Handler(service)\n}", "func (ps *PS) AddHandle(handle htypes.Handle) {\n\tps.Handles = append(ps.Handles, handle)\n}", "func (h *Handler) Add(cmd int32, hf HandlerFunc) {\n\th.router[cmd] = hf\n}", "func EventHandleFunc(etype string, name string, eventHandler func(Event)) {\n\tDefaultHandler.EventHandleFunc(etype, name, eventHandler)\n}", "func (router *Routes) AddHandler(definition string, givenHandler Handler) {\n\telements := strings.Split(definition, \" \")\n\trouter.handlers = append(router.handlers, handler{elements, givenHandler})\n}", "func HandleFunc(aq querier.AgentQuerier) http.HandlerFunc {\n\treturn func(w http.ResponseWriter, r *http.Request) {\n\t\tvar err error\n\t\tvar resps []Response\n\t\tpodName := r.URL.Query().Get(\"pod\")\n\t\tnamespace := r.URL.Query().Get(\"namespace\")\n\n\t\tif podName == \"\" && namespace == \"\" {\n\t\t\tresps, err = getAllFlows(aq)\n\t\t} else if podName != \"\" && namespace != \"\" {\n\t\t\t// Pod Namespace must be provided to dump flows of a Pod.\n\t\t\tresps, err = getPodFlows(aq, podName, namespace)\n\t\t} else {\n\t\t\t// Not 
supported.\n\t\t\tw.WriteHeader(http.StatusBadRequest)\n\t\t\treturn\n\t\t}\n\n\t\tif err != nil {\n\t\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\t\treturn\n\t\t}\n\t\tif resps == nil {\n\t\t\tw.WriteHeader(http.StatusNotFound)\n\t\t\treturn\n\t\t}\n\n\t\terr = json.NewEncoder(w).Encode(resps)\n\t\tif err != nil {\n\t\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\t}\n\t}\n}", "func (h HandlerFunc) Handle(e Event) { h(e) }", "func (dumbRouter *DumbRouter) AddFunctionMapping(funcURL string, function func(req *http.Request, res http.ResponseWriter)) {\n\tdumbRouter.routes[funcURL] = function\n}", "func HandleFunc(h http.Handler, enabledApps map[string]string) httprouter.Handle {\n\tlog.Println(\"in handleFunc\")\n\treturn func(w http.ResponseWriter, r *http.Request, params httprouter.Params) {\n\t\tctx := r.Context()\n\t\tctx = context.WithValue(ctx, \"enabledApps\", enabledApps)\n\t\tctx = context.WithValue(ctx, \"params\", params)\n\t\tr = r.WithContext(ctx)\n\t\th.ServeHTTP(w, r)\n\t}\n}", "func (handler *WebsocketHandler) AddHandler(commandName string, fn func(context.Context, interfaces.WebsocketClient, interfaces.WebsocketCommand)) {\n\thandler.Handlers[commandName] = fn\n}", "func (ph *Handler) SetHandlerFunc(newHandler HandlerFunc) {\n\tph.mu.Lock()\n\tdefer ph.mu.Unlock()\n\tph.handle = newHandler\n}", "func (hm *Mux) AddHandler(op Op, h Func) {\n\thm.l.Lock()\n\thm.handlers[op] = h\n\thm.l.Unlock()\n}", "func NewHandleFunc(h func(*Client)) http.HandlerFunc {\n\n\tupgrader := &websocket.Upgrader{\n\t\tReadBufferSize: 1024,\n\t\tWriteBufferSize: 1024,\n\t\tCheckOrigin: func(r *http.Request) bool {\n\t\t\treturn true\n\t\t},\n\t}\n\treturn func(w http.ResponseWriter, r *http.Request) {\n\t\tserveWs(h, upgrader, w, r)\n\t}\n}", "func (r ResourceEventHandlerFuncs) OnAdd(obj interface{}) {\n\tif r.AddFunc != nil {\n\t\tr.AddFunc(obj)\n\t}\n}", "func HandleAdd(c *cron.Cron, event jobs.JobCron) http.HandlerFunc {\n\treturn func(w 
http.ResponseWriter, r *http.Request) {\n\t\tvar job jobs.Job\n\t\tdata, err := ioutil.ReadAll(r.Body)\n\t\tif err != nil {\n\t\t\treq.InternalError(w, err)\n\t\t\tlogger.FromRequest(r).WithError(err).Debugln(\"无法解析body的内容\")\n\t\t\treturn\n\t\t}\n\n\t\terr = json.Unmarshal(data, &job)\n\t\tif err != nil {\n\t\t\tlogger.FromRequest(r).WithError(err).Debugln(\"Json数据格式或者参数错误\")\n\t\t\treq.BadRequest(w, err)\n\t\t\treturn\n\t\t}\n\n\t\tif job.Name == \"\" || job.Scheduler == \"\" || job.Action == \"\" {\n\t\t\tlogger.FromRequest(r).WithError(err).Debugln(\"Json数据格式或者参数错误\")\n\t\t\treq.BadRequestf(w, \"Json数据格式或者参数错误\")\n\t\t\treturn\n\t\t}\n\n\t\terr = event.Add(c, job)\n\t\tif err != nil {\n\t\t\tlogger.FromRequest(r).WithError(err).Debugln(\"任务添加失败\")\n\t\t\treq.InternalError(w, err)\n\t\t\treturn\n\t\t}\n\t\treq.JSON(w, \"任务添加成功\", 200)\n\t}\n}", "func AddHandler(handler HandlerFunc, evts ...string) *HandlerFunc {\n\tfor _, evt := range evts {\n\t\tif evt == \"*\" && len(evts) > 1 {\n\t\t\tlogrus.Warn(AddMultipleWildcardWarning)\n\t\t\treturn AddHandler(handler, \"*\")\n\t\t}\n\t}\n\n\thPtr := &handler\n\n\teventHandlersLock.Lock()\n\tdefer eventHandlersLock.Unlock()\n\n\tfor _, evt := range evts {\n\t\teventHandlers[evt] = append(eventHandlers[evt], hPtr)\n\t}\n\n\treturn hPtr\n}", "func (e *Engine) AddFunc(name string, fn interface{}) *Engine {\n\te.Mutex.Lock()\n\te.Funcmap[name] = fn\n\te.Mutex.Unlock()\n\treturn e\n}", "func (r *Routers) Add(url string, handler func(http.ResponseWriter, *http.Request)) {\n\tr.lock.Lock()\n\tdefer r.lock.Unlock()\n\n\tif r.urls == nil {\n\t\tr.urls = make(map[string]*func(http.ResponseWriter, *http.Request))\n\t}\n\tr.urls[url] = &handler\n\thttp.HandleFunc(url, handler)\n}", "func RegisterAdminHandleFunc(pattern string, handler func(http.ResponseWriter, *http.Request)) {\n\tapiHandlerStore[pattern] = NewAPIHandler(handler)\n\tlog.StartLogger.Infof(\"[admin server] [register api] register a new api %s\", pattern)\n}", "func 
(fn DepositNewFileHandlerFunc) Handle(params DepositNewFileParams) middleware.Responder {\n\treturn fn(params)\n}", "func (f RouteHandlerFunc) RouteHandle(rm *RouteMatch) { f(rm) }", "func (socket *MockSocket) AddEventHandler(\n\thandler socket.EventHandler,\n) {\n}", "func HandleFuncWithCode(code int) func(w http.ResponseWriter, r *http.Request) {\n\treturn func(w http.ResponseWriter, r *http.Request) {\n\t\tReplyWithCode(w, code)\n\t}\n}", "func (fn EchoHandlerFunc) Handle(params EchoParams) middleware.Responder {\n\treturn fn(params)\n}", "func (e *EventHandlerFuncs) OnAdd(table string, model Model) {\n\tif e.AddFunc != nil {\n\t\te.AddFunc(table, model)\n\t}\n}", "func (fn AddClaimHandlerFunc) Handle(params AddClaimParams) middleware.Responder {\n\treturn fn(params)\n}", "func AddUserHandle(service iface.Service) http.HandlerFunc {\n\treturn func(w http.ResponseWriter, r *http.Request) {\n\t\tpayload := struct {\n\t\t\tName string `json:\"name\"`\n\t\t}{}\n\n\t\terr := json.NewDecoder(r.Body).Decode(&payload)\n\t\tif err != nil {\n\t\t\tFail(w, r, http.StatusBadRequest, \"could not parse payload\")\n\t\t\treturn\n\t\t}\n\n\t\tpayload.Name = strings.TrimSpace(payload.Name)\n\t\tif len(payload.Name) == 0 {\n\t\t\tFail(w, r, http.StatusBadRequest, \"empty name\")\n\t\t\treturn\n\t\t}\n\n\t\tuserID, err := service.AddUser(r.Context(), payload.Name)\n\t\tif err != nil {\n\t\t\tlog.Log(err)\n\t\t\tFail(w, r, http.StatusInternalServerError, \"service failed\")\n\t\t\treturn\n\t\t}\n\n\t\tJSON(w, r, map[string]interface{}{\n\t\t\t\"user_id\": userID,\n\t\t})\n\t}\n}", "func (s *Server) AddHandler(route string, handler http.Handler) {\n\ts.router.Handle(route, handler)\n}", "func (router *Router) PostFunc(path string, handler http.HandlerFunc) {\n\trouter.Handle(\"POST\", path, handler)\n}", "func (h HandlerFunc) Handle(w irc.Writer, ev *irc.Event) {\n\th(w, ev)\n}", "func HandlerFunc(log func(message string, time time.Time, level LogLevel, call CallStack, context 
ContextMap) error) LogHandler {\n\treturn remoteHandler(log)\n}" ]
[ "0.6896334", "0.6849562", "0.67962635", "0.6719276", "0.6719276", "0.6719276", "0.6665553", "0.66522294", "0.66437876", "0.64656377", "0.6382179", "0.63733864", "0.6351293", "0.634102", "0.6330453", "0.6318307", "0.62964743", "0.6292983", "0.6267896", "0.62676644", "0.6267022", "0.62607217", "0.62477607", "0.6232308", "0.62158805", "0.62158805", "0.62029934", "0.61907643", "0.61870885", "0.6182279", "0.6175737", "0.61754864", "0.6174431", "0.6170246", "0.6168716", "0.6159183", "0.6147757", "0.60715795", "0.6070811", "0.6056701", "0.6049589", "0.6047232", "0.5979215", "0.5968772", "0.59487724", "0.5946904", "0.592694", "0.5925211", "0.59250164", "0.59128237", "0.59004617", "0.59004056", "0.59004056", "0.58984596", "0.58356166", "0.5801194", "0.5786414", "0.5781139", "0.5777651", "0.5723876", "0.5697638", "0.5696652", "0.56909126", "0.5667802", "0.56542003", "0.5617993", "0.560046", "0.55976826", "0.5548152", "0.5538665", "0.55303246", "0.5529652", "0.5523013", "0.550802", "0.5501738", "0.5501003", "0.5494699", "0.5494681", "0.54768336", "0.5459503", "0.54441345", "0.5438022", "0.5430188", "0.5411539", "0.5403524", "0.5395093", "0.53928393", "0.5392592", "0.5384636", "0.53841853", "0.537467", "0.5356929", "0.5334893", "0.5313913", "0.5306438", "0.53024465", "0.5287039", "0.5286194", "0.5282241", "0.5278717" ]
0.781103
0
handleMessages reads the connection up to the first newline. Based on this string, it calls the appropriate HandleFunc.
handleMessages считывает соединение до первого перевода строки. На основе этой строки он вызывает соответствующую HandleFunc.
func (e *Endpoint) handleMessages(conn net.Conn) { // Wrap the connection into a buffered reader for easier reading. rw := bufio.NewReadWriter(bufio.NewReader(conn), bufio.NewWriter(conn)) defer conn.Close() // Read from the connection until EOF. Expect a command name as the // next input. Call the handler that is registered for this command. for { log.Print("Receive command '") cmd, err := rw.ReadString('\n') switch { case err == io.EOF: log.Println("Reached EOF - close this connection.\n ---") return case err != nil: log.Println("\nError reading command. Got: '"+cmd+"'\n", err) return } // Trim the request string - ReadString does not strip any newlines. cmd = strings.Trim(cmd, "\n ") log.Println(cmd + "'") // Fetch the appropriate handler function from the 'handler' map and call it. e.mutex.RLock() handleCommand, ok := e.handler[cmd] e.mutex.RUnlock() if !ok { log.Println("Command '" + cmd + "' is not registered.") return } handleCommand(rw) } }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (r *MetricReceiver) handleMessage(addr net.Addr, msg []byte) {\n\tbuf := bytes.NewBuffer(msg)\n\tfor {\n\t\tline, readerr := buf.ReadBytes('\\n')\n\n\t\t// protocol does not require line to end in \\n, if EOF use received line if valid\n\t\tif readerr != nil && readerr != io.EOF {\n\t\t\tr.handleError(fmt.Errorf(\"error reading message from %s: %s\", addr, readerr))\n\t\t\treturn\n\t\t} else if readerr != io.EOF {\n\t\t\t// remove newline, only if not EOF\n\t\t\tif len(line) > 0 {\n\t\t\t\tline = line[:len(line)-1]\n\t\t\t}\n\t\t}\n\n\t\t// Only process lines with more than one character\n\t\tif len(line) > 1 {\n\t\t\tmetric, err := parseLine(line)\n\t\t\tif err != nil {\n\t\t\t\tr.handleError(fmt.Errorf(\"error parsing line %q from %s: %s\", line, addr, err))\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tgo r.Handler.HandleMetric(metric)\n\t\t}\n\n\t\tif readerr == io.EOF {\n\t\t\t// if was EOF, finished handling\n\t\t\treturn\n\t\t}\n\t}\n}", "func HandleMessages(body *string) error {\n\tvar m Message\n\n\terr := json.Unmarshal([]byte(*body), &m)\n\tif err != nil {\n\t\tlog.Printf(\"Error unmarshal sqs message body into %v error:%v\", m, err)\n\t\treturn err\n\t}\n\n\tswitch m.Category {\n\tcase CategoryConsumer:\n\t\treturn unmarshalConsumer(m)\n\tcase CategoryBusiness:\n\t\treturn unmarshalBusiness(m)\n\tcase CategoryAccount:\n\t\treturn unmarshalAccount(m)\n\tcase CategoryCard:\n\t\treturn unmarshalCard(m)\n\t}\n\n\treturn nil\n\n}", "func (c *Conn) handleMessages() {\n\tfor {\n\t\tselect {\n\t\tcase msg := <-c.outputChan:\n\t\t\t_, err := io.WriteString(c.c, msg)\n\t\t\tif err != nil {\n\t\t\t\tlog.Printf(\"error writing to conn %d: %s\\n\", c.id, err)\n\t\t\t}\n\t\tcase <-c.closeChan:\n\t\t\treturn\n\t\t}\n\t}\n}", "func (M *ConnectionManager) handleMessages() {\n\tfor {\n\t\tselect {\n\t\tcase msg := <-M.Messages:\n\t\t\tterr.VerbPrint(outputTo, 5, verb, M.ID, \"RECIEVED:\", msg)\n\t\t}\n\t}\n}", "func (u *Input) goHandleMessages(ctx context.Context) 
{\n\tu.wg.Add(1)\n\n\tgo func() {\n\t\tdefer u.wg.Done()\n\n\t\tdec := decoder.New(u.encoding)\n\t\tbuf := make([]byte, 0, MaxUDPSize)\n\t\tfor {\n\t\t\tmessage, remoteAddr, err := u.readMessage()\n\t\t\tif err != nil {\n\t\t\t\tselect {\n\t\t\t\tcase <-ctx.Done():\n\t\t\t\t\treturn\n\t\t\t\tdefault:\n\t\t\t\t\tu.Errorw(\"Failed reading messages\", zap.Error(err))\n\t\t\t\t}\n\t\t\t\tbreak\n\t\t\t}\n\n\t\t\tif u.OneLogPerPacket {\n\t\t\t\tlog := truncateMaxLog(message)\n\t\t\t\tu.handleMessage(ctx, remoteAddr, dec, log)\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tscanner := bufio.NewScanner(bytes.NewReader(message))\n\t\t\tscanner.Buffer(buf, MaxUDPSize)\n\n\t\t\tscanner.Split(u.splitFunc)\n\n\t\t\tfor scanner.Scan() {\n\t\t\t\tu.handleMessage(ctx, remoteAddr, dec, scanner.Bytes())\n\t\t\t}\n\t\t\tif err := scanner.Err(); err != nil {\n\t\t\t\tu.Errorw(\"Scanner error\", zap.Error(err))\n\t\t\t}\n\t\t}\n\t}()\n}", "func (svc ChatService) HandleConnection(session session.Session, conn session.Connection) {\n\tb, _ := ioutil.ReadAll(conn)\n\treceivedChatMessages = receivedChatMessages + \"<br>\" + string(b[:])\n}", "func (bot *Hitbot) MessageHandler() {\n\tfor {\n\t\t_, p, err := bot.conn.ReadMessage()\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\t\t//log.Printf(\"Message: %v\", string(p)) //debug info\n\t\tif string(p[:3]) == \"2::\" {\n\t\t\tbot.conn.WriteMessage(websocket.TextMessage, []byte(\"2::\"))\n\t\t\t//log.Print(\"Ping!\")\n\t\t\tcontinue\n\t\t} else if string(p[:3]) == \"1::\" {\n\t\t\tlog.Print(\"Connection successful!\")\n\t\t\tfor _, channel := range bot.channels {\n\t\t\t\tbot.joinChannel(channel)\n\t\t\t}\n\t\t\tcontinue\n\t\t} else if string(p[:4]) == \"5:::\" {\n\t\t\tbot.parseMessage(p[4:])\n\t\t}\n\t}\n}", "func HandleReceiver(conn net.Conn) {\n\tdefer conn.Close()\n\tvar buf [1024]byte\n\tfor {\n\t\t// read upto 1024 bytes\n\t\tn, err := conn.Read(buf[0:])\n\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\t\tmsg := 
string(buf[0:n])\n\t\tfmt.Println(\"Messaged received from client: \", msg)\n\t\tmessage := ParseMessage(msg)\n\t\tmessage.ExecuteCommand(conn)\n\t\tbreak\n\t}\n\tfmt.Println(\"Done handle Receiver\", Users)\n\n}", "func (irc *IrcCon) handleIncomingMessages() {\n\tscan := bufio.NewScanner(irc.con)\n\tfor scan.Scan() {\n\t\tmes := ParseMessage(scan.Text())\n\t\tconsumed := false\n\t\tfor _,t := range irc.tr {\n\t\t\tif t.Condition(mes) {\n\t\t\t\tconsumed = t.Action(irc,mes)\n\t\t\t}\n\t\t\tif consumed {\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\tif !consumed {\n\t\t\tirc.Incoming <- mes\n\t\t}\n\t}\n}", "func (s *Socket) handleMessagesIn() {\n\tfor {\n\t\tm := <-s.messagesIn\n\t\tfmt.Printf(\"Receiving message: %v\", m)\n\t\tswitch m.MessageType {\n\t\tcase PLACE_ORDER:\n\t\t\ts.placeOrder(m.Payload)\n\t\tcase CANCEL_ORDER:\n\t\t\ts.cancelOrder(m.Payload)\n\t\tcase SIGNED_DATA:\n\t\t\ts.executeOrder(m.Payload)\n\t\tcase DONE:\n\t\tdefault:\n\t\t\tpanic(\"Unknown message type\")\n\t\t}\n\t}\n}", "func (handler *BotHandler) handleMessages() {\n\thandler.McRunner.WaitGroup.Add(1)\n\tdefer handler.McRunner.WaitGroup.Done()\n\tfor {\n\t\tselect {\n\t\tcase msg := <-handler.McRunner.MessageChannel:\n\t\t\tmessage := message{Timestamp: time.Now().Format(time.RFC3339), Message: msg}\n\t\t\tmessageJSON, _ := json.Marshal(message)\n\t\t\theader := header{Type: \"msg\", Data: messageJSON}\n\t\t\thandler.sock.WriteJSON(header)\n\t\tcase <-handler.killChannel:\n\t\t\treturn\n\t\t}\n\t}\n}", "func receiveMessages(conn net.Conn) {\n\tvar data []byte\n\tbuffer := make([]byte, bufferSize)\n\n\tfor {\n\t\tfor {\n\t\t\tn, err := conn.Read(buffer)\n\t\t\tif err != nil && err == io.EOF {\n\t\t\t\tbreak\n\t\t\t}\n\n\t\t\tbuffer = buffer[:n]\n\t\t\tdata = append(data, buffer...)\n\t\t\tif data[len(data)-1] == endLine {\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\n\t\tfmt.Printf(\"%s\\n\", data[:len(data)-1])\n\t\tdata = make([]byte, 0)\n\t}\n}", "func (s *SlaveNode) handleMessages(featurePipe *os.File) 
{\n\treader := bufio.NewReader(featurePipe)\n\tfor {\n\t\tif msg, err := reader.ReadString('\\n'); err != nil {\n\t\t\treturn\n\t\t} else {\n\t\t\tmsg = strings.TrimRight(msg, \"\\n\")\n\t\t\ts.featureL.Lock()\n\t\t\ts.features[msg] = true\n\t\t\ts.featureL.Unlock()\n\t\t\ts.fileMonitor.Add(msg)\n\t\t}\n\t}\n}", "func (em *EventManager) handleMessages(c chan interface{}) {\n\tlog.Printf(\"Starting message handler routine\")\n\n\tfor {\n\t\t// Load events\n\t\tm, open := <-c\n\t\tif open {\n\t\t\t// Call message handler\n\t\t\tlog.Printf(\"Received message generic\")\n\t\t\tem.handleMessage(m)\n\t\t} else {\n\t\t\t// Exit message handling go-routine\n\t\t\tlog.Println(\"Exiting client routine\")\n\t\t\tbreak\n\t\t}\n\t}\n\tlog.Printf(\"Exiting message handler routine\")\n\tem.wg.Done()\n}", "func (tv *TV) MessageHandler() (err error) {\n\tdefer func() {\n\t\ttv.resMutex.Lock()\n\t\tfor _, ch := range tv.res {\n\t\t\tclose(ch)\n\t\t}\n\t\ttv.res = nil\n\t\ttv.resMutex.Unlock()\n\t}()\n\n\tfor {\n\t\tmt, p, err := tv.ws.ReadMessage()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tif mt != websocket.TextMessage {\n\t\t\tcontinue\n\t\t}\n\n\t\tmsg := Message{}\n\n\t\terr = json.Unmarshal(p, &msg)\n\t\tif err != nil {\n\t\t\tcontinue\n\t\t}\n\n\t\ttv.resMutex.Lock()\n\t\tch := tv.res[msg.ID]\n\t\ttv.resMutex.Unlock()\n\n\t\tch <- msg\n\t}\n}", "func (a *Adapter) handleMessage() {\n\tfor {\n\t\t_, input, err := a.Conn.ReadMessage()\n\t\tif err != nil {\n\t\t\tlog.Errorf(\"Could not read message! %s\", err)\n\t\t\tbreak\n\t\t}\n\n\t\t// Decodes input message\n\t\terr, meta, data := lib.DecodeMessage(&input)\n\t\tif err != nil {\n\t\t\tlog.Errorf(\"Could not decode incoming message! %s\", err)\n\t\t}\n\n\t\tif glob.V_LOG_IO_MSG {\n\t\t\tlog.Infof(\"Message received! 
\\nMeta: %s \\nData: %s\", meta, data)\n\t\t}\n\n\t\tgo a.TraverseCBs(meta, data)\n\t}\n}", "func (g *Gossiper) HandleClientMessages() {\n\tg.ConnectToClient()\n\n\tpacketBytes := make([]byte, buffsize)\n\tmsg := &message.Message{}\n\n\tfor {\n\t\tnRead, _, err := g.clientConn.ReadFromUDP(packetBytes)\n\t\tif err != nil {\n\t\t\tfmt.Println(\"Error: read from buffer failed.\")\n\t\t\tos.Exit(-1)\n\t\t}\n\n\t\tif nRead > 0 {\n\t\t\tprotobuf.Decode(packetBytes, msg)\n\t\t\tprintClientMessage(*msg)\n\t\t\tg.PrintPeers()\n\n\t\t\trumorMsg := message.RumorMessage{Origin: g.name, ID: messageID, Text: msg.Text}\n\t\t\tg.rumorMsgs.AddMessage(g.name, rumorMsg)\n\t\t\tg.newMsgs = append(g.newMsgs, rumorMsg)\n\t\t\tmessageID++\n\t\t\tg.myStatus.SetStatus(g.name, messageID)\n\n\t\t\tpacket := &gossippacket.GossipPacket{Rumor: &rumorMsg}\n\t\t\tgo g.rumorMonger(*packet, nil)\n\t\t}\n\t}\n}", "func (b *Builder) HandleMessages() {\n\tfor {\n\t\tm := <-b.incoming\n\t\tswitch message := m.(type) {\n\t\tcase *rmake.RequiredFileMessage:\n\t\t\tslog.Info(\"Received required file.\")\n\t\t\t//Get a file from another node\n\t\t\tb.newfiles <- message\n\n\t\tcase *rmake.BuilderRequest:\n\t\t\tslog.Info(\"Received builder request.\")\n\t\t\tb.RequestQueue.Push(message)\n\n\t\tcase *rmake.BuilderResult:\n\t\t\tslog.Info(\"Received builder result.\")\n\t\t\tb.HandleBuilderResult(message)\n\n\t\tdefault:\n\t\t\tslog.Warnf(\"Received invalid message type. '%s'\", reflect.TypeOf(message))\n\t\t}\n\t}\n}", "func handle_conn(conn * Connection) {\n for conn.connected {\n messages := conn.Receive()\n if conn.connected && messages != nil {\n for _, message := range messages {\n fmt.Println(\"Received message\", string(message.Serialize()))\n handle_message(conn, message)\n }\n }\n }\n}", "func HandleMessage(msg *WeechatMessage, handler HandleWeechatMessage) error {\n\t// Got an empty message, simply don't process it for now. 
We can figure\n\t// out how to handle this.\n\tif msg == nil {\n\t\tfmt.Printf(\"Got Nil message to handle.\\n\")\n\t\treturn nil\n\t}\n\tswitch msg.Msgid {\n\tcase \"listbuffers\", \"_buffer_opened\":\n\t\t// parse out the list of buffers which are Hda objects.\n\t\tbufffers := msg.Object.Value.(WeechatHdaValue)\n\t\tbuflist := make(map[string]*WeechatBuffer, len(bufffers.Value))\n\n\t\tfor _, each := range bufffers.Value {\n\t\t\tbuf := &WeechatBuffer{\n\t\t\t\tShortName: each[\"short_name\"].Value.(string),\n\t\t\t\tFullName: each[\"full_name\"].Value.(string),\n\t\t\t\tTitle: each[\"title\"].Value.(string),\n\t\t\t\tNumber: each[\"number\"].Value.(int32),\n\t\t\t\tLocalVars: each[\"local_variables\"].Value.(map[WeechatObject]WeechatObject),\n\t\t\t\tLines: make([]*WeechatLine, 0),\n\t\t\t\t// this is essentially a list of strings, pointers,\n\t\t\t\t// the first pointer of which is the buffer' pointer.\n\t\t\t\tPath: each[\"__path\"].Value.([]string)[1],\n\t\t\t}\n\t\t\tbuflist[buf.Path] = buf\n\t\t}\n\n\t\thandler.HandleListBuffers(buflist)\n\n\tcase \"_buffer_line_added\":\n\t\tfor _, each := range msg.Object.Value.(WeechatHdaValue).Value {\n\t\t\taddLine(handler, each)\n\t\t}\n\tcase \"listlines\":\n\t\tlines := msg.Object.Value.(WeechatHdaValue).Value\n\t\tfor i := len(lines) - 1; i >= 0; i-- {\n\t\t\taddLine(handler, lines[i])\n\t\t}\n\tcase \"nicklist\", \"_nicklist\":\n\t\t// handle list of nicks.\n\t\tvar nicks []*WeechatNick\n\t\tnickValues := msg.Object.Value.(WeechatHdaValue).Value\n\t\tvar buffer = \"default\"\n\t\tfor _, val := range nickValues {\n\n\t\t\titem := &WeechatNick{\n\t\t\t\tName: val[\"name\"].as_string(),\n\t\t\t\tColor: val[\"color\"].as_string(),\n\t\t\t\tLevel: val[\"level\"].as_int(),\n\t\t\t\tPrefix: val[\"prefix\"].as_string(),\n\t\t\t\tPrefixColor: val[\"prefix_color\"].as_string(),\n\t\t\t\tGroup: val[\"group\"].as_bool(),\n\t\t\t\tVisible: val[\"visible\"].as_bool(),\n\t\t\t}\n\n\t\t\tnicks = append(nicks, item)\n\t\t\tbuffer 
= val[\"__path\"].Value.([]string)[2]\n\t\t}\n\t\thandler.HandleNickList(buffer, nicks)\n\tcase \"error\":\n\t\thandler.Default(msg)\n\tdefault:\n\t\thandler.Default(msg)\n\t}\n\treturn nil\n}", "func receiveMessages(conn *websocket.Conn) {\n\tdefer disconnect(conn)\n\tfor {\n\t\tvar demarshaled struct {\n\t\t\tCommand string\n\t\t\tBody string\n\t\t\tClient models.Client\n\t\t}\n\t\terr := conn.ReadJSON(&demarshaled)\n\t\tif err != nil {\n\t\t\tlog.Println(\"Error: Unable to read message from client\")\n\t\t\tlog.Println(\"Disconnecting client...\")\n\t\t\tbreak\n\t\t}\n\t\tmessage := &models.Message{\n\t\t\tCommand: demarshaled.Command,\n\t\t\tBody: demarshaled.Body,\n\t\t\tClient: &demarshaled.Client,\n\t\t}\n\t\trequest := serverRequest{\n\t\t\tMessage: message,\n\t\t\tClient: models.CloneClient(clients[conn]),\n\t\t}\n\n\t\tswitch command := message.GetCommand(); command {\n\t\tcase \"login\":\n\t\t\tloginRequests <- request\n\t\tcase \"newuser\":\n\t\t\tnewUserRequests <- request\n\t\tcase \"send\":\n\t\t\tsendRequests <- request\n\t\tcase \"logout\":\n\t\t\tlogoutRequests <- request\n\t\tcase \"help\":\n\t\t\thelpRequests <- request\n\t\tdefault:\n\t\t\tlog.Println(\"Received unrecognized command -\", command, \"- from client\")\n\t\t}\n\t}\n}", "func (c *Client) Read(data string) (err error) {\n\tmsg, err := parseMessage(data)\n\tif err != nil {\n\t\treturn\n\t}\n\n\tm, ok := c.events[Unknown]\n\tif ok {\n\t\tfor _, f := range m {\n\t\t\tf(c, msg)\n\t\t}\n\t}\n\n\tif msg.Command == Unknown {\n\t\treturn // Already called these handlers.\n\t}\n\n\tif m, ok = c.events[msg.Command]; !ok {\n\t\treturn\n\t}\n\n\tfor _, f := range m {\n\t\tf(c, msg)\n\t}\n\n\treturn\n}", "func (c *Conn) handleConnection() {\n\tdefer c.Close()\n\n\tfmt.Fprintf(c.c, welcomeText, c.username)\n\n\tgo c.handleMessages()\n\n\tscanner := bufio.NewScanner(c.c)\n\tfor scanner.Scan() {\n\t\tinput := scanner.Text()\n\n\t\tif input == \"\" {\n\t\t\tcontinue\n\t\t}\n\n\t\tif input[0] == '/' 
{\n\t\t\tif !c.handleCommand(input) {\n\t\t\t\treturn\n\t\t\t}\n\t\t\tcontinue\n\t\t}\n\t\tc.Announce(input)\n\t}\n\tif err := scanner.Err(); err != nil {\n\t\tlog.Print(\"error scanning lines:\", err)\n\t}\n}", "func HandleNewMessages(conn io.ReadWriteCloser, msgs chan<- *messages.Message, welcomes chan<- *messages.ArborMessage) {\n\treadMessages := messages.MakeMessageReader(conn)\n\tdefer close(msgs)\n\tfor fromServer := range readMessages {\n\t\tswitch fromServer.Type {\n\t\tcase messages.WELCOME:\n\t\t\twelcomes <- fromServer\n\t\t\tclose(welcomes)\n\t\t\twelcomes = nil\n\t\tcase messages.NEW_MESSAGE:\n\t\t\t// add the new message\n\t\t\tmsgs <- fromServer.Message\n\t\tdefault:\n\t\t\tlog.Println(\"Unknown message type: \", fromServer.String)\n\t\t\tcontinue\n\t\t}\n\t}\n}", "func readLoop(conn Connection, finish *sync.WaitGroup) {\n defer func() {\n if p := recover(); p != nil {\n holmes.Error(\"panics: %v\", p)\n }\n finish.Done()\n conn.Close()\n }()\n\n for conn.IsRunning() {\n select {\n case <-conn.GetCloseChannel():\n return\n\n default:\n msg, err := conn.GetMessageCodec().Decode(conn)\n if err != nil {\n holmes.Error(\"Error decoding message %v\", err)\n if _, ok := err.(ErrorUndefined); ok {\n // update heart beat timestamp\n conn.SetHeartBeat(time.Now().UnixNano())\n continue\n }\n return\n }\n\n // update heart beat timestamp\n conn.SetHeartBeat(time.Now().UnixNano())\n handler := GetHandler(msg.MessageNumber())\n if handler == nil {\n if conn.GetOnMessageCallback() != nil {\n holmes.Info(\"Message %d call onMessage()\", msg.MessageNumber())\n conn.GetOnMessageCallback()(msg, conn)\n } else {\n holmes.Warn(\"No handler or onMessage() found for message %d\", msg.MessageNumber())\n }\n continue\n }\n\n // send handler to handleLoop\n conn.GetMessageHandlerChannel()<- MessageHandler{msg, handler}\n }\n }\n}", "func MessagesHandler(w http.ResponseWriter, r *http.Request) {\n\tclient := context.Get(r, \"redis.Client\").(*redis.Client)\n\tu := 
url.Values{}\n\n\tu.Set(\"a\", \"sf-muni\")\n\n\trouteTags := r.URL.Query().Get(\"route_tags\")\n\tif routeTags != \"\" {\n\t\troutes := strings.Split(routeTags, \",\")\n\t\tfor n := range routes {\n\t\t\tu.Add(\"r\", routes[n])\n\t\t}\n\t}\n\n\tbody, err := getFromCache(client, r.URL.String())\n\tif err == nil {\n\t\tfmt.Fprintf(w, \"%s\", body)\n\t\treturn\n\t}\n\n\tbody, err = fetch(\"messages\", u)\n\tif err != nil {\n\t\thttp.Error(w, http.StatusText(http.StatusInternalServerError), http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\twriteToCache(client, r.URL.String(), body)\n\n\tfmt.Fprintf(w, \"%s\", body)\n}", "func handleStrings(rw *bufio.ReadWriter) {\r\n\t// Receive a string.\r\n\tlog.Print(\"Receive STRING message:\")\r\n\ts, err := rw.ReadString('\\n')\r\n\tif err != nil {\r\n\t\tlog.Println(\"Cannot read from connection.\\n\", err)\r\n\t}\r\n\ts = strings.Trim(s, \"\\n \")\r\n\tlog.Println(s)\r\n\t_, err = rw.WriteString(\"Thank you.\\n\")\r\n\tif err != nil {\r\n\t\tlog.Println(\"Cannot write to connection.\\n\", err)\r\n\t}\r\n\terr = rw.Flush()\r\n\tif err != nil {\r\n\t\tlog.Println(\"Flush failed.\", err)\r\n\t}\r\n}", "func processMessages(conn net.Conn, c chan *packetInfo, t *Tracker) {\n\tdefer t.remove(conn)\n\n\tscanner := bufio.NewScanner(conn)\n\tscanner.Split(bufio.ScanBytes)\n\n\tvar validMagic bool\n\tvar numReadBytes int\n\tneedFrame := true\n\n\tvar b bytes.Buffer\n\tvar mb bytes.Buffer\n\n\tfor {\n\t\tfor scanner.Scan() {\n\t\t\tscanBytes := scanner.Bytes()\n\t\t\tb.Write(scanBytes)\n\n\t\t\tif needFrame {\n\t\t\t\tif b.Len() >= headerLength {\n\t\t\t\t\tvalidMagic, numReadBytes = processFrame(&b)\n\n\t\t\t\t\tif !validMagic {\n\t\t\t\t\t\tfmt.Fprintf(os.Stderr, \"[Error] %s\\n\", \"invalid magic\")\n\t\t\t\t\t\treturn\n\t\t\t\t\t}\n\n\t\t\t\t\tif numReadBytes <= 0 || numReadBytes > maxMessageLength {\n\t\t\t\t\t\tfmt.Fprintf(os.Stderr, \"[Error] %s\\n\", \"invalid message 
length\")\n\t\t\t\t\t\treturn\n\t\t\t\t\t}\n\n\t\t\t\t\tneedFrame = false\n\t\t\t\t}\n\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tif b.Len() >= numReadBytes {\n\t\t\t\tmsg := make([]byte, numReadBytes)\n\t\t\t\tb.Read(msg)\n\n\t\t\t\tmb.Write(msg)\n\n\t\t\t\tneedFrame = true\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\n\t\tif err := scanner.Err(); err != nil {\n\t\t\tfmt.Fprintf(os.Stderr, \"[Error] %s\\n\", err.Error())\n\t\t\tbreak\n\t\t}\n\n\t\tif mb.Len() == 0 {\n\t\t\tbreak\n\t\t}\n\n\t\t// Extend the deadline, we got a valid full message.\n\t\tconn.SetDeadline(time.Now().Add(deadline))\n\n\t\tif err := sendToPacketInfoChan(&mb, conn, c, t); err != nil {\n\t\t\tfmt.Fprintf(os.Stderr, \"[Error] %s\\n\", err.Error())\n\t\t\tbreak\n\t\t}\n\t}\n}", "func (p *MultiLineParser) Handle(input *DecodedInput) {\n\tp.inputChan <- input\n}", "func (serv *BusServer) handleConnection(conn net.Conn) {\n\n\t// create a buffered reader.\n\treader := bufio.NewReader(conn)\n\n\t// attempt to read a line.\n\tfor {\n\t\tline, err := reader.ReadString('\\n')\n\n\t\t// error occured before reaching delimiter.\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\n\t\t// no error. 
handle the line.\n\t\tserv.readHandler(serv, conn, line)\n\n\t}\n}", "func reader(conn *websocket.Conn ) {\n\tfor {\n\t\t// read in a message\n\t\tmessageType, p, err := conn.ReadMessage()\n\t\tif err != nil && len(p) != 0 {\n\t\t\talltime <- (string)(p)\n\t\t\tif websocket.IsCloseError(err, websocket.CloseNormalClosure) {\n\t\t\t\tlog.Printf(\"error: %v\", err)\n\t\t\t}\n\t\t\tlog.Printf(\"error: %v\", err)\n\t\t\treturn\n\t\t}\n\t\tprocessMessage(messageType,string(p))\n\n\t}\n}", "func (nc *NetClient) handleMessage(m *arbor.ProtocolMessage) {\n\tswitch m.Type {\n\tcase arbor.NewMessageType:\n\t\tif !nc.Archive.Has(m.UUID) {\n\t\t\tif nc.receiveHandler != nil {\n\t\t\t\tnc.receiveHandler(m.ChatMessage)\n\t\t\t\t// ask Notifier to handle the message\n\t\t\t\tnc.Notifier.Handle(nc, m.ChatMessage)\n\t\t\t}\n\t\t\tif m.Parent != \"\" && !nc.Archive.Has(m.Parent) {\n\t\t\t\tnc.Query(m.Parent)\n\t\t\t}\n\t\t}\n\tcase arbor.WelcomeType:\n\t\tif !nc.Has(m.Root) {\n\t\t\tnc.Query(m.Root)\n\t\t}\n\t\tfor _, recent := range m.Recent {\n\t\t\tif !nc.Has(recent) {\n\t\t\t\tnc.Query(recent)\n\t\t\t}\n\t\t}\n\tcase arbor.MetaType:\n\t\tnc.HandleMeta(m.Meta)\n\t}\n}", "func handleConnection(conn *net.Conn, inputChannel chan string, KeyChannel chan string) {\n\treader := bufio.NewReader(*conn)\n\tfor {\n\t\tmsg, er := reader.ReadString('\\n')\n\t\tif er != nil {\n\t\t\tcontinue\n\t\t}\n\t\tif msg[0] == 'k' {\n\t\t\t//Receive key\n\t\t\tKeyChannel <- msg\n\t\t} else {\n\t\t\tinputChannel <- msg\n\t\t}\n\t}\n}", "func (s *Serve) HandleMessage(msg *nsq.Message) (err error) {\n\tif string(msg.Body) == \"TOBEFAILED\" {\n\t\treturn errors.New(\"fail this message\")\n\t}\n\n\tvar m url.Values\n\terr = json.Unmarshal(msg.Body, &m)\n\tif err != nil {\n\t\tfmt.Println(err)\n\t\treturn\n\t}\n\n\tresC <- m\n\treturn\n}", "func ReadMessage(conn *websocket.Conn, stopChan chan<- bool) {\n\tfor {\n\t\treader := bufio.NewReader(os.Stdin)\n\t\ttext, err := reader.ReadString('\\n')\n\t\tif err != 
nil {\n\t\t\tfmt.Fprintln(os.Stderr, err)\n\t\t\tstopChan <- true\n\t\t\treturn\n\t\t}\n\t\terr = conn.WriteMessage(websocket.TextMessage, []byte(text))\n\t\tif err != nil {\n\t\t\tfmt.Println(err)\n\t\t\treturn\n\t\t}\n\t}\n}", "func (self *OFSwitch) handleMessages(dpid net.HardwareAddr, msg util.Message) {\n\tlog.Debugf(\"Received message: %+v, on switch: %s\", msg, dpid.String())\n\n\tswitch t := msg.(type) {\n\tcase *common.Header:\n\t\tswitch t.Header().Type {\n\t\tcase openflow13.Type_Hello:\n\t\t\t// Send Hello response\n\t\t\th, err := common.NewHello(4)\n\t\t\tif err != nil {\n\t\t\t\tlog.Errorf(\"Error creating hello message\")\n\t\t\t}\n\t\t\tself.Send(h)\n\n\t\tcase openflow13.Type_EchoRequest:\n\t\t\t// Send echo reply\n\t\t\tres := openflow13.NewEchoReply()\n\t\t\tself.Send(res)\n\n\t\tcase openflow13.Type_EchoReply:\n\t\t\tself.lastUpdate = time.Now()\n\n\t\tcase openflow13.Type_FeaturesRequest:\n\n\t\tcase openflow13.Type_GetConfigRequest:\n\n\t\tcase openflow13.Type_BarrierRequest:\n\n\t\tcase openflow13.Type_BarrierReply:\n\n\t\t}\n\tcase *openflow13.ErrorMsg:\n\t\terrMsg := GetErrorMessage(t.Type, t.Code, 0)\n\t\tmsgType := GetErrorMessageType(t.Data)\n\t\tlog.Errorf(\"Received OpenFlow1.3 error: %s on message %s\", errMsg, msgType)\n\t\tresult := MessageResult{\n\t\t\tsucceed: false,\n\t\t\terrType: t.Type,\n\t\t\terrCode: t.Code,\n\t\t\txID: t.Xid,\n\t\t\tmsgType: UnknownMessage,\n\t\t}\n\t\tself.publishMessage(t.Xid, result)\n\n\tcase *openflow13.VendorHeader:\n\t\tlog.Debugf(\"Received Experimenter message, VendorType: %d, ExperimenterType: %d, VendorData: %+v\", t.Vendor, t.ExperimenterType, t.VendorData)\n\t\tswitch t.ExperimenterType {\n\t\tcase openflow13.Type_TlvTableReply:\n\t\t\treply := t.VendorData.(*openflow13.TLVTableReply)\n\t\t\tstatus := TLVTableStatus(*reply)\n\t\t\tself.tlvMgr.TLVMapReplyRcvd(self, &status)\n\t\tcase openflow13.Type_BundleCtrl:\n\t\t\tresult := MessageResult{\n\t\t\t\txID: t.Header.Xid,\n\t\t\t\tsucceed: 
true,\n\t\t\t\tmsgType: BundleControlMessage,\n\t\t\t}\n\t\t\treply := t.VendorData.(*openflow13.BundleControl)\n\t\t\tself.publishMessage(reply.BundleID, result)\n\t\t}\n\n\tcase *openflow13.SwitchFeatures:\n\t\tswitch t.Header.Type {\n\t\tcase openflow13.Type_FeaturesReply:\n\t\t\tgo func() {\n\t\t\t\tswConfig := openflow13.NewSetConfig()\n\t\t\t\tswConfig.MissSendLen = 128\n\t\t\t\tself.Send(swConfig)\n\t\t\t\tself.Send(openflow13.NewSetControllerID(self.ctrlID))\n\t\t\t}()\n\t\t}\n\n\tcase *openflow13.SwitchConfig:\n\t\tswitch t.Header.Type {\n\t\tcase openflow13.Type_GetConfigReply:\n\n\t\tcase openflow13.Type_SetConfig:\n\n\t\t}\n\tcase *openflow13.PacketIn:\n\t\tlog.Debugf(\"Received packet(ofctrl): %+v\", t)\n\t\t// send packet rcvd callback\n\t\tself.app.PacketRcvd(self, (*PacketIn)(t))\n\n\tcase *openflow13.FlowRemoved:\n\n\tcase *openflow13.PortStatus:\n\t\t// FIXME: This needs to propagated to the app.\n\tcase *openflow13.PacketOut:\n\n\tcase *openflow13.FlowMod:\n\n\tcase *openflow13.PortMod:\n\n\tcase *openflow13.MultipartRequest:\n\n\tcase *openflow13.MultipartReply:\n\t\tlog.Debugf(\"Received MultipartReply\")\n\t\trep := (*openflow13.MultipartReply)(t)\n\t\tif self.monitorEnabled {\n\t\t\tkey := fmt.Sprintf(\"%d\", rep.Xid)\n\t\t\tch, found := monitoredFlows.Get(key)\n\t\t\tif found {\n\t\t\t\treplyChan := ch.(chan *openflow13.MultipartReply)\n\t\t\t\treplyChan <- rep\n\t\t\t}\n\t\t}\n\t\t// send packet rcvd callback\n\t\tself.app.MultipartReply(self, rep)\n\tcase *openflow13.VendorError:\n\t\terrData := t.ErrorMsg.Data.Bytes()\n\t\tresult := MessageResult{\n\t\t\tsucceed: false,\n\t\t\terrType: t.Type,\n\t\t\terrCode: t.Code,\n\t\t\texperimenter: int32(t.ExperimenterID),\n\t\t\txID: t.Xid,\n\t\t}\n\t\texperimenterID := binary.BigEndian.Uint32(errData[8:12])\n\t\terrMsg := GetErrorMessage(t.Type, t.Code, experimenterID)\n\t\texperimenterType := binary.BigEndian.Uint32(errData[12:16])\n\t\tswitch experimenterID {\n\t\tcase 
openflow13.ONF_EXPERIMENTER_ID:\n\t\t\tswitch experimenterType {\n\t\t\tcase openflow13.Type_BundleCtrl:\n\t\t\t\tbundleID := binary.BigEndian.Uint32(errData[16:20])\n\t\t\t\tresult.msgType = BundleControlMessage\n\t\t\t\tself.publishMessage(bundleID, result)\n\t\t\t\tlog.Errorf(\"Received Vendor error: %s on ONFT_BUNDLE_CONTROL message\", errMsg)\n\t\t\tcase openflow13.Type_BundleAdd:\n\t\t\t\tbundleID := binary.BigEndian.Uint32(errData[16:20])\n\t\t\t\tresult.msgType = BundleAddMessage\n\t\t\t\tself.publishMessage(bundleID, result)\n\t\t\t\tlog.Errorf(\"Received Vendor error: %s on ONFT_BUNDLE_ADD_MESSAGE message\", errMsg)\n\t\t\t}\n\t\tdefault:\n\t\t\tlog.Errorf(\"Received Vendor error: %s\", errMsg)\n\t\t}\n\t}\n}", "func (o *handler) handle(client mqtt.Client, msg mqtt.Message) {\r\n\t// We extract the count and write that out first to simplify checking for missing values\r\n\tvar m Message\r\n\tvar resp Session\r\n\tif err := json.Unmarshal(msg.Payload(), &resp); err != nil {\r\n\t\tfmt.Printf(\"Message could not be parsed (%s): %s\", msg.Payload(), err)\r\n\t\treturn\r\n\t}\r\n\tfmt.Println(resp)\r\n\tswitch resp.Type {\r\n\tcase CMDMSG_OFFER:\r\n\t\tenc.Decode(resp.Data, &m)\r\n\t\tNotice(m)\r\n\tcase CMDMSG_DISC:\r\n\t\tvar devcmd DiscoveryCmd\r\n\t\tenc.Decode(resp.Data, &devcmd)\r\n\t\tDiscoveryDev(&devcmd)\r\n\tcase CMDMSG_WAKE:\r\n\t\tvar fing Fing\r\n\t\tenc.Decode(resp.Data, &fing)\r\n\t\twakemac(fing)\r\n\tcase CMDMSG_UPDATE:\r\n\t\tvar newver *versionUpdate\r\n\t\tGetUpdateMyself(newver)\r\n\tcase CMDMSG_MR2:\r\n\t\tvar mr2info Mr2Msg\r\n\t\tenc.Decode(resp.Data, &mr2info)\r\n\t\tMr2HostPort(&mr2info)\r\n\t}\r\n}", "func (srv *Server) handleMessage(msg *Message) error {\n\tswitch msg.msgType {\n\tcase MsgSignalBinary:\n\t\tfallthrough\n\tcase MsgSignalUtf8:\n\t\tfallthrough\n\tcase MsgSignalUtf16:\n\t\tsrv.handleSignal(msg)\n\n\tcase MsgRequestBinary:\n\t\tfallthrough\n\tcase MsgRequestUtf8:\n\t\tfallthrough\n\tcase 
MsgRequestUtf16:\n\t\tsrv.handleRequest(msg)\n\n\tcase MsgRestoreSession:\n\t\treturn srv.handleSessionRestore(msg)\n\tcase MsgCloseSession:\n\t\treturn srv.handleSessionClosure(msg)\n\t}\n\treturn nil\n}", "func (srv *Server) handleMessage(msg *Message) error {\n\tswitch msg.msgType {\n\tcase MsgSignalBinary:\n\t\tfallthrough\n\tcase MsgSignalUtf8:\n\t\tfallthrough\n\tcase MsgSignalUtf16:\n\t\tsrv.handleSignal(msg)\n\n\tcase MsgRequestBinary:\n\t\tfallthrough\n\tcase MsgRequestUtf8:\n\t\tfallthrough\n\tcase MsgRequestUtf16:\n\t\tsrv.handleRequest(msg)\n\n\tcase MsgRestoreSession:\n\t\treturn srv.handleSessionRestore(msg)\n\tcase MsgCloseSession:\n\t\treturn srv.handleSessionClosure(msg)\n\t}\n\treturn nil\n}", "func Handler(conn net.Conn, pubsub *PubSub) {\n\n\treader := bufio.NewReader(conn)\n\n\tdata, err := reader.ReadString('\\n')\n\n\tif err != nil {\n\t\t//log.Fatal(err)\n\t\treturn\n\t}\n\n\tcommand := strings.Split(strings.TrimSuffix(data, \"\\n\"), \" \")\n\n\tswitch command[0] {\n\n\tcase \"PUBLISH\":\n\t\tgo Publish(conn, command, pubsub)\n\n\tcase \"SUBSCRIBE\":\n\t\tgo Subscribe(conn, command, pubsub)\n\t}\n\n}", "func (s *Server) handleRead(ctx context.Context, username string, c *websocket.Conn) error {\n\tvar data Message\n\n\tif err := wsjson.Read(ctx, c, &data); err != nil {\n\t\ts.userMu.Lock()\n\t\ts.unsubscribe(username)\n\t\ts.userMu.Unlock()\n\n\t\treturn err\n\t}\n\n\tdata.Username = username\n\n\ts.log.Info().Msgf(\"received %s message: %s\", data.Username, data.Text)\n\n\ts.writeCh <- data\n\n\treturn nil\n}", "func handleConnection(ms *TLVServ, conn net.Conn) {\n\tvar msgHandler *mtypeInfo\n\tvar ok bool\n\n\tdefer conn.Close()\n\n\tlog.Printf(\"%s connected\\n\", conn.RemoteAddr())\n\n\tfor {\n\t\t// Block till there is data to read.\n\t\tmtype, datalen, readErr := readHeader(conn)\n\t\tif readErr != nil {\n\t\t\tif readErr == io.EOF {\n\t\t\t\tlog.Printf(\"%s disconected\\n\", conn.RemoteAddr())\n\t\t\t\tbreak\n\t\t\t} else 
{\n\t\t\t\tlog.Println(readErr)\n\t\t\t}\n\t\t}\n\n\t\t// datalen might be 0, but that's ok, we'll get back and empty array\n\t\tdata, err := readData(conn, datalen, time.Duration(ms.readTimeout))\n\t\tif err == nil {\n\t\t\t// Get the handler function for this message type (mtype)\n\t\t\tms.mtypeHandlersLock.RLock()\n\t\t\tmsgHandler, ok = ms.mtypeHandlers[mtype]\n\t\t\tms.mtypeHandlersLock.RUnlock()\n\t\t\tif ok == true {\n\t\t\t\tgo handlerRunner(msgHandler, conn, data)\n\t\t\t} else {\n\t\t\t\tlog.Printf(\"missing handler for mtype %v\\n\", mtype)\n\t\t\t}\n\t\t} else {\n\t\t\tlog.Println(err)\n\t\t}\n\t}\n}", "func (u *UnityServer) readMessage() {\n\tfor {\n\t\tdata := make([]byte, 8192)\n\t\tn, err := u.conn.Read(data)\n\t\tif err != nil {\n\t\t\tu.Logger.Errorf(\"Error: Reading socket - %v\", err)\n\t\t\tu.stop <- true\n\t\t\tbreak\n\t\t}\n\t\tu.incoming <- string(data[:n])\n\t}\n}", "func (i *Irc) handleMsg(msg irc.Msg) {\n\tbotMsg := i.buildMessage(msg)\n\n\tswitch msg.Cmd {\n\tcase irc.ERROR:\n\t\tlog.Info().Msgf(\"Received error: \" + msg.Raw)\n\n\tcase irc.PING:\n\t\ti.Client.Out <- irc.Msg{Cmd: irc.PONG}\n\n\tcase irc.PONG:\n\t\t// OK, ignore\n\n\tcase irc.ERR_NOSUCHNICK:\n\t\tfallthrough\n\n\tcase irc.ERR_NOSUCHCHANNEL:\n\t\tfallthrough\n\n\tcase irc.RPL_MOTD:\n\t\tfallthrough\n\n\tcase irc.RPL_NAMREPLY:\n\t\tfallthrough\n\n\tcase irc.RPL_TOPIC:\n\t\tfallthrough\n\n\tcase irc.KICK:\n\t\tfallthrough\n\n\tcase irc.TOPIC:\n\t\tfallthrough\n\n\tcase irc.MODE:\n\t\tfallthrough\n\n\tcase irc.JOIN:\n\t\tfallthrough\n\n\tcase irc.PART:\n\t\tfallthrough\n\n\tcase irc.NOTICE:\n\t\tfallthrough\n\n\tcase irc.NICK:\n\t\tfallthrough\n\n\tcase irc.RPL_WHOREPLY:\n\t\tfallthrough\n\n\tcase irc.RPL_ENDOFWHO:\n\t\tbotMsg.Kind = bot.Event\n\t\ti.event(i, bot.Event, botMsg)\n\n\tcase irc.PRIVMSG:\n\t\tbotMsg.Kind = bot.Message\n\t\ti.event(i, bot.Message, botMsg)\n\n\tcase irc.QUIT:\n\t\tos.Exit(1)\n\n\tdefault:\n\t\tcmd := 
irc.CmdNames[msg.Cmd]\n\t\tlog.Debug().Msgf(\"(%s) %s\", cmd, msg.Raw)\n\t}\n}", "func (b *unixBus) HandleMessage(uuid uuid.UUID, msg interface{}) (<-chan interface{}, error) {\n\tsockAddr := fmt.Sprintf(\"%s/%v.sock\", endpointsDir, uuid)\n\n\tc, err := net.Dial(\"unix\", sockAddr)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tenc := gob.NewEncoder(c)\n\terr = enc.Encode(message{Type: messageTypeSend, Message: msg})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tch := make(chan interface{})\n\n\tgo func() {\n\t\tdefer close(ch)\n\t\tdefer c.Close()\n\n\t\tdec := gob.NewDecoder(c)\n\t\tvar r message\n\t\tfor {\n\t\t\terr := dec.Decode(&r)\n\t\t\tif err != nil {\n\t\t\t\tbreak\n\t\t\t}\n\n\t\t\tswitch r.Type {\n\t\t\tcase messageTypeResult:\n\t\t\t\tch <- r.Message\n\n\t\t\tdefault:\n\t\t\t\tlog.Printf(\"Invalid response so ignoring\")\n\t\t\t}\n\t\t}\n\t}()\n\n\treturn ch, nil\n}", "func handlerMsg(msg []byte) {\n\tchats <- string(msg)\n}", "func handleMessage(s *Server, p *TCPPeer) {\n\t// Disconnect the peer when we break out of the loop.\n\tdefer func() {\n\t\ts.unregister <- p\n\t}()\n\n\tfor {\n\t\tmsg := <-p.receive\n\t\tcommand := msg.commandType()\n\n\t\ts.logger.Printf(\"IN :: %d :: %s :: %v\", p.id(), command, msg)\n\n\t\tswitch command {\n\t\tcase cmdVersion:\n\t\t\tresp := s.handleVersionCmd(msg, p)\n\t\t\tp.nonce = msg.Payload.(*payload.Version).Nonce\n\t\t\tp.send <- resp\n\t\tcase cmdAddr:\n\t\t\ts.handleAddrCmd(msg, p)\n\t\tcase cmdGetAddr:\n\t\t\ts.handleGetaddrCmd(msg, p)\n\t\tcase cmdInv:\n\t\t\tresp := s.handleInvCmd(msg, p)\n\t\t\tp.send <- resp\n\t\tcase cmdBlock:\n\t\t\ts.handleBlockCmd(msg, p)\n\t\tcase cmdConsensus:\n\t\tcase cmdTX:\n\t\tcase cmdVerack:\n\t\t\tgo s.sendLoop(p)\n\t\tcase cmdGetHeaders:\n\t\tcase cmdGetBlocks:\n\t\tcase cmdGetData:\n\t\tcase cmdHeaders:\n\t\t}\n\t}\n}", "func (g *Gossiper) SimpleHandleClientMessages() {\n\tg.ConnectToClient()\n\n\tpacketBytes := make([]byte, buffsize)\n\tmsg := 
&message.Message{}\n\n\tfor {\n\t\tnRead, _, err := g.clientConn.ReadFromUDP(packetBytes)\n\t\tif err != nil {\n\t\t\tfmt.Println(\"Error: read from buffer failed.\")\n\t\t\tos.Exit(-1)\n\t\t}\n\n\t\tif nRead > 0 {\n\t\t\tprotobuf.Decode(packetBytes, msg)\n\t\t\tprintClientMessage(*msg)\n\t\t\tg.PrintPeers()\n\t\t\tsimpleMsg := &message.SimpleMessage{OriginalName: g.name, RelayPeerAddr: g.udpAddr.String(), Contents: msg.Text}\n\t\t\t(*g).PrintPeers()\n\t\t\tpacket := &gossippacket.GossipPacket{Simple: simpleMsg}\n\t\t\tg.broadcastSimpleMessage(packet, nil)\n\t\t}\n\t}\n}", "func (mp *MessageProcessor) Handle(event *irc.Event) {\n\tmessage := event.Message()\n\tmessage = mp.StripInput(message)\n\n\tpaintCommand, err := mp.Parse(message)\n\tif err == nil {\n\t\tmp.Chan <- *paintCommand\n\t} else {\n\t\tfmt.Printf(\"ERROR: command %q encountered the following error: %q\", message, err)\n\t}\n\n}", "func HandleConsoleMessage(ctx *context.LegscContext, msgBytes []byte) {\n\tmsg := &message.Console{}\n\terr := message.Unmarshal(msgBytes, msg)\n\tif err != nil {\n\t\tlog.Error(\"failed in parsing message: \", err)\n\t\tsendConsoleError(err, msg, ctx)\n\t\treturn\n\t}\n\n\tswitch msg.State {\n\tcase message.ConsoleStartState:\n\t\thandleStartMessage(ctx, msg)\n\tcase message.ConsoleCloseState:\n\t\thandleCloseMessage(ctx, msg)\n\tcase message.ConsoleInputState:\n\t\thandleInputMessage(ctx, msg)\n\t}\n}", "func HandleMessage(w http.ResponseWriter, r *http.Request) {\n\tif token == \"\" {\n\t\tlog.Printf(\"no bot token available\")\n\t\treturn\n\t}\n\n\tbot, err := tgbotapi.NewBotAPI(token)\n\tif err != nil {\n\t\tlog.Printf(\"could not create telegram bot: %v\\n\", err)\n\t\treturn\n\t}\n\n\tdata, err := ioutil.ReadAll(r.Body)\n\tif err != nil {\n\t\tlog.Printf(\"could not read request: %v\\n\", err)\n\t\treturn\n\t}\n\n\tvar update tgbotapi.Update\n\n\tif err := json.Unmarshal(data, &update); err != nil {\n\t\tlog.Printf(\"could not parse request: %v\\n\", 
err)\n\t\treturn\n\t}\n\n\tcid := update.Message.Chat.ID\n\n\tswitch {\n\tcase update.Message.Voice != nil:\n\t\thandleVoiceMessage(bot, cid, update.Message.Voice)\n\tcase update.Message.Text != \"\":\n\t\thandleTextMessage(bot, cid, update.Message.Text)\n\t}\n}", "func (th *TailHandler) HandleMessage(m *nsq.Message) error {\n\tth.messagesShown++\n\tif err := th.printMessage(th.writer, m); err != nil {\n\t\tfmt.Printf(\"err %v\\n\", err)\n\t\treturn err\n\t}\n\tif th.totalMessages > 0 && th.totalMessages < th.messagesShown {\n\t\tos.Exit(0)\n\t}\n\treturn nil\n}", "func handleConnection(conn net.Conn) {\n\tdefer conn.Close()\n\n\tmsg := make([]byte, 4096)\n\tn, _ := conn.Read(msg)\n\n\tif !HandleMessage(msg[:n], conn) {\n\t\tfmt.Println(\"Error with connection: \" + conn.RemoteAddr().String())\n\t}\n\n}", "func readClientMessages(ws *websocket.Conn, msgChan chan Message) {\n\tfor {\n\t\tvar message Message\n\n\t\t// check to see if there's anything shouldAllowPrompt from the server and if so\n\t\t// send to our channel\n\t\terr := websocket.JSON.Receive(ws, &message)\n\n\t\tif err != nil {\n\t\t\tfmt.Printf(\"Error on receiving json on web socket connection %s\\n\", err.Error())\n\t\t\treturn\n\t\t}\n\n\t\tmsgChan <- message\n\t}\n}", "func (s *Server) handleConnection(sock net.Conn) {\n\tmsg := make([]byte, 4096)\n\n\tn, _ := sock.Read(msg)\n\n\tlog.Printf(\"Incoming msg: %s\\n\", string(msg[:n]))\n\n\ts.SIPMessageReaction(sock, string(msg[:n]))\n}", "func (messenger *TCPMessenger) handleConn(c *net.TCPConn, channel chan Message) {\r\n\tmsg, err := messenger.recvMessage(c)\r\n\tif err != nil {\r\n\t\tlog.Println(\"[ERROR] Failed to rcvmessage: \" + err.Error())\r\n\t}\r\n\tif err == io.EOF {\r\n\t\tlog.Println(\"[DEBUG] Closing connection.\")\r\n\t\tc.Close()\r\n\t\treturn\r\n\t}\r\n\t// log.Println(\"[DEBUGMessage recieved \", msg)\r\n\t// Quesues messages for processing in the channel\r\n\tchannel <- msg\r\n\r\n}", "func (s *Server) handleRead(pubKey 
credentials.StaticSizedPublicKey, done <-chan struct{}) {\n\ttr, err := s.connMgr.getTransport(pubKey)\n\tif err != nil {\n\t\treturn\n\t}\n\n\tfor {\n\t\tselect {\n\t\tcase in := <-tr.Read():\n\t\t\t// Unmarshal the message\n\t\t\tmsg := &message.Message{}\n\t\t\tif err := UnmarshalProtoMessage(in, msg); err != nil {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\t// Handle the message request or response\n\t\t\tswitch ex := msg.Exchange.(type) {\n\t\t\tcase *message.Message_Request:\n\t\t\t\ts.handleMessageRequest(pubKey, ex.Request)\n\t\t\tcase *message.Message_Response:\n\t\t\t\ts.handleMessageResponse(ex.Response)\n\t\t\tdefault:\n\t\t\t\t// log.Println(\"Invalid message type\")\n\t\t\t}\n\t\tcase <-done:\n\t\t\treturn\n\t\t}\n\t}\n}", "func handleMessage(msg *game.InMessage, ws *websocket.Conn, board *game.Board) {\n\tfmt.Println(\"Message Got: \", msg)\n\n}", "func handleMessages() {\n\tfor {\n\t\tmsg := <-broadcaster\n\n\t\tstoreInRedis(msg)\n\t\tmessageClients(msg)\n\t}\n}", "func (api *APIv1) MessagesHandler(w http.ResponseWriter, r *http.Request) {\n\twr := &HTTPResponder{w, r, \"\"}\n\n\tconn, err := api.upgrader.Upgrade(w, r, nil)\n\tif err != nil {\n\t\twr.Error(HTTPErr, err)\n\t\treturn\n\t}\n\tid, messages := api.messages.Subscribe()\n\n\tfor {\n\t\tm, ok := <-messages // Blocks until we have a message.\n\t\tif !ok {\n\t\t\t// Channel is now closed.\n\t\t\tbreak\n\t\t}\n\t\tam, _ := json.Marshal(&APIv1Message{m.(*Message).typ, m.(*Message).text})\n\n\t\terr = conn.WriteMessage(websocket.TextMessage, am)\n\t\tif err != nil {\n\t\t\t// Silently unsubscribe, the client has gone away.\n\t\t\tbreak\n\t\t}\n\t}\n\tapi.messages.Unsubscribe(id)\n\tconn.Close()\n}", "func handleConnection(connection net.Conn, log *logs.MultipleLog) {\n\n\tlog.Infof(\"%s just Connected. 
\\n\", connection.RemoteAddr().String())\n\n\tfor {\n\t\tnetData, err := bufio.NewReader(connection).ReadString('\\n')\n\t\tif err != nil {\n\t\t\tlog.Error(\"Listening of:\" + connection.RemoteAddr().String() + \" stopped.\")\n\t\t\treturn\n\t\t}\n\n\t\tlog.Info(connection.RemoteAddr().String() + \" Says : \" + strings.TrimSpace(string(netData)))\n\n\t\tconnection.Write([]byte(string(\"Server: Message recived \\n\")))\n\n\t\tdefer connection.Close()\n\t}\n\n}", "func listenForMessages(in io.Reader, out chan *deviceResponse, closed chan struct{}) {\n\tfor {\n\t\t// stream the reply back in 64 byte chunks\n\t\tchunk := make([]byte, 64)\n\t\tvar reply []byte\n\t\tvar kind uint16\n\t\tfor {\n\t\t\t// Read next chunk\n\t\t\tif _, err := io.ReadFull(in, chunk); err != nil {\n\n\t\t\t\t// Abort if this keepkey has closed its connections\n\t\t\t\tselect {\n\t\t\t\tcase <-closed:\n\t\t\t\t\treturn\n\t\t\t\tdefault:\n\t\t\t\t}\n\t\t\t\tfmt.Println(color.RedString(\"Unable to read chunk from device:\", err)) // TODO: move to device specific log\n\t\t\t\tbreak\n\t\t\t}\n\n\t\t\t//TODO: check transport header\n\n\t\t\t//if it is the first chunk, retreive the reply message type and total message length\n\t\t\tvar payload []byte\n\n\t\t\tif len(reply) == 0 {\n\t\t\t\tkind = binary.BigEndian.Uint16(chunk[3:5])\n\t\t\t\treply = make([]byte, 0, int(binary.BigEndian.Uint32(chunk[5:9])))\n\t\t\t\tpayload = chunk[9:]\n\t\t\t} else {\n\t\t\t\tpayload = chunk[1:]\n\t\t\t}\n\t\t\t// Append to the reply and stop when filled up\n\t\t\tif left := cap(reply) - len(reply); left > len(payload) {\n\t\t\t\treply = append(reply, payload...)\n\t\t\t} else {\n\t\t\t\treply = append(reply, payload[:left]...)\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\n\t\tout <- &deviceResponse{reply, kind}\n\t}\n}", "func (conn *Conn) recv() {\n\tfor {\n\t\ts, err := conn.io.ReadString('\\n')\n\t\tif err != nil {\n\t\t\tlogging.Error(\"irc.recv(): %s\", err.Error())\n\t\t\tconn.shutdown()\n\t\t\treturn\n\t\t}\n\t\ts = 
strings.Trim(s, \"\\r\\n\")\n\t\tlogging.Debug(\"<- %s\", s)\n\n\t\tif line := parseLine(s); line != nil {\n\t\t\tline.Time = time.Now()\n\t\t\tconn.in <- line\n\t\t} else {\n\t\t\tlogging.Warn(\"irc.recv(): problems parsing line:\\n %s\", s)\n\t\t}\n\t}\n}", "func (c *Common) ListenForMessages() {\n\n defer func() {\n c.rp.Stop()\n }()\n\n for {\n for message := range c.receivedMessages {\n c.HandleReceivedMessage(message);\n }\n }\n}", "func (conn *Conn) recv() {\n\tfor {\n\t\ts, err := conn.io.ReadString('\\n')\n\t\tif err != nil {\n\t\t\tif err != io.EOF {\n\t\t\t\tlogging.Error(\"irc.recv(): %s\", err.Error())\n\t\t\t}\n\t\t\t// We can't defer this, because Close() waits for it.\n\t\t\tconn.wg.Done()\n\t\t\tconn.Close()\n\t\t\treturn\n\t\t}\n\t\ts = strings.Trim(s, \"\\r\\n\")\n\t\tlogging.Debug(\"<- %s\", s)\n\n\t\tif line := ParseLine(s); line != nil {\n\t\t\tline.Time = time.Now()\n\t\t\tconn.in <- line\n\t\t} else {\n\t\t\tlogging.Warn(\"irc.recv(): problems parsing line:\\n %s\", s)\n\t\t}\n\t}\n}", "func readFromServer(conn net.Conn) {\n for {\n message, err := bufio.NewReader(conn).ReadString('\\n')\n \n if err != nil {\n fmt.Println(\"Server down:\", err)\n return\n }\n \n message = strings.Trim(string(message), \"\\n\")\n \n fmt.Println(\"\\r<<\", string(message))\n }\n}", "func handle(conn net.Conn) {\n\tscan := bufio.NewReader(conn)\n\tfor {\n\t\tmsg, err := scan.ReadString('\\n') // recv\n\t\tmsg = encrypt(msg)\n\t\tif err != nil {\n\t\t\tprint(err)\n\t\t\tbreak\n\t\t} else if msg == \"Server is now down...\\n\" {\n\t\t\tfmt.Printf(\"\\rServer> %s\\n\", msg)\n\t\t\tconn.Close()\n\t\t\tos.Exit(0)\n\t\t}\n\t\tfmt.Print(\"\\r\" + msg)\n\t\tfmt.Print(\"> \")\n\t}\n}", "func messageListen(c net.Conn) {\n\tfor {\n\t\tbuf := make([]byte, 4096)\n\t\tlength, err := c.Read(buf)\n\t\tif err != nil {\n\t\t\tlog.Printf(\"Fail to read data, %s\\n\", err)\n\t\t\treturn\n\t\t}\n\n\t\tnetMsg := message.NetMessage{}\n\t\terr = json.Unmarshal(buf[:length], 
&netMsg)\n\t\tif err != nil {\n\t\t\tlog.Println(\"Decode message error: \", err)\n\t\t\treturn\n\t\t}\n\n\t\troutes(netMsg.MsgName, []byte(netMsg.Data))\n\t}\n}", "func Parse(message string) (l *msg.Line, err error) {\n\tl, err = lexServerMsg(message)\n\tif err != nil {\n\t\treturn\n\t}\n\tvar output string\n\tvar context string\n\tswitch l.Cmd() {\n\tcase \"NOTICE\":\n\t\ttrail := l.Args()[len(l.Args())-1]\n\t\tif strings.HasPrefix(trail, \"\\001\") &&\n\t\t\tstrings.HasSuffix(trail, \"\\001\") {\n\t\t\tvar query string\n\t\t\toutput, context, query = ctcp(l.Nick(), l.Args())\n\n\t\t\t// create a new argument list to send to the handler\n\t\t\t// the first argument describes what kind of query is\n\t\t\t// being made\n\t\t\told := l.Args()\n\t\t\ttmp := make([]string, len(old)+1)\n\t\t\ttmp[0] = query\n\t\t\tfor i := range old {\n\t\t\t\ttmp[i+1] = old[i]\n\t\t\t}\n\n\t\t\tl.SetArgs(tmp)\n\t\t\tl.SetCmd(\"CTCP\")\n\t\t\tbreak\n\t\t}\n\t\toutput, context = notice(l.Nick(), l.Args())\n\tcase \"NICK\":\n\t\toutput, context = nick(l.Nick(), l.Args())\n\tcase \"MODE\":\n\t\toutput, context = mode(l.Nick(), l.Args())\n\tcase \"PRIVMSG\":\n\t\ttrail := l.Args()[len(l.Args())-1]\n\t\tif strings.HasPrefix(trail, \"\\001\") &&\n\t\t\tstrings.HasSuffix(trail, \"\\001\") {\n\t\t\tvar query string\n\t\t\toutput, context, query = ctcp(l.Nick(), l.Args())\n\n\t\t\t// create a new argument list to send to the handler\n\t\t\t// the first argument describes what kind of query is\n\t\t\t// being made\n\t\t\told := l.Args()\n\t\t\ttmp := make([]string, len(old)+1)\n\t\t\ttmp[0] = query\n\t\t\tfor i := range old {\n\t\t\t\ttmp[i+1] = old[i]\n\t\t\t}\n\n\t\t\tl.SetArgs(tmp)\n\t\t\tl.SetCmd(\"CTCP\")\n\t\t\tbreak\n\t\t}\n\n\t\toutput, context = privMsg(l.Nick(), l.Args())\n\t\tr := \"^\\\\W\"\n\t\tregex := regexp.MustCompile(r)\n\t\tif !regex.MatchString(context) {\n\t\t\tl.SetCmd(\"P2PMSG\")\n\t\t}\n\tcase \"PART\":\n\t\toutput, context = part(l.Nick(), l.Args())\n\tcase 
\"PING\":\n\t\toutput, context = ping(l.Args())\n\tcase \"PONG\":\n\t\t// TODO: Handle so that pongs from the server doesn't\n\t\t// print, but pongs from other users do\n\t\toutput, context = \"\", \"\"\n\tcase \"JOIN\":\n\t\toutput, context = join(l.Nick(), l.Args())\n\tcase \"QUIT\":\n\t\toutput, context = quit(l.Nick(), l.Args())\n\tcase \"328\":\n\t\toutput, context, err = chanUrl(l.Args())\n\tcase \"329\":\n\t\toutput, context, err = chanCreated(l.Args())\n\tcase \"332\":\n\t\toutput, context, err = topic(l.Args())\n\tcase \"333\":\n\t\toutput, context, err = topicSetBy(l.Args())\n\tcase \"353\":\n\t\toutput, context = nickList(l.Args())\n\tcase \"366\":\n\t\toutput, context = nickListEnd(l.Args())\n\tcase \"401\":\n\t\toutput, context = noSuchTarget(l.Args())\n\tcase \"470\":\n\t\toutput, context = forward(l.Args())\n\tdefault:\n\t\t// check for numeric commands\n\t\tr := regexp.MustCompile(\"^\\\\d+$\")\n\t\tif r.MatchString(l.Cmd()) {\n\t\t\toutput, context = numeric(l.Nick(), l.Args())\n\t\t} else {\n\t\t\terr = errors.New(\"Unknown command.\")\n\t\t\treturn\n\t\t}\n\t}\n\tif err != nil {\n\t\treturn\n\t}\n\n\tl.SetOutput(output)\n\tl.SetContext(context)\n\treturn\n}", "func (p *SingleLineParser) Handle(input *DecodedInput) {\n\tp.inputChan <- input\n}", "func handleMessages(messages <-chan amqp.Delivery, handler AmqpHandler, waitGroup *sync.WaitGroup) {\n\tdefer waitGroup.Done()\n\tfor msg := range messages {\n\t\twaitGroup.Add(1)\n\t\tgo handleDeliveryWithRetry(msg, handler, waitGroup)\n\t}\n}", "func (svc *Client) ProcessMessages(ctx context.Context, queue string, processFunc func(*stomp.Message) error) error {\n\tstompConn, err := svc.getConnection()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\thostname, err := os.Hostname()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tsubQueue, err := stompConn.Subscribe(\n\t\tfmt.Sprintf(\"/queue/%s\", queue),\n\t\tstomp.AckAuto,\n\t\tstomp.SubscribeOpt.Header(\"durable-subscription-name\", 
hostname),\n\t\tstomp.SubscribeOpt.Header(\"subscription-type\", \"MULTICAST\"))\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfor {\n\t\tselect {\n\t\tcase <-ctx.Done():\n\t\t\treturn subQueue.Unsubscribe()\n\t\tcase msg, ok := <-subQueue.C:\n\t\t\tif !ok {\n\t\t\t\treturn nil\n\t\t\t}\n\n\t\t\tif msg.Err != nil {\n\t\t\t\treturn msg.Err\n\t\t\t}\n\n\t\t\terr := processFunc(msg)\n\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\tdefault:\n\t\t\t// No message recv'd\n\t\t\ttime.Sleep(1 * time.Second)\n\t\t\tcontinue\n\t\t}\n\t}\n}", "func (b *Board) HandleIncomingMessages() {\n\tHandleIncomingMessages(b.cfg.ConnectionConfig.MulticastAddress, b.handlingMessage)\n}", "func (a *Agent) handleTCPMessages(c net.Conn) {\n\tdefer func() {\n\t\tif err := c.Close(); err != nil {\n\t\t\tlogger.Debug(err)\n\t\t}\n\t}()\n\tvar buf []byte\n\tmessageBuffer := bytes.NewBuffer(buf)\n\tconnReader := bufio.NewReader(c)\n\n\t// Read incoming tcp messages from client until we hit a valid JSON message.\n\t// If we don't get valid JSON or a ping request after 500ms, close the\n\t// connection (timeout).\n\treadDeadline := time.Now().Add(TCPSocketReadDeadline)\n\n\t// Only allow 500ms of IO. After this time, all IO calls on the connection\n\t// will fail.\n\tif err := c.SetReadDeadline(readDeadline); err != nil {\n\t\tlogger.WithError(err).Error(\"error setting read deadline\")\n\t\treturn\n\t}\n\n\t// It is possible that our buffered readers/writers will cause us\n\t// to iterate.\n\tfor time.Now().Before(readDeadline) {\n\t\t_, err := connReader.WriteTo(messageBuffer)\n\t\t// Check error condition. If it's a timeout error, continue so we can read\n\t\t// any remaining partial packets. 
Any other error type returns.\n\t\tif err != nil {\n\t\t\tif opError, ok := err.(*net.OpError); ok && !opError.Timeout() {\n\t\t\t\tlogger.Debugf(\"error reading message from tcp socket %s\", err.Error())\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\n\t\tif match := pingRe.Match(messageBuffer.Bytes()); match {\n\t\t\tlogger.Debug(\"tcp socket received ping\")\n\t\t\t_, err = c.Write([]byte(\"pong\"))\n\t\t\tif err != nil {\n\t\t\t\tlogger.WithError(err).Error(\"could not write response to tcp socket\")\n\t\t\t}\n\t\t\treturn\n\t\t}\n\t\t// Check our received data for valid JSON. If we get invalid JSON at this point,\n\t\t// read again from client, add any new message to the buffer, and parse\n\t\t// again.\n\t\tvar event types.Event\n\t\tvar result v1.CheckResult\n\t\tif err = json.Unmarshal(messageBuffer.Bytes(), &result); err != nil {\n\t\t\tcontinue\n\t\t}\n\n\t\tif err = translateToEvent(a, result, &event); err != nil {\n\t\t\tlogger.WithError(err).Error(\"1.x returns \\\"invalid\\\"\")\n\t\t\treturn\n\t\t}\n\n\t\t// Prepare the event by mutating it as required so it passes validation\n\t\tif err = prepareEvent(a, &event); err != nil {\n\t\t\tlogger.WithError(err).Error(\"invalid event\")\n\t\t\treturn\n\t\t}\n\n\t\t// At this point, should receive valid JSON, so send it along to the\n\t\t// message sender.\n\t\tpayload, err := json.Marshal(event)\n\t\tif err != nil {\n\t\t\tlogger.WithError(err).Error(\"could not marshal json payload\")\n\t\t\treturn\n\t\t}\n\n\t\ta.sendMessage(transport.MessageTypeEvent, payload)\n\t\t_, _ = c.Write([]byte(\"ok\"))\n\t\treturn\n\t}\n\t_, _ = c.Write([]byte(\"invalid\"))\n}", "func makeMessageStreamHandler(handler api.ProtocolHandler, logger *logging.Logger) messageStreamHandler {\n\treturn func(in <-chan []byte, reply chan<- []byte) {\n\t\tfor msgBytes := range in {\n\n\t\t\tmsg, msgStr, err := handler.Unwrap(msgBytes)\n\t\t\tif err != nil {\n\n\t\t\t\tlogger.Warningf(\"Failed to unmarshal message: %s\", 
err)\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tlogger.Debugf(\"Received %s\", msgStr)\n\n\t\t\tif replyChan, new, err := handler.Handle(msg); err != nil {\n\t\t\t\tlogger.Warningf(\"Failed to handle %s: %s\", msgStr, err)\n\t\t\t} else if replyChan != nil {\n\t\t\t\tm, more := <-replyChan\n\t\t\t\tif !more {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\n\t\t\t\treplyBytes, err := handler.Wrap(m)\n\n\t\t\t\tif err != nil {\n\t\t\t\t\tpanic(err)\n\t\t\t\t}\n\t\t\t\treply <- replyBytes\n\t\t\t} else if !new {\n\t\t\t\tlogger.Infof(\"Dropped %s\", msgStr)\n\t\t\t} else {\n\t\t\t\tlogger.Debugf(\"Handled %s\", msgStr)\n\t\t\t}\n\t\t}\n\t}\n}", "func handleMessages() {\n\tvar msg node.Message\n\tvar jsonMessage Message\n\tfor {\n\t\tmsg = <-uiChannel\n\t\tjsonMessage.Message = msg.Content\n\t\tjsonMessage.Peer = msg.Peer\n\t\tfor client := range clients {\n\t\t\t// Send message to web application\n\t\t\terr := client.WriteJSON(jsonMessage)\n\t\t\tif err != nil {\n\t\t\t\tlog.Printf(\"error: %v\", err)\n\t\t\t\tclient.Close()\n\t\t\t\tdelete(clients, client)\n\t\t\t}\n\t\t}\n\t}\n}", "func (g *GCMMessageHandler) HandleMessages(msg interfaces.KafkaMessage) {\n\tg.sendMessage(msg)\n}", "func (client *Client) Read() {\n\tvar message Message\n\tfor {\n\t\tif err := client.socket.ReadJSON(&message); err != nil {\n\t\t\tbreak\n\t\t}\n\t\t// Call findHandler to know which handler to call. 
If handler in router map value matches key then call it.\n\t\tif handler, found := client.findHandler(message.Name); found {\n\t\t\thandler(client, message.Data)\n\t\t}\n\t}\n\t// close connection once finished.\n\tclient.socket.Close()\n}", "func MessageReceiveHandler(s *discordgo.Session, m *discordgo.MessageCreate) {\n\t// Ignore bot messages\n\tif m.Author.Bot {\n\t\treturn\n\t}\n\n\t// Check for command\n\tif strings.HasPrefix(m.Content, config.Get().Prefix) {\n\t\t// Get Server object\n\t\tc, err := s.Channel(m.ChannelID)\n\t\tif err != nil {\n\t\t\tlogger.Log.Error(err.Error())\n\t\t\treturn\n\t\t}\n\t\tserver, err := db.GetServer(c.GuildID)\n\t\tif err != nil {\n\t\t\tlogger.Log.Error(err.Error())\n\t\t\treturn\n\t\t}\n\n\t\t// Execute command\n\t\ttrigger := strings.Split(m.Content, config.Get().Prefix)[1]\n\t\tfor _, cmd := range command.MessageCommands {\n\t\t\tif cmd.Trigger() == trigger {\n\t\t\t\tcmd.Execute(server, s, m)\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\n\t\t// Fallback for wrong command\n\t\t// Create response message\n\t\tmessage := &structure.Message{\n\t\t\tTitle: \"Unknown command. 
Try using `!kod-help` for a list of all game commands.\",\n\t\t\tType: \"system\",\n\t\t\tIcon: \"https://cdn.discordapp.com/attachments/512302843437252611/512302951814004752/ac6918be09a389876ee5663d6b08b55a.png\",\n\t\t\tFooter: \"Command execution feedback.\",\n\t\t}\n\n\t\t// Build Embed\n\t\tembed := builder.BuildEmbed(message)\n\n\t\t// Send response\n\t\t_, err = s.ChannelMessageSendEmbed(m.ChannelID, embed)\n\t\tif err != nil {\n\t\t\tlogger.Log.Error(err.Error())\n\t\t}\n\t}\n}", "func (w Wrapper) OnReadInMessages(f ReadInMessagesHandler) {\n\tw.longpoll.EventNew(6, func(i []interface{}) error {\n\t\tvar event ReadInMessages\n\t\tif err := event.parse(i); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tf(event)\n\n\t\treturn nil\n\t})\n}", "func readMessages(in io.Reader) ([]*message, error) {\n\tvar msgs []*message\n\tinput := bufio.NewScanner(in)\n\tfor {\n\t\tif msg, err := scanMessage(input); err == io.EOF {\n\t\t\tbreak\n\t\t} else if err != nil {\n\t\t\treturn nil, err\n\t\t} else {\n\t\t\tmsgs = append(msgs, msg)\n\t\t}\n\t}\n\treturn msgs, input.Err()\n}", "func (h *myMessageHandler) HandleMessage(m *nsq.Message) error {\n\tif len(m.Body) == 0 {\n\t\treturn nil\n\t}\n\n\terr := h.processMessage(m.Body)\n\treturn err\n}", "func (c *Controller) handleMessages() {\n\tfor {\n\t\tmsg := c.chatroom.Receive()\n\t\tif msg.Info.Action == Broadcast {\n\t\t\tc.window.MsgQ <- &iMessage{\n\t\t\t\tSenderID: msg.SenderID,\n\t\t\t\tSenderName: msg.SenderName,\n\t\t\t\tType: Public,\n\t\t\t\tBody: msg.Info.Body,\n\t\t\t}\n\t\t}\n\t}\n}", "func (h *messageHandler) HandleMessage(m *nsq.Message) error {\n\t//Process the Message\n\tvar request Message\n\tif err := json.Unmarshal(m.Body, &request); err != nil {\n\t\tlog.Println(\"Error when Unmarshaling the message body, Err : \", err)\n\t\t// Returning a non-nil error will automatically send a REQ command to NSQ to re-queue the message.\n\t\treturn err\n\t}\n\t//Print the 
Message\n\tlog.Println(\"Message\")\n\tfmt.Println(request)\n\tlog.Println(\"--------------------\")\n\tlog.Println(\"Name : \", request.Name)\n\tlog.Println(\"Content : \", request.Content)\n\tlog.Println(\"Timestamp : \", request.Timestamp)\n\tlog.Println(\"--------------------\")\n\tlog.Println(\"\")\n\t// Will automatically set the message as finish\n\treturn nil\n}", "func (h *Handler) HandleMessage(body []byte) ([]byte, error) {\n\tt := struct {\n\t\tType string `json:\"type\"`\n\t}{}\n\tif err := json.Unmarshal(body, &t); err != nil {\n\t\treturn nil, err\n\t}\n\n\tmessageMapping := map[string]handlerFunc{\n\t\t\"url_verification\": h.handleURLVerification,\n\t\t\"event_callback\": h.handleEvent,\n\t}\n\n\tfn, ok := messageMapping[t.Type]\n\tif !ok {\n\t\treturn nil, fmt.Errorf(\"unknown event type %q\", t.Type)\n\t}\n\toutput, err := fn(body)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"%s: %v\", t.Type, err)\n\t}\n\treturn output, nil\n}", "func newMessageHandler(url string) (*messageHandler, error) {\n\tc, err := net.Dial(\"tcp\", url)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tconn := &messageHandler{\n\t\tc,\n\t\tjson.NewEncoder(c),\n\t\tmap[string][]interface{}{},\n\t\tmake(chan struct{}),\n\t}\n\n\t// start listening for json messages\n\tgo func() {\n\t\tr := bufio.NewReader(c)\n\t\tfor {\n\t\t\t// messages are separated by a newline\n\t\t\tline, err := r.ReadBytes('\\n')\n\t\t\tif err != nil {\n\t\t\t\tclose(conn.closer)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\t// sometimes we'll receive empty lines, ignore those\n\t\t\tif len(line) <= 1 {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\t// extract the 'msg' field\n\t\t\tvar m struct{ Msg string }\n\t\t\tdeny(json.Unmarshal(line, &m))\n\n\t\t\t// callbacks are cleared when called\n\t\t\tcallbacks := conn.callbacks[m.Msg]\n\t\t\tconn.callbacks[m.Msg] = []interface{}{}\n\n\t\t\tif len(callbacks) == 0 {\n\t\t\t\tfmt.Println(\"unhandled message\", m.Msg)\n\t\t\t}\n\n\t\t\tfor _, f := range callbacks {\n\t\t\t\tm 
:= reflect.New(reflect.TypeOf(f).In(0))\n\t\t\t\tdeny(json.Unmarshal(line, m.Interface()))\n\t\t\t\treflect.ValueOf(f).Call([]reflect.Value{m.Elem()})\n\t\t\t}\n\t\t}\n\t}()\n\n\treturn conn, nil\n}", "func (r *Ricochet) ProcessMessages(service RicochetService) {\n\tfor {\n\t\toc := <-r.newconns\n\t\tif oc == nil {\n\t\t\treturn\n\t\t}\n\t\tgo r.processConnection(oc, service)\n\t}\n}", "func LoginHandle(msg []byte, c echo.Context) (recv []byte, err error) {\n\tdefer util.Stack()\n\n\tabsMessage := &pf.AbsMessage{}\n\terr = absMessage.Unmarshal(msg)\n\tif err != nil {\n\t\treturn\n\t}\n\n\tmsgID := absMessage.GetMsgID()\n\tmsgBody := absMessage.GetMsgBody()\n\n\tswitch msgID {\n\tcase int32(pf.Login):\n\t\tloginSend := &pf.LoginSend{}\n\t\terr = loginSend.Unmarshal(msgBody)\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\n\t\ttoken, id, loginRecv := handleLogin(loginSend, c)\n\t\trecv, err = loginRecv.Marshal()\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\n\t\tabsMessage.Token = token\n\t\tutil.LogSend(msgID, id, 0, loginSend, \"Login\")\n\t\tutil.LogRecv(msgID, id, 0, loginRecv, \"Login\")\n\tdefault:\n\t\terr = def.ErrHandleLogin\n\t\treturn\n\t}\n\n\tabsMessage.MsgBody = recv\n\trecv, err = absMessage.Marshal()\n\treturn\n}", "func Handler(conn *websocket.Conn) {\n\t// handle connected\n\tvar userId string\n\tvar err error\n\tif userId, err = doConnected(conn); err != nil {\n\t\tfmt.Println(\"Client connect error: \", err)\n\t\treturn\n\t}\n\n\tfmt.Println(\"Client connected, userId: \", userId)\n\n\tfor {\n\t\tmsg := new(Message)\n\n\t\tif err := websocket.JSON.Receive(conn, msg); err != nil {\n\t\t\tfmt.Println(\"Can't receive, error: \", err)\n\t\t\tbreak\n\t\t}\n\n\t\tmsg.UpdateAt = Timestamp()\n\n\t\tfmt.Println(\"Received from client: \", msg)\n\n\t\t// handle received message\n\t\tif err := doReceived(conn, msg); err != nil {\n\t\t\tfmt.Println(\"Received message error: \", err)\n\t\t\tbreak\n\t\t}\n\t}\n\n\t// handle disConnected\n\tif err := 
doDisConnected(userId, conn); err != nil {\n\t\tfmt.Println(\"Client disconnected error: \", err)\n\t\treturn\n\t}\n\n\tfmt.Println(\"Client disconnected, userId: \", userId)\n}", "func (s *Socket) listenToMessagesIn() {\n\tfor {\n\t\tmessage := new(Message)\n\t\terr := s.connection.ReadJSON(&message)\n\t\tif err != nil {\n\t\t\tif websocket.IsUnexpectedCloseError(err, websocket.CloseGoingAway, websocket.CloseAbnormalClosure) {\n\t\t\t\tlog.Printf(\"Error: %v\", err)\n\t\t\t}\n\t\t\tbreak\n\t\t}\n\t\ts.messagesIn <- message\n\t}\n}", "func (irc *IrcCon) handleOutgoingMessages() {\n\tfor s := range irc.outgoing {\n\t\t_,err := fmt.Fprint(irc.con, s + \"\\r\\n\")\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t\ttime.Sleep(time.Millisecond * 200)\n\t}\n}", "func (s *Server) HandleRecv(w http.ResponseWriter, r *http.Request) {\n\tif s.listing {\n\t\tp := strings.TrimPrefix(strings.Replace(r.URL.Path, \"recv/\", \"\", 1), \"/\")\n\t\tif p != \"\" {\n\t\t\tfmt.Fprintln(w, \"I recieved a push from:\",\n\t\t\t\tr.Header.Get(\"X-I2p-Destb32\"),\n\t\t\t\t\"And for now, I did nothing with it because I am dumb)\")\n\t\t\treturn\n\t\t}\n\t\tfmt.Fprintln(w, \"FALSE\")\n\t\treturn\n\t}\n\tfmt.Fprintln(w, \"Listings disabled for this server\")\n\treturn\n}", "func cmdHandler(c *Client, msg []byte) {\n\tlog.Infof(\"Received Message: %s ...\", msg[:int(math.Min(float64(len(msg)), 128))])\n\tlog.Debugf(\"Count of goroutines=%d\", runtime.NumGoroutine())\n\n\t// Decode JSON message\n\tvar cmd manager.PlayerCommand\n\terr := json.Unmarshal(msg, &cmd)\n\tif err != nil {\n\t\tsendStatusError(c, \"Message could not be decoded as JSON\", err.Error())\n\t\treturn\n\t}\n\n\tswitch cmd.Cmd {\n\tcase \"status\":\n\t\tsendStatusOKMsg(c, \"\") //, statusString[gp_daemon_status])\n\tcase \"pre_start\":\n\t\tpreStartCommand(c)\n\tcase \"start\":\n\t\tstartCommand(c)\n\tcase \"stop\":\n\t\tstopCommand(c)\n\tcase \"script\":\n\t\tscriptCommand(c, &cmd)\n\tcase \"getmd5\":\n\t\thandleGetMD5(c, 
&cmd)\n\tcase \"datafile\":\n\t\thandleDataFile(c, &cmd)\n\tcase \"nextchunk\":\n\t\thandleDataChunk(c, &cmd)\n\tcase \"get_results\":\n\t\tgetResultsCommand(c, &cmd)\n\tdefault:\n\t\tsendStatusError(c, fmt.Sprintf(\"Message not supported: %s ...\", msg[:int(math.Min(float64(len(msg)), 128))]), \"\")\n\t}\n\tlog.Debug(\"Message handled\")\n}", "func (p *Paradise) HandleCommands() {\n\tfmt.Println(\"Got client on: \", p.ip)\n\tp.writeMessage(220, \"Welcome to Paradise\")\n\tfor {\n\t\tline, err := p.reader.ReadString('\\n')\n\t\tif err != nil {\n\t\t\tif err == io.EOF {\n\t\t\t\t//continue\n\t\t\t}\n\t\t\tbreak\n\t\t}\n\t\tcommand, param := parseLine(line)\n\t\tp.command = command\n\t\tp.param = param\n\n\t\tfn := CommandMap[command]\n\t\tif fn == nil {\n\t\t\tp.writeMessage(550, \"not allowed\")\n\t\t} else {\n\t\t\tfn(p)\n\t\t}\n\t}\n}", "func process_messages() {\n\tvar content string\n\n\tfor {\n\t\t//Get available messages (if any)\n\t\tmsg := messenger.Msnger.Receive_message()\n\n\t\t//No message is available\n\t\tif msg.Type == mylib.NONE {\n\t\t\t//Wait ~1sec and try again\n\t\t\ttime.Sleep(1000000000)\n\t\t\tcontinue\n\t\t}\n\n\t\tcontent = msg.Content\n\t\t//Chat message: print to chat window\n\t\tif msg.Type == mylib.CHAT_MESSAGE {\n\t\t\tmesge := msg.Content\n\n\t\t\tif strings.HasPrefix(mesge, \"L \") {\n\t\t\t\tcontent = fmt.Sprintf(\"%v: %v\", msg.Orig_source, strings.TrimLeft(mesge, \"L \"))\n\t\t\t\tchatting.Msg += content + \"\\n\"\n\t\t\t\tqml.Changed(chatting, &chatting.Msg)\n\n\t\t\t} else if strings.HasPrefix(mesge, \"G \") {\n\t\t\t\tcontent = fmt.Sprintf(\"%v: %v\", msg.Orig_source, strings.TrimLeft(mesge, \"G \"))\n\t\t\t\tglobalchatting.Msg += content + \"\\n\"\n\t\t\t\tqml.Changed(globalchatting, &globalchatting.Msg)\n\t\t\t}\n\n\t\t} else if msg.Type == mylib.CREATE_ROOM {\n\t\t\t//A room was created: add room to room list\n\t\t\tdecoded := strings.Split(content, \":\")\n\t\t\trooms[decoded[0]] = fmt.Sprintf(\"%v:%v:%v\", decoded[1], 
decoded[2], decoded[3])\n\t\t} else if msg.Type == mylib.JOIN_ROOM {\n\t\t\t//A node joined a room: add node to members list if this node is in the same room\n\t\t\tdecoded := strings.Split(content, \":\")\n\t\t\tif my_room == decoded[0] {\n\t\t\t\troom_members = append(room_members, fmt.Sprintf(\"%v:%v:%v\", decoded[1], decoded[2], decoded[3]))\n\t\t\t}\n\t\t} else if msg.Type == mylib.START_GAME {\n\t\t\t//A game was started\n\t\t\tdecoded := strings.Split(content, \":\")\n\t\t\tdelete(rooms, decoded[0])\n\t\t\tif my_room == decoded[0] {\n\t\t\t\tin_game = true\n\t\t\t}\n\t\t} else if msg.Type == mylib.DELETE_ROOM {\n\t\t\t//A room was deleted: remove room from room list\n\t\t\tdecoded := strings.Split(content, \":\")\n\t\t\tdelete(rooms, decoded[0])\n\t\t\tif my_room == decoded[0] {\n\t\t\t\tmy_room = \"\"\n\t\t\t\tin_room = false\n\t\t\t\troom_members = make([]string, 0, 0)\n\t\t\t}\n\t\t} else if msg.Type == mylib.LEAVE_ROOM {\n\t\t\t//A node left a room: remove node from members list if this node is in the same room\n\t\t\tcontent = msg.Content\n\t\t\tdecoded := strings.Split(content, \":\")\n\t\t\tif my_room == decoded[0] {\n\t\t\t\tfor i := range room_members {\n\t\t\t\t\tif room_members[i] == fmt.Sprintf(\"%v:%v:%v\", decoded[1], decoded[2], decoded[3]) {\n\t\t\t\t\t\troom_members = append(room_members[:i], room_members[i+1:]...)\n\t\t\t\t\t\tbreak\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t} else if msg.Type == mylib.MOVE {\n\t\t\tdecoded := strings.Split(content, \":\")\n\t\t\t// Should be board_num, player_team, player_color, turn, origLoc, newLoc, capture_pieceString\n\t\t\tboard_num, _ := strconv.Atoi(decoded[0])\n\t\t\tteam, _ := strconv.Atoi(decoded[1])\n\t\t\tcolor, _ := strconv.Atoi(decoded[2])\n\t\t\tturn, _ := strconv.Atoi(decoded[3])\n\t\t\torigLoc, _ := strconv.Atoi(decoded[4])\n\t\t\tnewLoc, _ := strconv.Atoi(decoded[5])\n\t\t\tcapturedI := decoded[6]\n\t\t\tcapturedT := decoded[7]\n\t\t\tcapturedU, _ := 
strconv.Atoi(decoded[8])\n\t\t\tUpdateFromOpponent(board_num, team, color, turn, origLoc, newLoc, capturedI, capturedT, capturedU)\n\t\t} else if msg.Type == mylib.PLACE {\n\t\t\tdecoded := strings.Split(content, \":\")\n\t\t\t// Should be board_num, player_team, player_color, turn, origLoc, newLoc, capture_pieceString\n\t\t\tboard_num, _ := strconv.Atoi(decoded[0])\n\t\t\tteam, _ := strconv.Atoi(decoded[1])\n\t\t\tcolor, _ := strconv.Atoi(decoded[2])\n\t\t\tturn, _ := strconv.Atoi(decoded[3])\n\t\t\tloc, _ := strconv.Atoi(decoded[4])\n\t\t\tpiece, _ := strconv.Atoi(decoded[5])\n\t\t\tpieceImage := decoded[6]\n\t\t\tpieceType := decoded[7]\n\t\t\tpieceTeam, _ := strconv.Atoi(decoded[8])\n\t\t\tUpdatePlace(board_num, team, color, turn, loc, piece, pieceImage, pieceType, pieceTeam)\n\t\t} else if msg.Type == mylib.GAMEOVER {\n\t\t\tdecoded := strings.Split(content, \":\")\n\t\t\tboard_num, _ := strconv.Atoi(decoded[0])\n\t\t\tteam, _ := strconv.Atoi(decoded[1])\n\t\t\tcause := decoded[2]\n\t\t\tEndGame(board_num, team, cause)\n\t\t\tif cause == \"King\" {\n\t\t\t\t// The king was captured\n\t\t\t} else {\n\t\t\t\t// Somebody ran out of time\n\t\t\t}\n\t\t}\n\t\tmsg.Type = mylib.NONE\n\t}\n}", "func (c *Common) HandleReceivedMessage(message messages.Messager) {\n\n switch message.(type) {\n case *messages.LocationMessage:\n c.rp.HandleLocationMessage(message.(*messages.LocationMessage))\n case *messages.MotorSpeedMessage:\n c.rp.HandleMotorSpeedMessage(message.(*messages.MotorSpeedMessage))\n }\n\n}", "func ListenForMessages(socket *websocket.Conn, onMessage func(structs.Message) error, onError func(error)) {\n\tfor {\n\t\t_, bytes, err := socket.ReadMessage()\n\t\tif err != nil {\n\t\t\tif isCloseError(err) {\n\t\t\t\treturn\n\t\t\t}\n\t\t\tonError(errors.Wrap(err, \"Error reading from socket\"))\n\t\t\tcontinue\n\t\t}\n\t\tmessage, err := MessageFromJSON(bytes)\n\t\tif err != nil {\n\t\t\tonError(errors.Wrap(err, fmt.Sprintf(\"Error unmarshaling message: '%s'\", 
bytes)))\n\t\t}\n\t\terr = onMessage(message)\n\t\tif err != nil {\n\t\t\tonError(errors.Wrap(err, \"Error returned by on message handler\"))\n\t\t}\n\t}\n}", "func (cg *CandlesGroup) parseMessage(msg []byte) {\n\tt := bytes.TrimLeftFunc(msg, unicode.IsSpace)\n\tvar err error\n\t// either a channel data array or an event object, raw json encoding\n\tif bytes.HasPrefix(t, []byte(\"[\")) {\n\t\tcg.handleMessage(msg)\n\t} else if bytes.HasPrefix(t, []byte(\"{\")) {\n\t\tif err = cg.handleEvent(msg); err != nil {\n\t\t\tlog.Println(\"[BITFINEX] Error handling event: \", err)\n\t\t}\n\t} else {\n\t\terr = fmt.Errorf(\"[BITFINEX] unexpected message: %s\", msg)\n\t}\n\tif err != nil {\n\t\tfmt.Println(\"[BITFINEX] Error handleMessage: \", err, string(msg))\n\t}\n}", "func (mgmt *Management) handleFrontendMessage(received []byte) {\n\tvar cm collaborationMessage\n\terr := json.Unmarshal(received, &cm)\n\tif err != nil {\n\t\tlog.Println(\"Error while unmarshalling collaborationMessage:\", err)\n\t}\n\n\tswitch cm.Event {\n\tcase \"ABTU\":\n\t\tmgmt.doc.FrontendToABTU <- cm.Content\n\tcase \"AccessControl\":\n\t//\tTODO Handle access control messages\n\tcase \"Cursor\":\n\t//\tTODO Handle cursor messages\n\t}\n}", "func (c *Conn) OnMessage(messageType int, p []byte) {\n\tfor _, str := range strings.Split(string(p), \"\\r\\n\") {\n\t\tif str != \"\" {\n\t\t\tdata, _ := utils.GbkToUtf8([]byte(str))\n\t\t\tdoc := xml.NewDecoder(bytes.NewReader(data))\n\t\t\tnode := c.respParseAttr(doc)\n\t\t\tnode.Raw = string(data)\n\t\t\tswitch node.getAttr(\"id\") {\n\t\t\tcase \"1\":\n\t\t\t\tstatus := node.getElem(\"result\").getAttr(\"status\")\n\t\t\t\tif status == \"ok\" {\n\t\t\t\t\tc.key1 = node.getElem(\"key\").getAttr(\"key1\")\n\t\t\t\t\t// 初始化心跳\n\t\t\t\t\tc.loginChatServerSuccess()\n\t\t\t\t} else {\n\t\t\t\t\tEventChan <- EventMessage{Type: \"error\", Msg: fmt.Sprintf(\"%v\", \"进入直播间失败\")}\n\t\t\t\t}\n\t\t\tcase \"2\":\n\t\t\tcase 
\"3\":\n\t\t\tdefault:\n\t\t\t\tc.socketData(node)\n\t\t\t}\n\t\t\tc.pushNode(node)\n\t\t}\n\t}\n}" ]
[ "0.6561378", "0.6528215", "0.64411", "0.64160395", "0.6366687", "0.6283122", "0.6273615", "0.6269291", "0.6257093", "0.6256277", "0.61980337", "0.6155845", "0.6121732", "0.61168164", "0.601344", "0.6010404", "0.59898597", "0.5966513", "0.59574014", "0.5955264", "0.58938617", "0.5886083", "0.58251786", "0.58229846", "0.5765552", "0.57468873", "0.57331735", "0.5732407", "0.5658851", "0.56189036", "0.5618875", "0.55945134", "0.5594153", "0.5589223", "0.5542452", "0.55244976", "0.5524469", "0.5517586", "0.5517586", "0.5513526", "0.5509128", "0.5503757", "0.54992527", "0.5495385", "0.5493428", "0.5487539", "0.54849243", "0.5484654", "0.54842305", "0.5484129", "0.5474079", "0.5474019", "0.5465442", "0.5453857", "0.54402745", "0.54353", "0.543004", "0.54296136", "0.54280156", "0.5420371", "0.5418044", "0.5417457", "0.541648", "0.54151595", "0.5414307", "0.54119277", "0.54083675", "0.5405065", "0.54020584", "0.5393194", "0.5391204", "0.53851795", "0.5384205", "0.5359276", "0.53577554", "0.5348933", "0.53401935", "0.53360623", "0.53127044", "0.53103966", "0.5307052", "0.5306911", "0.53047556", "0.52935183", "0.52796584", "0.5277324", "0.52768785", "0.52745837", "0.52745", "0.52657014", "0.52649117", "0.5263485", "0.5260125", "0.5254791", "0.52530473", "0.5251399", "0.5250213", "0.5247093", "0.52410215", "0.5237266" ]
0.71781325
0
handleGob handles the "GOB" request. It decodes the received GOB data into a struct.
handleGob обрабатывает запрос "GOB". Он декодирует полученные данные GOB в структуру.
func handleGob(rw *bufio.ReadWriter) { log.Print("Receive GOB data:") var data complexData // Create a decoder that decodes directly into a struct variable. dec := gob.NewDecoder(rw) err := dec.Decode(&data) if err != nil { log.Println("Error decoding GOB data:", err) return } // Print the complexData struct and the nested one, too, to prove // that both travelled across the wire. log.Printf("Outer complexData struct: \n%#v\n", data) log.Printf("Inner complexData struct: \n%#v\n", data.C) }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (d *Decoder) GOB(val interface{}) {\n\tgobd := gob.NewDecoder(d.buf)\n\tif err := gobd.Decode(val); err != nil {\n\t\tlog.Panicf(\"gob: failed to decode: %v\", err)\n\t}\n}", "func (z *Rat) GobDecode(buf []byte) error {}", "func GobDecode(ctx context.Context, data []byte, obj interface{}) error {\n\treturn gob.NewDecoder(bytes.NewBuffer(data)).Decode(obj)\n}", "func DecodeGob(data []byte, v interface{}) error {\n\tb := bytes.NewBuffer(data)\n\treturn gob.NewDecoder(b).Decode(v)\n}", "func GobDecode(buffer []byte, value interface{}) error {\n buf := bytes.NewBuffer(buffer)\n decoder := gob.NewDecoder(buf)\n err := decoder.Decode(value)\n if err != nil {\n return gobDebug.Error(err)\n }\n return nil\n}", "func GobDecode(data []byte, obj interface{}) error {\n\treturn gob.NewDecoder(bytes.NewBuffer(data)).Decode(obj)\n}", "func gobDecode(buf []byte, into interface{}) error {\n\tif buf == nil {\n\t\treturn nil\n\t}\n\tdec := gob.NewDecoder(bytes.NewReader(buf))\n\treturn dec.Decode(into)\n}", "func (z *Int) GobDecode(buf []byte) error {}", "func GobDecode(b []byte) (interface{}, error) {\n\tvar result interface{}\n\terr := gob.NewDecoder(bytes.NewBuffer(b)).Decode(&result)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn result, nil\n}", "func GobUnmarshal(i interface{}, b []byte) error {\n\tbuf := bytes.NewBuffer(b)\n\tdecoder := gob.NewDecoder(buf)\n\treturn decoder.Decode(i)\n}", "func (s *Store) GobDecode(data []byte) error {\n\ts.access.Lock()\n\tdefer s.access.Unlock()\n\n\tbuf := bytes.NewBuffer(data)\n\n\tdecoder := gob.NewDecoder(buf)\n\tvar version uint8\n\terr := decoder.Decode(&version)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = decoder.Decode(&s.data)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfor key, _ := range s.data {\n\t\ts.doKeyChanged(key)\n\t}\n\n\treturn nil\n}", "func (z *Float) GobDecode(buf []byte) error {}", "func ReadGob(path string, object interface{}) error {\n\tfile, err := os.Open(path)\n\tif err != nil 
{\n\t\treturn err\n\t}\n\tdefer file.Close()\n\n\tdecoder := gob.NewDecoder(file)\n\tif err = decoder.Decode(object); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}", "func (g *Gammas) GobDecode(data []byte) error {\n\tvar err error\n\tfor len(data) > 0 {\n\t\tg2 := new(bn256.G2)\n\t\tdata, err = g2.Unmarshal(data)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t*g = append(*g, g2)\n\t}\n\treturn nil\n}", "func FromGob(data []byte, dst interface{}) error {\n\treturn NewGobber().From(data, dst)\n}", "func (loc *LogOddsCell) GobDecode(buf []byte) error {\n\tr := bytes.NewBuffer(buf)\n\tdecoder := gob.NewDecoder(r)\n\n\terr := decoder.Decode(&loc.logOddsVal)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}", "func GOB() (ret httprpc.Codec) {\n\treturn Danger(\n\t\tfunc(w io.Writer) DangerEncoder {\n\t\t\treturn gob.NewEncoder(w)\n\t\t},\n\t\tfunc(r io.Reader) DangerDecoder {\n\t\t\treturn gob.NewDecoder(r)\n\t\t},\n\t)\n}", "func (s *CountMinSketch) GobDecode(data []byte) error {\n\tbuf := bytes.NewBuffer(data)\n\t_, err := s.ReadFrom(buf)\n\treturn err\n}", "func (t *Time) GobDecode(data []byte) error {}", "func gobInfoDecode(gobBytes []byte) (*storage.GobInfo, error) {\n\tgobInfo := &storage.GobInfo{}\n\tbuf := bytes.NewReader(gobBytes)\n\tgobDec := realgob.NewDecoder(buf)\n\terr := gobDec.Decode(gobInfo)\n\treturn gobInfo, err\n}", "func (d *DFA) GobDecode(bs []byte) error {\n\tbuffer := bytes.NewBuffer(bs)\n\tdecoder := gob.NewDecoder(buffer)\n\tvar initial State\n\tvar table []Cell\n\tif err := decoder.Decode(&initial); err != nil {\n\t\treturn errors.Wrapf(err, \"could not GOB decode initial state\")\n\t}\n\tif err := decoder.Decode(&table); err != nil {\n\t\treturn errors.Wrapf(err, \"could not GOB decode sparse table\")\n\t}\n\td.initial = initial\n\td.table = table\n\treturn nil\n}", "func (info *ImageInfoType) GobDecode(buf []byte) (err error) {\n\tfields := []interface{}{&info.data, &info.smask, &info.n, &info.w, &info.h,\n\t\t&info.cs, 
&info.pal, &info.bpc, &info.f, &info.dp, &info.trns, &info.scale, &info.dpi}\n\tr := bytes.NewBuffer(buf)\n\tdecoder := gob.NewDecoder(r)\n\tfor j := 0; j < len(fields) && err == nil; j++ {\n\t\terr = decoder.Decode(fields[j])\n\t}\n\n\tinfo.i, err = generateImageID(info)\n\treturn\n}", "func (k *Key) GobDecode(buf []byte) error {\n\tnk, err := NewKeyEncoded(string(buf))\n\tif err != nil {\n\t\treturn err\n\t}\n\t*k = *nk\n\treturn nil\n}", "func NewGobCode(conn io.ReadWriteCloser) Codec {\n\tbuf := bufio.NewWriter(conn)\n\treturn &GobCodec{conn: conn, buf: buf, dec: gob.NewDecoder(conn), enc: gob.NewEncoder(buf)}\n}", "func client(ip string) error {\r\n\t// Some test data. Note how GOB even handles maps, slices, and\r\n\t// recursive data structures without problems.\r\n\ttestStruct := complexData{\r\n\t\tN: 23,\r\n\t\tS: \"string data\",\r\n\t\tM: map[string]int{\"one\": 1, \"two\": 2, \"three\": 3},\r\n\t\tP: []byte(\"abc\"),\r\n\t\tC: &complexData{\r\n\t\t\tN: 256,\r\n\t\t\tS: \"Recursive structs? 
Piece of cake!\",\r\n\t\t\tM: map[string]int{\"01\": 1, \"10\": 2, \"11\": 3},\r\n\t\t},\r\n\t}\r\n\r\n\t// Open a connection to the server.\r\n\trw, err := Open(ip + Port)\r\n\tif err != nil {\r\n\t\treturn errors.Wrap(err, \"Client: Failed to open connection to \"+ip+Port)\r\n\t}\r\n\r\n\t// Send a STRING request.\r\n\t// Send the request name.\r\n\t// Send the data.\r\n\tlog.Println(\"Send the string request.\")\r\n\tn, err := rw.WriteString(\"STRING\\n\")\r\n\tif err != nil {\r\n\t\treturn errors.Wrap(err, \"Could not send the STRING request (\"+strconv.Itoa(n)+\" bytes written)\")\r\n\t}\r\n\tn, err = rw.WriteString(\"Additional data.\\n\")\r\n\tif err != nil {\r\n\t\treturn errors.Wrap(err, \"Could not send additional STRING data (\"+strconv.Itoa(n)+\" bytes written)\")\r\n\t}\r\n\tlog.Println(\"Flush the buffer.\")\r\n\terr = rw.Flush()\r\n\tif err != nil {\r\n\t\treturn errors.Wrap(err, \"Flush failed.\")\r\n\t}\r\n\r\n\t// Read the reply.\r\n\tlog.Println(\"Read the reply.\")\r\n\tresponse, err := rw.ReadString('\\n')\r\n\tif err != nil {\r\n\t\treturn errors.Wrap(err, \"Client: Failed to read the reply: '\"+response+\"'\")\r\n\t}\r\n\r\n\tlog.Println(\"STRING request: got a response:\", response)\r\n\r\n\t// Send a GOB request.\r\n\t// Create an encoder that directly transmits to `rw`.\r\n\t// Send the request name.\r\n\t// Send the GOB.\r\n\tlog.Println(\"Send a struct as GOB:\")\r\n\tlog.Printf(\"Outer complexData struct: \\n%#v\\n\", testStruct)\r\n\tlog.Printf(\"Inner complexData struct: \\n%#v\\n\", testStruct.C)\r\n\tenc := gob.NewEncoder(rw)\r\n\tn, err = rw.WriteString(\"GOB\\n\")\r\n\tif err != nil {\r\n\t\treturn errors.Wrap(err, \"Could not write GOB data (\"+strconv.Itoa(n)+\" bytes written)\")\r\n\t}\r\n\terr = enc.Encode(testStruct)\r\n\tif err != nil {\r\n\t\treturn errors.Wrapf(err, \"Encode failed for struct: %#v\", testStruct)\r\n\t}\r\n\terr = rw.Flush()\r\n\tif err != nil {\r\n\t\treturn errors.Wrap(err, \"Flush 
failed.\")\r\n\t}\r\n\treturn nil\r\n}", "func (val *Value) GobDecode(buf []byte) error {\n\tr := bytes.NewReader(buf)\n\tdec := gob.NewDecoder(r)\n\n\tvar gv gobValue\n\terr := dec.Decode(&gv)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"error decoding cty.Value: %s\", err)\n\t}\n\tif gv.Version != 0 {\n\t\treturn fmt.Errorf(\"unsupported cty.Value encoding version %d; only 0 is supported\", gv.Version)\n\t}\n\n\t// big.Float seems to, for some reason, lose its \"pointerness\" when we\n\t// round-trip it, so we'll fix that here.\n\tif bf, ok := gv.V.(big.Float); ok {\n\t\tgv.V = &bf\n\t}\n\n\tval.ty = gv.Ty\n\tval.v = gv.V\n\n\treturn nil\n}", "func (b *Binance) ReadGob(file string) error {\n\tf, _ := os.Open(file)\n\tgob.Register(&stg.KeepStrategy{})\n\tdecode := gob.NewDecoder(f)\n\tif err := decode.Decode(b); err != nil {\n\t\treturn err\n\t}\n\tos.Remove(file)\n\treturn nil\n}", "func (t *Type) GobDecode(buf []byte) error {\n\tr := bytes.NewReader(buf)\n\tdec := gob.NewDecoder(r)\n\n\tvar gt gobType\n\terr := dec.Decode(&gt)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"error decoding cty.Type: %s\", err)\n\t}\n\tif gt.Version != 0 {\n\t\treturn fmt.Errorf(\"unsupported cty.Type encoding version %d; only 0 is supported\", gt.Version)\n\t}\n\n\tt.typeImpl = gt.Impl\n\n\treturn nil\n}", "func MustGobDecode(b []byte) interface{} {\n\tbDecoded, err := GobDecode(b)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn bDecoded\n}", "func (t *Timestamp) GobDecode(data []byte) error {\n\tvar tm time.Time\n\n\tif err := tm.UnmarshalBinary(data); err != nil {\n\t\treturn err\n\t}\n\n\t*t = Timestamp(tm)\n\n\treturn nil\n}", "func (set *AppleSet) GobDecode(b []byte) error {\n\tset.s.Lock()\n\tdefer set.s.Unlock()\n\n\tbuf := bytes.NewBuffer(b)\n\treturn gob.NewDecoder(buf).Decode(&set.m)\n}", "func (g *Graph) GobDecode(b []byte) (err error) {\n\t// decode into graphGob\n\tgGob := &graphGob{}\n\tbuf := bytes.NewBuffer(b)\n\tdec := gob.NewDecoder(buf)\n\n\terr = 
dec.Decode(gGob)\n\tif err != nil {\n\t\treturn\n\t}\n\n\t// add the vertexes\n\tfor _, key := range gGob.Vertexes {\n\t\tg.Add(key)\n\t}\n\n\t// connect the vertexes\n\tfor key, neighbors := range gGob.Edges {\n\t\tfor otherKey, weight := range neighbors {\n\t\t\tif ok := g.Connect(key, otherKey, weight); !ok {\n\t\t\t\treturn errors.New(\"invalid edge endpoints\")\n\t\t\t}\n\t\t}\n\t}\n\n\treturn\n}", "func RatGobDecode(z *big.Rat, buf []byte) error", "func (d *Decimal) GobDecode(data []byte) error {\n\treturn d.UnmarshalBinary(data)\n}", "func (r *Record) GetGobField(d *Db, number uint16, e interface{}) error {\n\tif r.GetFieldType(d, number) != BLOBTYPE {\n\t\treturn WDBError(\"Not an blob valued field\")\n\t}\n\n\tenc := C.wg_get_field(d.db, r.rec, C.wg_int(number))\n\tslen := int(C.wg_decode_blob_len(d.db, enc))\n\tsval := C.wg_decode_blob(d.db, enc)\n\n\tvar goSlice []byte\n\tsliceHeader := (*reflect.SliceHeader)((unsafe.Pointer(&goSlice)))\n\tsliceHeader.Cap = slen\n\tsliceHeader.Len = slen\n\tsliceHeader.Data = uintptr(unsafe.Pointer(sval))\n\n\tbuffer := bytes.NewBuffer(goSlice)\n\tdecoder := gob.NewDecoder(buffer)\n\n\tif err := decoder.Decode(e); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}", "func NewGobSerializer() gbus.Serializer {\n\treturn &Gob{\n\t\tlock: &sync.Mutex{},\n\t\tregisteredSchemas: make(map[string]reflect.Type),\n\t}\n}", "func IntGobDecode(z *big.Int, buf []byte) error", "func (bo *BytesObj) GJSONParse() (res gjson.Result) {\n\tif bo != nil && bo.IsObject() && len(bo.data) > 0 {\n\t\tunsafeStr := *(*string)(unsafe.Pointer(&bo.data))\n\t\tres = gjson.Parse(unsafeStr)\n\t}\n\treturn\n}", "func ToGob(src interface{}) ([]byte, error) {\n\treturn NewGobber().To(src)\n}", "func (e *Encoder) PutGOB(val interface{}) {\n\tgobe := gob.NewEncoder(e)\n\tif err := gobe.Encode(val); err != nil {\n\t\tlog.Panicf(\"gob: failed to encode %v: %v\", val, err)\n\t}\n}", "func GobGenerateDecoder(r io.Reader) Decoder {\n\treturn 
gob.NewDecoder(r)\n}", "func EncodeGob(data interface{}) ([]byte, error) {\n\tb := new(bytes.Buffer)\n\terr := gob.NewEncoder(b).Encode(data)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn b.Bytes(), nil\n}", "func (a *Array) GobDecode(data []byte) error {\n\tbuf := bytes.NewReader(data)\n\tdec := gob.NewDecoder(buf)\n\n\terr := checkErr(\n\t\tdec.Decode(&a.bits),\n\t\tdec.Decode(&a.length),\n\t)\n\n\tif err != nil {\n\t\terr = fmt.Errorf(\"bit: decode failed (%v)\", err)\n\t}\n\n\treturn err\n}", "func (t *Tensor) GobDecode(b []byte) error {\n\tr := bytes.NewReader(b)\n\tdec := gob.NewDecoder(r)\n\n\tvar dt tf.DataType\n\terr := dec.Decode(&dt)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tvar shape []int64\n\terr = dec.Decode(&shape)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tvar tensor *tf.Tensor\n\tswitch dt {\n\tcase tf.String:\n\t\t// TensorFlow Go package currently does not support\n\t\t// string serialization. Let's do it ourselves.\n\t\tvar str string\n\t\terr = dec.Decode(&str)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\ttensor, err = tf.NewTensor(str)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\tdefault:\n\t\ttensor, err = tf.ReadTensor(dt, shape, r)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tt.Tensor = tensor\n\treturn nil\n}", "func GobEncode(data interface{}) []byte {\n\tvar buff bytes.Buffer\n\n\tencoder := gob.NewEncoder(&buff)\n\tif err := encoder.Encode(data); err != nil {\n\t\tlog.Panic(err)\n\t}\n\n\treturn buff.Bytes()\n}", "func EncodeGob(p interface{}) (data []byte, err error) {\n\tb := bytes.Buffer{}\n\tencoder := gob.NewEncoder(&b)\n\terr = encoder.Encode(p)\n\tif err != nil {\n\t\treturn\n\t}\n\tdata = b.Bytes()\n\treturn\n}", "func handleGetData(request []byte, bc *Blockchain) {\n\tvar buff bytes.Buffer\n\tvar payload getdata\n\n\tbuff.Write(request[commandLength:])\n\tdec := gob.NewDecoder(&buff)\n\terr := dec.Decode(&payload)\n\tif err != nil {\n\t\tlog.Panic(err)\n\t}\n\n\tif payload.Type == \"block\" 
{\n\t\tblock, err := bc.GetBlock([]byte(payload.ID))\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\n\t\tsendBlock(payload.AddrFrom, &block)\n\t}\n\n\tif payload.Type == \"tx\" {\n\t\ttxID := hex.EncodeToString(payload.ID)\n\t\ttx := mempool[txID]\n\n\t\tsendTx(payload.AddrFrom, &tx)\n\t\t// delete(mempool, txID)\n\t}\n}", "func FloatGobDecode(z *big.Float, buf []byte) error", "func decodeLeaseRequestGob(hdrBytes []byte, gobBytes []byte) (leaseReq *LeaseRequest, jreq *jsonRequest) {\n\tvar err error\n\n\tjreq = &jsonRequest{}\n\tioReq := &ioRequestRetryRpc{}\n\n\thdrBuf := bytes.NewBuffer(hdrBytes)\n\n\t// Unmarshal the IoRequest header (always binary)\n\terr = binary.Read(hdrBuf, binary.LittleEndian, &ioReq.Hdr.Len)\n\tif err != nil {\n\t\tpanic(\"ioReq.Hdr.Len\")\n\t}\n\terr = binary.Read(hdrBuf, binary.LittleEndian, &ioReq.Hdr.Protocol)\n\tif err != nil {\n\t\tpanic(\"ioReq.Hdr.Protocol\")\n\t}\n\terr = binary.Read(hdrBuf, binary.LittleEndian, &ioReq.Hdr.Version)\n\tif err != nil {\n\t\tpanic(\"ioReq.Hdr.Version\")\n\t}\n\terr = binary.Read(hdrBuf, binary.LittleEndian, &ioReq.Hdr.Type)\n\tif err != nil {\n\t\tpanic(\"ioReq.Hdr.Type\")\n\t}\n\terr = binary.Read(hdrBuf, binary.LittleEndian, &ioReq.Hdr.Magic)\n\tif err != nil {\n\t\tpanic(\"ioReq.Hdr.Magic\")\n\t}\n\n\t// now unmarshal the jsonRequest fields using gob (can't fail)\n\t_, _ = decodeLeaseRequestGobBuffer.Write(gobBytes)\n\terr = decodeLeaseRequestGobDecoder.Decode(jreq)\n\tif err != nil {\n\t\tpanic(\"decodeLeaseRequestGobDecoder.Decode\")\n\t}\n\tleaseReq = jreq.Params[0].(*LeaseRequest)\n\n\treturn\n}", "func gobEncode(data interface{}) []byte {\n\tvar buff bytes.Buffer\n\n\tenc := gob.NewEncoder(&buff)\n\terr := enc.Encode(data)\n\tif err != nil {\n\t\tlog.Panic(err)\n\t}\n\n\treturn buff.Bytes()\n}", "func NewGobTranscoder() *GobTranscoder {\n\tret := &GobTranscoder{\n\t\tinBytes: &bytes.Buffer{},\n\t\toutBytes: &bytes.Buffer{},\n\t\tencoderMut: &sync.Mutex{},\n\t\tdecoderMut: 
&sync.Mutex{},\n\t}\n\tret.encoder = gob.NewEncoder(ret.outBytes)\n\tret.decoder = gob.NewDecoder(ret.inBytes)\n\treturn ret\n}", "func (x *Rat) GobEncode() ([]byte, error) {}", "func (t *capsuleType) GobEncode() ([]byte, error) {\n\treturn nil, fmt.Errorf(\"cannot gob-encode capsule type %q\", t.FriendlyName(friendlyTypeName))\n}", "func (b *BoatHandle) Recv(e interface{}) error {\n\treturn b.stdoutGob.Decode(e)\n}", "func GobDecodeFromFile(filename string, object interface{}) error {\n file, err := os.Open(filename)\n if err != nil {\n // Might be caused by file does not exist\n return gobDebug.Error(err)\n }\n defer file.Close()\n decoder := gob.NewDecoder(file)\n if err := decoder.Decode(object); err != nil {\n return gobDebug.Error(err)\n }\n return nil\n}", "func (server *Server) handleRequestBlob(client *Client, message *Message) {\n\trequestBlob := &protocol.RequestBlob{}\n\terr := protobuf.Unmarshal(message.buffer, requestBlob)\n\tif err != nil {\n\t\tclient.Panic(err)\n\t\treturn\n\t}\n\n\t//userState := &protocol.UserState{}\n\n\t// Request for user textures\n\t// TODO: Why count if you only want to know 1 count?\n\tif len(requestBlob.SessionTexture) > 0 {\n\t\tfor _, sid := range requestBlob.SessionTexture {\n\t\t\tif target, ok := server.clients[sid]; ok {\n\t\t\t\t// TODO: NOT OK, use errors, don't leave everyone including yourself in the fucking dark\n\t\t\t\t// TODO: No, and its a validation!!!!!\n\t\t\t\tif target.user == nil {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tif target.user.HasTexture() {\n\t\t\t\t\t// TODO: Replace this shit alter, just get the first major structure changes in\n\t\t\t\t\t//buffer, err := blobStore.Get(target.user.TextureBlob)\n\t\t\t\t\t//if err != nil {\n\t\t\t\t\t//\tserver.Panic(err)\n\t\t\t\t\t//\treturn\n\t\t\t\t\t//}\n\t\t\t\t\t//userState.Reset()\n\t\t\t\t\t//userState.Session = protobuf.Uint32(uint32(target.Session()))\n\t\t\t\t\t//// TODO: What is a texture????? 
BETTER NAMES\n\t\t\t\t\t//userState.Texture = buffer\n\t\t\t\t\t//if err := client.sendMessage(userState); err != nil {\n\t\t\t\t\t//\tclient.Panic(err)\n\t\t\t\t\t//\treturn\n\t\t\t\t\t//}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\t// Request for user comments\n\t// TODO: Stop counting os high!\n\tif len(requestBlob.SessionComment) > 0 {\n\t\tfor _, sid := range requestBlob.SessionComment {\n\t\t\t// TODO: Err not ok!\n\t\t\tif target, ok := server.clients[sid]; ok {\n\t\t\t\t// TODO: REPEATED VALIDATION!!!!!\n\t\t\t\tif target.user == nil {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tif target.user.HasComment() {\n\t\t\t\t\t// TODO: Ughh just comment blob shit out now for the first major structure changes to work and tackle this after\n\t\t\t\t\t//buffer, err := requestBlob.Get(target.user.CommentBlob)\n\t\t\t\t\t//if err != nil {\n\t\t\t\t\t//\t// TODO: There is no reason to repeat these fucntions for each class, its just bad\n\t\t\t\t\t//\tserver.Panic(err)\n\t\t\t\t\t//\treturn\n\t\t\t\t\t//}\n\t\t\t\t\t//userState.Reset()\n\t\t\t\t\t//userState.Session = protobuf.Uint32(uint32(target.Session()))\n\t\t\t\t\t//userState.Comment = protobuf.String(string(buffer))\n\t\t\t\t\t//if err := client.sendMessage(userState); err != nil {\n\t\t\t\t\t//\tclient.Panic(err)\n\t\t\t\t\t//\treturn\n\t\t\t\t\t//}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\tchannelState := &protocol.ChannelState{}\n\n\t// Request for channel descriptions\n\t// TODO: Added up, there is SO MUCH WASTE. 
THESE ARE PER MESSAGE!\n\tif len(requestBlob.ChannelDescription) > 0 {\n\t\tfor _, cid := range requestBlob.ChannelDescription {\n\t\t\tif channel, ok := server.Channels[cid]; ok {\n\t\t\t\tif channel.HasDescription() {\n\t\t\t\t\tchannelState.Reset()\n\t\t\t\t\t//buffer, err := requestBlob.Get(channel.DescriptionBlob)\n\t\t\t\t\t//if err != nil {\n\t\t\t\t\t//\tserver.Panic(err)\n\t\t\t\t\t//\treturn\n\t\t\t\t\t//}\n\t\t\t\t\t//// TODO: you should be asking yourself, if you are doing a conversion everytime you use a variable, is there something majorly wrong? the answer is yes\n\t\t\t\t\t//channelState.ChannelID = protobuf.Uint32(channel.ID)\n\t\t\t\t\t//channelState.Description = protobuf.String(string(buffer))\n\t\t\t\t\t//if err := client.sendMessage(channelState); err != nil {\n\t\t\t\t\t//\tclient.Panic(err)\n\t\t\t\t\t//\treturn\n\t\t\t\t\t//}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n}", "func Unmarshal(data []byte, typ DataFormat, target interface{}) {\n\tswitch typ {\n\tcase GOB:\n\t\tbuf := bytes.NewReader(data)\n\t\tgob.NewDecoder(buf).Decode(target)\n\n\tdefault:\n\t\tif err := json.Unmarshal(data, target); err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t}\n}", "func gobHisto(vs ...*Histo) (err error) {\n\tfile, err := os.Create(\"r-g-b.gob\")\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer file.Close()\n\tenc := gob.NewEncoder(file)\n\tfor _, v := range vs {\n\t\terr = enc.Encode(v)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}", "func (b *Binance) SaveGob(file string) error {\n\tf, _ := os.OpenFile(file, os.O_RDWR|os.O_CREATE, 0777)\n\tgob.Register(&stg.KeepStrategy{})\n\tencode := gob.NewEncoder(f)\n\tif err := encode.Encode(b); err != nil {\n\t\treturn err\n\t}\n\tdefer f.Close()\n\treturn nil\n}", "func NewGOBCodec() *GOBCodec {\n\tr := GOBCodec(0)\n\treturn &r\n}", "func BenchmarkDecodingGobTweet(b *testing.B) {\n\tb.StartTimer()\n\tfor i := 0; i < b.N; i++ {\n\t\ttw := Tweet{}\n\t\tdec := gob.NewDecoder(&gobTw)\n\t\terr := 
dec.Decode(&tw)\n\t\tif err != nil {\n\t\t\tb.Fatalf(\"Error unmarshaling json: %v\", err)\n\t\t}\n\t}\n}", "func (m *Model) GOB() ([]byte, error) {\n\tif m.Emm != nil {\n\t\treturn m.Emm.GOB()\n\t}\n\tif m.Snow != nil {\n\t\treturn m.Snow.GOB()\n\t}\n\treturn nil, nil\n}", "func SaveGob(path string, object interface{}) error {\n\tfile, err := os.Create(path)\n\tdefer file.Close()\n\tif err == nil {\n\t\tencoder := gob.NewEncoder(file)\n\t\tencoder.Encode(object)\n\t}\n\treturn err\n}", "func TestPutGOB(t *testing.T) {\n\tassert := audit.NewTestingAssertion(t, true)\n\t// Setup the test server.\n\tmux := newMultiplexer(assert)\n\tts := restaudit.StartServer(mux, assert)\n\tdefer ts.Close()\n\terr := mux.Register(\"test\", \"gob\", NewTestHandler(\"putgob\", assert))\n\tassert.Nil(err)\n\t// Perform test requests.\n\treq := restaudit.NewRequest(\"POST\", \"/base/test/gob\")\n\treqData := TestCounterData{\"test\", 4711}\n\treq.MarshalBody(assert, restaudit.ApplicationGOB, reqData)\n\tresp := ts.DoRequest(req)\n\tresp.AssertStatusEquals(200)\n\trespData := TestCounterData{}\n\tresp.AssertUnmarshalledBody(&respData)\n\tassert.Equal(respData, reqData)\n}", "func (serv *Server) handleBin(conn int, payload []byte) {}", "func (proxy *HuobiProxy) decode(message interface{}) error {\n\tgzip, err := gzip.NewReader(proxy.conn)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tdecoder := json.NewDecoder(gzip)\n\terr = decoder.Decode(message)\n\n\treturn err\n}", "func TestEncodeDecodeGob(t *testing.T) {\n\ttestEncodeDecodeFunctions(t,\n\t\tencodeLeaseRequestGob, encodeLeaseReplyGob,\n\t\tdecodeLeaseRequestGob, decodeLeaseReplyGob)\n}", "func encodeLeaseRequestGob(leaseReq *LeaseRequest, jreq *jsonRequest) (hdrBytes []byte, gobBytes []byte) {\n\tvar err error\n\n\thdrBuf := &bytes.Buffer{}\n\n\t// the Lease Request is part of the gob request\n\tjreq.Params[0] = leaseReq\n\n\t// marshal jreq (and lease request)\n\terr = encodeLeaseRequestGobEncoder.Encode(jreq)\n\tif err != nil 
{\n\t\tpanic(\"encodeLeaseRequestGobEncoder\")\n\t}\n\n\t// consume the results encoded in the (global) buffer\n\tgobBytes = make([]byte, encodeLeaseRequestGobBuffer.Len())\n\tn, err := encodeLeaseRequestGobBuffer.Read(gobBytes)\n\tif n != cap(gobBytes) {\n\t\tpanic(\"didn't read enough bytes\")\n\t}\n\n\t// now create the IoRequest header and Marshal it\n\t// (this is always binary)\n\tioReq := ioRequestRetryRpc{\n\t\tHdr: ioHeader{\n\t\t\tLen: uint32(len(gobBytes)),\n\t\t\tProtocol: uint16(1),\n\t\t\tVersion: 1,\n\t\t\tType: 1,\n\t\t\tMagic: 0xCAFEFEED,\n\t\t},\n\t}\n\n\terr = binary.Write(hdrBuf, binary.LittleEndian, ioReq.Hdr.Len)\n\tif err != nil {\n\t\tpanic(\"ioReq.Hdr.Len\")\n\t}\n\terr = binary.Write(hdrBuf, binary.LittleEndian, ioReq.Hdr.Protocol)\n\tif err != nil {\n\t\tpanic(\"ioReq.Hdr.Protocol\")\n\t}\n\terr = binary.Write(hdrBuf, binary.LittleEndian, ioReq.Hdr.Version)\n\tif err != nil {\n\t\tpanic(\"ioReq.Hdr.Version\")\n\t}\n\terr = binary.Write(hdrBuf, binary.LittleEndian, ioReq.Hdr.Type)\n\tif err != nil {\n\t\tpanic(\"ioReq.Hdr.Type\")\n\t}\n\terr = binary.Write(hdrBuf, binary.LittleEndian, ioReq.Hdr.Magic)\n\tif err != nil {\n\t\tpanic(\"ioReq.Hdr.Magic\")\n\t}\n\n\thdrBytes = hdrBuf.Bytes()\n\treturn\n}", "func GobMarshal(i interface{}) ([]byte, error) {\n\tbuf := bytes.NewBuffer(nil)\n\tencoder := gob.NewEncoder(buf)\n\terr := encoder.Encode(i)\n\treturn buf.Bytes(), err\n}", "func handleEscreveBloco(writer http.ResponseWriter, req *http.Request) {\n\twriter.Header().Set(\"Content-Type\", \"application/json\")\n\tvar m Mensagem\n\n\tdecoder := json.NewDecoder(req.Body)\n\tif err := decoder.Decode(&m); err != nil {\n\t\trespondWithJSON(writer, req, http.StatusBadRequest, req.Body)\n\t\treturn\n\t}\n\tdefer req.Body.Close()\n\n\t//garante atomicidade ao criar o bloco\n\tmutex.Lock()\n\n\tnovoBloco := geraBloco(Blockchain[len(Blockchain)-1], m.Dados, m.Dificuldade)\n\n\t//desfaz o lock\n\tmutex.Unlock()\n\n\tif blocoValido(novoBloco, 
Blockchain[len(Blockchain)-1]) {\n\t\tBlockchain = append(Blockchain, novoBloco)\n\t\tspew.Dump(novoBloco)\n\t}\n\n\trespondWithJSON(writer, req, http.StatusCreated, novoBloco)\n}", "func decodeLeaseReplyGob(hdrBytes []byte, gobBytes []byte) (leaseReply *LeaseReply, jreply *jsonReply) {\n\tvar err error\n\n\tleaseReply = &LeaseReply{}\n\tjreply = &jsonReply{}\n\tjreply.Result = leaseReply\n\tioReply := &ioReplyRetryRpc{}\n\n\thdrBuf := bytes.NewBuffer(hdrBytes)\n\n\t// Unmarshal the IoReply header (always binary)\n\terr = binary.Read(hdrBuf, binary.LittleEndian, &ioReply.Hdr.Len)\n\tif err != nil {\n\t\tpanic(\"ioReply.Hdr.Len\")\n\t}\n\terr = binary.Read(hdrBuf, binary.LittleEndian, &ioReply.Hdr.Protocol)\n\tif err != nil {\n\t\tpanic(\"ioReply.Hdr.Protocol\")\n\t}\n\terr = binary.Read(hdrBuf, binary.LittleEndian, &ioReply.Hdr.Version)\n\tif err != nil {\n\t\tpanic(\"ioReply.Hdr.Version\")\n\t}\n\terr = binary.Read(hdrBuf, binary.LittleEndian, &ioReply.Hdr.Type)\n\tif err != nil {\n\t\tpanic(\"ioReply.Hdr.Type\")\n\t}\n\terr = binary.Read(hdrBuf, binary.LittleEndian, &ioReply.Hdr.Magic)\n\tif err != nil {\n\t\tpanic(\"ioReply.Hdr.Magic\")\n\t}\n\n\t// now unmarshal the jsonReply fields using gob (can't fail)\n\t_, _ = decodeLeaseReplyGobBuffer.Write(gobBytes)\n\terr = decodeLeaseReplyGobDecoder.Decode(jreply)\n\tif err != nil {\n\t\tpanic(\"decodeLeaseReplyGobDecoder.Decode\")\n\t}\n\tleaseReply = jreply.Result.(*LeaseReply)\n\n\treturn\n}", "func BenchmarkRpcLeaseDecodeGob(b *testing.B) {\n\tbenchmarkRpcLeaseDecode(b,\n\t\tencodeLeaseRequestGob, encodeLeaseReplyGob,\n\t\tdecodeLeaseRequestGob, decodeLeaseReplyGob)\n}", "func (server *Server) handleRequestBlob(client *Client, msg *Message) {\n\tblobreq := &mumbleproto.RequestBlob{}\n\terr := proto.Unmarshal(msg.buf, blobreq)\n\tif err != nil {\n\t\tclient.Panic(err)\n\t\treturn\n\t}\n\n\tuserstate := &mumbleproto.UserState{}\n\n\t// Request for user textures\n\tif len(blobreq.SessionTexture) > 0 {\n\t\tfor _, sid 
:= range blobreq.SessionTexture {\n\t\t\tif target, ok := server.clients[sid]; ok {\n\t\t\t\tif target.user == nil {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tif target.user.HasTexture() {\n\t\t\t\t\tbuf, err := BlobStore.Get(target.user.TextureBlob)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\tserver.Panicf(\"Blobstore error: %v\", err)\n\t\t\t\t\t\treturn\n\t\t\t\t\t}\n\t\t\t\t\tuserstate.Reset()\n\t\t\t\t\tuserstate.Session = proto.Uint32(uint32(target.Session()))\n\t\t\t\t\tuserstate.Texture = buf\n\t\t\t\t\tif err := client.sendMessage(userstate); err != nil {\n\t\t\t\t\t\tclient.Panic(err)\n\t\t\t\t\t\treturn\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\t// Request for user comments\n\tif len(blobreq.SessionComment) > 0 {\n\t\tfor _, sid := range blobreq.SessionComment {\n\t\t\tif target, ok := server.clients[sid]; ok {\n\t\t\t\tif target.user == nil {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tif target.user.HasComment() {\n\t\t\t\t\tbuf, err := BlobStore.Get(target.user.CommentBlob)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\tserver.Panicf(\"Blobstore error: %v\", err)\n\t\t\t\t\t\treturn\n\t\t\t\t\t}\n\t\t\t\t\tuserstate.Reset()\n\t\t\t\t\tuserstate.Session = proto.Uint32(uint32(target.Session()))\n\t\t\t\t\tuserstate.Comment = proto.String(string(buf))\n\t\t\t\t\tif err := client.sendMessage(userstate); err != nil {\n\t\t\t\t\t\tclient.Panic(err)\n\t\t\t\t\t\treturn\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\tchanstate := &mumbleproto.ChannelState{}\n\n\t// Request for channel descriptions\n\tif len(blobreq.ChannelDescription) > 0 {\n\t\tfor _, cid := range blobreq.ChannelDescription {\n\t\t\tif channel, ok := server.Channels[int(cid)]; ok {\n\t\t\t\tif channel.HasDescription() {\n\t\t\t\t\tchanstate.Reset()\n\t\t\t\t\tbuf, err := BlobStore.Get(channel.DescriptionBlob)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\tserver.Panicf(\"Blobstore error: %v\", err)\n\t\t\t\t\t\treturn\n\t\t\t\t\t}\n\t\t\t\t\tchanstate.ChannelId = 
proto.Uint32(uint32(channel.Id))\n\t\t\t\t\tchanstate.Description = proto.String(string(buf))\n\t\t\t\t\tif err := client.sendMessage(chanstate); err != nil {\n\t\t\t\t\t\tclient.Panic(err)\n\t\t\t\t\t\treturn\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n}", "func GobEncode(v interface{}) ([]byte, error) {\n\tvar buf bytes.Buffer\n\terr := gob.NewEncoder(&buf).Encode(&v)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn buf.Bytes(), nil\n}", "func (s *BasePlSqlParserListener) EnterLob_segname(ctx *Lob_segnameContext) {}", "func GobEncode(value interface{}) []byte {\n buf := bytes.NewBuffer(make([]byte, 0, 1024))\n encoder := gob.NewEncoder(buf)\n // encode unknown type might cause some error\n err := encoder.Encode(value)\n if err != nil {\n gobDebug.Panicf(\"Failed to encode a value: %+v\\n%v\\n\", value, err)\n }\n return buf.Bytes()\n}", "func (info *ImageInfoType) GobEncode() (buf []byte, err error) {\n\tfields := []interface{}{info.data, info.smask, info.n, info.w, info.h, info.cs,\n\t\tinfo.pal, info.bpc, info.f, info.dp, info.trns, info.scale, info.dpi}\n\tw := new(bytes.Buffer)\n\tencoder := gob.NewEncoder(w)\n\tfor j := 0; j < len(fields) && err == nil; j++ {\n\t\terr = encoder.Encode(fields[j])\n\t}\n\tif err == nil {\n\t\tbuf = w.Bytes()\n\t}\n\treturn\n}", "func gobEncode(value interface{}) ([]byte, error) {\n\tvar buf bytes.Buffer\n\tenc := gob.NewEncoder(&buf)\n\terr := enc.Encode(value)\n\treturn buf.Bytes(), err\n}", "func (s *DataAPI) handleOrderBook(thing interface{}) (interface{}, error) {\n\treq, ok := thing.(*msgjson.OrderBookSubscription)\n\tif !ok {\n\t\treturn nil, fmt.Errorf(\"orderbook request unparseable\")\n\t}\n\n\tmkt, err := dex.MarketName(req.Base, req.Quote)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"can't parse requested market\")\n\t}\n\treturn s.bookSource.Book(mkt)\n}", "func FacebookCallbackGETHandler(w http.ResponseWriter, r *http.Request) {\r\n\r\n\tc := appengine.NewContext(r)\r\n\tlog.Debugf(c, \">>>> 
FacebookCallbackGETHandler\")\r\n\r\n\tmode := r.FormValue(\"hub.mode\")\r\n\tlog.Debugf(c, \"Hub Mode: %v\", mode)\r\n\r\n\tchallenge := r.FormValue(\"hub.challenge\")\r\n\tlog.Debugf(c, \"Hub Challenge: %v\", challenge)\r\n\r\n\tverify_token := r.FormValue(\"hub.verify_token\")\r\n\tlog.Debugf(c, \"Hub Verify Token: %v\", verify_token)\r\n\r\n\tif verify_token != VERIFY_TOKEN {\r\n\t\tlog.Errorf(c, \"Error, bad verification token: %v\", verify_token)\r\n\t\thttp.Error(w, \"Bad Request\", http.StatusBadRequest)\r\n\t\treturn\r\n\t}\r\n\r\n\tif mode != \"subscribe\" {\r\n\t\tlog.Errorf(c, \"Error, bad mode: %v\", mode)\r\n\t\thttp.Error(w, \"Bad Request\", http.StatusBadRequest)\r\n\t\treturn\r\n\t}\r\n\r\n\tfmt.Fprintf(w, \"%v\", challenge)\r\n}", "func (client *GremlinResourcesClient) getGremlinGraphHandleResponse(resp *http.Response) (GremlinResourcesClientGetGremlinGraphResponse, error) {\n\tresult := GremlinResourcesClientGetGremlinGraphResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.GremlinGraphGetResults); err != nil {\n\t\treturn GremlinResourcesClientGetGremlinGraphResponse{}, err\n\t}\n\treturn result, nil\n}", "func (s *BasePlSqlParserListener) EnterLob_item(ctx *Lob_itemContext) {}", "func (g *Gammas) GobEncode() ([]byte, error) {\n\tbuff := bytes.Buffer{}\n\tif g != nil {\n\t\tfor _, g2 := range *g {\n\t\t\tbuff.Write(g2.Marshal())\n\t\t}\n\t}\n\treturn buff.Bytes(), nil\n}", "func writeLob(wr *bufio.Writer) error {\n\n\tif err := wr.WriteByte(0); err != nil {\n\t\treturn err\n\t}\n\tif err := wr.WriteInt32(0); err != nil {\n\t\treturn err\n\t}\n\tif err := wr.WriteInt32(0); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}", "func (m *Messenger) handle(w http.ResponseWriter, r *http.Request) {\n\tif r.Method == \"GET\" {\n\t\tm.verifyHandler(w, r)\n\t\treturn\n\t}\n\n\tvar rec Receive\n\n\t// consume a *copy* of the request body\n\tbody, _ := ioutil.ReadAll(r.Body)\n\tr.Body = 
ioutil.NopCloser(bytes.NewBuffer(body))\n\n\terr := json.Unmarshal(body, &rec)\n\tif err != nil {\n\t\terr = xerrors.Errorf(\"could not decode response: %w\", err)\n\t\tfmt.Println(err)\n\t\tfmt.Println(\"could not decode response:\", err)\n\t\trespond(w, http.StatusBadRequest)\n\t\treturn\n\t}\n\n\tif rec.Object != \"page\" {\n\t\tfmt.Println(\"Object is not page, undefined behaviour. Got\", rec.Object)\n\t\trespond(w, http.StatusUnprocessableEntity)\n\t\treturn\n\t}\n\n\tif m.verify {\n\t\tif err := m.checkIntegrity(r); err != nil {\n\t\t\tfmt.Println(\"could not verify request:\", err)\n\t\t\trespond(w, http.StatusUnauthorized)\n\t\t\treturn\n\t\t}\n\t}\n\n\tm.dispatch(rec)\n\n\trespond(w, http.StatusAccepted) // We do not return any meaningful response immediately so it should be 202\n}", "func encodeLeaseReplyGob(leaseReply *LeaseReply, jreply *jsonReply) (hdrBytes []byte, gobBytes []byte) {\n\tvar err error\n\n\thdrBuf := &bytes.Buffer{}\n\n\t// the Lease Reply is part of the JSON reply\n\tjreply.Result = leaseReply\n\n\t// marshal jreq (and lease request)\n\terr = encodeLeaseReplyGobEncoder.Encode(jreply)\n\tif err != nil {\n\t\tpanic(\"encodeLeaseReplyGobEncoder\")\n\t}\n\n\t// consume the results encoded in the (global) buffer\n\tgobBytes = make([]byte, encodeLeaseReplyGobBuffer.Len())\n\tn, err := encodeLeaseReplyGobBuffer.Read(gobBytes)\n\tif n != cap(gobBytes) {\n\t\tpanic(\"didn't read enough bytes\")\n\t}\n\n\t// now create the IoReply header and Marshal it\n\t// (this is always binary)\n\tioReply := ioReplyRetryRpc{\n\t\tHdr: ioHeader{\n\t\t\tLen: uint32(len(gobBytes)),\n\t\t\tProtocol: uint16(1),\n\t\t\tVersion: 1,\n\t\t\tType: 1,\n\t\t\tMagic: 0xCAFEFEED,\n\t\t},\n\t}\n\n\terr = binary.Write(hdrBuf, binary.LittleEndian, ioReply.Hdr.Len)\n\tif err != nil {\n\t\tpanic(\"ioReply.Hdr.Len\")\n\t}\n\terr = binary.Write(hdrBuf, binary.LittleEndian, ioReply.Hdr.Protocol)\n\tif err != nil {\n\t\tpanic(\"ioReply.Hdr.Protocol\")\n\t}\n\terr = 
binary.Write(hdrBuf, binary.LittleEndian, ioReply.Hdr.Version)\n\tif err != nil {\n\t\tpanic(\"ioReply.Hdr.Version\")\n\t}\n\terr = binary.Write(hdrBuf, binary.LittleEndian, ioReply.Hdr.Type)\n\tif err != nil {\n\t\tpanic(\"ioReply.Hdr.Type\")\n\t}\n\terr = binary.Write(hdrBuf, binary.LittleEndian, ioReply.Hdr.Magic)\n\tif err != nil {\n\t\tpanic(\"ioReply.Hdr.Magic\")\n\t}\n\n\thdrBytes = hdrBuf.Bytes()\n\treturn\n}", "func (recv *Value) GetObject() Object {\n\tretC := C.g_value_get_object((*C.GValue)(recv.native))\n\tretGo := *ObjectNewFromC(unsafe.Pointer(retC))\n\n\treturn retGo\n}", "func NewGobDecoderLight() *GobDecoderLight {\n\tret := &GobDecoderLight{\n\t\tbytes: &bytes.Buffer{},\n\t}\n\tret.decoder = gob.NewDecoder(ret.bytes)\n\treturn ret\n}", "func (s *CountMinSketch) GobEncode() ([]byte, error) {\n\tvar buf bytes.Buffer\n\t_, err := s.WriteTo(&buf)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn buf.Bytes(), nil\n}", "func (d *Person) GobEncode() ([]byte, error) {\n\tw := new(bytes.Buffer)\n\tencoder := gob.NewEncoder(w)\n\terr := encoder.Encode(d.Id)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\terr = encoder.Encode(d.Name)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn w.Bytes(), nil\n}", "func HandleDecompression(r *retryablehttp.Request, bodyOrig []byte) (bodyDec []byte, err error) {\n\tencodingHeader := strings.ToLower(r.Header.Get(\"Accept-Encoding\"))\n\tif encodingHeader == \"gzip\" {\n\t\tgzipreader, err := gzip.NewReader(bytes.NewReader(bodyOrig))\n\t\tif err != nil {\n\t\t\treturn bodyDec, err\n\t\t}\n\t\tdefer gzipreader.Close()\n\n\t\tbodyDec, err = ioutil.ReadAll(gzipreader)\n\t\tif err != nil {\n\t\t\treturn bodyDec, err\n\t\t}\n\n\t\treturn bodyDec, nil\n\t}\n\n\treturn bodyOrig, nil\n}", "func EncodeGobZlib(p interface{}) (data []byte, err error) {\n\tb := bytes.Buffer{}\n\tcompressor, err := zlib.NewWriterLevel(&b, zlib.BestCompression)\n\tif err != nil {\n\t\treturn\n\t}\n\tencoder := 
gob.NewEncoder(compressor)\n\terr = encoder.Encode(p)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = compressor.Close()\n\tif err != nil {\n\t\treturn\n\t}\n\n\tdata = b.Bytes()\n\treturn\n}", "func DecodeGMMessage(ws *websocket.Conn) (*GMMessage, error) {\n\n\t// gob decoding\n\tvar m GMMessage\n\tvar msg = make([]byte, 2048)\n\tl, err := ws.Read(msg)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"DecodeGMMessage() ws.Read() error: %s\", err)\n\t}\n\traw := msg[0:l]\n\tdecBuf := bytes.NewBuffer(raw)\n\terr = gob.NewDecoder(decBuf).Decode(&m)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"DecodeGMMessage() gob.Decode() error: %s\", err)\n\t}\n\treturn &m, nil\n}", "func (b BoatAPI) Recv(e interface{}) error {\n\treturn b.stdin.Decode(e)\n}", "func ParseBinReader(r io.Reader, path string) (*GLTF, error) {\n\n\t// Read header\n\tvar header GLBHeader\n\terr := binary.Read(r, binary.LittleEndian, &header)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Check magic and version\n\tif header.Magic != GLBMagic {\n\t\treturn nil, fmt.Errorf(\"invalid GLB Magic field\")\n\t}\n\tif header.Version < 2 {\n\t\treturn nil, fmt.Errorf(\"GLB version:%v not supported\", header.Version)\n\t}\n\n\t// Read first chunk (JSON)\n\tbuf, err := readChunk(r, GLBJson)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Parse JSON into gltf object\n\tbb := bytes.NewBuffer(buf)\n\tgltf, err := ParseJSONReader(bb, path)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Check for and read second chunk (binary, optional)\n\tdata, err := readChunk(r, GLBBin)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tgltf.data = data\n\n\treturn gltf, nil\n}", "func (s *BasePlSqlParserListener) EnterLob_parameters(ctx *Lob_parametersContext) {}", "func BobPurchaseDataAPIHandler(w http.ResponseWriter, r *http.Request) {\n\tLog := Logger.NewSessionLogger()\n\n\tLog.Infof(\"start purchase data...\")\n\tvar plog PodLog\n\tplog.Result = LOG_RESULT_FAILED\n\tplog.Operation = 
LOG_OPERATION_TYPE_BOB_TX\n\tdefer func() {\n\t\terr := insertLogToDB(plog)\n\t\tif err != nil {\n\t\t\tLog.Warnf(\"insert log error! %v\", err)\n\t\t\treturn\n\t\t}\n\t\tnodeRecovery(w, Log)\n\t}()\n\n\trequestData := r.FormValue(\"request_data\")\n\tvar data RequestData\n\terr := json.Unmarshal([]byte(requestData), &data)\n\tif err != nil {\n\t\tLog.Warnf(\"invalid parameter. data=%v, err=%v\", requestData, err)\n\t\tfmt.Fprintf(w, RESPONSE_INCOMPLETE_PARAM)\n\t\treturn\n\t}\n\tLog.Debugf(\"success to parse request data. data=%v\", requestData)\n\n\tif data.MerkleRoot == \"\" || data.AliceIP == \"\" || data.AliceAddr == \"\" || data.BulletinFile == \"\" || data.PubPath == \"\" {\n\t\tLog.Warnf(\"invalid parameter. merkleRoot=%v, AliceIP=%v, AliceAddr=%v, bulletinFile=%v, PubPath=%v\",\n\t\t\tdata.MerkleRoot, data.AliceIP, data.AliceAddr, data.BulletinFile, data.PubPath)\n\t\tfmt.Fprintf(w, RESPONSE_INCOMPLETE_PARAM)\n\t\treturn\n\t}\n\tLog.Debugf(\"read parameters. merkleRoot=%v, AliceIP=%v, AliceAddr=%v, bulletinFile=%v, PubPath=%v\",\n\t\tdata.MerkleRoot, data.AliceIP, data.AliceAddr, data.BulletinFile, data.PubPath)\n\n\tplog.Detail = fmt.Sprintf(\"merkleRoot=%v, AliceIP=%v, AliceAddr=%v, bulletinFile=%v, PubPath=%v\",\n\t\tdata.MerkleRoot, data.AliceIP, data.AliceAddr, data.BulletinFile, data.PubPath)\n\n\tbulletin, err := readBulletinFile(data.BulletinFile, Log)\n\tif err != nil {\n\t\tLog.Warnf(\"failed to read bulletin File. err=%v\", err)\n\t\tfmt.Fprintf(w, RESPONSE_PURCHASE_FAILED)\n\t\treturn\n\t}\n\tplog.Detail = fmt.Sprintf(\"%v, merkle root=%v,\", plog.Detail, bulletin.SigmaMKLRoot)\n\n\tLog.Debugf(\"step0: prepare for transaction...\")\n\tvar params = BobConnParam{data.AliceIP, data.AliceAddr, bulletin.Mode, data.SubMode, data.OT, data.UnitPrice, \"\", bulletin.SigmaMKLRoot}\n\tnode, conn, params, err := preBobConn(params, ETHKey, Log)\n\tif err != nil {\n\t\tLog.Warnf(\"failed to prepare net for transaction. 
err=%v\", err)\n\t\tfmt.Fprintf(w, RESPONSE_PURCHASE_FAILED)\n\t\treturn\n\t}\n\tdefer func() {\n\t\tif err := node.Close(); err != nil {\n\t\t\tfmt.Errorf(\"failed to close client node: %v\", err)\n\t\t}\n\t\tif err := conn.Close(); err != nil {\n\t\t\tLog.Errorf(\"failed to close connection on client side: %v\", err)\n\t\t}\n\t}()\n\tLog.Debugf(\"[%v]step0: success to establish connecting session with Alice. Alice IP=%v, Alice address=%v\", params.SessionID, params.AliceIPAddr, params.AliceAddr)\n\tplog.Detail = fmt.Sprintf(\"%v, sessionID=%v,\", plog.Detail, params.SessionID)\n\tplog.SessionId = params.SessionID\n\n\tvar tx BobTransaction\n\ttx.SessionID = params.SessionID\n\ttx.Status = TRANSACTION_STATUS_START\n\ttx.Bulletin = bulletin\n\ttx.AliceIP = params.AliceIPAddr\n\ttx.AliceAddr = params.AliceAddr\n\ttx.Mode = params.Mode\n\ttx.SubMode = params.SubMode\n\ttx.OT = params.OT\n\ttx.UnitPrice = params.UnitPrice\n\ttx.BobAddr = fmt.Sprintf(\"%v\", ETHKey.Address.Hex())\n\n\tLog.Debugf(\"[%v]step0: success to prepare for transaction...\", params.SessionID)\n\ttx.Status = TRANSACTION_STATUS_START\n\terr = insertBobTxToDB(tx)\n\tif err != nil {\n\t\tLog.Warnf(\"failed to save transaction to db for Bob. 
err=%v\", err)\n\t\tfmt.Fprintf(w, fmt.Sprintf(RESPONSE_TRANSACTION_FAILED, \"failed to save transaction to db for Bob.\"))\n\t\treturn\n\t}\n\n\tvar response string\n\tif tx.Mode == TRANSACTION_MODE_PLAIN_POD {\n\t\tswitch tx.SubMode {\n\t\tcase TRANSACTION_SUB_MODE_COMPLAINT:\n\t\t\tif tx.OT {\n\t\t\t\tresponse = BobTxForPOC(node, ETHKey, tx, data.Demands, data.Phantoms, data.BulletinFile, data.PubPath, Log)\n\t\t\t} else {\n\t\t\t\tresponse = BobTxForPC(node, ETHKey, tx, data.Demands, data.BulletinFile, data.PubPath, Log)\n\t\t\t}\n\t\tcase TRANSACTION_SUB_MODE_ATOMIC_SWAP:\n\t\t\tresponse = BobTxForPAS(node, ETHKey, tx, data.Demands, data.BulletinFile, data.PubPath, Log)\n\t\t}\n\t} else if tx.Mode == TRANSACTION_MODE_TABLE_POD {\n\t\tswitch tx.SubMode {\n\t\tcase TRANSACTION_SUB_MODE_COMPLAINT:\n\t\t\tif tx.OT {\n\t\t\t\tresponse = BobTxForTOC(node, ETHKey, tx, data.Demands, data.Phantoms, data.BulletinFile, data.PubPath, Log)\n\t\t\t} else {\n\t\t\t\tresponse = BobTxForTC(node, ETHKey, tx, data.Demands, data.BulletinFile, data.PubPath, Log)\n\t\t\t}\n\t\tcase TRANSACTION_SUB_MODE_ATOMIC_SWAP:\n\t\t\tresponse = BobTxForTAS(node, ETHKey, tx, data.Demands, data.BulletinFile, data.PubPath, Log)\n\t\tcase TRANSACTION_SUB_MODE_VRF:\n\t\t\tif tx.OT {\n\t\t\t\tresponse = BobTxForTOQ(node, ETHKey, tx, data.KeyName, data.KeyValue, data.PhantomKeyValue, data.BulletinFile, data.PubPath, Log)\n\t\t\t} else {\n\t\t\t\tresponse = BobTxForTQ(node, ETHKey, tx, data.KeyName, data.KeyValue, data.BulletinFile, data.PubPath, Log)\n\t\t\t}\n\t\t}\n\t}\n\tvar resp Response\n\terr = json.Unmarshal([]byte(response), &resp)\n\tif err != nil {\n\t\tLog.Warnf(\"failed to parse response. response=%v, err=%v\", response, err)\n\t\tfmt.Fprintf(w, RESPONSE_FAILED_TO_RESPONSE)\n\t\treturn\n\t}\n\tif resp.Code == \"0\" {\n\t\tplog.Result = LOG_RESULT_SUCCESS\n\t}\n\tLog.Debugf(\"[%v]the transaction finish. 
merkel root=%v, response=%v\", params.SessionID, bulletin.SigmaMKLRoot, response)\n\tfmt.Fprintf(w, response)\n\treturn\n}", "func (tg *TradesGroup) handleMessage(msg []byte) {\n\tvar resp []interface{}\n\tvar e event\n\tvar err error\n\n\tif err := json.Unmarshal(msg, &resp); err != nil {\n\t\treturn\n\t}\n\tchanID := int64Value(resp[0])\n\tif chanID > 0 {\n\t\te, err = tg.get(chanID)\n\t\tif err != nil {\n\t\t\tlog.Println(\"[BITFINEX] Error getting subscriptions: \", chanID, err)\n\t\t\treturn\n\t\t}\n\t} else {\n\t\treturn\n\t}\n\n\tif ut, ok := resp[1].(string); ok {\n\t\tif ut == \"hb\" {\n\t\t\treturn\n\t\t}\n\t\tif ut == \"tu\" {\n\t\t\t// handling update\n\t\t\tdataType := \"u\"\n\t\t\tif d, ok := resp[2].([]interface{}); ok {\n\t\t\t\ttrade := tg.mapTrade(e.Symbol, d)\n\t\t\t\tgo tg.publish([]schemas.Trade{trade}, dataType, nil)\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}\n\tif snap, ok := resp[1].([]interface{}); ok {\n\t\t// handling snapshot\n\t\tvar trades []schemas.Trade\n\t\tdataType := \"s\"\n\t\tfor _, trade := range snap {\n\t\t\tif d, ok := trade.([]interface{}); ok {\n\t\t\t\ttrades = append(trades, tg.mapTrade(e.Symbol, d))\n\t\t\t}\n\t\t}\n\t\tgo tg.publish(trades, dataType, nil)\n\t\treturn\n\t}\n\treturn\n}", "func (serv *Server) handleBadRequest(conn int) {\n\tvar (\n\t\tlogp = `handleBadRequest`\n\t\tframeClose []byte = NewFrameClose(false, StatusBadRequest, nil)\n\n\t\terr error\n\t)\n\n\terr = Send(conn, frameClose, serv.Options.ReadWriteTimeout)\n\tif err != nil {\n\t\tlog.Printf(`%s: %s`, logp, err)\n\t\tgoto out\n\t}\n\n\t_, err = Recv(conn, serv.Options.ReadWriteTimeout)\n\tif err != nil {\n\t\tlog.Printf(`%s: %s`, logp, err)\n\t}\nout:\n\tserv.ClientRemove(conn)\n}", "func Unmarshal(data []byte, o interface{}) error {\n\tbuf := bytes.NewBuffer(data)\n\tdecoder := gob.NewDecoder(buf)\n\n\terr := decoder.Decode(o)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}" ]
[ "0.6926054", "0.66555226", "0.6592844", "0.6570786", "0.6549313", "0.64678264", "0.6391482", "0.62011355", "0.6196608", "0.61929303", "0.61607766", "0.59881604", "0.5935874", "0.5881003", "0.58700323", "0.58210737", "0.58197033", "0.57850504", "0.5690908", "0.5690442", "0.5686748", "0.56638813", "0.56572914", "0.5608017", "0.55713826", "0.55372554", "0.5523141", "0.54800683", "0.5429624", "0.5420902", "0.54192984", "0.53919137", "0.53774", "0.5356768", "0.5334358", "0.532737", "0.5301362", "0.5293168", "0.5288366", "0.5273113", "0.52659565", "0.5260154", "0.52507067", "0.5224778", "0.5180355", "0.51612175", "0.5159923", "0.51460814", "0.5133972", "0.5119151", "0.51149905", "0.50819886", "0.50699764", "0.50220436", "0.4986858", "0.49383414", "0.49376664", "0.4926881", "0.48956898", "0.48747617", "0.48611993", "0.4855154", "0.48474643", "0.4822705", "0.48036206", "0.4782992", "0.4777453", "0.4766349", "0.47632957", "0.47583956", "0.47030598", "0.46997923", "0.4695346", "0.46656644", "0.46491122", "0.46442923", "0.46354702", "0.46353072", "0.46335134", "0.46263716", "0.46195844", "0.4612684", "0.46069264", "0.4592263", "0.45920724", "0.45889163", "0.45848116", "0.45788744", "0.45721906", "0.4567531", "0.45659015", "0.45588705", "0.45518538", "0.45466805", "0.45461625", "0.45418277", "0.45380887", "0.45158634", "0.45107564", "0.45102805" ]
0.8036416
0
/ Main Main starts either a client or a server, depending on whether the `connect` flag is set. Without the flag, the process starts as a server, listening for incoming requests. With the flag the process starts as a client and connects to the host specified by the flag value. Try "localhost" or "127.0.0.1" when running both processes on the same machine. main
/ Main Main запускает либо клиент, либо сервер в зависимости от того, установлена ли флаг `connect`. Без флага процесс запускается как сервер, ожидающий входящие запросы. С флагом процесс запускается как клиент и подключается к хосту, указанному значением флага. Попробуйте "localhost" или "127.0.0.1", когда запускаете оба процесса на одной машине. main
func main() { connect := flag.String("connect", "", "IP address of process to join. If empty, go into listen mode.") flag.Parse() // If the connect flag is set, go into client mode. if *connect != "" { err := client(*connect) if err != nil { log.Println("Error:", errors.WithStack(err)) } log.Println("Client done.") return } // Else go into server mode. err := server() if err != nil { log.Println("Error:", errors.WithStack(err)) } log.Println("Server done.") }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func main() {\n\tflag.StringVar(&MODE, \"mode\", MODE, \"server/client\")\n\tflag.StringVar(&SERVER_ADDR, \"server\", SERVER_ADDR, \"mode: server => listen, mode: client => connect to\")\n\tflag.StringVar(&PayLoad, \"pl\", PayLoad, \"PayLoad\")\n\tflag.BoolVar(&PrintDump, \"d\", PrintDump, \"Print dump\")\n\tflag.PrintDefaults()\n\tflag.Parse()\n\n\tswitch strings.ToUpper(MODE) {\n\tcase \"S\":\n\t\tserver(SERVER_ADDR)\n\tdefault:\n\t\tclient(SERVER_ADDR)\n\t}\n}", "func main() {\n\tserver.New().Start()\n}", "func main() {\n\tportNo := os.Args[1]\n\tstartServerMode(portNo)\n}", "func main() {\n\n\tfmt.Println(\"Launching server...\")\n\n\tconnMap = make(map[string]net.Conn) // Allocate and initialise a map with no given size\n\tuserMap = make(map[net.Conn]string) // Allocate and initialise a map with no given size\n\n\targs := os.Args\n\n\tvar connPort = \"\"\n\n\tif len(args) == 2 && checkServerPort(args[1]) { // Verify a port number is given and check it\n\t\tconnPort = args[1]\n\t} else { // Else use port 8081 by default\n\t\tconnPort = \"8081\"\n\t}\n\n\tfmt.Print(\"IP address: \")\n\tgetPreferredIPAddress() // Prints out the preferred IP address of the specific computer\n\tfmt.Println(\"Port number: \" + connPort)\n\n\t// Listens for connection requests\n\tln, err := net.Listen(\"tcp\", \":\"+connPort)\n\n\t// Error check\n\tif err != nil {\n\t\tfmt.Println(err)\n\t\treturn\n\t}\n\n\t// Defer (wait till surrounding functions have finished) the execution of ln.Close()\n\tdefer ln.Close()\n\n\t// Semi-infinite loop that accepts connections, checks for errors and executes a goroutine\n\tfor {\n\t\tconn, err := ln.Accept()\n\t\tif err != nil {\n\t\t\tfmt.Println(\"Accept error: \", err)\n\t\t\treturn\n\t\t}\n\t\tgo connection(conn) // goroutine execution of the connection function concurrently\n\t}\n}", "func main() {\n\n\tvar logger *simple.Logger\n\n\tif os.Getenv(\"LOG_LEVEL\") == \"\" {\n\t\tlogger = &simple.Logger{Level: \"info\"}\n\t} else {\n\t\tlogger = 
&simple.Logger{Level: os.Getenv(\"LOG_LEVEL\")}\n\t}\n\terr := validator.ValidateEnvars(logger)\n\tif err != nil {\n\t\tos.Exit(-1)\n\t}\n\n\t// setup our client connectors (message producer)\n\tconn := connectors.NewClientConnectors(logger)\n\n\t// call the start server function\n\tlogger.Info(\"Starting server on port \" + os.Getenv(\"SERVER_PORT\"))\n\tstartHttpServer(conn)\n}", "func main() {\n\tserver.StartUp(false)\n}", "func main() {\n\tif len(os.Args) != 2 {\n\t\tlog.Fatal(\"Usage: ./server-go [server port]\")\n\t}\n\tserver_port := os.Args[1]\n\tserver(server_port)\n}", "func ClientMain(player Player) {\n\taddr := DefaultServerAddress\n\tif len(os.Args) > 1 {\n\t\tport, err := strconv.Atoi(os.Args[1])\n\t\tif err != nil {\n\t\t\tfmt.Fprintf(os.Stderr, \"invalid value for port: %q\", os.Args[1])\n\t\t}\n\t\taddr = &net.TCPAddr{\n\t\t\tIP: net.IPv4(127, 0, 0, 1),\n\t\t\tPort: port,\n\t\t}\n\t}\n\tvar state BasicState\n\tclient, err := OpenClient(addr, player, &state)\n\tif err != nil {\n\t\tfmt.Fprintf(os.Stderr, \"cannot connect to server: %s\", err)\n\t\tos.Exit(1)\n\t}\n\tclient.DebugTo = os.Stderr\n\terr = client.Run()\n\tif err != nil {\n\t\tfmt.Fprintf(os.Stderr, \"error while running: %s\", err)\n\t\tos.Exit(2)\n\t}\n}", "func main() {\n args := args.Parse(os.Args)\n fmt.Println(\"[MAIN] App starting\")\n\n switch args.Mode {\n case \"agent\":\n go agent.Run(args.Source, args.ServerAddress, args.ServerPort)\n case \"server\":\n go server.Run(args.BindAddress, args.BindPort)\n case \"mixed\":\n go server.Run(args.BindAddress, args.BindPort)\n go agent.Run(args.Source, args.ServerAddress, args.ServerPort)\n default:\n fmt.Println(\"[MAIN] No agent, no server running\")\n }\n\n for {\n time.Sleep(100 * time.Millisecond)\n }\n}", "func main() {\r\n\tbind := fmt.Sprintf(\"%s:%s\", getIP(), getPort())\r\n\tlog.Println(\"Listening on\", bind)\r\n\r\n\terr := http.ListenAndServe(bind, http.HandlerFunc(mainHandle))\r\n\tif err != nil 
{\r\n\t\tpanic(\"ListenAndServe: \" + err.Error())\r\n\t}\r\n}", "func main() {\n\terr := clientMain()\n\tif err != nil {\n\t\tpanic(err)\n\t}\n}", "func main() {\n\tbindTo := flag.String(\n\t\t\"l\", \"0.0.0.0:999\", \"interface and port to listen at\")\n\tflag.Parse()\n\trunServer(*bindTo)\n}", "func main() {\n\ta := App{}\n\ta.Initialize()\n\ta.Run(\":8000\")\n}", "func main() {\n\ts := master.New()\n\tif err := s.Run(port); err != nil {\n\t\tlog.Fatal(err)\n\t}\n}", "func main() {\n if len(os.Args) != 2 {\n log.Panic(\"args:\", \"<port>\")\n }\n port := os.Args[1]\n startServer(port)\n}", "func main() {\n\terr := runClient()\n\tif err != nil {\n\t\tlog.Fatalln(err.Error())\n\t}\n\n\treturn\n}", "func main() {\n\tflag.Parse()\n\n\tproxy := launcher.NewProxy()\n\tif !*quiet {\n\t\tproxy.Logger = os.Stdout\n\t}\n\n\tl, err := net.Listen(\"tcp\", *addr)\n\tif err != nil {\n\t\tutils.E(err)\n\t}\n\n\tfmt.Println(\"Remote control url is\", \"ws://\"+l.Addr().String())\n\n\tsrv := &http.Server{Handler: proxy}\n\tutils.E(srv.Serve(l))\n}", "func mainClient(ctx *cli.Context) error {\n\tcheckClientSyntax(ctx)\n\taddr := \":\" + strconv.Itoa(warpServerDefaultPort)\n\tswitch ctx.NArg() {\n\tcase 1:\n\t\taddr = ctx.Args()[0]\n\t\tif !strings.Contains(addr, \":\") {\n\t\t\taddr += \":\" + strconv.Itoa(warpServerDefaultPort)\n\t\t}\n\tcase 0:\n\tdefault:\n\t\tfatal(errInvalidArgument(), \"Too many parameters\")\n\t}\n\thttp.HandleFunc(\"/ws\", serveWs)\n\tconsole.Infoln(\"Listening on\", addr)\n\tfatalIf(probe.NewError(http.ListenAndServe(addr, nil)), \"Unable to start client\")\n\treturn nil\n}", "func main() {\n\targs := os.Args[1:]\n\tswitch len(args) {\n\tcase 0:\n\t\tstartServerFromFile(configFile)\n\t\treturn\n\tcase 1:\n\t\ta := args[0]\n\t\tif a == createFile {\n\t\t\twriteConfigurationToFile()\n\t\t\treturn\n\t\t} else if a == help || a == help2 {\n\t\t\tprintHelp()\n\t\t\treturn\n\t\t}\n\tcase 2:\n\t\tif args[0] == useConfig && args[1] != \"\" 
{\n\t\t\tstartServerFromFile(args[1])\n\t\t\treturn\n\t\t}\n\t}\n\tprintHelp()\n}", "func main() {\n\tfmt.Printf(\"%sBuilding SQL Connection%s\\n\", GREEN, NC)\n\tMySQL.BuildConnection()\n\t/* How to print to stdout */\n\tfmt.Printf(\"Main starting\\n\")\n\t/* This is how you call functions in Go */\n\tserve()\n}", "func main() {\n\n\t// Ref. https://gobyexample.com/command-line-arguments\n\targsWithProg := os.Args\n\n\tif len(argsWithProg) > 1 {\n\t\t// os.Args[0] will be \"smi-main\"\n\t\tswitch os.Args[1] {\n\t\tcase \"s\":\n\t\t\tsmi.Server()\n\t\tcase \"c\":\n\t\t\tsmi.Client()\n\t\t}\n\n\t} else {\n\t\tfmt.Println(\"Please specify the mode: s for server, c for client: smi-main s or smi-main c\")\n\t}\n}", "func main() {\n\tconfig.SetVersion(\"0.1.0\")\n\tconfig.Load()\n\tsync.StartProcessing()\n\tserver.StartServer()\n}", "func main() {\n\tserver := server.NewHTTPServer()\n\tserver.Start(3000)\n}", "func TestMain(m *testing.M) {\n\tflag.Parse()\n\tvar err error\n\ts, err := NewServer(TESTDB, 10, 2, ioutil.Discard , \":9123\")\n\tif err!=nil {\n\t\tpanic(err)\n\t}\n\tts=s\n\ts.Start()\n\tos.Exit(m.Run())\n}", "func main() {\n\tif err := cmd.RunServer(); err != nil {\n\t\tfmt.Fprintf(os.Stderr, \"%v\\n\", err)\n\t\tos.Exit(1)\n\t}\n}", "func main() {\n\tvar (\n\t\thostname string\n\t\tid string\n name string\n client string\n\t\tcache *infra.Cache\n\t\tserver *infra.Server\n\t\tconsole *infra.Console\n\t)\n\n\tflag.Parse()\n\n\tcache = infra.NewCache()\n\n\thostname = *localAddress + \":\" + *localPort\n client = *clientAddress\n\n\t// If an id isn't provided, we use the hostname instead\n\tif *instanceId != \"\" {\n\t\tid = *instanceId\n\t} else {\n\t\tid = hostname\n\t}\n \n if *carrierName != \"\" {\n name = *carrierName\n } else if *ringAddress != \"\" {\n name = *ringAddress\n } else {\n name = hostname\n }\n \n server = infra.NewServer(id, name, hostname, client, cache)\n\tconsole = infra.NewConsole(cache, server)\n\n\t// Spawn goroutines to handle both 
interfaces\n\tgo server.Run(*ringAddress)\n\tgo console.Run()\n\n\t// Wait fo the server to finish\n\t<-server.Done()\n}", "func StartMainServer(mainHost string, workerCount int) {\n\tserver := &fasthttp.Server{\n\t\tHandler: anyHTTPHandler,\n\t}\n\n\tpreforkServer := prefork.New(server, workerCount)\n\n\tif !prefork.IsChild() {\n\t\tfmt.Printf(\"Server started server on http://%s\\n\", mainHost)\n\t}\n\n\tif err := preforkServer.ListenAndServe(mainHost); err != nil {\n\t\tpanic(err)\n\t}\n}", "func main() {\n\t//init()\n\thostname, err := os.Hostname()\n\tif err != nil {\n\t\thostname = \"MISSING\"\n\t}\n\n\tinnerPort := os.Getenv(\"BACKEND_PORT\")\n\tif innerPort == \"\" {\n\t\tlog.Printf(\"Running on %s:5001\", hostname)\n\t\tlog.Fatal(http.ListenAndServe(\":5001\", nil))\n\t} else {\n\t\tlog.Printf(\"Running on %s:%s\", hostname, innerPort)\n\t\tlog.Fatal(http.ListenAndServe(fmt.Sprintf(\":%s\", innerPort), nil))\n\t}\n}", "func main() {\n\n\t// Process args.\n\n\t// the TCP address on which the fserver listens to RPC connections from the aserver\n\tfserverTcp := os.Args[1]\n\tfserverTcpG = fserverTcp\n\n\t// the UDP address on which the fserver receives client connections\n\tfserver := os.Args[2]\n\tfserverUdpAddr, err := net.ResolveUDPAddr(\"udp\", fserver)\n\thandleError(err)\n\n\tmsg := make([]byte, 1024)\n\n\t// Global fserver ip:port info\n\tfserverIpPort = fserver\n\n\t// Read the rest of the args as a fortune message\n\tfortune := strings.Join(os.Args[3:], \" \")\n\tfortuneG = fortune\n\n\t// Debug to see input from command line args\n\tfmt.Printf(\"fserver Listening on %s\\nFortune: %s\\n\", fserverIpPort, fortune)\n\n\t// concurrent running of rcp connection\n\n\tconn, err := net.ListenUDP(\"udp\", fserverUdpAddr)\n\thandleError(err)\n\n\tgo handleRpcConnection()\n\tdefer conn.Close()\n\n\t// refactor to global variable\n\tconndp = conn\n\t// udp client concurrency\n\tfor {\n\t\tn, clientAddr, err := conn.ReadFromUDP(msg)\n\t\thandleError(err)\n\t\tgo 
handleClientConnection(msg[:], n, clientAddr.String())\n\t}\n}", "func main() {\n\tfmt.Println(\"app start.\")\n\tgo loginserver.StartListen()\n\tgo gateserver.StartListen()\n\t<-make(chan int)\n}", "func main() {\n\n\t//init api\n\tserver.Init()\n}", "func mainExample() {\n\tfmt.Printf(\"webserv main running.\\n\")\n\tw := NewWebServer(\"127.0.0.1:7708\", nil)\n\tw.Start()\n\tselect {}\n\t// ...\n\tw.Stop()\n}", "func main() {\n\tregisterHandlers()\n\tappChatroom.Run() // run the chatroom app\n\t// start the server\n\tch := make(chan bool) // a channel used to get errors\n\tdefer close(ch)\n\tgo startHTTPServer(ch)\n\tgo startHTTPSServer(ch)\n\t<-ch\n\t<-ch\n\tlog.Fatal(\"Servers stopped with errors.\")\n}", "func main() {\n\thttp.ListenAndServe(\"127.0.0.1:8080\", NewServer())\n}", "func main() {\n\t// get environment variables\n\tport := os.Getenv(portEnv)\n\t// default for port\n\tif port == \"\" {\n\t\tport = \"8080\"\n\t}\n\tlog.Print(\"[Info][Main] Creating server...\")\n\ts, err := sessions.NewServer(\":\"+port, os.Getenv(redisAddressEnv),\n\t\tos.Getenv(gameServerImageEnv), deserialiseEnvMap(os.Getenv(gameNodeSelectorEnv)),\n\t\tos.Getenv(cpuLimitEnv))\n\n\tif err != nil {\n\t\tlog.Fatalf(\"[Error][Main] %+v\", err)\n\t}\n\n\tif err := s.Start(); err != nil {\n\t\tlog.Fatalf(\"[Error][Main] %+v\", err)\n\t}\n}", "func main() {\n\tvar port int\n\tvar version bool\n\n\t// parse the flags\n\tflag.IntVar(&port, \"port\", 8080, \"used port\")\n\tflag.BoolVar(&version, \"V\", false, \"version of the program\")\n\tflag.Parse()\n\n\t// if user type -V, the V flag is set up to true\n\tif version {\n\t\t// display the information about the version\n\t\tfmt.Println(\"version 1.0_a\")\n\t\t// otherwise run the server\n\t} else {\n\t\tportNr := strconv.Itoa(port)\n\t\thttp.HandleFunc(\"/time\", getTime)\n\t\thttp.HandleFunc(\"/\", unknownRoute)\n\t\terr := http.ListenAndServe(\":\"+portNr, nil)\n\t\tif err != nil {\n\t\t\tlog.Fatal(\"ListenAndServe: \", 
err)\n\t\t}\n\t}\n}", "func main() {\n\tserverIP := os.Args[1]\n\tdataPath := os.Args[2]\n\n\tPublicIp = node.GeneratePublicIP()\n\tfmt.Println(\"The public IP is: [%s], DataPath is: %s\", ERR_COL+PublicIp+ERR_END, ERR_COL+dataPath+ERR_END)\n\t// Listener for clients -> cluster\n\tln1, _ := net.Listen(\"tcp\", PublicIp+\"0\")\n\n\t// Listener for server and other nodes\n\tln2, _ := net.Listen(\"tcp\", PublicIp+\"0\")\n\n\tInitializeDataStructs()\n\t// Open Filesystem on Disk\n\tnode.MountFiles(dataPath, WriteIdCh)\n\t// Open Peer to Peer RPC\n\tListenPeerRpc(ln2)\n\t// Connect to the Server\n\tnode.InitiateServerConnection(serverIP, PeerRpcAddr)\n\t// Open Cluster to App RPC\n\tListenClusterRpc(ln1)\n}", "func main() {\n\tname := flag.String(\"name\", \"echo\", \"server name\")\n\tport := flag.String(\"port\", \"3000\", \"server port\")\n\tflag.Parse()\n\n\t// Echo instance\n\te := echo.New()\n\n\t// Middleware\n\te.Use(middleware.Logger())\n\te.Use(middleware.Recover())\n\n\t// Route => handler\n\te.GET(\"/\", func(c echo.Context) error {\n\t\treturn c.HTML(http.StatusOK, fmt.Sprintf(\"<div style='font-size: 8em;'>Hello from upstream server %s!</div>\", *name))\n\t})\n\te.GET(\"/alive\", func(c echo.Context) error {\n\t\tdata := map[string]interface{}{\n\t\t\t\"alive\": true,\n\t\t\t\"hostname\": \"localhost:\" + *port,\n\t\t\t\"serviceName\": *name,\n\t\t\t\"num_cpu\": runtime.NumCPU(),\n\t\t\t\"num_goroutine\": runtime.NumGoroutine(),\n\t\t\t\"go_version\": runtime.Version(),\n\t\t\t\"build_date\": Buildstamp,\n\t\t\t\"commit\": Commit,\n\t\t\t\"startup_time\": startupTime,\n\t\t}\n\t\treturn c.JSON(http.StatusOK, data)\n\t})\n\n\t// Start server\n\te.Logger.Fatal(e.Start(fmt.Sprintf(\":%s\", *port)))\n}", "func main() {\r\n\targuments := os.Args\r\n\tif len(arguments) == 1 {\r\n\t\tfmt.Println(\"Please provide a port number!\")\r\n\t\treturn\r\n\t}\r\n\r\n\tPORT := \":\" + arguments[1]\r\n\tl, err := net.Listen(\"tcp4\", PORT)\r\n\tif err != nil 
{\r\n\t\tfmt.Println(err)\r\n\t\treturn\r\n\t}\r\n\tdefer l.Close()\r\n\trand.Seed(time.Now().Unix())\r\n\r\n\tfor {\r\n\t\tc, err := l.Accept()\r\n\t\tif err != nil {\r\n\t\t\tfmt.Println(err)\r\n\t\t\treturn\r\n\t\t}\r\n\t\tgo handleConnection_server(c)\r\n\t}\r\n}", "func MainServer(server string) {\n\n\t// Build core, and start goroutine\n\tcore := NewCore()\n\tgo core.main()\n\n\t// Build TCP listener and start goroutine\n\tlis := &Listener{core: core}\n\tgo lis.Listen(\"tcp\", server)\n\n\t// Register monitoring server\n\tgo monitoringServer(core)\n\n\t// Setup SIGINT signal handler, and wait\n\tchannel := make(chan os.Signal)\n\tsignal.Notify(channel, os.Interrupt)\n\t<-channel\n\tlog.Println(\"Stop\")\n}", "func main() {\n\tvar configurationFile string\n\tvar isMaster bool\n\tvar isScribe bool\n\tvar isAddama bool\n\n\tflag.BoolVar(&isMaster, \"m\", false, \"Start as master node.\")\n\tflag.BoolVar(&isScribe, \"s\", false, \"Start as scribe node.\")\n\tflag.BoolVar(&isAddama, \"a\", false, \"Start as addama node.\")\n\tflag.StringVar(&configurationFile, \"config\", \"golem.config\", \"A configuration file for golem services\")\n\tflag.Parse()\n\n\tconfigFile, err := goconf.ReadConfigFile(configurationFile)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tGlobalLogger(configFile)\n\tGlobalTls(configFile)\n\tSubIOBufferSize(\"default\", configFile)\n\tGoMaxProc(\"default\", configFile)\n\tConBufferSize(\"default\", configFile)\n\tStartHtmlHandler(configFile)\n\n\tif isMaster {\n\t\tStartMaster(configFile)\n\t} else if isScribe {\n\t\tStartScribe(configFile)\n\t} else if isAddama {\n\t\tStartAddama(configFile)\n\t} else {\n\t\tStartWorker(configFile)\n\t}\n}", "func main() {\n\t// starting server\n\tcuxs.StartServer(engine.Router())\n}", "func (d *Daemon) Main(serve func(string, string)) error {\n\tsetUmask()\n\tserve(d.SockPath, d.DbPath)\n\treturn nil\n}", "func main() {\n\tport, exists := os.LookupEnv(\"PORT\")\n\tif !exists {\n\t\tport = 
\"2001\"\n\t}\n\tfmt.Println(\"Running on port \" + port)\n\thttp.HandleFunc(\"/\", RelayServer)\n\thttp.ListenAndServe(\":\"+port, nil)\n}", "func main() {\n\tcfg := drc.NewConfig()\n\terr := cfg.Parse(os.Args[1:])\n\tif cfg.Version {\n\t\tutils.PrintRawInfo(appName)\n\t\tos.Exit(0)\n\t}\n\tswitch errors.Cause(err) {\n\tcase nil:\n\tcase flag.ErrHelp:\n\t\tos.Exit(0)\n\tdefault:\n\t\tlog.Fatalf(\"parse cmd flags errors: %s\", err)\n\t}\n\n\terr = logutil.InitLogger(&cfg.Log)\n\tif err != nil {\n\t\tlog.Fatalf(\"initialize log error: %s\", err)\n\t}\n\tutils.LogRawInfo(appName)\n\n\tsvr := drc.NewServer(cfg)\n\n\tsc := make(chan os.Signal, 1)\n\tsignal.Notify(sc,\n\t\tsyscall.SIGHUP,\n\t\tsyscall.SIGINT,\n\t\tsyscall.SIGTERM,\n\t\tsyscall.SIGQUIT)\n\n\tgo func() {\n\t\tsig := <-sc\n\t\tlog.Infof(\"got signal [%d], exit\", sig)\n\t\tsvr.Close()\n\t}()\n\n\tif err = svr.Start(); err != nil {\n\t\tlog.Fatalf(\"run server failed: %v\", err)\n\t}\n\tsvr.Close()\n}", "func main() {\n\tflag.Parse()\n\tfmt.Println(\"start the program\")\n\n\t// go myServer()\n\t// go myClient()\n\n\tfor {\n\t\t// start the app\n\t\twaitc := make(chan struct{}) // a wait lock\n\n\t\t// start the server thread\n\t\tgo func() {\n\t\t\tfmt.Println(\"start the server\")\n\t\t\tserver.InitFileServer()\n\t\t\tdefer close(waitc)\n\t\t}()\n\n\t\t// start the client thread\n\t\t// go func() {\n\t\t// \t// for {\n\t\t// \tserverAddr, server := filesource.SearchAddressForThefile(\"Liben.jpg\")\n\t\t// \tfmt.Println(*serverAddr)\n\t\t// \tfmt.Println(*server)\n\t\t// \tclient.InitFileClient(serverAddr, server)\n\t\t// \tclient.DownloadFile(\"Liben.jpg\")\n\t\t// \t// }\n\t\t// }()\n\n\t\t// start the input thread\n\t\t// go input()\n\n\t\t<-waitc\n\t\t// finished in this round restart the app\n\t\tfmt.Println(\"restart the app\")\n\t}\n}", "func main() {\n\tlogrus.SetFormatter(&logrus.TextFormatter{\n\t\tForceColors: true,\n\t\tFullTimestamp: true,\n\t})\n\tmlog := 
logrus.WithFields(logrus.Fields{\n\t\t\"component\": componentName,\n\t\t\"version\": env.Version(),\n\t})\n\n\tgrpc_logrus.ReplaceGrpcLogger(mlog.WithField(\"component\", componentName+\"_grpc\"))\n\tmlog.Infof(\"Starting %s\", componentName)\n\n\tgrpcServer, err := createGRPCServer(mlog)\n\tif err != nil {\n\t\tmlog.WithError(err).Fatal(\"failed to create grpc server\")\n\t}\n\t// Start go routines\n\tgo handleExitSignals(grpcServer, mlog)\n\tserveGRPC(env.ServiceAddr(), grpcServer, mlog)\n}", "func main() {\n\t// The ccid is assigned to the chaincode on install (using the “peer lifecycle chaincode install <package>” command) for instance\n\n\tif len(os.Args) < 3 {\n\t\tfmt.Println(\"Please supply:\\n- installed chaincodeID (using the “peer lifecycle chaincode install <package>” command)\\n- chaincode address (host:port)\")\n\t\treturn\n\t}\n\n\tccid := os.Args[1]\n\taddress := os.Args[2]\n\n\tserver := &shim.ChaincodeServer{\n\t\tCCID: ccid,\n\t\tAddress: address,\n\t\tCC: new(SimpleChaincode),\n\t\tTLSProps: shim.TLSProperties{\n\t\t\tDisabled: true,\n\t\t},\n\t}\n\n\tfmt.Println(\"Start Chaincode server on \" + address)\n\terr := server.Start()\n\tif err != nil {\n\t\tfmt.Printf(\"Error starting Simple chaincode: %s\", err)\n\t\treturn\n\t}\n}", "func main() {\n initApplicationConfiguration()\n runtime.GOMAXPROCS(2) // in order for the rpc and http servers to work in parallel\n\n go servers.StartRPCServer()\n servers.StartHTTPServer()\n}", "func main() {\n\tif len(os.Args) < 3 {\n\t\tfmt.Printf(\"wrong parameters\\nUsage: %s host port\\n\", os.Args[0])\n\t\tos.Exit(2)\n\t}\n\n\tmsg, err := client.Dial(os.Args[1], os.Args[2])\n\tif err != nil {\n\t\tfmt.Println(err)\n\t\tos.Exit(2)\n\t}\n\tfmt.Println(*msg)\n}", "func main() {\n\tcore.Start()\n}", "func main() {\n\t//\n\t// Load startup flags\n\t//\n\tflags := cmd.LoadFlags()\n\n\t//\n\t// Load env.\n\t//\n\tif flags.EnvFile != \"\" {\n\t\terr := env.LoadEnvFile(flags.EnvFile)\n\t\tif err != nil 
{\n\t\t\tlog.Fatal(err)\n\t\t}\n\t}\n\n\t//\n\t// Select service\n\t//\n\treg := registry.NewRegistryContainer()\n\n\treg.Add(portGateway.ServiceName, portGateway.FactoryMethod)\n\treg.Add(portService.ServiceName, portService.FactoryMethod)\n\n\tserviceFactory, err := reg.Get(flags.Kind)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\t//\n\t// Create service\n\t//\n\tservice, err := serviceFactory()\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\t//\n\t// Run till the death comes\n\t//\n\tlog.Printf(\"[%s] started serving on '%s'\", flags.Kind, flags.Address)\n\tlog.Fatal(service.Serve(flags.Address))\n}", "func main() {\n\tvar addr string\n\tflag.StringVar(&addr, \"e\", \":4040\", \"service address endpoint\")\n\tflag.Parse()\n\n\t// create local addr for socket\n\tladdr, err := net.ResolveTCPAddr(\"tcp\", addr)\n\tif err != nil {\n\t\tfmt.Println(err)\n\t\tos.Exit(1)\n\t}\n\n\t// announce service using ListenTCP\n\t// which a TCPListener.\n\tl, err := net.ListenTCP(\"tcp\", laddr)\n\tif err != nil {\n\t\tfmt.Println(err)\n\t\tos.Exit(1)\n\t}\n\tdefer l.Close()\n\tfmt.Println(\"listening at (tcp)\", laddr.String())\n\n\t// req/response loop\n\tfor {\n\t\t// use TCPListener to block and wait for TCP\n\t\t// connection request using AcceptTCP which creates a TCPConn\n\t\tconn, err := l.AcceptTCP()\n\t\tif err != nil {\n\t\t\tfmt.Println(\"failed to accept conn:\", err)\n\t\t\tconn.Close()\n\t\t\tcontinue\n\t\t}\n\t\tfmt.Println(\"connected to: \", conn.RemoteAddr())\n\n\t\tgo handleConnection(conn)\n\t}\n}", "func main() {\n\t// Make websocket\n\tlog.Println(\"Starting sync server\")\n\n\t// TODO: Use command line flag credentials.\n\tclient, err := db.NewClient(\"localhost:28015\")\n\tif err != nil {\n\t\tlog.Fatal(\"Couldn't initialize database: \", err.Error())\n\t}\n\tdefer client.Close()\n\n\trouter := sync.NewServer(client)\n\n\t// Make web server\n\tn := negroni.Classic()\n\tn.UseHandler(router)\n\tn.Run(\":8000\")\n}", "func main() 
{\n\twebserver.ServerStart()\n\twebserver.ServerRequest()\n}", "func main() {\n\tlog.SetLevel(log.DebugLevel)\n\n\tif err := setupDB(); err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tserver, err := setupServer()\n\tif err != nil {\n\t\tlog.Fatal(\"error setting up server: \", err)\n\t}\n\n\tlog.Println(\"--- listening on \", server.Addr)\n\terr = server.ListenAndServe()\n\tif err != nil {\n\t\tlog.Fatal(\"error starting server: \", err)\n\t}\n}", "func main() {\n\t\n\tvar config Config\n\tReadConfig(&config)\n\n\tvar inputScanner *bufio.Scanner\n\n\tif config.Server.Enable {\n\t\t// communicate with TCP/IP server\n\t\tfmt.Printf(\"server mode\\n\")\n\t\t// TODO need to set inputScanner\n\n\t} else if config.Engines.Enable {\n\t\t// play games with multiple engines used\n\t\t// In this mode, we need to hold a full state of the game because no one send the game state.\n\t\tfmt.Printf(\"multi-engine mode\\n\")\n\t\tpanic(\"not implemented now. Can you send pull request?\")\n\t} else {\n\t\t// CLI mode\n\t\tfmt.Printf(\"cli mode\\n\")\n\t\tinputScanner = bufio.NewScanner(os.Stdin)\n\t}\n\n\tConnectEngine(inputScanner, config.Cli.Path)\n}", "func main() {\n\tif len(os.Args) < 3 {\n\t\tfmt.Printf(\"argument is Invalid :%v\\n\", os.Args)\n\t\treturn\n\t}\n\tswitch os.Args[1] {\n\tcase \"master\":\n\t\tstartReq, err := json.Marshal(common.Request{\n\t\t\tUrl: os.Args[2],\n\t\t\tFlag: 1,\n\t\t})\n\t\tif err != nil {\n\t\t\tlog.Error(\"err:%v\", err)\n\t\t}\n\t\tdistribute.NewMaster().Run(startReq)\n\tcase \"slave\":\n\t\tdistribute.NewSlave(os.Args[2]).Run()\n\t}\n}", "func main() {\n\tfmt.Println(\"Go Demo with net/http server\")\n\n\t// initialize empty itemStore\n\titemStore := store.InitializeStore()\n\tserver.StartRouter(itemStore)\n}", "func main() {\n\n\tlog.Printf(\"Server started\")\n\n\trouter := sw.NewRouter()\n\n\tport := os.Getenv(\"PORT\")\n\tif port == \"\" {\n\t\tport = \"5000\"\n\t}\n\n\theadersOk := handlers.AllowedHeaders([]string{\"X-Requested-With\", 
\"Content-Type\"})\n\toriginsOk := handlers.AllowedOrigins([]string{\"*\"})\n\tmethodsOk := handlers.AllowedMethods([]string{\"GET\", \"HEAD\", \"POST\", \"PUT\", \"OPTIONS\"})\n\n\tlog.Fatal(http.ListenAndServe(\":\"+port, handlers.CORS(originsOk, headersOk, methodsOk)(router)))\n}", "func init() {\n\t// use all cpus in the system for concurrency\n\truntime.GOMAXPROCS(runtime.NumCPU())\n\n\tlog.SetOutput(os.Stdout)\n\n\tflag.Parse()\n\n\tif *showUsage {\n\t\tflag.Usage()\n\t\tos.Exit(0)\n\t}\n\n\tvar err error\n\tClient, err = as.NewClient(*Host, *Port)\n\tif err != nil {\n\t\tPanicOnError(err)\n\t}\n}", "func main(){\n\tr := ctrl.HttpRouter()\n\tr.Run(\":1234\")\n}", "func main(){\n\n\tname := \"localhost\"\n\tport := 0\n\tfmt.Scanf(\"%d\\n\", &port)\n\t//lanzo a los dos servidores de arriba\n\tgo servRegister(name,port)\t\n\n\tfriendPort := 0\n\t//solicto a este port que me responda de alguan forma si port es diferente de friendport\n\tfmt.Scanf(\"%d\\n\",&friendPort)\n\tif port != friendPort{\n\t\t//agrego a la libreta al friendport, \n\t\tlib[friendPort] = name\n\t\tcliRegister(name, friendPort, port)\n\t}\n\t//lo lanzo sin go para que bloquee\n\tservAdder(name, port)\n\t\n}", "func main() {\n\t// load config and construct the server shared environment\n\tcfg := common.LoadConfig()\n\tlog := services.NewLogger(cfg)\n\n\t// create repository\n\trepo, err := repository.NewRepository(cfg, log)\n\tif err != nil {\n\t\tlog.Fatalf(\"Can not create application data repository. 
Terminating!\")\n\t}\n\n\t// setup GraphQL API handler\n\thttp.Handle(\"/api\", handlers.ApiHandler(cfg, repo, log))\n\n\t// show the server opening info and start the server with DefaultServeMux\n\tlog.Infof(\"Welcome to Fantom Rocks API server on [%s]\", cfg.BindAddr)\n\tlog.Fatal(http.ListenAndServe(cfg.BindAddr, nil))\n}", "func main() {\n\tservice.StartWebServer(\"8081\")\n}", "func main() {\n\n\t// Calls startup logic\n\tcommon.StartUp()\n\t// Get the mux router object\n\trouter := routers.InitRoutes()\n\n\tserver := &http.Server{\n\t\tAddr: common.AppConfig.Server,\n\t\tHandler: router,\n\t}\n\tlog.Println(\"Listening [products]...\")\n\tserver.ListenAndServe()\n}", "func main() {\n\tconst port = 8090\n\tfmt.Printf(\"Listening on port: %d\\n\", port)\n\thttp.HandleFunc(\"/\", requestHandler)\n\thttp.ListenAndServe(fmt.Sprintf(\":%d\", port), nil)\n}", "func Main(c *config.Config) error {\n\tif c.IsDebug() {\n\t\tutils.PrettyPrint(c)\n\t}\n\tsignalsToCatch := []os.Signal{\n\t\tos.Interrupt,\n\t\tos.Kill,\n\t\tsyscall.SIGHUP,\n\t\tsyscall.SIGINT,\n\t\tsyscall.SIGABRT,\n\t\tsyscall.SIGTERM,\n\t\tsyscall.SIGQUIT,\n\t}\n\tname := fmt.Sprintf(\"%s (%s)\", c.Name, utils.ExecutableName())\n\tstopCh := make(chan os.Signal, 1)\n\tsignal.Notify(stopCh, signalsToCatch...)\n\ta, err := core.NewAPI(c)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer func() {\n\t\tif err = a.Shutdown(); err != nil {\n\t\t\tc.Log().Error(err)\n\t\t\treturn\n\t\t}\n\t\tc.Log().Infof(\"%s shut down\", name)\n\t}()\n\tc.Log().Infof(\"starting %s...\", name)\n\terr = hosts.Start(a, c)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer func() {\n\t\tif err = hosts.Shutdown(); err != nil {\n\t\t\tc.Log().Error(err)\n\t\t\treturn\n\t\t}\n\t\tc.Log().Infof(\"%s shut down\", name)\n\t}()\n\tc.Log().Infof(\"%s %s started\", name, c.Version())\n\t<-stopCh\n\tc.Log().Infof(\"%s shutting down\", name)\n\treturn nil\n}", "func main() {\n\tfmt.Println(\"server is up and 
running!!\")\n\truntime.GOMAXPROCS(4)\n\n\tapp := gin.Default()\n\n\tsearch.RouterMain(app)\n\n\terr := app.Run(\"0.0.0.0:5000\")\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tfmt.Println(\"server got fired!!!!\")\n}", "func main() {\n\thelloWorld()\n\tfunctions()\n\tmathFunction()\n\tserver()\n}", "func main() {\n\targs := os.Args[1:]\n\tcentralSystem := ocpp16.NewCentralSystem(nil, nil)\n\thandler := &CentralSystemHandler{chargePoints: map[string]*ChargePointState{}}\n\tcentralSystem.SetNewChargePointHandler(func(chargePointId string) {\n\t\thandler.chargePoints[chargePointId] = &ChargePointState{connectors: map[int]*ConnectorInfo{}, transactions: map[int]*TransactionInfo{}}\n\t\tlog.WithField(\"client\", chargePointId).Info(\"new charge point connected\")\n\t})\n\tcentralSystem.SetChargePointDisconnectedHandler(func(chargePointId string) {\n\t\tlog.WithField(\"client\", chargePointId).Info(\"charge point disconnected\")\n\t\tdelete(handler.chargePoints, chargePointId)\n\t})\n\tcentralSystem.SetCentralSystemCoreListener(handler)\n\tvar listenPort = defaultListenPort\n\tif len(args) > 0 {\n\t\tport, err := strconv.Atoi(args[0])\n\t\tif err != nil {\n\t\t\tlistenPort = port\n\t\t}\n\t}\n\tlog.Infof(\"starting central system on port %v\", listenPort)\n\tcentralSystem.Start(listenPort, \"/{ws}\")\n\tlog.Info(\"stopped central system\")\n}", "func main() {\n\te := godotenv.Load()\n\tif e != nil {\n\t\tfmt.Print(e)\n\t}\n\n\tr := routers.SetupRouter()\n\trouters.MirrorRouter(r)\n\trouters.ProxyRouter(r)\n\n\tport := os.Getenv(\"port\")\n\n\t// For run on requested port\n\tif len(os.Args) > 1 {\n\t\treqPort := os.Args[1]\n\t\tif reqPort != \"\" {\n\t\t\tport = reqPort\n\t\t}\n\t}\n\n\tif port == \"\" {\n\t\tport = \"8080\" //localhost\n\t}\n\ttype Job interface {\n\t\tRun()\n\t}\n\n\tr.Run(\":\" + port)\n}", "func main() {\n\t// create a listener on TCP port 7777\n\tlis, err := net.Listen(\"tcp\", \":7777\")\n\tif err != nil {\n\t\tlog.Fatalf(\"failed to listen: %v\", 
err)\n\t}\n\n\t// create a server instance\n\ts := api.Server{}\n\n\t// create the TLS creds\n\tcreds, err := credentials.NewServerTLSFromFile(\"cert/server.crt\", \"cert/server.key\")\n\tif err != nil {\n\t\tlog.Fatalf(\"could not load TLS keys: %s\", err)\n\t}\n\n\t// add credentials to the gRPC options\n\topts := []grpc.ServerOption{grpc.Creds(creds)}\n\n\t// create a gRPC server object\n\tgrpcServer := grpc.NewServer(opts...)\n\n\t// attach the Ping service to the server\n\tapi.RegisterPingServer(grpcServer, &s)\n\n\t// start the server\n\tif err := grpcServer.Serve(lis); err != nil {\n\t\tlog.Fatalf(\"failed to serve: %v\", err)\n\t}\n}", "func main() {\n\tflag.BoolVar(&debug, \"debug\", false, \"Enable debug output\")\n\tflag.StringVar(&host, \"host\", \"127.0.0.1\", \"Host to listen on\")\n\tflag.StringVar(&port, \"port\", \"5000\", \"Port to listen on\")\n\tflag.Parse()\n\n\t// create the host:port string for use\n\tlistenAddress := fmt.Sprintf(\"%s:%s\", host, port)\n\tif debug {\n\t\tlog.Printf(\"Listening on %s\", listenAddress)\n\t}\n\n\t// Map /config to our configHandler and wrap it in the log middleware\n\thttp.Handle(\"/config/\", logMiddleware(http.HandlerFunc(configHandler)))\n\n\t// Run forever on all interfaces on port 5000\n\tlog.Fatal(http.ListenAndServe(listenAddress, nil))\n}", "func main() {\n\tgo func() { log.Fatal(echoServer()) }()\n\n\terr := clientMain()\n\tif err != nil {\n\t\tpanic(err)\n\t}\n}", "func main() {\n\tconfig, err := config.New()\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tdb, err := db.New(config)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tdb.Migrate()\n\n\trouter := routes.New(db)\n\n\tsrv := server.New(router, config)\n\tif err := srv.Start(); err != nil {\n\t\tlog.Fatalf(\"Error on starting the server %s\", err)\n\t\tos.Exit(1)\n\t}\n\n\tdefer db.Close()\n}", "func main() {\n\tflag.Parse()\n\tfmt.Println(\"start the program\")\n\t// fmt.Println(*serverAddr)\n\n\tfor {\n\t\t// start the app\n\t\twaitc := make(chan 
struct{}) // a wait lock\n\n\t\t// start the server thread\n\t\tgo func() {\n\t\t\tfmt.Println(\"start the server\")\n\t\t\tserver.InitFileServer()\n\t\t\tdefer close(waitc)\n\t\t}()\n\n\t\t// start the client thread\n\t\t// go func() {\n\t\t// \tfor {\n\t\t// \t\tmsg := <-msgc // a message to send\n\t\t// \t\tclient.InitChatClient(*myTitle, serverAddr)\n\n\t\t// \t\terr := client.Chat(msg)\n\t\t// \t\tif err != nil {\n\t\t// \t\t\t// restart the client\n\t\t// \t\t\tfmt.Printf(\"send Err: %v\", err)\n\t\t// \t\t}\n\t\t// \t}\n\t\t// }()\n\n\t\t// start the input thread\n\t\t// go input()\n\n\t\t<-waitc\n\t\t// finished in this round restart the app\n\t\tfmt.Println(\"restart the app\")\n\t}\n}", "func main() {\n\n\tlog.Println(\"launching tcp server...\")\n\n\t// start tcp listener on all interfaces\n\t// note that each connection consumes a file descriptor\n\t// you may need to increase your fd limits if you have many concurrent clients\n\tln, err := net.Listen(\"tcp\", \":8081\")\n\tif err != nil {\n\t\tlog.Fatalf(\"could not listen: %s\", err)\n\t}\n\tdefer ln.Close()\n\n\tfor {\n\t\tlog.Println(\"waiting for incoming TCP connections...\")\n\t\t// Accept blocks until there is an incoming TCP connection\n\t\tincoming, err := ln.Accept()\n\t\tif err != nil {\n\t\t\tlog.Fatalf(\"couldn't accept %s\", err)\n\t\t}\n\n\t\tincomingConn, err := yamux.Client(incoming, yamux.DefaultConfig())\n\t\tif err != nil {\n\t\t\tlog.Fatalf(\"couldn't create yamux %s\", err)\n\t\t}\n\n\t\tlog.Println(\"starting a gRPC server over incoming TCP connection\")\n\n\t\tvar conn *grpc.ClientConn\n\t\t// gRPC dial over incoming net.Conn\n\t\tconn, err = grpc.Dial(\":7777\", grpc.WithInsecure(),\n\t\t\tgrpc.WithDialer(func(target string, timeout time.Duration) (net.Conn, error) {\n\t\t\t\treturn incomingConn.Open()\n\t\t\t}),\n\t\t)\n\n\t\tif err != nil {\n\t\t\tlog.Fatalf(\"did not connect: %s\", err)\n\t\t}\n\n\t\t// handle connection in goroutine so we can accept new TCP 
connections\n\t\tgo handleConn(conn)\n\t}\n}", "func Main() {\n\n\tcheckSupportArch()\n\n\tif len(os.Args) > 1 {\n\t\tcmd := os.Args[1]\n\t\tfmt.Println(cmd)\n\t}\n\n\tstartEtcdOrProxyV2()\n}", "func main() {\n\ta := App{}\n\t//\ta.Initialize(\"user\", \"password\", \"db\", \"db_mysql\", 3306)\n\ta.Initialize(\n\t\tos.Getenv(\"DB_USER\"),\n\t\tos.Getenv(\"DB_PASSWORD\"),\n\t\tos.Getenv(\"DB_NAME\"),\n\t\tos.Getenv(\"DB_HOST\"),\n\t\t3306)\n\n\ta.Run(\":8081\")\n}", "func Main() {\n\tusage := `iOS client v 0.01\n\nUsage:\n sim listen [<sock>]\n sim ls\n\n The commands work as following:\n sim ls will dump a list of currently active testmanagerd simulator sockets. Copy paste a path out of there to use with listen\n sim listen will either take the first available simulator that is running or you can pass it a socket path for a specific sim if you want. once it is running, start a xcuitest in xcode and watch the files with DTX dump being created\n`\n\targuments, err := docopt.ParseDoc(usage)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tls, _ := arguments.Bool(\"ls\")\n\tif ls {\n\t\tlist, err := fu.ListSockets()\n\t\tif err != nil {\n\t\t\tlog.Printf(\"Could not get sockets because: %s\", err)\n\t\t\treturn\n\t\t}\n\t\tlog.Println(list)\n\t\treturn\n\t}\n\n\tsock, _ := arguments.String(\"<sock>\")\n\tif sock == \"\" {\n\t\tlog.Print(\"No socket specified, trying to find active sockets..\")\n\t\tsock, err = fu.FirstSocket()\n\t\tif err != nil {\n\t\t\tlog.Fatal(\"could not find socket\")\n\t\t}\n\t\tlog.Printf(\"Using socket:%s\", sock)\n\t}\n\tnewSocket, _ := fu.MoveSock(sock)\n\thandle := proxy.Launch(sock, newSocket)\n\tc := make(chan os.Signal, 1)\n\tsignal.Notify(c, os.Interrupt)\n\t<-c\n\tlog.Print(\"CTRL+C detected, shutting down\")\n\thandle.Stop()\n\tfu.MoveBack(sock)\n}", "func main() {\n\tconfig := types.SetupConfig()\n\tlog.Printf(\"main.SetupConfig: %#v\\n\", config)\n\n\t/***** Start three GreeterServers(with one of them to be the slowServer). 
*****/\n\tgrpcAddress := strings.Split(config.GrpcAddress, \",\")\n\tfor i := 0; i < 3; i++ {\n\t\tlis, err := net.Listen(\"tcp\", grpcAddress[i])\n\t\tif err != nil {\n\t\t\tlog.Fatalf(\"main.Listen: %v\", err)\n\t\t}\n\t\tdefer lis.Close()\n\t\ts := grpc.NewServer()\n\t\tpb.RegisterPortsDbServer(s, &server{})\n\t\tgo s.Serve(lis)\n\t}\n\n\t/***** Wait for user exiting the program *****/\n\tselect {}\n}", "func main() {\n\thttp.HandleFunc(\"/\", handlers.Home)\n\thttp.HandleFunc(\"/about\", handlers.About)\n\n\tfmt.Printf(\"Staring application on port %s\", portNumber)\n\t_ = http.ListenAndServe(portNumber, nil)\n}", "func main() {\n\n\tlog.SetVerbose(log.DEBUG)\n\n\tdefer func() {\n\t\tif r := recover(); nil != r {\n\t\t\tlog.Error(\"%v\", r)\n\t\t}\n\t}()\n\n\t// parse command line args\n\tvar configFile = flag.String(\"conf\", \"conf.json\", \"configuration file\")\n\tflag.Parse()\n\n\tlog.Info(\"Initializing broker with options from %s.\", *configFile)\n\n\t// init configuration\n\tconfig, err := config.Init(*configFile)\n\tcheckError(err)\n\n\tlog.Info(\"Options read were: %v\", config)\n\n\tport, err := strconv.Atoi(config.Get(\"port\", PORT))\n\tcheckError(err)\n\n\tlog.SetPrefix(fmt.Sprintf(\"broker@%d: \", port))\n\n\tbroker, err = brokerimpl.New(config)\n\tcheckError(err)\n\n\tlistenHttp(port)\n\n}", "func main() {\n\n\targuments := os.Args\n\tif len(arguments) == 1 {\n\t\tfmt.Println(\"Please provide a socket file\")\n\n\t\t// make a sys call to exit the process\n\t\tos.Exit(100)\n\t}\n\n\tsocketFile := arguments[1]\n\n\tlistener, err := net.Listen(\"unix\", socketFile)\n\tif err != nil {\n\t\tfmt.Println(err)\n\t\tos.Exit(100)\n\t}\n\n\tfor {\n\t\tconn, err := listener.Accept()\n\t\tif err != nil {\n\t\t\tfmt.Println(err)\n\t\t\tos.Exit(100)\n\t\t}\n\n\t\t// run the server in another goroutine, thread or\n\t\t// in a way process but child process\n\t\t// because the server.go and main.go belong to same\n\t\t// package, we can access the function 
without importing it\n\t\tgo echoServer(conn)\n\t}\n\n}", "func Main() {\n\tflag.Parse()\n\n\tif err := run(); err != nil {\n\t\tlog.Warningf(\"%v\", err)\n\t\tos.Exit(1)\n\t}\n}", "func main() {\n\tfmt.Println(\"################################\")\n\tfmt.Println(\"#### Hello from MyAppStatus ####\")\n\tfmt.Println(\"################################\")\n\n\tapp.StartServer()\n}", "func main() {\n\n\tgo EdgeMapper()\n\tgo mapClients()\n\tgo handleDb()\n\trouter := mux.NewRouter()\n\trouter.HandleFunc(\"/\", simpleHandler)\n\trouter.HandleFunc(\"/webSocket\", handleClientSocket)\n\trouter.HandleFunc(\"/ws\", handleEdgeSocket)\n\trouter.PathPrefix(\"/\").Handler(http.FileServer(http.Dir(\"./\")))\n\n\terr := http.ListenAndServe(\":4000\", router)\n\n\t//\terr := http.ListenAndServe(\":\"+os.Getenv(\"PORT\"), router)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n}", "func main() {\n\n\tc.InitConfig()\n\n\tdb.Connect()\n\n\te := echo.New()\n\n\tr.InitRoutes(e)\n\n\te.Use(middleware.CORSWithConfig(middleware.CORSConfig{\n\t\tAllowOrigins: []string{\"http://localhost:3000\"},\n\t\tAllowHeaders: []string{echo.HeaderOrigin, echo.HeaderContentType, echo.HeaderAccept},\n\t}))\n\n\te.Use(middleware.RequestID())\n\te.Pre(middleware.RemoveTrailingSlash())\n\te.Use(middleware.Recover())\n\n\te.Logger.Fatal(e.Start(\":80\"))\n}", "func main() {\n\n\t// Loads env variables\n\t//err := godotenv.Load()\n\t//if err != nil {\n\t//\tlog.Fatal(\"Error loading .env file\")\n\t//\treturn\n\t//}\n\n\t//http.HandleFunc(\"/\", handler)\n\t//log.Fatal(http.ListenAndServe(fmt.Sprintf(\":%s\", \"8080\"), nil))\n\tgodotenv.Load()\n\n\trouter := entry.Initialize()\n\trouter.Run(\":3000\")\n}", "func init() {\n\thostPtr := flag.String(\"host\", \"localhost\", \"ip of host\")\n\tportPtr := flag.String(\"port\", \"12345\", \"port on which to run server\")\n\tflag.Parse()\n\thost = *hostPtr\n\tport = *portPtr\n}", "func main() {\n\n\tif rpcR := checkRunning(); rpcR != nil {\n\t\t// R is running, send stdin 
to it\n\t\tsendToR(rpcR)\n\t} else {\n\t\t// R is not running, start\n\t\tfmt.Println(\"Starting R\")\n\t\tif r := startR(os.Args[1:]...); r != nil {\n\t\t\t<-r.wait\n\t\t}\n\t}\n}", "func main() {\n\t//establish connection to the primary replica\n\t//connect to server\n\tconn_main_replica, err := net.Dial(\"tcp\", \"localhost:8084\")\n\tdefer conn_main_replica.Close()\n\tif err != nil {\n\t\tpanic(\"Failed connect to conn_main_replica\\n\")\n\t}\n\n\t//load user list for faster access to a list of current users\n\tload_user_list()\n\thandle_requests(conn_main_replica)\n}", "func main() {\n\tfmt.Println(\"Client.go\");\n}", "func main() {\n\n\tuseTLS, err := strconv.ParseBool(os.Args[1])\n\tif err != nil {\n\t\tlogrus.Errorf(\"invalid argument: %s\", err.Error())\n\t}\n\n\tlogrus.Infof(\"Starting HTTP server with tls: %v\", useTLS)\n\n\tmux := http.NewServeMux()\n\tmux.HandleFunc(\"/nginx_status\", serveNginx)\n\tmux.HandleFunc(\"/json\", serveJSON)\n\n\tif useTLS {\n\t\tstartHTTPS(mux)\n\t} else {\n\t\tstartHTTP(mux)\n\t}\n}", "func main() {\n\n\trouter := NewRouter()\n\tlog.Fatal(http.ListenAndServe(\":8080\", router))\n}", "func main() {\n\t// create a background context (i.e. 
one that never cancels)\n\tctx := context.Background()\n\n\t// start a libp2p node that listens on a random local TCP port,\n\t// but without running the built-in ping protocol\n\tnode, err := libp2p.New(ctx,\n\t\tlibp2p.ListenAddrStrings(\"/ip4/127.0.0.1/tcp/1234\"),\n\t)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\t// configure our own ping protocol\n\tpingService := &ping.PingService{Host: node}\n\tnode.SetStreamHandler(ping.ID, pingService.PingHandler)\n\n\t// print the node's PeerInfo in multiaddr format\n\tpeerInfo := peerstore.AddrInfo{\n\t\tID: node.ID(),\n\t\tAddrs: node.Addrs(),\n\t}\n\taddrs, err := peerstore.AddrInfoToP2pAddrs(&peerInfo)\n\tfmt.Println(\"libp2p node address:\", addrs[0])\n\n\t// print the node's listening addresses\n\tfmt.Println(\"Listen addresses:\", node.Addrs())\n\n\t// shut the node down\n\tif err := node.Close(); err != nil {\n\t\tpanic(err)\n\t}\n}", "func main() {\n\t// load config\n\tconfig.Init()\n\n\t// services\n\tservices.Init()\n\n\t// start gin server\n\trouter.RunGin()\n}", "func main() {\n\t/**\n\t * 先调用ParseCommand()函数解析命令行参数\n\t */\n\tcmd := parseCmd()\n\tif cmd.versionFlag {\n\t\t/**\n\t\t * 。如果用户输入了-version选项,则输出版本信息\n\t\t */\n\t\tfmt.Println(\"version 1.8.0\")\n\t} else if cmd.helpFlag || cmd.class == \"\" {\n\t\t/**\n\t\t * 如果解析出现错误,或者用户输入了-help选项,则调用PrintUsage()函数打印出帮助信息\n\t\t */\n\t\tprintUsage()\n\t} else {\n\t\t/**\n\t\t * 如果一切正常,则调用startJVM()函数启动Java虚拟机\n\t\t * 因为我们还没有真正开始编写Java虚拟机,所以startJVM()函数暂时只是打印一些信息而已,\n\t\t */\n\t\tstartJVM(cmd)\n\t}\n}", "func main() {\n\n\tconfigFile := flag.String(\"config\", \"config/config.yaml\", \"configuration file\")\n\t// debug := flag.Bool(\"debug\", false, \"enable debug mode\")\n\tflag.Parse()\n\n\t// Load global configuration from file\n\terr := cfg.loadConfig(*configFile)\n\tif err != nil {\n\t\tlog.WithFields(log.Fields{\n\t\t\t\"ConfigFile\": *configFile,\n\t\t\t\"Error\": err,\n\t\t}).Fatal(\"Unable to read configuration file\")\n\t}\n\n\tr := setupRouter()\n\n\t// 
Check for PORT environment variable config override.\n\tp := cfg.Server.Port\n\tif s := os.Getenv(\"PORT\"); len(s) > 0 {\n\t\tp, _ = strconv.Atoi(s)\n\t}\n\n\t// TODO fix path for logrus. Using fmt temporarily\n\t// log.WithFields(log.Fields{\n\t// \t\"Address\": cfg.Server.Address,\n\t// \t\"Port\": p,\n\t// }).Debug(\"Starting HTTP server\")\n\n\tfmt.Println(\"Starting HTTP Server\")\n\n\t// Listen and Serve at server address and port specified in config file\n\tr.Run(fmt.Sprintf(\"%s:%d\", cfg.Server.Address, p))\n}" ]
[ "0.70549303", "0.6946936", "0.670047", "0.6645153", "0.6628611", "0.66251695", "0.6601195", "0.6575071", "0.6574852", "0.6552101", "0.6540078", "0.64823025", "0.64497507", "0.64278376", "0.6411183", "0.63908523", "0.6383223", "0.6341063", "0.6328398", "0.63155854", "0.6315046", "0.63040376", "0.62960756", "0.6285047", "0.625506", "0.62469316", "0.6225739", "0.6212643", "0.62098104", "0.6194935", "0.6186158", "0.6175976", "0.61572874", "0.6149057", "0.6145319", "0.61401826", "0.6131459", "0.6125525", "0.6095188", "0.6080257", "0.6071658", "0.6070961", "0.60655737", "0.60630846", "0.60548675", "0.6052583", "0.60355014", "0.6025194", "0.6019952", "0.6009825", "0.6006614", "0.5997414", "0.59962827", "0.5994021", "0.5991499", "0.5990538", "0.5986967", "0.59782046", "0.5962863", "0.5958024", "0.5955935", "0.59520894", "0.5945274", "0.5944044", "0.5924219", "0.59207493", "0.5917511", "0.5909673", "0.59022266", "0.59022194", "0.5901981", "0.59002024", "0.5899645", "0.5895471", "0.58930135", "0.58910096", "0.5888845", "0.5883545", "0.58810115", "0.58778125", "0.58730674", "0.58691895", "0.58622324", "0.5854472", "0.5853803", "0.58467233", "0.58392555", "0.58231455", "0.58216596", "0.58212006", "0.5819386", "0.58121663", "0.58102846", "0.5804677", "0.5802714", "0.5794712", "0.5794383", "0.5791641", "0.57891554", "0.578806" ]
0.80571264
0
NewHealthController creates a health controller.
NewHealthController создает контроллер состояния здоровья.
func NewHealthController(service *goa.Service) *HealthController { return &HealthController{Controller: service.NewController("HealthController")} }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func NewHealthController(router *mux.Router, r *render.Render) *HealthController {\n\tctrl := &HealthController{router, r}\n\tctrl.Register()\n\treturn ctrl\n}", "func NewHealthController() *HealthController {\n\treturn &HealthController{}\n}", "func NewHealthController() *HealthController {\n\treturn new(HealthController)\n}", "func NewHealthController(service *goa.Service, zapi_list ZAPIStructure) *HealthController {\n\treturn &HealthController{\n\t\tController: service.NewController(\"HealthController\"),\n\t\tzapi_list: zapi_list,\n\t}\n}", "func NewController() *Controller {\n\treturn &Controller{}\n}", "func NewController() Controller {\n\treturn &controller{}\n}", "func NewController(name string) *Controller {\n\treturn &Controller{\n\t\tRoutes: NewRoutes(name),\n\t}\n}", "func NewController() *Controller {\n\treturn &Controller{wrapper: NewWrapper()}\n}", "func NewController() *Controller {\n\treturn &Controller{Logger: logger.NewLogger()}\n}", "func NewController() controller.Controller {\n\treturn &Controller{}\n}", "func NewHealthCheckController(\n\tlogger logging.LoggerInterface,\n\tappMonitor application.MonitorIterface,\n\tdependenciesMonitor services.MonitorIterface,\n) *HealthCheckController {\n\treturn &HealthCheckController{\n\t\tlogger: logger,\n\t\tappMonitor: appMonitor,\n\t\tdependenciesMonitor: dependenciesMonitor,\n\t}\n}", "func NewController() *Controller {\n\treturn &Controller{\n\t\tClouds: make(map[string]CloudProvider),\n\t\t// WorkerOptions: NewWorkerOptions(),\n\t\tprovisionErr: NewErrCloudProvision(),\n\t}\n}", "func NewController() Controller {\n\treturn &controller{\n\t\tClient: client.NewClient(),\n\t}\n}", "func NewController() *Controller {\n controller := Controller{}\n\n return &controller\n}", "func NewController(cfg *config.Config) (*Controller, error) {\n\tsrv, err := service.NewService(cfg)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &Controller{\n\t\tService: srv,\n\t}, nil\n}", "func New() *Controller 
{\n\treturn &Controller{}\n}", "func NewController() Controller {\n\treturn &controller{\n\t\tprojectCtl: project.Ctl,\n\t}\n}", "func NewController(client kubernetes.Interface) *Controller {\n\tshared := informers.NewSharedInformerFactory(client, time.Second*30)\n\tinform := shared.Apps().V1().Deployments()\n\tcontrl := &Controller{\n\t\tclient: client,\n\t\tinformer: inform.Informer(),\n\t\tlister: inform.Lister(),\n\t\tlogger: logrus.New(),\n\t\tqueue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), \"regitseel\"),\n\t}\n\n\tinform.Informer().AddEventHandler(\n\t\tcache.ResourceEventHandlerFuncs{\n\t\t\tAddFunc: contrl.enqueue,\n\t\t\tUpdateFunc: func(old, new interface{}) {\n\t\t\t\tcontrl.enqueue(new)\n\t\t\t},\n\t\t\tDeleteFunc: func(obj interface{}) {\n\t\t\t\td := obj.(*appsv1.Deployment)\n\t\t\t\tif err := contrl.delete(d); err != nil {\n\t\t\t\t\tcontrl.logger.Errorf(\"failed to delete from api: %v\", d.Name)\n\t\t\t\t}\n\t\t\t},\n\t\t},\n\t)\n\n\treturn contrl\n}", "func NewHospitalController(hospitalService service.HospitalService) HospitalController {\r\n\treturn &hospitalController{\r\n\t\thospitalService: hospitalService,\r\n\t}\r\n}", "func New(b *base.Controller, moduleID string, cu categoryUsecases.Usecase) *Controller {\n\treturn &Controller{\n\t\tb,\n\t\tmoduleID,\n\t\tcu,\n\t}\n}", "func NewController() Controller {\n\treturn &controller{\n\t\tiManager: instance.Mgr,\n\t\tpManager: policy.Mgr,\n\t\tscheduler: scheduler.Sched,\n\t\texecutionMgr: task.NewExecutionManager(),\n\t}\n}", "func (app *Application) NewController(resource *Resource) *Controller {\n\tc := &Controller{\n\t\tresource: resource,\n\t\tcustomHandlers: make(map[route]handlerChain),\n\t}\n\n\tapp.controllers[c.resource] = c\n\treturn c\n}", "func NewController(\n\topt controller.Options,\n\tnotifications chan struct{},\n\tserviceInformer servinginformers.ServiceInformer,\n) *Controller {\n\tlogger, _ := zap.NewProduction()\n\topt.Logger = 
logger.Sugar()\n\tc := &Controller{\n\t\tBase: controller.NewBase(opt, controllerAgentName, \"Services\"),\n\t}\n\n\tc.Logger.Info(\"Setting up event handlers\")\n\tserviceInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{\n\t\tAddFunc: c.Enqueue,\n\t\tUpdateFunc: controller.PassNew(c.Enqueue),\n\t\tDeleteFunc: c.Enqueue,\n\t})\n\n\treturn c\n}", "func NewController(address string) controller.Controller {\n\treturn &Controller{address, nil}\n}", "func NewController(exec boil.ContextExecutor) Controller {\n\trepo := &personRepository{executor: exec}\n\tsvc := &personService{repo: repo}\n\tpc := &personController{service: svc}\n\treturn pc\n}", "func NewController(brigade brigade.Service) Controller {\n\treturn &controller{\n\t\tbrigade: brigade,\n\t}\n}", "func NewController(service *service.PanicService) *PanicController {\n\tvar pc PanicController\n\tpc.Service = service\n\treturn &pc\n}", "func NewController(repository Repository) Controller {\n\treturn controller{repository: repository}\n}", "func NewController() *Controller {\n\treturn &Controller{\n\t\tstats: tabletenv.NewStats(servenv.NewExporter(\"MockController\", \"Tablet\")),\n\t\tqueryServiceEnabled: false,\n\t\tBroadcastData: make(chan *BroadcastData, 10),\n\t\tStateChanges: make(chan *StateChange, 10),\n\t\tqueryRulesMap: make(map[string]*rules.Rules),\n\t}\n}", "func New() *Controller {\n\treturn &Controller{\n\t\tValidatePayload: ValidatePayload,\n\t}\n}", "func NewController(m driver.StackAnalysisInterface) *Controller {\n\treturn &Controller{\n\t\tm: m,\n\t}\n}", "func NewController(params ControllerParams) (*Controller, error) {\n\t// If the BGP control plane is disabled, just return nil. This way the hive dependency graph is always static\n\t// regardless of config. 
The lifecycle has not been appended so no work will be done.\n\tif !params.DaemonConfig.BGPControlPlaneEnabled() {\n\t\treturn nil, nil\n\t}\n\n\tc := Controller{\n\t\tSig: params.Sig,\n\t\tBGPMgr: params.RouteMgr,\n\t\tPolicyResource: params.PolicyResource,\n\t\tNodeSpec: params.NodeSpec,\n\t}\n\n\tparams.Lifecycle.Append(&c)\n\n\treturn &c, nil\n}", "func NewController(params ControllerParams) (*Controller, error) {\n\t// If the BGP control plane is disabled, just return nil. This way the hive dependency graph is always static\n\t// regardless of config. The lifecycle has not been appended so no work will be done.\n\tif !params.DaemonConfig.BGPControlPlaneEnabled() {\n\t\treturn nil, nil\n\t}\n\n\tc := Controller{\n\t\tSig: params.Sig,\n\t\tBGPMgr: params.RouteMgr,\n\t\tPolicyResource: params.PolicyResource,\n\t\tLocalNodeStore: params.LocalNodeStore,\n\t}\n\n\tparams.Lifecycle.Append(&c)\n\n\treturn &c, nil\n}", "func NewController() Controller {\n\treturn &controller{\n\t\tprojectMgr: project.Mgr,\n\t\tmetaMgr: metamgr.NewDefaultProjectMetadataManager(),\n\t\tallowlistMgr: allowlist.NewDefaultManager(),\n\t}\n}", "func NewController(d *CSIDriver) csi.ControllerServer {\n\treturn &controller{\n\t\tdriver: d,\n\t\tcapabilities: newControllerCapabilities(),\n\t}\n}", "func NewController(commandBus command.Bus) Controller {\n\treturn &controllerImplement{commandBus}\n}", "func NewController(betValidator BetValidator, betService BetService) *Controller {\n\treturn &Controller{\n\t\tbetValidator: betValidator,\n\t\tbetService: betService,\n\t}\n}", "func NewController(betValidator BetValidator, betService BetService) *Controller {\n\treturn &Controller{\n\t\tbetValidator: betValidator,\n\t\tbetService: betService,\n\t}\n}", "func NewController(betService BetService) *Controller {\n\treturn &Controller{\n\t\tbetService: betService,\n\t}\n}", "func NewController(runner pitr.Runner, cluster cluster.Controller) Controller {\n\treturn Controller{\n\t\trunner: 
runner,\n\t\tcluster: cluster,\n\t}\n}", "func NewController(backendPool pool.Interface) *Controller {\n\treturn &Controller{\n\t\tbackendPool: backendPool,\n\t}\n}", "func NewController(t mockConstructorTestingTNewController) *Controller {\n\tmock := &Controller{}\n\tmock.Mock.Test(t)\n\n\tt.Cleanup(func() { mock.AssertExpectations(t) })\n\n\treturn mock\n}", "func NewController(t mockConstructorTestingTNewController) *Controller {\n\tmock := &Controller{}\n\tmock.Mock.Test(t)\n\n\tt.Cleanup(func() { mock.AssertExpectations(t) })\n\n\treturn mock\n}", "func (c *Config) NewController(e *env.Env) *Controller {\n\tctl := NewController(e)\n\tctl.DeviceIndex = c.DeviceIndex\n\tctl.Verbose = c.Verbose\n\treturn ctl\n}", "func NewController(logger *log.Logger, storageApiURL string, config resources.UbiquityPluginConfig) (*Controller, error) {\n\n\tremoteClient, err := remote.NewRemoteClient(logger, storageApiURL, config)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &Controller{logger: logger, Client: remoteClient, exec: utils.NewExecutor()}, nil\n}", "func NewController() Controller {\n\treturn &controller{\n\t\treservedExpiration: defaultReservedExpiration,\n\t\tquotaMgr: quota.Mgr,\n\t}\n}", "func New(b *base.Controller, moduleID string, uu userUsecases.Usecase) *Controller {\n\treturn &Controller{\n\t\tb,\n\t\tmoduleID,\n\t\tuu,\n\t}\n}", "func NewController(cfg *rest.Config) *Controller {\n\tclient := appsv1client.NewForConfigOrDie(cfg)\n\tkubeClient := kubernetes.NewForConfigOrDie(cfg)\n\tqueue := workqueue.NewRateLimitingQueue(workqueue.DefaultControllerRateLimiter())\n\tstopCh := make(chan struct{}) // TODO: hook this up to SIGTERM/SIGINT\n\n\tcsif := externalversions.NewSharedInformerFactoryWithOptions(clusterclient.NewForConfigOrDie(cfg), resyncPeriod)\n\n\tc := &Controller{\n\t\tqueue: queue,\n\t\tclient: client,\n\t\tclusterLister: csif.Cluster().V1alpha1().Clusters().Lister(),\n\t\tkubeClient: kubeClient,\n\t\tstopCh: 
stopCh,\n\t}\n\tcsif.WaitForCacheSync(stopCh)\n\tcsif.Start(stopCh)\n\n\tsif := informers.NewSharedInformerFactoryWithOptions(kubeClient, resyncPeriod)\n\tsif.Apps().V1().Deployments().Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{\n\t\tAddFunc: func(obj interface{}) { c.enqueue(obj) },\n\t\tUpdateFunc: func(_, obj interface{}) { c.enqueue(obj) },\n\t})\n\tsif.WaitForCacheSync(stopCh)\n\tsif.Start(stopCh)\n\n\tc.indexer = sif.Apps().V1().Deployments().Informer().GetIndexer()\n\tc.lister = sif.Apps().V1().Deployments().Lister()\n\n\treturn c\n}", "func NewController(db *sql.DB) *Controller {\n\treturn &Controller{db: db}\n}", "func NewController(app AppInterface) *Controller {\n\tc := new(Controller)\n\n\t// for debug logs\n\t// log.SetLevel(log.DebugLevel)\n\n\t// Save the handler\n\tc.app = app\n\treturn c\n}", "func NewController(cxn *connection.Connection, dryRun bool) *Controller {\n\tctl := &Controller{\n\t\tcxn: cxn,\n\t\tprogress: progressbars.New(),\n\t\tdryRun: dryRun,\n\t}\n\tctl.progress.RefreshRate = 3 * time.Second\n\treturn ctl\n}", "func NewController(dao Dao) *Controller {\n\treturn &Controller{Dao: dao}\n}", "func NewController(ctx context.Context, cmw configmap.Watcher) *controller.Impl {\n\tsubscriptionInformer := subscriptioninformersv1alpha1.Get(ctx)\n\teventActivationInformer := eventactivationinformersv1alpha1.Get(ctx)\n\tknativeLib, err := util.NewKnativeLib()\n\tif err != nil {\n\t\tpanic(\"Failed to initialize knative lib\")\n\t}\n\tStatsReporter, err := NewStatsReporter()\n\tif err != nil {\n\t\tpanic(\"Failed to Kyma Subscription Controller stats reporter\")\n\t}\n\n\tr := &Reconciler{\n\t\tBase: reconciler.NewBase(ctx, controllerAgentName, cmw),\n\t\tsubscriptionLister: subscriptionInformer.Lister(),\n\t\teventActivationLister: eventActivationInformer.Lister(),\n\t\tkymaEventingClient: eventbusclient.Get(ctx).EventingV1alpha1(),\n\t\tknativeLib: knativeLib,\n\t\topts: opts.DefaultOptions(),\n\t\ttime: 
util.NewDefaultCurrentTime(),\n\t\tStatsReporter: StatsReporter,\n\t}\n\timpl := controller.NewImpl(r, r.Logger, reconcilerName)\n\n\tsubscriptionInformer.Informer().AddEventHandler(controller.HandleAll(impl.Enqueue))\n\n\tregisterMetrics()\n\n\treturn impl\n}", "func newHelloController(helloService HelloService) *helloController {\n\treturn &helloController{\n\t\thelloService: helloService,\n\t}\n}", "func NewController(todoService todo.UseCase) *Controller {\n\treturn &Controller{\n\t\ttodoService: todoService,\n\t}\n}", "func NewController(t *testing.T) (*gomock.Controller, context.Context) {\n\tctx := context.Background()\n\treturn gomock.WithContext(ctx, t)\n}", "func NewController(customer customer.Service) *Controller {\n\treturn &Controller{\n\t\tcustomer: customer,\n\t}\n}", "func NewController(context *clusterd.Context, containerImage string) *Controller {\n\treturn &Controller{\n\t\tcontext: context,\n\t\tcontainerImage: containerImage,\n\t}\n}", "func NewController(namespace string) (*Controller, error) {\n\tconfig, err := clientcmd.BuildConfigFromFlags(*ClusterURL, \"\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tclientset, err := typedv1.NewForConfig(config)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar ns *v1.Namespace\n\tif namespace != \"\" {\n\t\tns, err = clientset.Namespaces().Get(namespace, metav1.GetOptions{})\n\t} else {\n\t\tns, err = createNamespace(clientset)\n\t}\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &Controller{\n\t\tclient: clientset,\n\t\tnamespace: ns,\n\t\trestConfig: config,\n\t\tfixedNs: namespace != \"\",\n\t}, nil\n}", "func NewController(\n\texperiment core.ExperimentStore,\n\tevent core.EventStore,\n\tttlc *TTLconfig,\n) *Controller {\n\treturn &Controller{\n\t\texperiment: experiment,\n\t\tevent: event,\n\t\tttlconfig: ttlc,\n\t}\n}", "func NewHealth(logger *log.Logger) health.Service {\n\treturn &healthsrvc{logger}\n}", "func NewController(\n\topts *reconciler.Options,\n\trevisionInformer 
servinginformers.RevisionInformer,\n\trevSynch RevisionSynchronizer,\n\tinformerResyncInterval time.Duration,\n) *controller.Impl {\n\n\tc := &Reconciler{\n\t\tBase: reconciler.NewBase(*opts, controllerAgentName),\n\t\trevisionLister: revisionInformer.Lister(),\n\t\trevSynch: revSynch,\n\t}\n\timpl := controller.NewImpl(c, c.Logger, \"Autoscaling\")\n\n\tc.Logger.Info(\"Setting up event handlers\")\n\trevisionInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{\n\t\tAddFunc: impl.Enqueue,\n\t\tUpdateFunc: controller.PassNew(impl.Enqueue),\n\t\tDeleteFunc: impl.Enqueue,\n\t})\n\n\treturn impl\n}", "func NewController(productService contract.ProductService) *Controller {\n\tonce.Do(func() {\n\t\tinstance = &Controller{\n\t\t\tproductService: productService,\n\t\t}\n\t})\n\treturn instance\n}", "func NewController(repository storage.Repository, resourceBaseURL string, objectType types.ObjectType, objectBlueprint func() types.Object) *BaseController {\n\treturn &BaseController{\n\t\trepository: repository,\n\t\tresourceBaseURL: resourceBaseURL,\n\t\tobjectBlueprint: objectBlueprint,\n\t\tobjectType: objectType,\n\t}\n}", "func New(ctx context.Context, config *config.CleanupConfig, db *database.Database, h *render.Renderer) *Controller {\n\tlogger := logging.FromContext(ctx)\n\treturn &Controller{\n\t\tconfig: config,\n\t\tdb: db,\n\t\th: h,\n\t\tlogger: logger,\n\t}\n}", "func NewCreateGoalController(cgtRepos *persistence.Services, logger *log.Logger, authorizationService authorization.JwtService) Controller {\n\tcreateGoalUsecase := usecase.NewCreateGoalUsecase(&cgtRepos.Achiever, &cgtRepos.Goal, authorizationService)\n\n\tctrl := &createGoalController{\n\t\tUsecase: createGoalUsecase,\n\t\tLogger: logger,\n\t\tAuthorization: authorizationService,\n\t}\n\treturn ctrl\n}", "func NewController(\n\tctx context.Context,\n\tcmw configmap.Watcher,\n) *controller.Impl {\n\tlogger := logging.FromContext(ctx)\n\n\tbrokercellInformer := 
brokercell.Get(ctx)\n\tbrokerLister := brokerinformer.Get(ctx).Lister()\n\tdeploymentLister := deploymentinformer.Get(ctx).Lister()\n\tsvcLister := serviceinformer.Get(ctx).Lister()\n\tepLister := endpointsinformer.Get(ctx).Lister()\n\thpaLister := hpainformer.Get(ctx).Lister()\n\n\tbase := reconciler.NewBase(ctx, controllerAgentName, cmw)\n\tr, err := NewReconciler(base, brokerLister, svcLister, epLister, deploymentLister)\n\tif err != nil {\n\t\tlogger.Fatal(\"Failed to create BrokerCell reconciler\", zap.Error(err))\n\t}\n\tr.hpaLister = hpaLister\n\timpl := v1alpha1brokercell.NewImpl(ctx, r)\n\n\tlogger.Info(\"Setting up event handlers.\")\n\n\t// TODO(https://github.com/google/knative-gcp/issues/912) Change period back to 5 min once controller\n\t// watches for data plane components.\n\tbrokercellInformer.Informer().AddEventHandlerWithResyncPeriod(controller.HandleAll(impl.Enqueue), 30*time.Second)\n\n\t// Watch data plane components created by brokercell so we can update brokercell status immediately.\n\t// 1. Watch deployments for ingress, fanout and retry\n\tdeploymentinformer.Get(ctx).Informer().AddEventHandler(handleResourceUpdate(impl))\n\t// 2. Watch ingress endpoints\n\tendpointsinformer.Get(ctx).Informer().AddEventHandler(handleResourceUpdate(impl))\n\t// 3. 
Watch hpa for ingress, fanout and retry deployments\n\thpainformer.Get(ctx).Informer().AddEventHandler(handleResourceUpdate(impl))\n\n\treturn impl\n}", "func New(ctx context.Context, config *config.ServerConfig, h *render.Renderer) *Controller {\n\tlogger := logging.FromContext(ctx)\n\n\treturn &Controller{\n\t\tconfig: config,\n\t\th: h,\n\t\tlogger: logger,\n\t}\n}", "func NewController(client CopilotClient, store model.ConfigStore, logger logger, timeout, checkInterval time.Duration) *Controller {\n\treturn &Controller{\n\t\tclient: client,\n\t\tstore: store,\n\t\tlogger: logger,\n\t\ttimeout: timeout,\n\t\tcheckInterval: checkInterval,\n\t\tstorage: storage{\n\t\t\tvirtualServices: make(map[string]*model.Config),\n\t\t\tdestinationRules: make(map[string]*model.Config),\n\t\t},\n\t}\n}", "func NewController() node.Initializer {\n\treturn controller{}\n}", "func NewController(region string, networkManager *NetworkManager, playerManager *PlayerManager, firebase *triebwerk.Firebase, masterServer MasterServerClient) *Controller {\n\treturn &Controller{\n\t\tnetworkManager: networkManager,\n\t\tplayerManager: playerManager,\n\t\tstate: model.NewGameState(region),\n\t\tfirebase: firebase,\n\t\tmasterServer: masterServer,\n\t}\n}", "func New(client vpnkit.Client, services corev1client.ServicesGetter) *Controller {\n\treturn &Controller{\n\t\tservices: services,\n\t\tclient: client,\n\t}\n}", "func New(s *service.Service) *Controller {\n\tlogger.Println(\"New controller instance was initialized\")\n\treturn &Controller{\n\t\tservice: s,\n\t}\n}", "func NewPatientController() *PatientController {\n\treturn &PatientController{}\n}", "func NewController(kubeClient kubernetes.Interface, nodesInformer informers.NodeInformer, nodeLabel *NodeLabel) *Controller {\n\tklog.V(4).Info(\"Creating event broadcaster\")\n\teventBroadcaster := record.NewBroadcaster()\n\teventBroadcaster.StartLogging(klog.Infof)\n\teventBroadcaster.StartRecordingToSink(&typed.EventSinkImpl{Interface: 
kubeClient.CoreV1().Events(\"\")})\n\n\tc := &Controller{\n\t\tkubeClient: kubeClient,\n\t\tnodesLister: nodesInformer.Lister(),\n\t\tnodesSynced: nodesInformer.Informer().HasSynced,\n\t\tworkqueue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), controllerName),\n\t\trecorder: eventBroadcaster.NewRecorder(scheme.Scheme, api.EventSource{Component: controllerName}),\n\t\tnodeLabel: nodeLabel,\n\t}\n\n\tnodesInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{\n\t\tAddFunc: c.handleNode,\n\t\tUpdateFunc: func(old, new interface{}) {\n\t\t\tc.handleNode(new)\n\t\t},\n\t})\n\n\treturn c\n}", "func NewController(ctx context.Context) *Controller {\n\treturn &Controller{\n\t\tctx: ctx,\n\t\texplorerClient: newClientConn(1000, 10000),\n\t\trecordClient: newClientConn(1000, 10000),\n\t\tsubTree: make(chan *URLEntry, 1000),\n\t\trecord: make(chan *URLEntry, 1000),\n\t\turlCache: make(map[string]*URLEntry),\n\t\texplorerStat: newRoutineStat(0),\n\t\trecordStat: newRoutineStat(0),\n\t}\n}", "func New(tl TemplateLoader, td TemplateDecoder, logger log.Logger, configuration *EnvironmentServiceConfigController, statusPublisher StatusPublisher) *Controller {\n\treturn &Controller{\n\t\ttemplateLoader: tl,\n\t\tTemplateDecoder: td,\n\t\tLogger: logger,\n\t\tConfigurationController: configuration,\n\t\tstatusPublisher: statusPublisher,\n\t}\n}", "func NewController(le *logrus.Entry, bus bus.Bus, conf *Config) (*Controller, error) {\n\tdir := path.Clean(conf.GetDir())\n\tif _, err := os.Stat(dir); err != nil {\n\t\treturn nil, errors.Wrapf(err, \"stat %s\", dir)\n\t}\n\treturn &Controller{\n\t\tle: le,\n\t\tbus: bus,\n\t\tdir: dir,\n\n\t\twatch: conf.GetWatch(),\n\t}, nil\n}", "func NewHealth(r router) *Health {\n\treturn &Health{\n\t\trouter: r,\n\t}\n}", "func NewHelloController(e *echo.Echo) {\n\thandler := &HelloController{}\n\n\te.GET(\"/hello\", handler.Hello)\n}", "func NewController(userService user.Service) chi.Router {\n\tc := 
Controller{userService}\n\tr := chi.NewRouter()\n\n\tr.Post(\"/\", c.AddUser)\n\tr.Get(\"/{userID}\", c.GetUser)\n\tr.Put(\"/{userID}/name\", c.UpdateName)\n\n\treturn r\n}", "func NewController(\n\tctx context.Context,\n\tcmw configmap.Watcher,\n) *controller.Impl {\n\tlogger := logging.FromContext(ctx)\n\n\tprovisionedserviceInformer := provisionedservice.Get(ctx)\n\n\t// TODO: setup additional informers here.\n\n\tr := &Reconciler{}\n\timpl := v1alpha1provisionedservice.NewImpl(ctx, r)\n\n\tlogger.Info(\"Setting up event handlers.\")\n\n\tprovisionedserviceInformer.Informer().AddEventHandler(controller.HandleAll(impl.Enqueue))\n\n\t// TODO: add additional informer event handlers here.\n\n\treturn impl\n}", "func NewController(client *k8s.KubeClient, nodeID string, serviceClient api.DriveServiceClient, eventRecorder *events.Recorder, log *logrus.Logger) *Controller {\n\treturn &Controller{\n\t\tclient: client,\n\t\tcrHelper: k8s.NewCRHelper(client, log),\n\t\tnodeID: nodeID,\n\t\tdriveMgrClient: serviceClient,\n\t\teventRecorder: eventRecorder,\n\t\tlog: log.WithField(\"component\", \"Controller\"),\n\t}\n}", "func NewController(cfg configuration.Controller, extractor interfaces.JetDropsExtractor, storage interfaces.Storage, pv int) (*Controller, error) {\n\tc := &Controller{\n\t\tcfg: cfg,\n\t\textractor: extractor,\n\t\tstorage: storage,\n\t\tjetDropRegister: make(map[types.Pulse]map[string]struct{}),\n\t\tmissedDataManager: NewMissedDataManager(time.Second*time.Duration(cfg.ReloadPeriod), time.Second*time.Duration(cfg.ReloadCleanPeriod)),\n\t\tplatformVersion: pv,\n\t}\n\treturn c, nil\n}", "func NewHealth() *Health {\n\treturn &Health{\n\t\tcomponents: map[interface{}]*HealthComponentStatus{},\n\t}\n}", "func NewController(\n\tcontrollerLogger *log.Logger,\n\trenderer *render.Render,\n\tauthPublisher *Publisher,\n\tuserRedis *user.RedisManager,\n) *Controller {\n\treturn &Controller{\n\t\tlogger: controllerLogger,\n\t\trender: renderer,\n\t\tauthPublisher: 
authPublisher,\n\t\tuserRedis: userRedis,\n\t}\n}", "func NewController(\n\tcontrollerLogger *log.Logger,\n\trenderer *render.Render,\n\tuserRedisManager *user.RedisManager,\n\tcategoryRedisManager *RedisManager,\n\tcategoryPublisher *Publisher,\n) *Controller {\n\treturn &Controller{\n\t\tlogger: controllerLogger,\n\t\trender: renderer,\n\t\tuserRedis: userRedisManager,\n\t\tcategoryRedis: categoryRedisManager,\n\t\tcategoryPublisher: categoryPublisher,\n\t}\n}", "func NewController(ctx context.Context, keypfx string, cli state.Repository) *Controller {\n\tctx, cancel := context.WithCancel(ctx)\n\tc := &Controller{\n\t\tkeypfx: fmt.Sprintf(\"%s/task-coordinator/%s\", keypfx, version),\n\t\tcli: cli,\n\t\tctx: ctx,\n\t\tcancel: cancel,\n\t\tdonec: make(chan struct{}),\n\t}\n\tgo c.run()\n\treturn c\n}", "func NewController(ctx context.Context, clientMap clientmap.ClientMap) (*Controller, error) {\n\tgardenClient, err := clientMap.GetClient(ctx, keys.ForGarden())\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tbackupBucketInformer, err := gardenClient.Cache().GetInformer(ctx, &gardencorev1beta1.BackupBucket{})\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to get BackupBucket Informer: %w\", err)\n\t}\n\tbackupEntryInformer, err := gardenClient.Cache().GetInformer(ctx, &gardencorev1beta1.BackupEntry{})\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to get BackupEntry Informer: %w\", err)\n\t}\n\tcontrollerDeploymentInformer, err := gardenClient.Cache().GetInformer(ctx, &gardencorev1beta1.ControllerDeployment{})\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to get ControllerDeployment Informer: %w\", err)\n\t}\n\tcontrollerInstallationInformer, err := gardenClient.Cache().GetInformer(ctx, &gardencorev1beta1.ControllerInstallation{})\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to get ControllerInstallation Informer: %w\", err)\n\t}\n\tcontrollerRegistrationInformer, err := gardenClient.Cache().GetInformer(ctx, 
&gardencorev1beta1.ControllerRegistration{})\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to get ControllerRegistration Informer: %w\", err)\n\t}\n\tseedInformer, err := gardenClient.Cache().GetInformer(ctx, &gardencorev1beta1.Seed{})\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to get Seed Informer: %w\", err)\n\t}\n\tshootInformer, err := gardenClient.Cache().GetInformer(ctx, &gardencorev1beta1.Shoot{})\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to get Shoot Informer: %w\", err)\n\t}\n\n\tcontroller := &Controller{\n\t\tgardenClient: gardenClient.Client(),\n\n\t\tcontrollerRegistrationReconciler: NewControllerRegistrationReconciler(logger.Logger, gardenClient.Client()),\n\t\tcontrollerRegistrationSeedReconciler: NewControllerRegistrationSeedReconciler(logger.Logger, gardenClient),\n\t\tseedReconciler: NewSeedReconciler(logger.Logger, gardenClient.Client()),\n\n\t\tcontrollerRegistrationQueue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), \"controllerregistration\"),\n\t\tcontrollerRegistrationSeedQueue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), \"controllerregistration-seed\"),\n\t\tseedQueue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), \"seed\"),\n\t\tworkerCh: make(chan int),\n\t}\n\n\tbackupBucketInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{\n\t\tAddFunc: controller.backupBucketAdd,\n\t\tUpdateFunc: controller.backupBucketUpdate,\n\t\tDeleteFunc: controller.backupBucketDelete,\n\t})\n\n\tbackupEntryInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{\n\t\tAddFunc: controller.backupEntryAdd,\n\t\tUpdateFunc: controller.backupEntryUpdate,\n\t\tDeleteFunc: controller.backupEntryDelete,\n\t})\n\n\tcontrollerRegistrationInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{\n\t\tAddFunc: func(obj interface{}) { controller.controllerRegistrationAdd(ctx, obj) },\n\t\tUpdateFunc: func(oldObj, newObj interface{}) { 
controller.controllerRegistrationUpdate(ctx, oldObj, newObj) },\n\t\tDeleteFunc: controller.controllerRegistrationDelete,\n\t})\n\n\tcontrollerDeploymentInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{\n\t\tAddFunc: func(obj interface{}) { controller.controllerDeploymentAdd(ctx, obj) },\n\t\tUpdateFunc: func(oldObj, newObj interface{}) { controller.controllerDeploymentUpdate(ctx, oldObj, newObj) },\n\t})\n\n\tcontrollerInstallationInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{\n\t\tAddFunc: controller.controllerInstallationAdd,\n\t\tUpdateFunc: controller.controllerInstallationUpdate,\n\t})\n\n\tseedInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{\n\t\tAddFunc: func(obj interface{}) { controller.seedAdd(obj, true) },\n\t\tUpdateFunc: controller.seedUpdate,\n\t\tDeleteFunc: controller.seedDelete,\n\t})\n\n\tshootInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{\n\t\tAddFunc: controller.shootAdd,\n\t\tUpdateFunc: controller.shootUpdate,\n\t\tDeleteFunc: controller.shootDelete,\n\t})\n\n\tcontroller.hasSyncedFuncs = append(controller.hasSyncedFuncs,\n\t\tbackupBucketInformer.HasSynced,\n\t\tbackupEntryInformer.HasSynced,\n\t\tcontrollerRegistrationInformer.HasSynced,\n\t\tcontrollerDeploymentInformer.HasSynced,\n\t\tcontrollerInstallationInformer.HasSynced,\n\t\tseedInformer.HasSynced,\n\t\tshootInformer.HasSynced,\n\t)\n\n\treturn controller, nil\n}", "func NewController(\n\tchopClient chopClientSet.Interface,\n\textClient apiExtensions.Interface,\n\tkubeClient kube.Interface,\n\tchopInformerFactory chopInformers.SharedInformerFactory,\n\tkubeInformerFactory kubeInformers.SharedInformerFactory,\n) *Controller {\n\n\t// Initializations\n\t_ = chopClientSetScheme.AddToScheme(scheme.Scheme)\n\n\t// Setup events\n\teventBroadcaster := record.NewBroadcaster()\n\teventBroadcaster.StartLogging(log.Info)\n\teventBroadcaster.StartRecordingToSink(\n\t\t&typedCoreV1.EventSinkImpl{\n\t\t\tInterface: 
kubeClient.CoreV1().Events(\"\"),\n\t\t},\n\t)\n\trecorder := eventBroadcaster.NewRecorder(\n\t\tscheme.Scheme,\n\t\tcoreV1.EventSource{\n\t\t\tComponent: componentName,\n\t\t},\n\t)\n\n\t// Create Controller instance\n\tcontroller := &Controller{\n\t\tkubeClient: kubeClient,\n\t\textClient: extClient,\n\t\tchopClient: chopClient,\n\t\tchiLister: chopInformerFactory.Clickhouse().V1().ClickHouseInstallations().Lister(),\n\t\tchiListerSynced: chopInformerFactory.Clickhouse().V1().ClickHouseInstallations().Informer().HasSynced,\n\t\tchitLister: chopInformerFactory.Clickhouse().V1().ClickHouseInstallationTemplates().Lister(),\n\t\tchitListerSynced: chopInformerFactory.Clickhouse().V1().ClickHouseInstallationTemplates().Informer().HasSynced,\n\t\tserviceLister: kubeInformerFactory.Core().V1().Services().Lister(),\n\t\tserviceListerSynced: kubeInformerFactory.Core().V1().Services().Informer().HasSynced,\n\t\tendpointsLister: kubeInformerFactory.Core().V1().Endpoints().Lister(),\n\t\tendpointsListerSynced: kubeInformerFactory.Core().V1().Endpoints().Informer().HasSynced,\n\t\tconfigMapLister: kubeInformerFactory.Core().V1().ConfigMaps().Lister(),\n\t\tconfigMapListerSynced: kubeInformerFactory.Core().V1().ConfigMaps().Informer().HasSynced,\n\t\tstatefulSetLister: kubeInformerFactory.Apps().V1().StatefulSets().Lister(),\n\t\tstatefulSetListerSynced: kubeInformerFactory.Apps().V1().StatefulSets().Informer().HasSynced,\n\t\tpodLister: kubeInformerFactory.Core().V1().Pods().Lister(),\n\t\tpodListerSynced: kubeInformerFactory.Core().V1().Pods().Informer().HasSynced,\n\t\trecorder: recorder,\n\t}\n\tcontroller.initQueues()\n\tcontroller.addEventHandlers(chopInformerFactory, kubeInformerFactory)\n\n\treturn controller\n}", "func NewController(c *config.Config) (*Controller, error) {\n\tipt, err := iptables.New()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tctr := &Controller{\n\t\tshutdown: make(chan bool),\n\t\tconfig: c,\n\t\tipt: ipt,\n\t}\n\tctr.bridgeAddr, ctr.subnet, 
err = net.ParseCIDR(c.Network.Subnet)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tctr.bridgeInterface, err = CreateNetBridge(\"br\"+c.Network.InterfaceIdent, ctr.bridgeAddr, &net.IPNet{Mask: ctr.subnet.Mask})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tctr.wlanAddr = dhcp4.IPAdd(ctr.bridgeAddr, 1)\n\tif c.Network.Wireless.Interface != \"\" {\n\t\tif err := SetInterfaceAddr(c.Network.Wireless.Interface, &net.IPNet{IP: ctr.wlanAddr, Mask: ctr.subnet.Mask}); err != nil {\n\t\t\tDeleteNetBridge(ctr.bridgeInterface.Name)\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\tif err := ctr.setupFirewall(); err != nil {\n\t\tDeleteNetBridge(ctr.bridgeInterface.Name)\n\t\treturn nil, err\n\t}\n\n\tif err := ctr.startHostapd(); err != nil {\n\t\tDeleteNetBridge(ctr.bridgeInterface.Name)\n\t\treturn nil, err\n\t}\n\n\tctr.wg.Add(1)\n\tgo ctr.circuitBreakerRoutine()\n\tctr.wg.Add(1)\n\tgo ctr.hostapdStatusRoutine()\n\tgo ctr.dhcpDNSRoutine()\n\treturn ctr, nil\n}", "func NewHealth(\n\tstate Status,\n\tmessage string,\n) Health {\n\treturn Health{\n\t\tStatus: state,\n\t\tUrgency: UNKNOWN, // set by the owning Monitor\n\t\tTime: time.Now(),\n\t\tMessage: Message(message),\n\t\tDuration: 0,\n\t}\n}", "func NewController(bookmarkservice *services.BookmarkService, bookmarkcategoryservice *services.BookmarkCategoryService, auth *AuthController) *Controller {\n\treturn &Controller{\n\t\tbmsrv: bookmarkservice,\n\t\tauth: auth,\n\t\tbmcsrv: bookmarkcategoryservice,\n\t}\n}", "func NewController(\n\tctx context.Context,\n\tcmw configmap.Watcher,\n) *controller.Impl {\n\tbrokerInformer := brokerinformer.Get(ctx)\n\teventTypeInformer := eventtypeinformer.Get(ctx)\n\n\tr := &Reconciler{\n\t\teventTypeLister: eventTypeInformer.Lister(),\n\t\tbrokerLister: brokerInformer.Lister(),\n\t}\n\timpl := eventtypereconciler.NewImpl(ctx, r)\n\n\teventTypeInformer.Informer().AddEventHandler(controller.HandleAll(impl.Enqueue))\n\n\t// Tracker is used to notify us that a EventType's Broker has changed so 
that\n\t// we can reconcile.\n\tr.tracker = impl.Tracker\n\tbrokerInformer.Informer().AddEventHandler(controller.HandleAll(\n\t\tcontroller.EnsureTypeMeta(\n\t\t\tr.tracker.OnChanged,\n\t\t\tv1.SchemeGroupVersion.WithKind(\"Broker\"),\n\t\t),\n\t))\n\n\treturn impl\n}", "func NewController() Controller {\n\treturn &controller{\n\t\tblobMgr: blob.Mgr,\n\t\tblobSizeExpiration: time.Hour * 24, // keep the size of blob in redis with 24 hours\n\t}\n}", "func NewController(informer cache.SharedIndexInformer, conf *config.Config, defaultClient client.ValiClient,\n\tlogger log.Logger) (Controller, error) {\n\tcontroller := &controller{\n\t\tclients: make(map[string]ControllerClient, expectedActiveClusters),\n\t\tconf: conf,\n\t\tdefaultClient: defaultClient,\n\t\tlogger: logger,\n\t}\n\n\tinformer.AddEventHandler(cache.ResourceEventHandlerFuncs{\n\t\tAddFunc: controller.addFunc,\n\t\tDeleteFunc: controller.delFunc,\n\t\tUpdateFunc: controller.updateFunc,\n\t})\n\n\tstopChan := make(chan struct{})\n\ttime.AfterFunc(conf.ControllerConfig.CtlSyncTimeout, func() {\n\t\tclose(stopChan)\n\t})\n\n\tif !cache.WaitForCacheSync(stopChan, informer.HasSynced) {\n\t\treturn nil, fmt.Errorf(\"failed to wait for caches to sync\")\n\t}\n\n\treturn controller, nil\n}", "func New(config Config) (*Controller, error) {\n\tif reflect.DeepEqual(config.Cluster, v1alpha1.KVMConfig{}) {\n\t\treturn nil, microerror.Maskf(invalidConfigError, \"%T.Cluster must not be empty\", config)\n\t}\n\tif config.Logger == nil {\n\t\treturn nil, microerror.Maskf(invalidConfigError, \"%T.Logger must not be empty\", config)\n\t}\n\tif config.ManagementK8sClient == nil {\n\t\treturn nil, microerror.Maskf(invalidConfigError, \"%T.ManagementK8sClient must not be empty\", config)\n\t}\n\tif config.Name == \"\" {\n\t\treturn nil, microerror.Maskf(invalidConfigError, \"%T.Name must not be empty\", config)\n\t}\n\tif config.Selector == nil {\n\t\treturn nil, microerror.Maskf(invalidConfigError, \"%T.Selector must not be 
empty\", config)\n\t}\n\tif config.WorkloadK8sClient == nil {\n\t\treturn nil, microerror.Maskf(invalidConfigError, \"%T.WorkloadK8sClient must not be empty\", config)\n\t}\n\n\tc := &Controller{\n\t\tmanagementK8sClient: config.ManagementK8sClient,\n\t\tworkloadK8sClient: config.WorkloadK8sClient,\n\t\tlogger: config.Logger,\n\n\t\tstopped: make(chan struct{}),\n\t\tlastReconciled: time.Time{},\n\t\tname: config.Name,\n\t\tselector: config.Selector,\n\t\tcluster: config.Cluster,\n\t}\n\n\treturn c, nil\n}", "func NewGreetController(svc *service.Greeting) *GreetController {\n\treturn &GreetController{\n\t\tsvc: svc,\n\t}\n}", "func NewController(s *SessionInfo, timeout time.Duration) *Controller {\n\tif timeout == 0 {\n\t\ttimeout = DefaultTimeout\n\t}\n\trng := rand.New(rand.NewSource(time.Now().UnixNano() + rand.Int63()))\n\treturn &Controller{\n\t\tsessionInfo: s,\n\t\ttimeout: timeout,\n\n\t\tswitches: map[string][]uint32{},\n\t\tswitchIndices: map[string]int{},\n\n\t\tseqID: uint16(rng.Int63()),\n\t}\n}", "func NewBookController() *BookController {\n\treturn new(BookController)\n}" ]
[ "0.81887347", "0.80589396", "0.7891792", "0.77446157", "0.68483025", "0.677226", "0.67417073", "0.6698271", "0.6693059", "0.66468036", "0.6549667", "0.6535941", "0.6533674", "0.6522994", "0.64922637", "0.6490057", "0.64858145", "0.64842665", "0.642976", "0.64031225", "0.6390199", "0.63852227", "0.6361572", "0.63380086", "0.63208735", "0.6317767", "0.63157684", "0.63122624", "0.6305745", "0.62379426", "0.6236916", "0.6225162", "0.6225049", "0.6203832", "0.6203049", "0.6187288", "0.61872786", "0.61872786", "0.61818963", "0.6181528", "0.61587036", "0.6157305", "0.6157305", "0.6147944", "0.6144085", "0.61321914", "0.6112596", "0.6106345", "0.61010355", "0.60879666", "0.6072976", "0.60720396", "0.60719144", "0.60671", "0.60664916", "0.60508156", "0.6044085", "0.6042744", "0.6010775", "0.6010587", "0.59864944", "0.59607387", "0.5947828", "0.5941752", "0.5929732", "0.59183747", "0.5908792", "0.5895651", "0.5891816", "0.5889743", "0.5876393", "0.5873848", "0.587323", "0.587168", "0.5858415", "0.58545655", "0.5850157", "0.5829847", "0.5825317", "0.5822145", "0.5818497", "0.5814762", "0.58144987", "0.57673144", "0.5758873", "0.5748169", "0.5743816", "0.57343644", "0.5719282", "0.570239", "0.5702168", "0.56968933", "0.5676256", "0.566698", "0.56619215", "0.5659986", "0.56570715", "0.565604", "0.5653676", "0.5633562" ]
0.80731326
1
NewGenerator starts foreground goroutine which generates sequence of unsigned ints and puts them in input channel, also it returnes stop channel which need to be triggered when generator need to be stopped
NewGenerator запускает фоновый го-рутин, которая генерирует последовательность беззнаковых целых чисел и помещает их в канал input, также возвращается канал stop, который необходимо активировать при остановке генератора
func NewGenerator(input chan<- uint) chan<- bool { stop := make(chan bool) go func() { var current uint = 1 for { select { case input <- current: current++ case <-stop: close(input) return } } }() return stop }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func Generator(done chan struct{}) <-chan int {\n\tout := make(chan int)\n\n\tgo func() {\n\t\tdefer close(out)\n\n\t\tfor i, j := 0, 1; ; i, j = j, i+j {\n\t\t\tselect {\n\t\t\tcase out <- i:\n\t\t\tcase <-done:\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}()\n\n\treturn out\n}", "func generate() chan int {\n\tch := make(chan int)\n\tgo func() {\n\t\tfor i := 2; ; i++ {\n\t\t\tch <- i\n\t\t}\n\t}()\n\treturn ch\n}", "func generate(source chan<- int) {\n\tfor i := 2; ; i++ {\n\t\tsource <- i // Send 'i' to channel 'source'.\n\t}\n}", "func gen(num int) <-chan int {\n\t// Run Step 1\n\t// Unbufferred Channel\n\tout := make(chan int, 1) //capacity\n\t// Run Step 2\n\tgo func() {\n\t\tfor i := 1; i < num; i++ {\n\t\t\t// Chi day dc vao channel khi ma len < capacity\n\t\t\tout <- i\n\t\t\t// In ra o day co nghia la da push dc\n\t\t\tfmt.Printf(\"\\n[GEN] channel: %d\", i)\n\t\t}\n\t\tclose(out)\n\t}()\n\t// Run Step\n\treturn out\n}", "func generator(nums ...int) <-chan int {\n\tout := make(chan int)\n\n\tgo func() {\n\t\tdefer close(out)\n\t\tfor _, val := range nums {\n\t\t\tout <- val\n\t\t}\n\t}()\n\n\treturn out\n}", "func generate(nums ...int) <-chan int {\n\t// create channel with results\n\tout := make(chan int)\n\n\t// run new gouroutine\n\tgo func() {\n\t\tfor _, n := range nums {\n\t\t\tfmt.Printf(\"Generating value %d \\n\", n)\n\t\t\tout <- n\n\t\t}\n\t\tclose(out)\n\t}()\n\n\t// return result channel immediately (it's empty at that time)\n\treturn out\n}", "func Generate(ch chan<- int) {\n\tfor i := 2; ; i++ {\n\t\tch <- i // send i to ch\n\t}\n}", "func generator2(nums ...int) <-chan int {\n\tres := make(chan int, buffSize)\n\tgo func() {\n\t\tfor _, n := range nums {\n\t\t\tres <- n\n\t\t}\n\t\tclose(res)\n\t}()\n\treturn res\n}", "func generate(a int) (out chan int) {\n\tout = make(chan int)\n\n\tgo func() {\n\t\tdefer close(out)\n\t\tfor i := 1; i <= a; i++ {\n\t\t\tout <- i\n\t\t}\n\t}()\n\n\treturn\n}", "func gen(integers ...int) <-chan int {\n\tout := 
make(chan int)\n\tgo func() {\n\t\tfor _, i := range integers {\n\t\t\tout <- i\n\t\t}\n\t\tclose(out)\n\t}()\n\treturn out\n}", "func generate(ch chan int){\n\tfor i:=2;;i++{\n\t\t//Send a number\n\t\tch <-i\n\t}\n}", "func Generate(ch chan<- int) {\n\tfor i := 2; ; i++ {\n\t\tch <- i // Send 'i' to channel 'ch'.\n\t}\n}", "func Generate(ch chan<- int) {\n\tfor i := 2; ; i++ {\n\t\tch <- i // Send 'i' to channel 'ch'.\n\t}\n}", "func gen() <-chan int {\n\tout := make(chan int)\n\tgo func() {\n\t\tfor i := 0; i < 100; i++ {\n\t\t\tfor j := 3; j < 13; j++ {\n\t\t\t\tout <- j\n\t\t\t}\n\t\t}\n\t\tclose(out)\n\t}()\n\treturn out\n}", "func gen(nums ...int)<-chan int{\n\tout := make(chan int, len(nums)) //buffered\n\tgo func(){\n\t\tfor _, n := range nums{\n\t\t\tout <- n\n\t\t}\n\t\tclose(out)\n\t}()\n\treturn out\n}", "func gen(nums ...int) <-chan int {\n\tout := make(chan int)\n\tgo func() {\n\t\tfor _, n := range nums {\n\t\t\tout <- n\n\t\t}\n\t\tclose(out)\n\t}()\n\treturn out\n}", "func gen(nums ...int) <-chan int {\n\tout := make(chan int)\n\tgo func() {\n\t\tfor _, n := range nums {\n\t\t\tout <- n\n\t\t}\n\t\tclose(out)\n\t}()\n\treturn out\n}", "func gen(nums ...int) <-chan int {\n\tout := make(chan int)\n\tgo func() {\n\t\tfor _, n := range nums {\n\t\t\tout <- n\n\t\t}\n\t\tclose(out)\n\t}()\n\treturn out\n}", "func gen2() <-chan int {\n\tout := make(chan int)\n\tgo func() {\n\t\tfor i := 0; i < 10; i++ {\n\t\t\tfor j := 3; j < 13; j++ {\n\t\t\t\tout <- j\n\t\t\t}\n\t\t}\n\t\tclose(out)\n\t}()\n\treturn out\n}", "func NewGenerator(iterations int, concurrency int) *Generator {\n\treturn &Generator{\n\t\tconcurrency: uint32(concurrency),\n\t\tinputChan: make(chan bool),\n\t\tlock: &sync.Mutex{},\n\t\tmaxItems: uint32(iterations),\n\t\tpatterns: []Pattern{},\n\t}\n}", "func gen(done <-chan struct{}, nums ...int) <-chan int {\n\tout := make(chan int, len(nums))\n\tgo func() {\n\t\tfor _, n := range nums {\n\t\t\tselect {\n\t\t\tcase out <- n:\n\t\t\tcase 
<-done:\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t\tclose(out)\n\t}()\n\treturn out\n}", "func rand_generator() chan int {\n\tout:=make(chan int)\n\tgo func() {\n\t\tr:=rand.New(rand.NewSource(time.Now().UnixNano()))\n\t\tfor {\n\t\t\tout <- r.Intn(100)\n\t\t}\n\t}()\n\treturn out\n}", "func GenerateSerialIntsStream(ctx context.Context) <-chan interface{} {\n\ts := make(chan interface{})\n\tgo func() {\n\t\tdefer close(s)\n\t\tfor i := 0; ; i++ {\n\t\t\tselect {\n\t\t\tcase s <- i:\n\t\t\tcase <-ctx.Done():\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}()\n\n\treturn s\n}", "func NewNumberRangeGenerator(i interface{}) (outputChan chan stream.Record, controlChan chan ControlAction) {\n\tcfg := i.(*NumberRangeGeneratorConfig)\n\toutputChan = make(chan stream.Record, c.ChanSize)\n\tcontrolChan = make(chan ControlAction, 1)\n\tif cfg.IntervalSize == 0 {\n\t\tcfg.Log.Panic(cfg.Name, \" aborting due to interval size 0 which causes infinite loop\")\n\t}\n\tgo func() {\n\t\tif cfg.PanicHandlerFn != nil {\n\t\t\tdefer cfg.PanicHandlerFn()\n\t\t}\n\t\tcfg.Log.Info(cfg.Name, \" is running\")\n\t\tif cfg.WaitCounter != nil {\n\t\t\tcfg.WaitCounter.Add()\n\t\t\tdefer cfg.WaitCounter.Done()\n\t\t}\n\t\trowCount := int64(0)\n\t\tif cfg.StepWatcher != nil { // if we have been given a StepWatcher struct that can watch our rowCount and output channel length...\n\t\t\tcfg.StepWatcher.StartWatching(&rowCount, &outputChan)\n\t\t\tdefer cfg.StepWatcher.StopWatching()\n\n\t\t}\n\t\t// Iterate over the input records.\n\t\tsendRow := func(inputRec stream.Record, fromNum *float64, toNum *float64) (rowSentOK bool) {\n\t\t\t// Emit low date and hi date record.\n\t\t\trec := stream.NewRecord()\n\t\t\tif cfg.PassInputFieldsToOutput {\n\t\t\t\tinputRec.CopyTo(rec) // ensure the output record contains the input fields.\n\t\t\t}\n\t\t\tif cfg.OutputLeftPaddedNumZeros > 0 { // if we should output strings with leading zeros...\n\t\t\t\trec.SetData(cfg.OutputChanFieldName4LowNum, fmt.Sprintf(\"%0*.0f\", 
cfg.OutputLeftPaddedNumZeros, *fromNum))\n\t\t\t\trec.SetData(cfg.OutputChanFieldName4HighNum, fmt.Sprintf(\"%0*.0f\", cfg.OutputLeftPaddedNumZeros, *toNum))\n\t\t\t} else {\n\t\t\t\trec.SetData(cfg.OutputChanFieldName4LowNum, *fromNum)\n\t\t\t\trec.SetData(cfg.OutputChanFieldName4HighNum, *toNum)\n\t\t\t}\n\t\t\trowSentOK = safeSend(rec, outputChan, controlChan, sendNilControlResponse) // forward the record\n\t\t\tif rowSentOK {\n\t\t\t\tcfg.Log.Debug(cfg.Name, \" generated: lowNum=\", *fromNum, \"; highNum=\", *toNum)\n\t\t\t}\n\t\t\treturn\n\t\t}\n\t\tselect {\n\t\tcase controlAction := <-controlChan: // if we have been asked to shutdown...\n\t\t\tcontrolAction.ResponseChan <- nil // respond that we're done with a nil error.\n\t\t\tcfg.Log.Info(cfg.Name, \" shutdown\")\n\t\t\treturn\n\t\tcase rec, ok := <-cfg.InputChan: // for each FromDate record...\n\t\t\tif !ok { // if the input chan was closed...\n\t\t\t\tcfg.InputChan = nil // disable this case.\n\t\t\t} else {\n\t\t\t\tcfg.Log.Info(cfg.Name, \" splitting number range \", rec.GetData(cfg.InputChanFieldName4LowNum), \" to \", rec.GetData(cfg.InputChanFieldName4HighNum), \" using interval value \", cfg.IntervalSize)\n\t\t\t\t// Get the FromDate and ToDate as strings.\n\t\t\t\tfromNumStr := rec.GetDataAsStringPreserveTimeZone(cfg.Log, cfg.InputChanFieldName4LowNum)\n\t\t\t\ttoNumStr := rec.GetDataAsStringPreserveTimeZone(cfg.Log, cfg.InputChanFieldName4HighNum)\n\t\t\t\t// Convert to float(64)\n\t\t\t\tfromNum, err := strconv.ParseFloat(fromNumStr, 64)\n\t\t\t\tif err != nil {\n\t\t\t\t\tcfg.Log.Panic(cfg.Name, \" error parsing input field for low number: \", err)\n\t\t\t\t}\n\t\t\t\ttoNum, err := strconv.ParseFloat(toNumStr, 64)\n\t\t\t\tif err != nil {\n\t\t\t\t\tcfg.Log.Panic(cfg.Name, \" error parsing input field for high number: \", err)\n\t\t\t\t}\n\n\t\t\t\t// Richard 20191011 - old extract field values direct to float:\n\t\t\t\t// fromNum, err := 
getFloat64FromInterface(rec.GetData(cfg.InputChanFieldName4LowNum))\n\t\t\t\t// toNum, err := getFloat64FromInterface(rec.GetData(cfg.InputChanFieldName4HighNum))\n\n\t\t\t\t// Add the increment and emit rows until it is greater than the ToDate.\n\t\t\t\tfor { // while we are outputting less than ToDate...\n\t\t\t\t\tto := fromNum + cfg.IntervalSize\n\t\t\t\t\tif to > toNum { // if this increment overruns the high number...\n\t\t\t\t\t\tbreak // don't output a row!\n\t\t\t\t\t}\n\t\t\t\t\tif rowSentOK := sendRow(rec, &fromNum, &to); !rowSentOK {\n\t\t\t\t\t\treturn\n\t\t\t\t\t}\n\t\t\t\t\tatomic.AddInt64(&rowCount, 1) // increment the row count bearing in mind someone else is reporting on its values.\n\t\t\t\t\tfromNum = to // save FromDate with increment added.\n\t\t\t\t}\n\t\t\t\tif fromNum < toNum || atomic.AddInt64(&rowCount, 0) == 0 {\n\t\t\t\t\t// if we have a final portion of number to output a row for;\n\t\t\t\t\t// or we have not output a row (i.e. when min value = max value)...\n\t\t\t\t\tif rowSentOK := sendRow(rec, &fromNum, &toNum); !rowSentOK { // emit the final gap.\n\t\t\t\t\t\treturn\n\t\t\t\t\t}\n\t\t\t\t\tatomic.AddInt64(&rowCount, 1) // add a row count.\n\t\t\t\t}\n\t\t\t}\n\t\t\tif cfg.InputChan == nil { // if we processed all data...\n\t\t\t\tbreak // end gracefully.\n\t\t\t}\n\t\t}\n\t\t// Calculate output.\n\t\tclose(outputChan)\n\t\tcfg.Log.Info(cfg.Name, \" complete\")\n\t}()\n\treturn\n}", "func generate(ch chan<- int) {\n\tfor i := 2; i < maxPrimeNum+1; i++ {\n\t\tch <- i\n\t}\n}", "func Start(workerPoolSize int) {\n\tconsumer := Consumer{\n\t\tinputChan: make(chan int, workerPoolSize*10),\n\t\tjobsChan: make(chan int, workerPoolSize),\n\t}\n\n\t//generator := Generator{callbackFunc: consumer.callbackFunc}\n\tgenerator := FiniteGenerator{consumer}\n\tctx, cancelFunc := context.WithCancel(context.Background())\n\twg := &sync.WaitGroup{}\n\n\tgenerator.start()\n\t//go generator.start(ctx)\n\tgo consumer.startConsumer(ctx, 
cancelFunc)\n\n\twg.Add(workerPoolSize)\n\tfor i := 0; i < workerPoolSize; i++ {\n\t\tgo consumer.workerFunc(wg, i)\n\t}\n\n\t// chan for terminated signals\n\ttermChan := make(chan os.Signal, 1)\n\tsignal.Notify(termChan, syscall.SIGINT, syscall.SIGTERM, syscall.SIGTSTP)\n\n\tselect {\n\tcase <-termChan:\n\t\t// if terminated\n\t\tfmt.Println(\"=========Shutdown Signal=========\")\n\t\tcancelFunc()\n\tcase <-ctx.Done():\n\t\t// if normally exited\n\t\tfmt.Println(\"=========Normally exited==========\")\n\t}\n\t// Wait until all workers gracefully interupted\n\twg.Wait()\n\n\tfmt.Println(\"==============All workers done!========\")\n}", "func generateIntegers(ctx context.Context, integers chan int) {\n\tn := 0\n\tfor {\n\t\tselect {\n\t\tcase integers <- n:\n\t\t\tn++\n\t\tcase <-ctx.Done(): // React to Context being done. ctx.Done is a channel.\n\t\t\t// We can investigate ctx.Err() to check what has actually happened and act accordingly.\n\t\t\tif ctx.Err() == context.DeadlineExceeded {\n\t\t\t\tfmt.Println(\"Context timed out.\")\n\t\t\t} else if ctx.Err() == context.Canceled {\n\t\t\t\tfmt.Println(\"Context cancelled.\")\n\t\t\t}\n\n\t\t\t// Remember to close the channel to which we will no longer produce data.\n\t\t\tclose(integers) // Try commenting it out and see what happens.\n\t\t\treturn\n\t\tdefault:\n\t\t\tfmt.Println(\"Waiting for something to happen.\")\n\t\t\ttime.Sleep(100 * time.Millisecond) // Always sleep a while in the default case, otherwise you waste CPU cycles.\n\t\t}\n\t}\n}", "func Generator() <-chan int {\n\n\tout := make(chan int, 1)\n\n\tgo func() {\n\n\t\tout <- 2\n\t\tnum := 3\n\t\tcomposites := make(map[int][]int)\n\n\t\tfor {\n\t\t\tif _, ok := composites[num]; !ok {\n\t\t\t\tout <- num\n\t\t\t\tcomposites[num*num] = []int{num}\n\t\t\t} else {\n\t\t\t\tfor _, prime := range composites[num] {\n\t\t\t\t\tnext := num + prime\n\t\t\t\t\tfor next%2 == 0 {\n\t\t\t\t\t\tnext += prime\n\t\t\t\t\t}\n\t\t\t\t\tif _, ok := composites[next]; ok 
{\n\t\t\t\t\t\tcomposites[next] = append(composites[next], prime)\n\t\t\t\t\t} else {\n\t\t\t\t\t\tcomposites[next] = []int{prime}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tdelete(composites, num)\n\t\t\t}\n\t\t\tnum += 2\n\t\t}\n\t}()\n\n\treturn out\n\n}", "func NewGenerator(s []string) chan string {\n\tc := make(chan string)\n\tl := len(s)\n\tgo func() {\n\t\tfor {\n\t\t\tif l < 1 {\n\t\t\t\tc <- \"\"\n\t\t\t}\n\t\t\tfor _, ss := range s {\n\t\t\t\tc <- ss\n\t\t\t}\n\t\t}\n\t}()\n\treturn c\n}", "func generator(msg string) <-chan string { // return read-only channel\n\tc := make(chan string)\n\tgo func() {\n\t\tfor i := 0; ; i++ {\n\t\t\tc <- fmt.Sprintf(\"%s %d\", msg, i)\n\t\t\ttime.Sleep(5 * time.Millisecond)\n\t\t}\n\t}()\n\treturn c\n}", "func primeGenerator() chan int64 {\n\tc := make(chan int64)\n\n\tgo func() {\n\n\t\tc <- 2\n\n\t\tfor i := int64(3); ; i += 2 {\n\t\t\tif isPrime(i) {\n\t\t\t\tc <- i\n\t\t\t}\n\t\t}\n\t}()\n\n\treturn c\n}", "func startFibonacci(length int) <-chan int {\n\t// make buffered channel\n\tc := make(chan int, length)\n\t\n\t// run generation concurrently\n\tgo generateFibonacciSequence(c, length)\n\t\n\t// return channel\n\treturn c\n}", "func GenerateRandIntsStream(ctx context.Context) <-chan interface{} {\n\ts := make(chan interface{})\n\tgo func() {\n\t\tdefer close(s)\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase s <- rand.Int():\n\t\t\tcase <-ctx.Done():\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}()\n\n\treturn s\n}", "func GenerateConcurrent(bitsize int, stop chan struct{}) (<-chan *big.Int, <-chan error) {\n\tcount := runtime.GOMAXPROCS(0)\n\tints := make(chan *big.Int, count)\n\terrs := make(chan error, count)\n\n\t// In order to successfully close all goroutines below when the caller wants them to, they require\n\t// a channel that is close()d: just sending a struct{}{} would stop one but not all goroutines.\n\t// Instead of requiring the caller to close() the stop chan parameter we use our own chan for\n\t// this, so that we always stop all 
goroutines independent of whether the caller close()s stop\n\t// or sends a struct{}{} to it.\n\tstopped := make(chan struct{})\n\tgo func() {\n\t\tselect {\n\t\tcase <-stop:\n\t\t\tclose(stopped)\n\t\tcase <-stopped: // stopped can also be closed by a goroutine that encountered an error\n\t\t}\n\t}()\n\n\t// Start safe prime generation goroutines\n\tfor i := 0; i < count; i++ {\n\t\tgo func() {\n\t\t\tfor {\n\t\t\t\t// Pass stopped chan along; if closed, Generate() returns nil, nil\n\t\t\t\tx, err := Generate(bitsize, stopped)\n\t\t\t\tif err != nil {\n\t\t\t\t\terrs <- err\n\t\t\t\t\tclose(stopped)\n\t\t\t\t\treturn\n\t\t\t\t}\n\n\t\t\t\t// Only send result and continue generating if we have not been told to stop\n\t\t\t\tselect {\n\t\t\t\tcase <-stopped:\n\t\t\t\t\treturn\n\t\t\t\tdefault:\n\t\t\t\t\tints <- x\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t}\n\t\t}()\n\t}\n\n\treturn ints, errs\n}", "func CreateCounter(cxt context.Context) chan int {\n\tdestination := make(chan int)\n\n\tgo func() { //! go routine\n\t\tdefer close(destination)\n\t\tcounter := 1\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase <- cxt.Done():\n\t\t\t\treturn\n\t\t\tdefault:\n\t\t\t\tdestination <- counter\n\t\t\t\tcounter ++\n\t\t\t\ttime.Sleep(1 * time.Second) //! 
simulasi slow\n\t\t\t}\n\t\t}\n\t}()\n\n\treturn destination\n}", "func NewGenerator(options *Options) *Generator {\n\tvar g = &Generator{}\n\t// first we validate the flags\n\tif err := options.Validate(); err != nil {\n\t\tpanic(err)\n\t}\n\tg.options = options\n\t// we initiate the values on the generator\n\tg.init()\n\treturn g\n}", "func main() {\n\tch := make(chan int) // Create a new channel.\n\tgo Generate(ch) // Launch Generate goroutine.\n\tfor i := 0; i < 10; i++ {\n\t\tprime := <-ch\n\t\tfmt.Println(prime)\n\t\tch1 := make(chan int)\n\t\tgo Filter(ch, ch1, prime)\n\t\tch = ch1\n\t}\n}", "func generateGoroutines(done chan bool, numGoroutines int) {\n\tfor i := 0; i < numGoroutines; i++ {\n\t\tgo func(done chan bool) {\n\t\t\t<-done\n\t\t}(done)\n\t}\n}", "func genpipe2(nums ...int) <-chan int {\n\tout := make(chan int, len(nums))\n\tfor _, n := range nums {\n\t\tout <- n\n\t}\n\tclose(out)\n\treturn out\n}", "func genStreams() <-chan <-chan int {\n\tout := make(chan (<-chan int))\n\tgo func() {\n\t\tdefer close(out)\n\t\tfor i := 1; i <= 10; i++ {\n\t\t\tstream := make(chan int, 3)\n\t\t\tstream <- i\n\t\t\tstream <- i + 1\n\t\t\tstream <- i + 2\n\t\t\tclose(stream)\n\t\t\tout <- stream\n\t\t}\n\t}()\n\treturn out\n}", "func mux_rand_generator() chan int {\n\t// create two rand generators\n\tgenerator_1:=rand_generator()\n\tgenerator_2:=rand_generator()\n\n\t// create a mux channel\n\tout:=make(chan int)\n\t// read from generator_1 and integrate to channel out\n\tgo func(){\n\t\tfor{\n\t\t\tfmt.Println(\"read from generator 1\")\n\t\t\tout<-<-generator_1\n\t\t}\n\t}()\n\t// read from generator_2 and integrate to channel out\n\tgo func(){\n\t\tfor{\n\t\t\tfmt.Println(\"read from generator 2\")\n\t\t\tout<-<-generator_2\n\t\t}\n\t}()\n\treturn out\n}", "func makeLoopingOutputDevice(loop chan<- int, output chan<- int) func(int) {\n\treturn func(n int) {\n\t\tloop <- n\n\t\toutput <- n\n\t}\n}", "func gen_points(wg *sync.WaitGroup) (<-chan point) 
{\n\t//counter := 0\n\tout := make(chan point)\n\tgo func(wg *sync.WaitGroup) {\n\t\tfor y := -maxY/2; y < maxY; y++ {\n\t\t\tfor x := -maxX/2; x < maxX; x++ {\n\t\t\t\txx, yy := scale_pixel(x, y)\n\t\t\t\t//fmt.Println(counter)\n\t\t\t\t//counter++\n\t\t\t\tout <- point{xf: xx, yf: yy, xi: x, yi: y}\t\n\t\t\t}//end for\n\t\t}//end for\n\t\tclose(out)\t\n\t}(wg)//end go func\n\twg.Done()\t//This thread is finished\n\treturn out\n}", "func NewGenerator() Generator {\n\treturn Generator{\n\t\tcurrentState: rand.Intn(30),\n\t}\n}", "func mainSieve() {\n\tch := make(chan int) // Create a new channel.\n\tgo Generate(ch) // Launch Generate goroutine.\n\tfor i := 0; i < 10; i++ {\n\t\tprime := <-ch\n\t\tfmt.Println(prime)\n\t\tch1 := make(chan int)\n\t\tgo Filter(ch, ch1, prime)\n\t\tch = ch1\n\t}\n}", "func NewGenerator(hash hash.Hash, f NewCipher) (*Generator, error) {\n\tg := &Generator{\n\t\tkey: make([]byte, keySize, keySize),\n\t\thash: hash,\n\t\tnewCipher: f,\n\t}\n\n\tif err := g.updateCipher(); err != nil {\n\t\treturn nil, err\n\t}\n\n\tctr, err := counter.New(uint(g.cipher.BlockSize()) * byteSize)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tg.counter = ctr\n\n\tg.buffer = make([]byte, len(g.counter), len(g.counter))\n\n\treturn g, nil\n}", "func generateInputMessages(wg *sync.WaitGroup, termChan chan bool) {\n\tdefer wg.Done()\n\n\tdoTerm := false\n\tticker := time.NewTicker(100 * time.Millisecond)\n\n\tconfig := &kafka.ConfigMap{\n\t\t\"client.id\": \"generator\",\n\t\t\"bootstrap.servers\": bootstrapServers,\n\t\t\"enable.idempotence\": true,\n\t\t\"go.logs.channel.enable\": true,\n\t\t\"go.logs.channel\": logsChan,\n\t}\n\n\tproducer, err := kafka.NewProducer(config)\n\tif err != nil {\n\t\tfatal(err)\n\t}\n\n\ttoppar := kafka.TopicPartition{Topic: &inputTopic, Partition: kafka.PartitionAny}\n\n\taddLog(fmt.Sprintf(\"Generator: producing events to topic %s\", inputTopic))\n\n\tfor !doTerm {\n\t\tselect {\n\t\tcase <-ticker.C:\n\t\t\t// Randomize the 
rate of cars by skipping 20% of ticks.\n\t\t\tif rand.Intn(5) == 0 {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tsendIngressCarEvent(producer, toppar)\n\n\t\tcase e := <-producer.Events():\n\t\t\t// Handle delivery reports\n\t\t\tm, ok := e.(*kafka.Message)\n\t\t\tif !ok {\n\t\t\t\taddLog(fmt.Sprintf(\"Generator: Ignoring producer event %v\", e))\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tif m.TopicPartition.Error != nil {\n\t\t\t\taddLog(fmt.Sprintf(\"Generator: Message delivery failed: %v: ignoring\", m.TopicPartition))\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\tcase <-termChan:\n\t\t\tdoTerm = true\n\t\t}\n\n\t}\n\n\taddLog(fmt.Sprintf(\"Generator: shutting down\"))\n\tproducer.Close()\n}", "func NewGenerator(length int, elements []int) *Generator {\n\t// Calculate number of possible outcomes.\n\tt := int(math.Pow(float64(len(elements)), float64(length)))\n\n\treturn &Generator{\n\t\telements: elements,\n\t\ttotal: t,\n\t\tposition: make([]int, length),\n\t}\n}", "func SourceRandom(length int) <-chan int {\n\tout := make(chan int, 80000000)\n\tgo func() {\n\t\tfor i := 0; i < length; i++ {\n\t\t\tout <- rand.Int()\n\t\t}\n\t\tclose(out)\n\t}()\n\treturn out\n}", "func NewDateRangeGenerator(i interface{}) (outputChan chan stream.Record, controlChan chan ControlAction) {\n\tcfg := i.(*DateRangeGeneratorConfig)\n\toutputChan = make(chan stream.Record, c.ChanSize)\n\tcontrolChan = make(chan ControlAction, 1)\n\tif cfg.IntervalSizeSeconds == 0 {\n\t\tcfg.Log.Panic(cfg.Name, \" aborting due to interval size 0 which causes infinite loop\")\n\t}\n\tgo func() {\n\t\tif cfg.PanicHandlerFn != nil {\n\t\t\tdefer cfg.PanicHandlerFn()\n\t\t}\n\t\tcfg.Log.Info(cfg.Name, \" is running\")\n\t\tif cfg.WaitCounter != nil {\n\t\t\tcfg.WaitCounter.Add()\n\t\t\tdefer cfg.WaitCounter.Done()\n\t\t}\n\t\trowCount := int64(0)\n\t\tif cfg.StepWatcher != nil { // if we have been given a StepWatcher struct that can watch our rowCount and output channel length...\n\t\t\tcfg.StepWatcher.StartWatching(&rowCount, 
&outputChan)\n\t\t\tdefer cfg.StepWatcher.StopWatching()\n\n\t\t}\n\t\t// Build the ToDate.\n\t\tvar toDate time.Time\n\t\tvar err error\n\t\tif cfg.InputChanFieldName4ToDate == \"\" && cfg.ToDateRFC3339orNow == \"\" { // if both possible toDates are missing...\n\t\t\tcfg.Log.Panic(cfg.Name, \" set one of InputChanFieldName4ToDate or a literal in ToDateRFC3339orNow\")\n\t\t}\n\t\tif cfg.InputChanFieldName4ToDate == \"\" { // if we have NOT been given a field name to fetch the toDate from...\n\t\t\t// Now we expect a literal in ToDateRFC3339orNow.\n\t\t\tif regexp.MustCompile(`(?i)(now)`).MatchString(cfg.ToDateRFC3339orNow) { // if we need to fetch Now for the ToDate...\n\t\t\t\ttoDate = time.Now().Truncate(time.Second) // now to the nearest second\n\t\t\t} else { // else we need to parse the ToDate received...\n\t\t\t\ttoDate, err = time.Parse(time.RFC3339, cfg.ToDateRFC3339orNow)\n\t\t\t\tif err != nil {\n\t\t\t\t\tcfg.Log.Panic(err)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\tif cfg.UseUTC {\n\t\t\ttoDate = toDate.UTC()\n\t\t}\n\t\t// Iterate over the input records.\n\t\tsendRow := func(inputRec stream.Record, fromDate *time.Time, toDate *time.Time) (rowSentOK bool) {\n\t\t\t// Emit low date and hi date record.\n\t\t\trec := stream.NewRecord()\n\t\t\tif cfg.PassInputFieldsToOutput {\n\t\t\t\tinputRec.CopyTo(rec) // ensure the output record contains the input fields.\n\t\t\t}\n\t\t\trec.SetData(cfg.OutputChanFieldName4LowDate, *fromDate)\n\t\t\trec.SetData(cfg.OutputChanFieldName4HiDate, *toDate)\n\t\t\trowSentOK = safeSend(rec, outputChan, controlChan, sendNilControlResponse) // forward the record\n\t\t\tif rowSentOK {\n\t\t\t\tcfg.Log.Debug(cfg.Name, \" generated: lowDate=\", *fromDate, \"; highDate=\", *toDate)\n\t\t\t}\n\t\t\treturn\n\t\t}\n\t\tselect {\n\t\tcase controlAction := <-controlChan: // if we have been asked to shutdown...\n\t\t\tcontrolAction.ResponseChan <- nil // respond that we're done with a nil error.\n\t\t\tcfg.Log.Info(cfg.Name, \" 
shutdown\")\n\t\t\treturn\n\t\tcase rec, ok := <-cfg.InputChan: // for each FromDate record...\n\t\t\tif !ok { // if the input chan was closed...\n\t\t\t\tcfg.InputChan = nil // disable this case.\n\t\t\t} else {\n\t\t\t\t// Get the toDate.\n\t\t\t\tif cfg.InputChanFieldName4ToDate != \"\" {\n\t\t\t\t\tvar castOK bool\n\t\t\t\t\ttoDate, castOK = rec.GetData(cfg.InputChanFieldName4ToDate).(time.Time)\n\t\t\t\t\tif !castOK {\n\t\t\t\t\t\tcfg.Log.Panic(cfg.Name, \" unexpected datatype for input field name \", cfg.InputChanFieldName4ToDate, \", expected time.Time\")\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tcfg.Log.Info(cfg.Name, \" splitting date range \", rec.GetData(cfg.InputChanFieldName4FromDate), \" to \", toDate, \" using interval \", cfg.IntervalSizeSeconds, \" seconds\")\n\t\t\t\t// Get the FromDate.\n\t\t\t\tfromDate, err := getTimeFromInterface(rec.GetData(cfg.InputChanFieldName4FromDate))\n\t\t\t\tif err != nil {\n\t\t\t\t\tcfg.Log.Panic(cfg.Name, \" error - \", err)\n\t\t\t\t}\n\t\t\t\t// Add the increment and emit rows until it is greater than the ToDate.\n\t\t\t\tfor { // while we are outputting less than ToDate...\n\t\t\t\t\tto := fromDate.Add(time.Second * time.Duration(cfg.IntervalSizeSeconds))\n\t\t\t\t\tif to.After(toDate) { // if this increment overruns the max date...\n\t\t\t\t\t\tbreak // don't output a row!\n\t\t\t\t\t}\n\t\t\t\t\tif rowSentOK := sendRow(rec, &fromDate, &to); !rowSentOK {\n\t\t\t\t\t\treturn\n\t\t\t\t\t}\n\t\t\t\t\tatomic.AddInt64(&rowCount, 1) // increment the row count bearing in mind someone else is reporting on its values.\n\t\t\t\t\tfromDate = to // save FromDate with increment added.\n\t\t\t\t}\n\t\t\t\tif fromDate.Before(toDate) || atomic.AddInt64(&rowCount, 0) == 0 {\n\t\t\t\t\t// if we have a final portion of time time to output a row for;\n\t\t\t\t\t// or we have not output a row (i.e. 
when min value = max value)...\n\t\t\t\t\tif rowSentOK := sendRow(rec, &fromDate, &toDate); !rowSentOK { // emit the final gap.\n\t\t\t\t\t\treturn\n\t\t\t\t\t}\n\t\t\t\t\tatomic.AddInt64(&rowCount, 1) // add a row count.\n\t\t\t\t}\n\t\t\t}\n\t\t\tif cfg.InputChan == nil { // if we processed all data...\n\t\t\t\tbreak // end gracefully.\n\t\t\t}\n\t\t}\n\t\t// Calculate output.\n\t\tclose(outputChan)\n\t\tcfg.Log.Info(cfg.Name, \" complete\")\n\t}()\n\treturn\n}", "func main() {\n // a channel is created with the make() helper function\n ch := make(chan int)\n cj := make(chan int)\n xt := make(chan bool) // a semaphore channel\n var wg sync.WaitGroup\n\n go func() {\n defer close(ch)\n for i := 0; i < 10; i++ {\n /*\n putting a value in a channel blocks execution until it is removed.\n To put a value IN, use the arrow pointing towards the channel.\n */\n ch <- i\n }\n }()\n\n /*\n fan-out pattern: single generator, multiple receivers\n using syng.WaitGroup to orchestrate processes\n */\n\n wg.Add(1)\n go func() {\n defer wg.Done()\n for v := range ch {\n // To take a value OUT, use the arrow with its back pointint to the channel.\n fmt.Println(\"foo:\", v)\n }\n }()\n\n wg.Add(1)\n go func() {\n defer wg.Done()\n for v := range ch {\n fmt.Println(\"bar:\", v)\n }\n }()\n\n /*\n fan-in pattern: multiple generator, single receiver.\n The channel must be closed after the generators are done.\n Using a semaphore channel to synchronize processes\n */\n go func() { // first generator: even numbers\n for i := 0; i < 10; i++ {\n if i%2 == 0 {\n cj <- i // blocks execution\n }\n }\n xt <- true // blocks execution\n }()\n\n go func() { // second generator: odd numbers\n for i := 0; i < 10; i++ {\n if i%2 != 0 {\n cj <- i // blocks execution\n }\n }\n xt <- true // blocks execution\n }()\n\n go func() {\n // this go function will terminate once the generators are done and close the channels, without blocking the execution of the receiver.\n <-xt // discards value from 
channel\n <-xt\n close(xt)\n close(cj)\n }()\n\n // receiver\n // range loops until the channel is closed (blocks execution)\n for v := range cj {\n fmt.Println(\"foobar:\", v)\n }\n\n // wait for all go routines to finish\n wg.Wait()\n}", "func startPipelineFunction(numbers chan<- int) {\n\tfor i := 1; i <= 10; i++ {\n\t\tnumbers <- i\n\t}\n\tclose(numbers)\n}", "func NewGenerator(opts Options) Generator {\n\tg := &generator{\n\t\topts: opts,\n\t\tlogger: opts.InstrumentOptions().Logger(),\n\t\tr: rand.New(opts.RandSource()),\n\t\tnumPoints: unifStats{\n\t\t\tmin: opts.MinNumPointsPerID(),\n\t\t\tmax: opts.MaxNumPointsPerID(),\n\t\t},\n\t\tidLength: normStats{\n\t\t\tmean: opts.IDLengthMean(),\n\t\t\tstddev: opts.IDLengthStddev(),\n\t\t},\n\t}\n\n\tfor i := 0; i < opts.NumIDs(); i++ {\n\t\tidLen := g.idLength.sample(g.r)\n\t\tg.ids = append(g.ids, randStringBytesMaskImprSrc(idLen, opts.RandSource()))\n\t}\n\treturn g\n}", "func NewGenerator(opts Options) Generator {\n\tg := &generator{\n\t\topts: opts,\n\t\tlogger: opts.InstrumentOptions().Logger(),\n\t\tr: rand.New(opts.RandSource()),\n\t\tnumPoints: unifStats{\n\t\t\tmin: opts.MinNumPointsPerID(),\n\t\t\tmax: opts.MaxNumPointsPerID(),\n\t\t},\n\t\tidLength: normStats{\n\t\t\tmean: opts.IDLengthMean(),\n\t\t\tstddev: opts.IDLengthStddev(),\n\t\t},\n\t}\n\n\tfor i := 0; i < opts.NumIDs(); i++ {\n\t\tidLen := g.idLength.sample(g.r)\n\t\tg.ids = append(g.ids, randStringBytesMaskImprSrc(idLen, opts.RandSource()))\n\t}\n\treturn g\n}", "func input_job(size int, query_chan chan<- []byte, scope int){\n var random_value int = -1\n for i := 1; i <= size; i++ {\n //get random num between 1 and size\n random_value = rand.Intn(scope) + 1\n //convert the int into []byte\n query_chan <- []byte(strconv.Itoa(random_value))\n fmt.Printf(\"输入随机数: %d\\n\", random_value)\n }\n //the last finished signal \"0\"\n query_chan <- []byte(strconv.Itoa(0))\n fmt.Println(\"随机数输入完成\")\n}", "func newGenerator(h hash.Hash, seed []byte) 
generator {\n\tif h == nil {\n\t\th = sha256.New()\n\t}\n\tb := h.Size()\n\tg := generator{\n\t\tkey: make([]byte, b),\n\t\tcounter: make([]byte, 16),\n\t\tmaxBytesPerRequest: (1 << 15) * b,\n\t\ttemp: make([]byte, b),\n\t\th: h,\n\t}\n\tif len(seed) != 0 {\n\t\t_, _ = g.Write(seed)\n\t}\n\treturn g\n}", "func TestGenerator_NewID(t *testing.T) {\n\tt.Run(\"It generates IDs of the proper length\", func(t *testing.T) {\n\t\twant := 14\n\n\t\tgen, _ := id.NewGenerator(id.WithLength(want))\n\t\tid, err := gen.NewID()\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"%v\", err)\n\t\t}\n\n\t\tif got := len(id); got != want {\n\t\t\tt.Fatalf(\"got: %d want: %d\", got, want)\n\t\t}\n\t})\n\n\tt.Run(\"It generates only unique ids\", func(t *testing.T) {\n\t\twant := 10000\n\t\tres := make(chan string, 10000)\n\n\t\tvar wg sync.WaitGroup\n\t\twg.Add(10000)\n\t\tgo func() {\n\t\t\twg.Wait()\n\t\t\tclose(res)\n\t\t}()\n\n\t\t// 100 goroutines each generating 100 id's for a total of 10000\n\t\tgen, _ := id.NewGenerator()\n\t\tvar i int\n\t\tfor i < 100 {\n\t\t\tgo func() {\n\t\t\t\tvar n int\n\t\t\t\tfor n < 100 {\n\t\t\t\t\tid, err := gen.NewID()\n\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\tt.Fatalf(\"%v\", err)\n\t\t\t\t\t}\n\n\t\t\t\t\tres <- id\n\t\t\t\t\tn++\n\t\t\t\t\twg.Done()\n\t\t\t\t}\n\t\t\t}()\n\t\t\ti++\n\t\t}\n\n\t\tm := map[string]struct{}{}\n\t\tfor id := range res {\n\t\t\tm[id] = struct{}{}\n\t\t}\n\n\t\tif got := len(m); got != want {\n\t\t\tt.Fatalf(\"got: %d want: %d\", got, want)\n\t\t}\n\t})\n\n}", "func main(){\n\trand.Seed(time.Now().UnixNano())\n\n\tlog.Println(\"START\")\n\tstartTime := time.Now()\n\tc := make(chan int, 5)\n\tfor i:= 0; i < cap(c); i++ {\n\t\tgo source(c)\n\t}\n\trnd := <-c\n\tfmt.Println(time.Since(startTime))\n\tfmt.Println(\"result\", rnd)\n\ttime.Sleep(20 * time.Second)\n\tlog.Println(\"END\")\n}", "func NewGenerator(seed int64) *Generator {\n\tg := &Generator{\n\t\tseed: seed,\n\t\tnoise: opensimplex.New(seed),\n\t}\n\n\treturn g\n}", "func 
generator() {\r\n\r\n flagGo = true\r\n i := 10\r\n for i>1 { //Infinite loop\r\n var sample [samp_len] int //Array to save vaules\r\n sample = getValues() //Getting sample values\r\n writeFile(sample) //Writing samples to file\r\n time.Sleep(time.Second) //Waiting one second\r\n }\r\n}", "func Run(upTo int, printPrime bool) {\n\tsource := make(chan int) // Create a new channel.\n\tgo generate(source) // Launch Generate goroutine.\n\tfor i := 0; i < upTo; i++ {\n\t\tprime := <-source\n\t\tif printPrime {\n\t\t\tfmt.Println(i+1, prime)\n\t\t}\n\t\tdestination := make(chan int)\n\t\tgo filter(source, destination, prime) // launch filter in its own gorouting\n\t\tsource = destination\n\t}\n}", "func GetIntChan()<-chan int{\r\n\tnum:=5\r\n\tch :=make(chan int,num)\r\n\tfor i:=0;i<num;i++{\r\n\t\tch <- i\r\n\t}\r\n\tclose(ch)\r\n\treturn ch\r\n}", "func NewGenerator(i *GeneratorInput) (*Generator, error) {\n\tif i == nil {\n\t\ti = new(GeneratorInput)\n\t}\n\n\tg := &Generator{\n\t\tlowerLetters: i.LowerLetters,\n\t\tupperLetters: i.UpperLetters,\n\t\tdigits: i.Digits,\n\t\tsymbols: i.Symbols,\n\t}\n\n\tif g.lowerLetters == \"\" {\n\t\tg.lowerLetters = LowerLetters\n\t}\n\n\tif g.upperLetters == \"\" {\n\t\tg.upperLetters = UpperLetters\n\t}\n\n\tif g.digits == \"\" {\n\t\tg.digits = Digits\n\t}\n\n\tif g.symbols == \"\" {\n\t\tg.symbols = Symbols\n\t}\n\n\treturn g, nil\n}", "func GenerateRepeatStream(ctx context.Context, list ...interface{}) <-chan interface{} {\n\ts := make(chan interface{})\n\tgo func() {\n\t\tdefer close(s)\n\t\tfor {\n\t\t\tfor i := 0; i < len(list); i++ {\n\t\t\t\tselect {\n\t\t\t\tcase s <- list[i]:\n\t\t\t\tcase <-ctx.Done():\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}()\n\n\treturn s\n}", "func Generate(ctx context.Context, fn MsgGenFunc, msgPeriod time.Duration) <-chan *osc.Message {\n\tch := make(chan *osc.Message)\n\tgo func() {\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase <-ctx.Done():\n\t\t\t\tclose(ch)\n\t\t\t\treturn\n\t\t\tcase 
<-time.After(msgPeriod):\n\t\t\t\tch <- fn()\n\t\t\t}\n\t\t}\n\t}()\n\treturn ch\n}", "func FibonacciGen() chan int {\n\tc := make(chan int)\n\tgo func() {\n\t\tfor i, j := 0, 1; ; i, j = i+j, i {\n\t\t\tc <- i\n\t\t}\n\t}()\n\treturn c\n}", "func generator4(msg string) <-chan string {\n\tc := make(chan string)\n\tgo func() {\n\t\tfor i := 0; i < 5; i++ {\n\t\t\tc <- fmt.Sprintf(\"calling %s %d\", msg, i)\n\t\t\ttime.Sleep(time.Duration(rand.Intn(500)) * time.Millisecond)\n\t\t}\n\t}()\n\treturn c\n}", "func gen(accounts ...string) <-chan string {\n\tout := make(chan string)\n\tgo func() {\n\t\tfor _, account := range accounts {\n\t\t\tout <- account\n\t\t}\n\t\tclose(out)\n\t}()\n\treturn out\n}", "func NewGenerator() GeneratorFunc {\n\t// Generate security rules\n\t// * cannot be a repeated digit\n\t// * cannot be a suite of following digits (ascending and descending)\n\t// * TODO cannot be in the restricted codes\n\t// So basicaly i randomly pick one number as the first digit and next digit\n\t// cannot be the same, the previous or the next\n\tfilter := compose(\n\t\tnocurrent,\n\t\tnoprevious,\n\t\tnonext,\n\t)\n\treturn func() string {\n\t\tvar charset = []int{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}\n\t\tpin := make([]int, 4)\n\t\tvar current = charset[random.Intn(len(charset))]\n\t\tpin[0] = current\n\n\t\tfor i := 0; i < 3; i++ {\n\t\t\t// TODO i do not like that, need another way to do that\n\t\t\tcharset = []int{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}\n\t\t\tcurrent = in(filter(charset, current))\n\t\t\tpin[i+1] = current\n\t\t}\n\t\treturn strings.Trim(strings.Join(strings.Split(fmt.Sprint(pin), \" \"), \"\"), \"[]\")\n\t}\n}", "func EvenGenerator(done chan struct{}) <-chan int {\n\tout := make(chan int)\n\n\tgo func() {\n\t\tdefer close(out)\n\n\t\tfor i, j := 2, 8; ; i, j = j, 4*j+i {\n\t\t\tselect {\n\t\t\tcase out <- i:\n\t\t\tcase <-done:\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}()\n\n\treturn out\n}", "func GenerateServiceRequest() <-chan Sig { // returns a receive only 
channel of string\n\tchannel := make(chan Sig)\n\n\tgo func() {\n\t\ttime.Sleep(2 * time.Second) //wait a while before starting\n\t\tIdentity := 0\n\t\t//msisdn := rand.Int()\n\n\t\t// Send CCR every x*rand seconds\n\t\tfor {\n\t\t\t// create random fake MSISDN\n\t\t\tmsisdn := random(1000000, 4999999)\n\t\t\tmsisdn = msisdn + 46702000000\n\n\t\t\tIdentity = Identity + 1\n\n\t\t\tinfoElem := Sig{\n\t\t\t\tCode: \"CCR\",\n\t\t\t\tmsisdn: msisdn,\n\t\t\t\tIdentity: Identity,\n\t\t\t}\n\n\t\t\tsleeptime := time.Second * time.Duration(rand.Intn(10))\n\t\t\tlog.Printf(\"Time until next CCR: %s\", sleeptime)\n\t\t\ttime.Sleep(sleeptime)\n\t\t\tchannel <- infoElem\n\t\t}\n\t}()\n\treturn channel\n}", "func (r *Runner) generate(output chan Result, wg *sizedwaitgroup.SizedWaitGroup) {\n\tif r.options.TargetUrl != \"\" {\n\t\tlog.Info(fmt.Sprintf(\"single target: %s\", r.options.TargetUrl))\n\t\twg.Add()\n\t\tgo r.process(output, r.options.TargetUrl, wg)\n\t} else {\n\t\turls, err := ReadFile(r.options.UrlFile)\n\t\tif err != nil {\n\t\t\tlog.Fatal(\"Cann't read url file\")\n\t\t} else {\n\t\t\tlog.Info(fmt.Sprintf(\"Read %d's url totaly\", len(urls)))\n\t\t\tfor _, u := range urls {\n\t\t\t\twg.Add()\n\t\t\t\tgo r.process(output, u, wg)\n\t\t\t}\n\t\t}\n\t}\n}", "func createSignalGenerator() Unit {\n\n\t/*\n\t * Create effects unit.\n\t */\n\tu := signalGenerator{\n\t\tunitStruct: unitStruct{\n\t\t\tunitType: UNIT_SIGNALGENERATOR,\n\t\t\tparams: []Parameter{\n\t\t\t\tParameter{\n\t\t\t\t\tName: \"input_amplitude\",\n\t\t\t\t\tType: PARAMETER_TYPE_NUMERIC,\n\t\t\t\t\tPhysicalUnit: \"%\",\n\t\t\t\t\tMinimum: 0,\n\t\t\t\t\tMaximum: 100,\n\t\t\t\t\tNumericValue: 100,\n\t\t\t\t\tDiscreteValueIndex: -1,\n\t\t\t\t\tDiscreteValues: nil,\n\t\t\t\t},\n\t\t\t\tParameter{\n\t\t\t\t\tName: \"input_gain\",\n\t\t\t\t\tType: PARAMETER_TYPE_NUMERIC,\n\t\t\t\t\tPhysicalUnit: \"dB\",\n\t\t\t\t\tMinimum: -60,\n\t\t\t\t\tMaximum: 0,\n\t\t\t\t\tNumericValue: 0,\n\t\t\t\t\tDiscreteValueIndex: 
-1,\n\t\t\t\t\tDiscreteValues: nil,\n\t\t\t\t},\n\t\t\t\tParameter{\n\t\t\t\t\tName: \"signal_type\",\n\t\t\t\t\tType: PARAMETER_TYPE_DISCRETE,\n\t\t\t\t\tPhysicalUnit: \"\",\n\t\t\t\t\tMinimum: -1,\n\t\t\t\t\tMaximum: -1,\n\t\t\t\t\tNumericValue: -1,\n\t\t\t\t\tDiscreteValueIndex: 0,\n\t\t\t\t\tDiscreteValues: []string{\n\t\t\t\t\t\t\"sine\",\n\t\t\t\t\t\t\"triangle\",\n\t\t\t\t\t\t\"square\",\n\t\t\t\t\t\t\"sawtooth\",\n\t\t\t\t\t\t\"noise\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tParameter{\n\t\t\t\t\tName: \"signal_frequency\",\n\t\t\t\t\tType: PARAMETER_TYPE_NUMERIC,\n\t\t\t\t\tPhysicalUnit: \"Hz\",\n\t\t\t\t\tMinimum: 1,\n\t\t\t\t\tMaximum: 20000,\n\t\t\t\t\tNumericValue: 440,\n\t\t\t\t\tDiscreteValueIndex: -1,\n\t\t\t\t\tDiscreteValues: nil,\n\t\t\t\t},\n\t\t\t\tParameter{\n\t\t\t\t\tName: \"signal_amplitude\",\n\t\t\t\t\tType: PARAMETER_TYPE_NUMERIC,\n\t\t\t\t\tPhysicalUnit: \"%\",\n\t\t\t\t\tMinimum: 0,\n\t\t\t\t\tMaximum: 100,\n\t\t\t\t\tNumericValue: 100,\n\t\t\t\t\tDiscreteValueIndex: -1,\n\t\t\t\t\tDiscreteValues: nil,\n\t\t\t\t},\n\t\t\t\tParameter{\n\t\t\t\t\tName: \"signal_gain\",\n\t\t\t\t\tType: PARAMETER_TYPE_NUMERIC,\n\t\t\t\t\tPhysicalUnit: \"dB\",\n\t\t\t\t\tMinimum: -60,\n\t\t\t\t\tMaximum: 0,\n\t\t\t\t\tNumericValue: 0,\n\t\t\t\t\tDiscreteValueIndex: -1,\n\t\t\t\t\tDiscreteValues: nil,\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\n\treturn &u\n}", "func NewGeneratorBTC() Generator { return generatorBTC{} }", "func (p *password) Gen() <-chan []byte {\n\tch := make(chan []byte, p.options.Generate)\n\tgo p.gen(ch)\n\treturn ch\n}", "func main() {\n\tgo produce()\n\tgo consume()\n\t<-done\n}", "func SourceData(data ...int) <-chan int {\n\tfmt.Println(\"num:\", len(data))\n\tch := make(chan int, 80000000)\n\tgo func() {\n\t\tfor _, v := range data {\n\t\t\tch <- v\n\t\t}\n\t\tclose(ch)\n\t}()\n\treturn ch\n}", "func NewClockGenerator(clock Clicker, d time.Duration, bufferSize ...uint16) (clockGenerator *ClockGenerator) {\n\tvar (\n\t\tnextAt = 
clock.Now().Truncate(d)\n\t)\n\tclockGenerator = &ClockGenerator{active: true}\n\tif len(bufferSize) < 1 {\n\t\tclockGenerator.ch = make(chan time.Time)\n\t} else {\n\t\tclockGenerator.ch = make(chan time.Time, bufferSize[0])\n\t}\n\tgo func() {\n\t\tfor {\n\t\t\tvar now = clock.Now()\n\t\t\tif nextAt.After(now) {\n\t\t\t\tvar sleepDuration = nextAt.Sub(now)\n\t\t\t\ttime.Sleep(sleepDuration)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tvar lastClockedAt = nextAt\n\t\t\tnextAt = nextAt.Add(d)\n\t\t\tif !clockGenerator.active {\n\t\t\t\tclose(clockGenerator.ch)\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tclockGenerator.ch <- lastClockedAt\n\t\t}\n\t}()\n\treturn\n}", "func NewGenerator() *Generator {\n\treturn &Generator{\n\t\ttpls: []string{},\n\t\tfuncs: map[string]interface{}{},\n\t\tdata: map[string]interface{}{},\n\t}\n}", "func NewGenerator(opts ...Option) *Generator {\n\tg := &Generator{}\n\n\tfor _, opt := range opts {\n\t\topt.apply(g)\n\t}\n\n\t// Default time source\n\tif g.clock == nil {\n\t\tg.clock = &systemClock{}\n\t}\n\n\t// Default entropy source\n\tif g.entropy == nil {\n\t\tg.entropy = ulid.Monotonic(rand.New(rand.NewSource(g.clock.Now().UnixNano())), 0)\n\t}\n\n\treturn g\n}", "func produceRandomNumber(c chan int) {\n\tfmt.Printf(\"About to send a random number to the channel.\\n\\n\")\n\t// Let's send something to the channel. The arrow operator shows the direction of data flow.\n\t// The call blocks until the receiver part is ready in case of non-buffered channels.\n\t// For buffered channels, they block only when buffer is full.\n\tc <- rand.Int() % 100 // Side note: every time we run the program we receive the same numbers - why?\n\tfmt.Println(\"! 
Successfully sent the random number to the channel.\")\n}", "func MockGen(c *gin.Context) {\n\tlog.Info(\"Mock Generator started\")\n\tvar id = \"3b-6cfc0958d2fb\"\n\tdevice := c.Param(\"device\")\n\tchannel := c.Param(\"channel\")\n\ttopic := \"/\" + device + \"/\" + channel\n\tlog.Info(\"Sending messages to topic: \", topic)\n\tticker := time.NewTicker(1 * time.Second)\n\tvar datum = make(map[string]interface{}, 2)\n\t//var data = make(map[string]interface{}, 1)\n\tvar temps = make(map[string]interface{}, 3)\n\n\tclientGone := c.Writer.CloseNotify()\n\tbuffer := make(chan string, 100)\n\tgo func() {\n\t\tfor range ticker.C {\n\t\t\trand.Seed(time.Now().UnixNano())\n\t\t\tdatum[\"timestamp\"] = time.Now().UnixNano() / int64(time.Millisecond)\n\t\t\ttemps[\"id\"] = id\n\t\t\ttemps[\"f\"] = rand.Intn(300-50) + 50\n\t\t\ttemps[\"c\"] = rand.Intn(150-20) + 20\n\t\t\tdatum[\"data\"] = temps\n\t\t\tjsondata, err := json.Marshal(datum)\n\t\t\tlog.Info(\"Generated message\", string(jsondata))\n\t\t\tif err != nil {\n\t\t\t\tlog.Error(err)\n\t\t\t}\n\t\t\tselect {\n\t\t\tcase buffer <- string(jsondata):\n\t\t\tdefault:\n\t\t\t}\n\t\t}\n\t}()\n\tc.Stream(func(w io.Writer) bool {\n\t\tselect {\n\t\tcase <-clientGone:\n\t\t\tlog.Info(\"Stopping generator\")\n\t\t\tticker.Stop()\n\t\t\treturn true\n\t\tcase message := <-buffer:\n\t\t\tc.JSON(200, message)\n\t\t\tc.String(200, \"\\n\")\n\t\t\t//c.SSEvent(\"\", message)\n\t\t\treturn true\n\t\t}\n\t})\n}", "func GenerateCSeq() int {\n\treturn rand.Int() % 50000\n}", "func epochRangeGen(epochs string) chan uint64 {\n\tc := make(chan uint64)\n\n\t// string parser for extracting epoch range\n\tgo func(input string) {\n\t\tfor _, part := range strings.Split(input, \",\") {\n\t\t\tif i := strings.Index(part[1:], \"-\"); i == -1 {\n\t\t\t\tn, err := strconv.ParseUint(part, 10, 64)\n\t\t\t\tif err != nil {\n\t\t\t\t\tfmt.Println(err)\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t\tc <- n\n\t\t\t} else {\n\t\t\t\tn1, err := 
strconv.ParseUint(part[:i+1], 10, 64)\n\t\t\t\tif err != nil {\n\t\t\t\t\tfmt.Println(err)\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t\tn2, err := strconv.ParseUint(part[i+2:], 10, 64)\n\t\t\t\tif err != nil {\n\t\t\t\t\tfmt.Println(err)\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t\tif n2 < n1 {\n\t\t\t\t\tfmt.Printf(\"Invalid range %d-%d\\n\", n1, n2)\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t\tfor ii := n1; ii <= n2; ii++ {\n\t\t\t\t\tc <- ii\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\tclose(c)\n\t}(epochs)\n\treturn c\n}", "func New(workerID uint64) (Snowflake, error) {\n\tif workerID < 0 || workerID > maxWorkerID {\n\t\treturn nil, ErrInvalidWorkerID\n\t}\n\n\tsf := make(chan int64)\n\tgo generator(workerID, sf)\n\treturn sf, nil\n}", "func New(packetChannel chan string) *PacketGenerator {\n\tpacketGenerator := new(PacketGenerator)\n\treturn packetGenerator\n}", "func New(w io.Writer) *Generator {\n return &Generator{w}\n}", "func UnknownThenGoldGenerator(player *Player, actionPipe chan Action, needMovesSignal chan bool) {\n moveGeneratorTemplate(player, actionPipe, needMovesSignal, unknownThenGoldGenerator)\n}", "func CreateEvenGenerator() func() uint {\n\ti := uint(0)\n\treturn func() (retVal uint) {\n\t\tretVal = i\n\t\ti += 2\n\t\treturn\n\t}\n}", "func (gs *GenServer) ProcessLoop(pcs procChannels, pd Process, args ...interface{}) {\n\tstate := pd.(GenServerInt).Init(args...)\n\tgs.state = state\n\tpcs.init <- true\n\tvar chstop chan int\n\tchstop = make(chan int)\n\tdefer func() {\n\t\tif r := recover(); r != nil {\n\t\t\tlog.Printf(\"GenServerInt recovered: %#v\", r)\n\t\t}\n\t}()\n\tfor {\n\t\tvar message etf.Term\n\t\tvar fromPid etf.Pid\n\t\tselect {\n\t\tcase reason := <-chstop:\n\t\t\tpd.(GenServerInt).Terminate(reason, gs.state)\n\t\tcase msg := <-pcs.in:\n\t\t\tmessage = msg\n\t\tcase msgFrom := <-pcs.inFrom:\n\t\t\tmessage = msgFrom[1]\n\t\t\tfromPid = msgFrom[0].(etf.Pid)\n\n\t\t}\n\t\tlib.Log(\"[%#v]. 
Message from %#v\\n\", gs.Self, fromPid)\n\t\tswitch m := message.(type) {\n\t\tcase etf.Tuple:\n\t\t\tswitch mtag := m[0].(type) {\n\t\t\tcase etf.Atom:\n\t\t\t\tgs.lock.Lock()\n\t\t\t\tswitch mtag {\n\t\t\t\tcase etf.Atom(\"$gen_call\"):\n\n\t\t\t\t\tgo func() {\n\t\t\t\t\t\tfromTuple := m[1].(etf.Tuple)\n\t\t\t\t\t\tcode, reply, state1 := pd.(GenServerInt).HandleCall(&fromTuple, &m[2], gs.state)\n\n\t\t\t\t\t\tgs.state = state1\n\t\t\t\t\t\tgs.lock.Unlock()\n\t\t\t\t\t\tif code < 0 {\n\t\t\t\t\t\t\tchstop <- code\n\t\t\t\t\t\t\treturn\n\t\t\t\t\t\t}\n\t\t\t\t\t\tif reply != nil && code == 1 {\n\t\t\t\t\t\t\tpid := fromTuple[0].(etf.Pid)\n\t\t\t\t\t\t\tref := fromTuple[1]\n\t\t\t\t\t\t\trep := etf.Term(etf.Tuple{ref, *reply})\n\t\t\t\t\t\t\tgs.Send(pid, &rep)\n\t\t\t\t\t\t}\n\t\t\t\t\t}()\n\t\t\t\tcase etf.Atom(\"$gen_cast\"):\n\t\t\t\t\tgo func() {\n\t\t\t\t\t\tcode, state1 := pd.(GenServerInt).HandleCast(&m[1], gs.state)\n\t\t\t\t\t\tgs.state = state1\n\t\t\t\t\t\tgs.lock.Unlock()\n\t\t\t\t\t\tif code < 0 {\n\t\t\t\t\t\t\tchstop <- code\n\t\t\t\t\t\t\treturn\n\t\t\t\t\t\t}\n\t\t\t\t\t}()\n\t\t\t\tdefault:\n\t\t\t\t\tgo func() {\n\t\t\t\t\t\tcode, state1 := pd.(GenServerInt).HandleInfo(&message, gs.state)\n\t\t\t\t\t\tgs.state = state1\n\t\t\t\t\t\tgs.lock.Unlock()\n\t\t\t\t\t\tif code < 0 {\n\t\t\t\t\t\t\tchstop <- code\n\t\t\t\t\t\t\treturn\n\t\t\t\t\t\t}\n\t\t\t\t\t}()\n\t\t\t\t}\n\t\t\tcase etf.Ref:\n\t\t\t\tlib.Log(\"got reply: %#v\\n%#v\", mtag, message)\n\t\t\t\tgs.chreply <- &m\n\t\t\tdefault:\n\t\t\t\tlib.Log(\"mtag: %#v\", mtag)\n\t\t\t\tgs.lock.Lock()\n\t\t\t\tgo func() {\n\t\t\t\t\tcode, state1 := pd.(GenServerInt).HandleInfo(&message, gs.state)\n\t\t\t\t\tgs.state = state1\n\t\t\t\t\tgs.lock.Unlock()\n\t\t\t\t\tif code < 0 {\n\t\t\t\t\t\tchstop <- code\n\t\t\t\t\t\treturn\n\t\t\t\t\t}\n\t\t\t\t}()\n\t\t\t}\n\t\tdefault:\n\t\t\tlib.Log(\"m: %#v\", m)\n\t\t\tgs.lock.Lock()\n\t\t\tgo func() {\n\t\t\t\tcode, state1 := 
pd.(GenServerInt).HandleInfo(&message, gs.state)\n\t\t\t\tgs.state = state1\n\t\t\t\tgs.lock.Unlock()\n\t\t\t\tif code < 0 {\n\t\t\t\t\tchstop <- code\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t}()\n\t\t}\n\t}\n}", "func asChan(vs ...int) <-chan int {\n\tc := make(chan int)\n\tgo func() {\n\t\tdefer close(c)\n\t\tfor _, v := range vs {\n\t\t\tc <- v\n\t\t\ttime.Sleep(time.Duration(rand.Intn(1000)) * time.Millisecond)\n\t\t}\n\t\t//close(c)\n\t}()\n\treturn c\n}", "func fibonacciGen(n int, c chan int) {\n\tx, y := 0, 1\n\tfor i := 0; i < n; i++ {\n\t\tc <- x\n\t\tx, y = y, x+y\n\t}\n\tclose(c)\n}", "func fibonacciGen(n int, c chan int) {\n\tx, y := 0, 1\n\tfor i := 0; i < n; i++ {\n\t\tc <- x\n\t\tx, y = y, x+y\n\t}\n\tclose(c)\n}", "func GoldThenUnknownGenerator(player *Player, actionPipe chan Action, needMovesSignal chan bool) {\n moveGeneratorTemplate(player, actionPipe, needMovesSignal, goldThenUnknownGenerator)\n}", "func getNumberWithReturnChan() <-chan int {\n\t// create the channel\n\tc := make(chan int)\n\t// 3.0\n\tgo func() {\n\t\t// push the result into the channel\n\t\tc <- 5\n\t}()\n\n\t// 3.1\n\t// go func() {\n\t// \tfor i := 0; i < 3; i++ {\n\t// \t\tc <- i\n\t// \t}\n\t// \tclose(c)\n\t// }()\n\t// immediately return the channel\n\treturn c\n}", "func newLoop() *loop {\n\treturn &loop{\n\t\tinputCh: make(chan event, inputChSize),\n\t\thandleCb: dummyHandleCb,\n\t\tredrawCb: dummyRedrawCb,\n\n\t\tredrawCh: make(chan struct{}, 1),\n\t\tredrawFull: false,\n\t\tredrawMutex: new(sync.Mutex),\n\n\t\treturnCh: make(chan loopReturn, 1),\n\t}\n}", "func main() {\n c := make(chan int)\n // use go routine\n go func() {\n for i := 0; i < 10; i++ {\n c <- i\n }\n close(c)\n }()\n\n for n := range c {\n fmt.Println(n)\n }\n}", "func NewGenerator(h hash.Hash, seed []byte) io.ReadWriter {\n\tg := newGenerator(h, seed)\n\treturn &g\n}", "func newClockChan(d time.Duration) <-chan chan struct{} {\n\tch := make(chan chan struct{}, 1)\n\tgo func() { time.Sleep(d); ch <- 
make(chan struct{}) }()\n\treturn ch\n}", "func (a API) Generate(cmd *None) (e error) {\n\tRPCHandlers[\"generate\"].Call <-API{a.Ch, cmd, nil}\n\treturn\n}" ]
[ "0.6641966", "0.6627279", "0.65606636", "0.6544673", "0.6534481", "0.646908", "0.63416237", "0.6290985", "0.62610847", "0.6223949", "0.6180005", "0.6167735", "0.6167735", "0.616628", "0.6149821", "0.61059165", "0.61059165", "0.61059165", "0.6101464", "0.6064095", "0.6044233", "0.6017054", "0.5865704", "0.58400893", "0.5803619", "0.578187", "0.5760828", "0.5754095", "0.5753378", "0.56718373", "0.5655918", "0.5632857", "0.5616397", "0.56016785", "0.5582741", "0.5567577", "0.55657136", "0.5511396", "0.54784095", "0.5455717", "0.5391998", "0.5332831", "0.53315556", "0.5327547", "0.5302465", "0.52739376", "0.52546537", "0.52531594", "0.5238203", "0.5227085", "0.52116483", "0.5189294", "0.5186698", "0.5186698", "0.51843405", "0.5133452", "0.5095635", "0.50842565", "0.5074171", "0.5070068", "0.506947", "0.50529903", "0.5034915", "0.5028603", "0.5027213", "0.5018824", "0.5016938", "0.50026834", "0.49923384", "0.49823302", "0.4977874", "0.49776137", "0.49761683", "0.49640006", "0.49590576", "0.4952177", "0.49441683", "0.49360406", "0.4916259", "0.49099314", "0.4907861", "0.49014357", "0.48985213", "0.4897294", "0.4895451", "0.48948127", "0.48747456", "0.48690814", "0.48582467", "0.485305", "0.4849425", "0.4839249", "0.4839249", "0.48257735", "0.48240194", "0.48218974", "0.48006067", "0.48002237", "0.4784323", "0.47837338" ]
0.7869876
0
genFields generates fields config for given AST
genFields генерирует конфигурацию полей для заданного AST
func genFields(fs []*ast.FieldDefinition) *jen.Statement { // // Generate config for fields // // == Example input SDL // // type Dog { // name(style: NameComponentsStyle = SHORT): String! // givenName: String @deprecated(reason: "No longer supported; please use name field.") // } // // == Example output // // graphql.Fields{ // "name": graphql.Field{ ... }, // "givenName": graphql.Field{ ... }, // } // return jen.Qual(defsPkg, "Fields").Values(jen.DictFunc(func(d jen.Dict) { for _, f := range fs { d[jen.Lit(f.Name.Value)] = genField(f) } })) }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (g *Generator) generate(typeName string) {\n\tfields := make([]Field, 0, 100)\n\tfor _, file := range g.pkg.files {\n\t\t// Set the state for this run of the walker.\n\t\tfile.typeName = typeName\n\t\tif file.file != nil {\n\t\t\tast.Inspect(file.file, file.genDecl)\n\t\t\tfields = append(fields, file.fields...)\n\t\t}\n\t}\n\tif len(fields) == 0 {\n\t\tlog.Fatalf(\"no fields defined for type %s\", typeName)\n\t}\n\t// TODO: for now we remove Default from the start (maybe move that to an option)\n\tlogicalTypeName := \"\\\"\" + strings.TrimPrefix(typeName, \"Default\") + \"\\\"\"\n\n\t// Generate code that will fail if the constants change value.\n\tg.Printf(\"func (d *%s) Serialize() ([]byte, error) {\\n\", typeName)\n\tg.Printf(\"wb := utils.NewWriteBufferByteBased(utils.WithByteOrderForByteBasedBuffer(binary.BigEndian))\\n\")\n\tg.Printf(\"\\tif err := d.SerializeWithWriteBuffer(context.Background(), wb); err != nil {\\n\")\n\tg.Printf(\"\\t\\treturn nil, err\\n\")\n\tg.Printf(\"\\t}\\n\")\n\tg.Printf(\"\\treturn wb.GetBytes(), nil\\n\")\n\tg.Printf(\"}\\n\\n\")\n\tg.Printf(\"func (d *%s) SerializeWithWriteBuffer(ctx context.Context, writeBuffer utils.WriteBuffer) error {\\n\", typeName)\n\tg.Printf(\"\\tif err := writeBuffer.PushContext(%s); err != nil {\\n\", logicalTypeName)\n\tg.Printf(\"\\t\\treturn err\\n\")\n\tg.Printf(\"\\t}\\n\")\n\tfor _, field := range fields {\n\t\tfieldType := field.fieldType\n\t\tif field.isDelegate {\n\t\t\tg.Printf(\"\\t\\t\\tif err := d.%s.SerializeWithWriteBuffer(ctx, writeBuffer); err != nil {\\n\", fieldType.(*ast.Ident).Name)\n\t\t\tg.Printf(\"\\t\\t\\t\\treturn err\\n\")\n\t\t\tg.Printf(\"\\t\\t\\t}\\n\")\n\t\t\tcontinue\n\t\t}\n\t\tfieldName := field.name\n\t\tfieldNameUntitled := \"\\\"\" + unTitle(fieldName) + \"\\\"\"\n\t\tif field.hasLocker != \"\" {\n\t\t\tg.Printf(\"if err := func()error {\\n\")\n\t\t\tg.Printf(\"\\td.\" + field.hasLocker + \".Lock()\\n\")\n\t\t\tg.Printf(\"\\tdefer d.\" + field.hasLocker + 
\".Unlock()\\n\")\n\t\t}\n\t\tneedsDereference := false\n\t\tif starFieldType, ok := fieldType.(*ast.StarExpr); ok {\n\t\t\tfieldType = starFieldType.X\n\t\t\tneedsDereference = true\n\t\t}\n\t\tif field.isStringer {\n\t\t\tif needsDereference {\n\t\t\t\tg.Printf(\"if d.%s != nil {\", field.name)\n\t\t\t}\n\t\t\tg.Printf(stringFieldSerialize, \"d.\"+field.name+\".String()\", fieldNameUntitled)\n\t\t\tif field.hasLocker != \"\" {\n\t\t\t\tg.Printf(\"\\treturn nil\\n\")\n\t\t\t\tg.Printf(\"}(); err != nil {\\n\")\n\t\t\t\tg.Printf(\"\\treturn err\\n\")\n\t\t\t\tg.Printf(\"}\\n\")\n\t\t\t}\n\t\t\tif needsDereference {\n\t\t\t\tg.Printf(\"}\\n\")\n\t\t\t}\n\t\t\tcontinue\n\t\t}\n\t\tswitch fieldType := fieldType.(type) {\n\t\tcase *ast.SelectorExpr:\n\t\t\t{\n\t\t\t\t// TODO: bit hacky but not sure how else we catch those ones\n\t\t\t\tx := fieldType.X\n\t\t\t\tsel := fieldType.Sel\n\t\t\t\txIdent, xIsIdent := x.(*ast.Ident)\n\t\t\t\tif xIsIdent {\n\t\t\t\t\tif xIdent.Name == \"atomic\" {\n\t\t\t\t\t\tif sel.Name == \"Uint32\" {\n\t\t\t\t\t\t\tg.Printf(uint32FieldSerialize, \"d.\"+field.name+\".Load()\", fieldNameUntitled)\n\t\t\t\t\t\t\tif field.hasLocker != \"\" {\n\t\t\t\t\t\t\t\tg.Printf(\"\\treturn nil\\n\")\n\t\t\t\t\t\t\t\tg.Printf(\"}(); err != nil {\\n\")\n\t\t\t\t\t\t\t\tg.Printf(\"\\treturn err\\n\")\n\t\t\t\t\t\t\t\tg.Printf(\"}\\n\")\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\tcontinue\n\t\t\t\t\t\t}\n\t\t\t\t\t\tif sel.Name == \"Uint64\" {\n\t\t\t\t\t\t\tg.Printf(uint64FieldSerialize, \"d.\"+field.name+\".Load()\", fieldNameUntitled)\n\t\t\t\t\t\t\tif field.hasLocker != \"\" {\n\t\t\t\t\t\t\t\tg.Printf(\"\\treturn nil\\n\")\n\t\t\t\t\t\t\t\tg.Printf(\"}(); err != nil {\\n\")\n\t\t\t\t\t\t\t\tg.Printf(\"\\treturn err\\n\")\n\t\t\t\t\t\t\t\tg.Printf(\"}\\n\")\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\tcontinue\n\t\t\t\t\t\t}\n\t\t\t\t\t\tif sel.Name == \"Int32\" {\n\t\t\t\t\t\t\tg.Printf(int32FieldSerialize, \"d.\"+field.name+\".Load()\", fieldNameUntitled)\n\t\t\t\t\t\t\tif 
field.hasLocker != \"\" {\n\t\t\t\t\t\t\t\tg.Printf(\"\\treturn nil\\n\")\n\t\t\t\t\t\t\t\tg.Printf(\"}(); err != nil {\\n\")\n\t\t\t\t\t\t\t\tg.Printf(\"\\treturn err\\n\")\n\t\t\t\t\t\t\t\tg.Printf(\"}\\n\")\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\tcontinue\n\t\t\t\t\t\t}\n\t\t\t\t\t\tif sel.Name == \"Bool\" {\n\t\t\t\t\t\t\tg.Printf(boolFieldSerialize, \"d.\"+field.name+\".Load()\", fieldNameUntitled)\n\t\t\t\t\t\t\tif field.hasLocker != \"\" {\n\t\t\t\t\t\t\t\tg.Printf(\"\\treturn nil\\n\")\n\t\t\t\t\t\t\t\tg.Printf(\"}(); err != nil {\\n\")\n\t\t\t\t\t\t\t\tg.Printf(\"\\treturn err\\n\")\n\t\t\t\t\t\t\t\tg.Printf(\"}\\n\")\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\tcontinue\n\t\t\t\t\t\t}\n\t\t\t\t\t\tif sel.Name == \"Value\" {\n\t\t\t\t\t\t\tg.Printf(serializableFieldTemplate, \"d.\"+field.name+\".Load()\", fieldNameUntitled)\n\t\t\t\t\t\t\tif field.hasLocker != \"\" {\n\t\t\t\t\t\t\t\tg.Printf(\"\\treturn nil\\n\")\n\t\t\t\t\t\t\t\tg.Printf(\"}(); err != nil {\\n\")\n\t\t\t\t\t\t\t\tg.Printf(\"\\treturn err\\n\")\n\t\t\t\t\t\t\t\tg.Printf(\"}\\n\")\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\tcontinue\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t\tif xIdent.Name == \"sync\" {\n\t\t\t\t\t\tfmt.Printf(\"\\t skipping field %s because it is %v.%v\\n\", fieldName, x, sel)\n\t\t\t\t\t\tif field.hasLocker != \"\" {\n\t\t\t\t\t\t\tg.Printf(\"\\treturn nil\\n\")\n\t\t\t\t\t\t\tg.Printf(\"}(); err != nil {\\n\")\n\t\t\t\t\t\t\tg.Printf(\"\\treturn err\\n\")\n\t\t\t\t\t\t\tg.Printf(\"}\\n\")\n\t\t\t\t\t\t}\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t\tg.Printf(serializableFieldTemplate, \"d.\"+field.name, fieldNameUntitled)\n\t\tcase *ast.IndexExpr:\n\t\t\tx := fieldType.X\n\t\t\tif fieldType, isxFieldSelector := x.(*ast.SelectorExpr); isxFieldSelector { // TODO: we need to refactor this so we can reuse...\n\t\t\t\txIdent, xIsIdent := fieldType.X.(*ast.Ident)\n\t\t\t\tsel := fieldType.Sel\n\t\t\t\tif xIsIdent && xIdent.Name == \"atomic\" && sel.Name == \"Pointer\" 
{\n\t\t\t\t\tg.Printf(atomicPointerFieldTemplate, \"d.\"+field.name, field.name, fieldNameUntitled)\n\t\t\t\t\tif field.hasLocker != \"\" {\n\t\t\t\t\t\tg.Printf(\"\\treturn nil\\n\")\n\t\t\t\t\t\tg.Printf(\"}(); err != nil {\\n\")\n\t\t\t\t\t\tg.Printf(\"\\treturn err\\n\")\n\t\t\t\t\t\tg.Printf(\"}\\n\")\n\t\t\t\t\t}\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t}\n\t\t\tfmt.Printf(\"no support yet for %#q\\n\", fieldType)\n\t\t\tcontinue\n\t\tcase *ast.Ident:\n\t\t\tswitch fieldType.Name {\n\t\t\tcase \"byte\":\n\t\t\t\tg.Printf(byteFieldSerialize, \"d.\"+field.name, fieldNameUntitled)\n\t\t\tcase \"int\":\n\t\t\t\tg.Printf(int64FieldSerialize, \"int64(d.\"+field.name+\")\", fieldNameUntitled)\n\t\t\tcase \"int32\":\n\t\t\t\tg.Printf(int32FieldSerialize, \"int32(d.\"+field.name+\")\", fieldNameUntitled)\n\t\t\tcase \"uint32\":\n\t\t\t\tg.Printf(uint32FieldSerialize, \"d.\"+field.name, fieldNameUntitled)\n\t\t\tcase \"bool\":\n\t\t\t\tg.Printf(boolFieldSerialize, \"d.\"+field.name, fieldNameUntitled)\n\t\t\tcase \"string\":\n\t\t\t\tg.Printf(stringFieldSerialize, \"d.\"+field.name, fieldNameUntitled)\n\t\t\tcase \"error\":\n\t\t\t\tg.Printf(errorFieldSerialize, \"d.\"+field.name, fieldNameUntitled)\n\t\t\tdefault:\n\t\t\t\tfmt.Printf(\"\\t no support implemented for Ident with type %v\\n\", fieldType)\n\t\t\t\tg.Printf(\"{\\n\")\n\t\t\t\tg.Printf(\"_value := fmt.Sprintf(\\\"%%v\\\", d.%s)\\n\", fieldName)\n\t\t\t\tg.Printf(stringFieldSerialize, \"_value\", fieldNameUntitled)\n\t\t\t\tg.Printf(\"}\\n\")\n\t\t\t}\n\t\tcase *ast.ArrayType:\n\t\t\tif eltType, ok := fieldType.Elt.(*ast.Ident); ok && eltType.Name == \"byte\" {\n\t\t\t\tg.Printf(\"if err := writeBuffer.WriteByteArray(%s, d.%s); err != nil {\\n\", fieldNameUntitled, field.name)\n\t\t\t\tg.Printf(\"\\treturn err\\n\")\n\t\t\t\tg.Printf(\"}\\n\")\n\t\t\t} else {\n\t\t\t\tg.Printf(\"if err := writeBuffer.PushContext(%s, utils.WithRenderAsList(true)); err != nil {\\n\\t\\treturn err\\n\\t}\\n\", 
fieldNameUntitled)\n\t\t\t\tg.Printf(\"for _, elem := range d.%s {\", field.name)\n\t\t\t\tswitch eltType := fieldType.Elt.(type) {\n\t\t\t\tcase *ast.SelectorExpr, *ast.StarExpr:\n\t\t\t\t\tg.Printf(\"\\n\\t\\tvar elem any = elem\\n\")\n\t\t\t\t\tg.Printf(serializableFieldTemplate, \"elem\", \"\\\"value\\\"\")\n\t\t\t\tcase *ast.Ident:\n\t\t\t\t\tswitch eltType.Name {\n\t\t\t\t\tcase \"int\":\n\t\t\t\t\t\tg.Printf(int64FieldSerialize, \"int64(d.\"+field.name+\")\", fieldNameUntitled)\n\t\t\t\t\tcase \"uint32\":\n\t\t\t\t\t\tg.Printf(uint32FieldSerialize, \"d.\"+field.name, fieldNameUntitled)\n\t\t\t\t\tcase \"bool\":\n\t\t\t\t\t\tg.Printf(boolFieldSerialize, \"elem\", \"\\\"\\\"\")\n\t\t\t\t\tcase \"string\":\n\t\t\t\t\t\tg.Printf(stringFieldSerialize, \"elem\", \"\\\"\\\"\")\n\t\t\t\t\tcase \"error\":\n\t\t\t\t\t\tg.Printf(errorFieldSerialize, \"elem\", \"\\\"\\\"\")\n\t\t\t\t\tdefault:\n\t\t\t\t\t\tfmt.Printf(\"\\t no support implemented for Ident within ArrayType for %v\\n\", fieldType)\n\t\t\t\t\t\tg.Printf(\"_value := fmt.Sprintf(\\\"%%v\\\", elem)\\n\")\n\t\t\t\t\t\tg.Printf(stringFieldSerialize, \"_value\", fieldNameUntitled)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tg.Printf(\"}\\n\")\n\t\t\t\tg.Printf(\"if err := writeBuffer.PopContext(%s, utils.WithRenderAsList(true)); err != nil {\\n\\t\\treturn err\\n\\t}\\n\", fieldNameUntitled)\n\t\t\t}\n\t\tcase *ast.MapType:\n\t\t\tg.Printf(\"if err := writeBuffer.PushContext(%s, utils.WithRenderAsList(true)); err != nil {\\n\\t\\treturn err\\n\\t}\\n\", fieldNameUntitled)\n\t\t\t// TODO: we use serializable or strings as we don't want to over-complex this\n\t\t\tg.Printf(\"for _name, elem := range d.%s {\\n\", fieldName)\n\t\t\tswitch keyType := fieldType.Key.(type) {\n\t\t\tcase *ast.Ident:\n\t\t\t\tswitch keyType.Name {\n\t\t\t\tcase \"uint\", \"uint8\", \"uint16\", \"uint32\", \"uint64\", \"int\", \"int8\", \"int16\", \"int32\", \"int64\": // TODO: add other types\n\t\t\t\t\tg.Printf(\"\\t\\tname := 
fmt.Sprintf(\\\"%s\\\", _name)\\n\", \"%v\")\n\t\t\t\tcase \"string\":\n\t\t\t\t\tg.Printf(\"\\t\\tname := _name\\n\")\n\t\t\t\tdefault:\n\t\t\t\t\tg.Printf(\"\\t\\tname := fmt.Sprintf(\\\"%s\\\", &_name)\\n\", \"%v\")\n\t\t\t\t}\n\t\t\tdefault:\n\t\t\t\tg.Printf(\"\\t\\tname := fmt.Sprintf(\\\"%s\\\", &_name)\\n\", \"%v\")\n\t\t\t}\n\t\t\tswitch eltType := fieldType.Value.(type) {\n\t\t\tcase *ast.StarExpr, *ast.SelectorExpr:\n\t\t\t\tg.Printf(\"\\n\\t\\tvar elem any = elem\\n\")\n\t\t\t\tg.Printf(\"\\t\\tif serializable, ok := elem.(utils.Serializable); ok {\\n\")\n\t\t\t\tg.Printf(\"\\t\\t\\tif err := writeBuffer.PushContext(name); err != nil {\\n\")\n\t\t\t\tg.Printf(\"\\t\\t\\t\\treturn err\\n\")\n\t\t\t\tg.Printf(\"\\t\\t\\t}\\n\")\n\t\t\t\tg.Printf(\"\\t\\t\\tif err := serializable.SerializeWithWriteBuffer(ctx, writeBuffer); err != nil {\\n\")\n\t\t\t\tg.Printf(\"\\t\\t\\t\\treturn err\\n\")\n\t\t\t\tg.Printf(\"\\t\\t\\t}\\n\")\n\t\t\t\tg.Printf(\"\\t\\t\\tif err := writeBuffer.PopContext(name); err != nil {\\n\")\n\t\t\t\tg.Printf(\"\\t\\t\\t\\treturn err\\n\")\n\t\t\t\tg.Printf(\"\\t\\t\\t}\\n\")\n\t\t\t\tg.Printf(\"\\t\\t} else {\\n\")\n\t\t\t\tg.Printf(\"\\t\\t\\telemAsString := fmt.Sprintf(\\\"%%v\\\", elem)\\n\")\n\t\t\t\tg.Printf(\"\\t\\t\\tif err := writeBuffer.WriteString(name, uint32(len(elemAsString)*8), \\\"UTF-8\\\", elemAsString); err != nil {\\n\")\n\t\t\t\tg.Printf(\"\\t\\t\\t\\treturn err\\n\")\n\t\t\t\tg.Printf(\"\\t\\t\\t}\\n\")\n\t\t\t\tg.Printf(\"\\t\\t}\\n\")\n\t\t\tcase *ast.Ident:\n\t\t\t\tswitch eltType.Name {\n\t\t\t\tcase \"bool\":\n\t\t\t\t\tg.Printf(boolFieldSerialize, \"elem\", \"name\")\n\t\t\t\tcase \"string\":\n\t\t\t\t\tg.Printf(stringFieldSerialize, \"elem\", \"name\")\n\t\t\t\tcase \"error\":\n\t\t\t\t\tg.Printf(errorFieldSerialize, \"elem\", \"name\")\n\t\t\t\tdefault:\n\t\t\t\t\tfmt.Printf(\"\\t no support implemented for Ident within MapType for %v\\n\", fieldType)\n\t\t\t\t\tg.Printf(\"\\t\\t_value := 
fmt.Sprintf(\\\"%%v\\\", elem)\\n\")\n\t\t\t\t\tg.Printf(stringFieldSerialize, \"_value\", \"name\")\n\t\t\t\t}\n\t\t\tdefault:\n\t\t\t\tfmt.Printf(\"\\t no support implemented within MapType %v\\n\", fieldType.Value)\n\t\t\t\tg.Printf(\"\\t\\t_value := fmt.Sprintf(\\\"%%v\\\", elem)\\n\")\n\t\t\t\tg.Printf(stringFieldSerialize, \"_value\", \"name\")\n\t\t\t}\n\t\t\tg.Printf(\"\\t}\\n\")\n\t\t\tg.Printf(\"if err := writeBuffer.PopContext(%s, utils.WithRenderAsList(true)); err != nil {\\n\\t\\treturn err\\n\\t}\\n\", fieldNameUntitled)\n\t\tcase *ast.ChanType:\n\t\t\tg.Printf(chanFieldSerialize, \"d.\"+field.name, fieldNameUntitled, field.name)\n\t\tcase *ast.FuncType:\n\t\t\tg.Printf(funcFieldSerialize, \"d.\"+field.name, fieldNameUntitled)\n\t\tdefault:\n\t\t\tfmt.Printf(\"no support implemented %#v\\n\", fieldType)\n\t\t}\n\t\tif field.hasLocker != \"\" {\n\t\t\tg.Printf(\"\\treturn nil\\n\")\n\t\t\tg.Printf(\"}(); err != nil {\\n\")\n\t\t\tg.Printf(\"\\treturn err\\n\")\n\t\t\tg.Printf(\"}\\n\")\n\t\t}\n\t}\n\tg.Printf(\"\\tif err := writeBuffer.PopContext(%s); err != nil {\\n\", logicalTypeName)\n\tg.Printf(\"\\t\\treturn err\\n\")\n\tg.Printf(\"\\t}\\n\")\n\tg.Printf(\"\\treturn nil\\n\")\n\tg.Printf(\"}\\n\")\n\tg.Printf(\"\\n\")\n\tg.Printf(stringerTemplate, typeName)\n}", "func genField(field *ast.FieldDefinition) *jen.Statement {\n\t//\n\t// Generate config for field\n\t//\n\t// == Example input SDL\n\t//\n\t// interface Pet {\n\t// \"name of the pet\"\n\t// name(style: NameComponentsStyle = SHORT): String!\n\t// \"\"\"\n\t// givenName of the pet ★\n\t// \"\"\"\n\t// givenName: String @deprecated(reason: \"No longer supported; please use name field.\")\n\t// }\n\t//\n\t// == Example output\n\t//\n\t// &graphql.Field{\n\t// Name: \"name\",\n\t// Type: graphql.NonNull(graphql.String),\n\t// Description: \"name of the pet\",\n\t// DeprecationReason: \"\",\n\t// Args: FieldConfigArgument{ ... 
},\n\t// }\n\t//\n\t// &graphql.Field{\n\t// Name: \"givenName\",\n\t// Type: graphql.String,\n\t// Description: \"givenName of the pet\",\n\t// DeprecationReason: \"No longer supported; please use name field.\",\n\t// Args: FieldConfigArgument{ ... },\n\t// }\n\t//\n\treturn jen.Op(\"&\").Qual(defsPkg, \"Field\").Values(jen.Dict{\n\t\tjen.Id(\"Args\"): genArguments(field.Arguments),\n\t\tjen.Id(\"DeprecationReason\"): genDeprecationReason(field.Directives),\n\t\tjen.Id(\"Description\"): genDescription(field),\n\t\tjen.Id(\"Name\"): jen.Lit(field.Name.Value),\n\t\tjen.Id(\"Type\"): genOutputTypeReference(field.Type),\n\t})\n}", "func (g *Generator) generate(typeName string) {\n\tfields := make([]Field, 0, 100)\n\tfor _, file := range g.pkg.files {\n\t\t// Set the state for this run of the walker.\n\t\tfile.typeName = typeName\n\t\tfile.fields = nil\n\t\tif file.file != nil {\n\t\t\tast.Inspect(file.file, file.genDecl)\n\t\t\tg.additionalImports = append(g.additionalImports, file.additionalImports...)\n\t\t\tfields = append(fields, file.fields...)\n\t\t}\n\t}\n\n\tif len(fields) == 0 {\n\t\tlog.Fatalf(\"no values defined for type %s\", typeName)\n\t}\n\n\tg.build(fields, typeName)\n}", "func (fs *FileStat) GenerateFields() (string, error) {\n\ttb, e := fs.modTime.MarshalBinary()\n\tif e != nil {\n\t\treturn \"\", e\n\t}\n\tcb, e := fs.compressedBytes()\n\tif e != nil {\n\t\treturn \"\", e\n\t}\n\n\tformat := `\"%s\", \"%s\", %d, 0%o, binfs.MustHexDecode(\"%x\"), %t, binfs.MustHexDecode(\"%x\")`\n\treturn fmt.Sprintf(format,\n\t\tfs.path,\n\t\tfs.name,\n\t\tfs.size,\n\t\tfs.mode,\n\t\ttb,\n\t\tfs.isDir,\n\t\tcb,\n\t), nil\n}", "func (g *Generator) collectAndGenerate(typeName string, genFn GeneratorFunc) {\n\tfields := make([]Field, 0, 100)\n\timports := make([]Import, 0, 100)\n\tfor _, file := range g.pkg.files {\n\t\t// Set the state for this run of the walker.\n\t\tfile.typeName = typeName\n\t\tfile.fields = nil\n\t\tif file.file != nil 
{\n\t\t\tast.Inspect(file.file, file.genDecl)\n\t\t\tfields = append(fields, file.fields...)\n\t\t\timports = append(imports, file.imports...)\n\t\t}\n\t}\n\n\tgenFn(typeName, fields, imports)\n\n}", "func Generate(fields map[string]*indexer.Field) map[string]interface{} {\n\treturn mapFields(fields)\n}", "func GenerateBaseFields(conf CurveConfig) error {\n\tif err := goff.GenerateFF(\"fr\", \"Element\", conf.RTorsion, filepath.Join(conf.OutputDir, \"fr\"), false); err != nil {\n\t\treturn err\n\t}\n\tif err := goff.GenerateFF(\"fp\", \"Element\", conf.FpModulus, filepath.Join(conf.OutputDir, \"fp\"), false); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}", "func TraceFieldGenerator(ctx context.Context) []zapcore.Field {\n\tspanCtx := trace.FromContext(ctx).SpanContext()\n\n\treturn []zapcore.Field{\n\t\tzap.Uint64(\"dd.trace_id\", binary.BigEndian.Uint64(spanCtx.TraceID[8:])),\n\t\tzap.Uint64(\"dd.span_id\", binary.BigEndian.Uint64(spanCtx.SpanID[:])),\n\t}\n}", "func expandFields(compiled *lang.CompiledExpr, define *lang.DefineExpr) lang.DefineFieldsExpr {\n\tvar fields lang.DefineFieldsExpr\n\tfor _, field := range define.Fields {\n\t\tif isEmbeddedField(field) {\n\t\t\tembedded := expandFields(compiled, compiled.LookupDefine(string(field.Type)))\n\t\t\tfields = append(fields, embedded...)\n\t\t} else {\n\t\t\tfields = append(fields, field)\n\t\t}\n\t}\n\treturn fields\n}", "func (n ClassNode) Codegen(scope *Scope, c *Compiler) value.Value {\n\tstructDefn := scope.FindType(n.Name).Type.(*types.StructType)\n\n\tfieldnames := make([]string, 0, len(n.Variables))\n\tfields := make([]types.Type, 0, len(n.Variables))\n\n\tnames := map[string]bool{}\n\n\tfor _, f := range n.Variables {\n\t\tt := f.Type.Name\n\t\tname := f.Name.String()\n\t\tif _, found := names[name]; found {\n\t\t\tlog.Fatal(\"Class '%s' has two fields/methods named '%s'\\n\", n.Name, f.Name)\n\t\t}\n\t\tnames[name] = true\n\t\tty := scope.FindType(t).Type\n\t\tty = 
f.Type.BuildPointerType(ty)\n\t\tfields = append(fields, ty)\n\t\tfieldnames = append(fieldnames, name)\n\t}\n\n\tthisArg := VariableDefnNode{}\n\tthisArg.Name = NewNamedReference(\"this\")\n\tthisArg.Type = GeodeTypeRef{}\n\tthisArg.Type.Array = false\n\tthisArg.Type.Name = n.Name\n\tthisArg.Type.PointerLevel = 1\n\n\tstructDefn.Fields = fields\n\tstructDefn.Names = fieldnames\n\n\tmethodBaseArgs := []VariableDefnNode{thisArg}\n\tfor _, m := range n.Methods {\n\t\tm.Name.Value = fmt.Sprintf(\"class.%s.%s\", n.Name, m.Name)\n\t\tif _, found := names[m.Name.String()]; found {\n\t\t\tlog.Fatal(\"Class '%s' has two fields/methods named '%s'\\n\", n.Name, m.Name)\n\t\t}\n\t\tnames[m.Name.String()] = true\n\t\tm.Args = append(methodBaseArgs, m.Args...)\n\t\tm.Declare(scope, c)\n\t\tm.Codegen(scope, c)\n\t}\n\n\treturn nil\n}", "func (p TreeWriter) getFields(leaf *yaml.RNode) (treeFields, error) {\n\tfieldsByName := map[string]*treeField{}\n\n\t// index nested and non-nested fields\n\tfor i := range p.Fields {\n\t\tf := p.Fields[i]\n\t\tseq, err := leaf.Pipe(&f)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tif seq == nil {\n\t\t\tcontinue\n\t\t}\n\n\t\tif fieldsByName[f.Name] == nil {\n\t\t\tfieldsByName[f.Name] = &treeField{name: f.Name}\n\t\t}\n\n\t\t// non-nested field -- add directly to the treeFields list\n\t\tif f.SubName == \"\" {\n\t\t\t// non-nested field -- only 1 element\n\t\t\tval, err := yaml.String(seq.Content()[0], yaml.Trim, yaml.Flow)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tfieldsByName[f.Name].value = val\n\t\t\tcontinue\n\t\t}\n\n\t\t// nested-field -- create a parent elem, and index by the 'match' value\n\t\tif fieldsByName[f.Name].subFieldByMatch == nil {\n\t\t\tfieldsByName[f.Name].subFieldByMatch = map[string]treeFields{}\n\t\t}\n\t\tindex := fieldsByName[f.Name].subFieldByMatch\n\t\tfor j := range seq.Content() {\n\t\t\telem := seq.Content()[j]\n\t\t\tmatches := f.Matches[elem]\n\t\t\tstr, err := yaml.String(elem, 
yaml.Trim, yaml.Flow)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\n\t\t\t// map the field by the name of the element\n\t\t\t// index the subfields by the matching element so we can put all the fields for the\n\t\t\t// same element under the same branch\n\t\t\tmatchKey := strings.Join(matches, \"/\")\n\t\t\tindex[matchKey] = append(index[matchKey], &treeField{name: f.SubName, value: str})\n\t\t}\n\t}\n\n\t// iterate over collection of all queried fields in the Resource\n\tfor _, field := range fieldsByName {\n\t\t// iterate over collection of elements under the field -- indexed by element name\n\t\tfor match, subFields := range field.subFieldByMatch {\n\t\t\t// create a new element for this collection of fields\n\t\t\t// note: we will convert name to an index later, but keep the match for sorting\n\t\t\telem := &treeField{name: match}\n\t\t\tfield.matchingElementsAndFields = append(field.matchingElementsAndFields, elem)\n\n\t\t\t// iterate over collection of queried fields for the element\n\t\t\tfor i := range subFields {\n\t\t\t\t// add to the list of fields for this element\n\t\t\t\telem.matchingElementsAndFields = append(elem.matchingElementsAndFields, subFields[i])\n\t\t\t}\n\t\t}\n\t\t// clear this cached data\n\t\tfield.subFieldByMatch = nil\n\t}\n\n\t// put the fields in a list so they are ordered\n\tfieldList := treeFields{}\n\tfor _, v := range fieldsByName {\n\t\tfieldList = append(fieldList, v)\n\t}\n\n\t// sort the fields\n\tsort.Sort(fieldList)\n\tfor i := range fieldList {\n\t\tfield := fieldList[i]\n\t\t// sort the elements under this field\n\t\tsort.Sort(field.matchingElementsAndFields)\n\n\t\tfor i := range field.matchingElementsAndFields {\n\t\t\telement := field.matchingElementsAndFields[i]\n\t\t\t// sort the elements under a list field by their name\n\t\t\tsort.Sort(element.matchingElementsAndFields)\n\t\t\t// set the name of the element to its index\n\t\t\telement.name = fmt.Sprintf(\"%d\", i)\n\t\t}\n\t}\n\n\treturn fieldList, 
nil\n}", "func generate(copyrights string, collector *collector, templateBuilder templateBuilder) {\n\tfor _, pkg := range collector.Packages {\n\t\tfileTemplate := fileTpl{\n\t\t\tCopyright: copyrights,\n\n\t\t\tStandardImports: []string{\n\t\t\t\t\"fmt\",\n\t\t\t\t\"unicode\",\n\t\t\t\t\"unicode/utf8\",\n\t\t\t},\n\n\t\t\tCustomImports: []string{\n\t\t\t\t\"github.com/google/uuid\",\n\t\t\t},\n\t\t}\n\t\tfor _, f := range pkg.Files {\n\t\t\tfor _, d := range f.Decls {\n\t\t\t\tg, ok := d.(*ast.GenDecl)\n\t\t\t\tif !ok {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\n\t\t\t\tstructs := structSearch(g)\n\t\t\t\tif len(structs) == 0 {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\n\t\t\t\tfor _, s := range structs {\n\t\t\t\t\tatLeastOneField := false\n\n\t\t\t\t\tfor _, field := range s.Type.Fields.List {\n\n\t\t\t\t\t\tpos := collector.FileSet.Position(field.Type.Pos())\n\t\t\t\t\t\ttyp := collector.Info.TypeOf(field.Type)\n\n\t\t\t\t\t\tcomposedType := \"\"\n\t\t\t\t\t\tbaseName := getType(typ, &composedType)\n\t\t\t\t\t\tfmt.Println(\"Add validation: \", pos, \": \", baseName, \"/\", composedType)\n\n\t\t\t\t\t\tif err := templateBuilder.generateCheck(field, s.Name, baseName, composedType); err != nil {\n\t\t\t\t\t\t\tfmt.Printf(\"struct %s: %s\\n\", s.Name, err)\n\t\t\t\t\t\t\tcontinue\n\t\t\t\t\t\t}\n\n\t\t\t\t\t\tatLeastOneField = true\n\t\t\t\t\t}\n\n\t\t\t\t\tif !atLeastOneField {\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t}\n\n\t\t\t\t\terr := templateBuilder.generateMethod(s.Name)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\tfmt.Printf(\"struct gen %s: %s\\n\", s.Name, err)\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\tfileTemplate.Package = pkg.Name\n\t\terr := templateBuilder.generateFile(pkg.Path, fileTemplate)\n\t\tif err != nil {\n\t\t\tfmt.Println(\"Generation error\", err)\n\t\t}\n\t}\n}", "func (f *File) genDecl(node ast.Node) bool {\n\tdecl, ok := node.(*ast.GenDecl)\n\tif !ok || decl.Tok != token.TYPE {\n\t\t// We only care about type declarations.\n\t\treturn 
true\n\t}\n\tfor _, spec := range decl.Specs {\n\t\ttypeSpec := spec.(*ast.TypeSpec)\n\t\tstructDecl, ok := typeSpec.Type.(*ast.StructType)\n\t\tif !ok {\n\t\t\tcontinue\n\t\t}\n\t\tif typeSpec.Name.Name != f.typeName {\n\t\t\tcontinue\n\t\t}\n\t\tfmt.Printf(\"Handling %s\\n\", typeSpec.Name.Name)\n\t\tfor _, field := range structDecl.Fields.List {\n\t\t\tif field.Tag != nil && field.Tag.Value == \"`ignore:\\\"true\\\"`\" {\n\t\t\t\tvar name string\n\t\t\t\tif len(field.Names) != 0 {\n\t\t\t\t\tname = field.Names[0].Name\n\t\t\t\t} else {\n\t\t\t\t\tname = \"<delegate>\"\n\t\t\t\t}\n\t\t\t\tfmt.Printf(\"\\t ignoring field %s %v\\n\", name, field.Type)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tisStringer := false\n\t\t\tif field.Tag != nil && field.Tag.Value == \"`stringer:\\\"true\\\"`\" { // TODO: Check if we do that a bit smarter\n\t\t\t\tisStringer = true\n\t\t\t}\n\t\t\thasLocker := \"\"\n\t\t\tif field.Tag != nil && strings.HasPrefix(field.Tag.Value, \"`hasLocker:\\\"\") { // TODO: Check if we do that a bit smarter\n\t\t\t\thasLocker = strings.TrimPrefix(field.Tag.Value, \"`hasLocker:\\\"\")\n\t\t\t\thasLocker = strings.TrimSuffix(hasLocker, \"\\\"`\")\n\t\t\t}\n\t\t\tif len(field.Names) == 0 {\n\t\t\t\tfmt.Printf(\"\\t adding delegate\\n\")\n\t\t\t\tswitch ft := field.Type.(type) {\n\t\t\t\tcase *ast.Ident:\n\t\t\t\t\tf.fields = append(f.fields, Field{\n\t\t\t\t\t\tfieldType: ft,\n\t\t\t\t\t\tisDelegate: true,\n\t\t\t\t\t\tisStringer: isStringer,\n\t\t\t\t\t\thasLocker: hasLocker,\n\t\t\t\t\t})\n\t\t\t\t\tcontinue\n\t\t\t\tcase *ast.StarExpr:\n\t\t\t\t\tswitch set := ft.X.(type) {\n\t\t\t\t\tcase *ast.Ident:\n\t\t\t\t\t\tf.fields = append(f.fields, Field{\n\t\t\t\t\t\t\tfieldType: set,\n\t\t\t\t\t\t\tisDelegate: true,\n\t\t\t\t\t\t\tisStringer: isStringer,\n\t\t\t\t\t\t\thasLocker: hasLocker,\n\t\t\t\t\t\t})\n\t\t\t\t\t\tcontinue\n\t\t\t\t\tdefault:\n\t\t\t\t\t\tpanic(fmt.Sprintf(\"Only pointer to struct delegates supported now. 
Type %T\", field.Type))\n\t\t\t\t\t}\n\t\t\t\tcase *ast.SelectorExpr:\n\t\t\t\t\tf.fields = append(f.fields, Field{\n\t\t\t\t\t\tfieldType: ft.Sel,\n\t\t\t\t\t\tisDelegate: true,\n\t\t\t\t\t\tisStringer: isStringer,\n\t\t\t\t\t\thasLocker: hasLocker,\n\t\t\t\t\t})\n\t\t\t\t\tcontinue\n\t\t\t\tdefault:\n\t\t\t\t\tpanic(fmt.Sprintf(\"Only struct delegates supported now. Type %T\", field.Type))\n\t\t\t\t}\n\t\t\t}\n\t\t\tfmt.Printf(\"\\t adding field %s %v\\n\", field.Names[0].Name, field.Type)\n\t\t\tf.fields = append(f.fields, Field{\n\t\t\t\tname: field.Names[0].Name,\n\t\t\t\tfieldType: field.Type,\n\t\t\t\tisStringer: isStringer,\n\t\t\t\thasLocker: hasLocker,\n\t\t\t})\n\t\t}\n\t}\n\treturn false\n}", "func fields(spec *ast.TypeSpec) []*ast.Field {\n\ts := make([]*ast.Field, 0)\n\tif structType, ok := spec.Type.(*ast.StructType); ok {\n\t\tfor _, field := range structType.Fields.List {\n\t\t\tif keyname(field) != \"\" {\n\t\t\t\ts = append(s, field)\n\t\t\t}\n\t\t}\n\t}\n\treturn s\n}", "func Fields() error {\n\treturn devtools.GenerateFieldsYAML()\n}", "func (g *generator) structFields(t reflect.Type) []field {\n\tvar fields []field\n\tfor i := 0; i < t.NumField(); i++ {\n\t\tf := t.Field(i)\n\t\tif g.ignoreField(t, f) {\n\t\t\tcontinue\n\t\t}\n\t\tname, _ := parseTag(g.fieldTagKey, f.Tag)\n\t\tif name == \"\" {\n\t\t\tname = f.Name\n\t\t}\n\t\tfields = append(fields, field{\n\t\t\tName: name,\n\t\t\tType: f.Type,\n\t\t\tZero: zeroValue(f.Type),\n\t\t})\n\t}\n\treturn fields\n}", "func generateStruct(a *AnnotationDoc, packageName string, imports []string, indent string) (string, []string) {\n\tvar allAnnotationsPackages []string\n\tpossiblePackagesForA := combinePackages(imports, []string{packageName})\n\tts, foundPackageOfA, foundImportsOfA := getAnnotationStruct(a.Name, possiblePackagesForA)\n\tallAnnotationsPackages = combinePackages(allAnnotationsPackages, []string{foundPackageOfA})\n\tstr, _ := ts.Type.(*ast.StructType)\n\tvar b 
bytes.Buffer\n\tb.WriteString(indent)\n\tb.WriteString(foundPackageOfA)\n\tb.WriteString(\".\")\n\tb.WriteString(a.Name)\n\tb.WriteString(\"{\\n\")\n\tchildIndent := indent + \" \"\n\tfor _, f := range str.Fields.List {\n\t\tfieldName := getFieldName(f)\n\t\tdefValue := getDefaultValue(f)\n\t\tfieldKey := fieldName\n\t\t// consider special case when only default parameter is specified\n\t\tif len(str.Fields.List) == 1 && len(a.Content) == 1 {\n\t\t\tfor key := range a.Content {\n\t\t\t\tif key == DEFAULT_PARAM {\n\t\t\t\t\tfieldKey = DEFAULT_PARAM\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\tvalue, found := a.Content[fieldKey]\n\t\tif found {\n\t\t\tswitch t := value.(type) {\n\t\t\tcase string:\n\t\t\t\tb.WriteString(childIndent)\n\t\t\t\tb.WriteString(getLiteral(f.Type, t, false))\n\t\t\t\tb.WriteString(\",\\n\")\n\t\t\tcase []string:\n\t\t\t\tb.WriteString(childIndent)\n\t\t\t\tb.WriteString(getFieldConstructor(f.Type))\n\t\t\t\tb.WriteString(\"\\n\")\n\t\t\t\tfor _, elem := range t {\n\t\t\t\t\tb.WriteString(childIndent + \" \")\n\t\t\t\t\tb.WriteString(elem)\n\t\t\t\t\tb.WriteString(\",\\n\")\n\t\t\t\t}\n\t\t\t\tb.WriteString(childIndent)\n\t\t\t\tb.WriteString(\"}\")\n\t\t\tcase []AnnotationDoc:\n\t\t\t\t// calculate array's elements\n\t\t\t\tvar bb bytes.Buffer\n\t\t\t\tfor _, sa := range t {\n\t\t\t\t\tchildCode, foundImportsOfChild := generateStruct(&sa, foundPackageOfA, foundImportsOfA, childIndent+\" \")\n\t\t\t\t\tallAnnotationsPackages = combinePackages(allAnnotationsPackages, foundImportsOfChild)\n\t\t\t\t\tbb.WriteString(childCode)\n\t\t\t\t\tbb.WriteString(\",\\n\")\n\t\t\t\t}\n\t\t\t\tb.WriteString(childIndent)\n\t\t\t\t// insert array initialzer of child annotation type\n\t\t\t\ts := writeArrayInitializer(&b, bb.String())\n\t\t\t\t// append array of child annotations\n\t\t\t\tb.WriteString(\"{\\n\")\n\t\t\t\tb.WriteString(childIndent + \" \")\n\t\t\t\tb.WriteString(s)\n\t\t\t\tb.WriteString(childIndent)\n\t\t\t\tb.WriteString(\"},\\n\")\n\t\t\tcase 
AnnotationDoc:\n\t\t\t\tchildCode, foundImportsOfChild := generateStruct(&t, foundPackageOfA, foundImportsOfA, childIndent)\n\t\t\t\tallAnnotationsPackages = combinePackages(allAnnotationsPackages, foundImportsOfChild)\n\t\t\t\tb.WriteString(childIndent)\n\t\t\t\tif isOptional(f.Type) {\n\t\t\t\t\tb.WriteString(\"&\")\n\t\t\t\t}\n\t\t\t\tb.WriteString(strings.TrimLeft(childCode, \" \"))\n\t\t\t\tb.WriteString(\",\\n\")\n\t\t\tdefault:\n\t\t\t\tpanic(\"Unexpected annotation value type\")\n\t\t\t}\n\t\t} else {\n\t\t\tb.WriteString(childIndent)\n\t\t\tb.WriteString(defValue)\n\t\t\tb.WriteString(\",\\n\")\n\t\t}\n\t}\n\tb.WriteString(indent)\n\tb.WriteString(\"}\")\n\treturn b.String(), allAnnotationsPackages\n}", "func MapFieldsToTypExpr(args ...*ast.Field) []ast.Expr {\n\tr := []ast.Expr{}\n\tfor idx, f := range args {\n\t\tif len(f.Names) == 0 {\n\t\t\tf.Names = []*ast.Ident{ast.NewIdent(fmt.Sprintf(\"f%d\", idx))}\n\t\t}\n\n\t\tfor _ = range f.Names {\n\t\t\tr = append(r, f.Type)\n\t\t}\n\n\t}\n\treturn r\n}", "func GenStructFromAllOfTypes(allOf []TypeDefinition) string {\n\t// Start out with struct {\n\tobjectParts := []string{\"struct {\"}\n\tfor _, td := range allOf {\n\t\tref := td.Schema.RefType\n\t\tif ref != \"\" {\n\t\t\t// We have a referenced type, we will generate an inlined struct\n\t\t\t// member.\n\t\t\t// struct {\n\t\t\t// InlinedMember\n\t\t\t// ...\n\t\t\t// }\n\t\t\tobjectParts = append(objectParts,\n\t\t\t\tfmt.Sprintf(\" // Embedded struct due to allOf(%s)\", ref))\n\t\t\tobjectParts = append(objectParts,\n\t\t\t\tfmt.Sprintf(\" %s `yaml:\\\",inline\\\"`\", ref))\n\t\t} else {\n\t\t\t// Inline all the fields from the schema into the output struct,\n\t\t\t// just like in the simple case of generating an object.\n\t\t\tobjectParts = append(objectParts, \" // Embedded fields due to inline allOf schema\")\n\t\t\tobjectParts = append(objectParts, GenFieldsFromProperties(td.Schema.Properties)...)\n\n\t\t\tif td.Schema.HasAdditionalProperties 
{\n\t\t\t\taddPropsType := td.Schema.AdditionalPropertiesType.GoType\n\t\t\t\tif td.Schema.AdditionalPropertiesType.RefType != \"\" {\n\t\t\t\t\taddPropsType = td.Schema.AdditionalPropertiesType.RefType\n\t\t\t\t}\n\n\t\t\t\tadditionalPropertiesPart := fmt.Sprintf(\"AdditionalProperties map[string]%s `json:\\\"-\\\"`\", addPropsType)\n\t\t\t\tif !StringInArray(additionalPropertiesPart, objectParts) {\n\t\t\t\t\tobjectParts = append(objectParts, additionalPropertiesPart)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\tobjectParts = append(objectParts, \"}\")\n\treturn strings.Join(objectParts, \"\\n\")\n}", "func (b *Builder) InputFields(source reflect.Value, parent reflect.Value) graphql.InputObjectConfigFieldMap {\n\tresult := make(graphql.InputObjectConfigFieldMap, 0)\n\tnodes := b.buildObject(source, parent)\n\tfor _, node := range nodes {\n\t\tif node.skip {\n\t\t\tcontinue\n\t\t}\n\t\tif !node.source.CanSet() {\n\t\t\tcontinue\n\t\t}\n\t\tif node.readOnly {\n\t\t\tcontinue\n\t\t}\n\n\t\tname := node.alias\n\t\tif name == \"\" {\n\t\t\tname = strcase.ToLowerCamel(node.name)\n\t\t}\n\t\tgType := b.mapInput(node.source, parent)\n\t\tif node.required {\n\t\t\tgType = graphql.NewNonNull(gType)\n\t\t}\n\n\t\tfield := &graphql.InputObjectFieldConfig{\n\t\t\tType: gType,\n\t\t}\n\t\tresult[name] = field\n\t}\n\treturn result\n}", "func parse(r io.Reader) ([]field, error) {\n\tinData, err := models.Unmarshal(r)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"unmarshalling models.yml: %w\", err)\n\t}\n\n\tvar fields []field\n\tfor collectionName, collection := range inData {\n\t\tfor fieldName, modelField := range collection.Fields {\n\t\t\tf := field{}\n\t\t\tf.Name = collectionName + \"/\" + fieldName\n\t\t\tf.GoName = goName(collectionName) + \"_\" + goName(fieldName)\n\t\t\tf.GoType = goType(modelField.Type)\n\t\t\tf.Collection = firstLower(goName(collectionName))\n\t\t\tf.FQField = collectionName + \"/%d/\" + fieldName\n\t\t\tf.Required = modelField.Required\n\n\t\t\tif 
modelField.Type == \"relation\" || modelField.Type == \"generic-relation\" {\n\t\t\t\tf.SingleRelation = true\n\t\t\t}\n\n\t\t\tif strings.Contains(fieldName, \"$\") {\n\t\t\t\tf.TemplateAttr = \"replacement\"\n\t\t\t\tf.TemplateAttrType = \"string\"\n\t\t\t\tf.TemplateFQField = collectionName + \"/%d/\" + strings.Replace(fieldName, \"$\", \"$%s\", 1)\n\t\t\t\tf.GoType = goType(modelField.Template.Fields.Type)\n\n\t\t\t\tif modelField.Template.Replacement != \"\" {\n\t\t\t\t\tf.TemplateAttr = modelField.Template.Replacement + \"ID\"\n\t\t\t\t\tf.TemplateAttrType = \"int\"\n\t\t\t\t\tf.TemplateFQField = collectionName + \"/%d/\" + strings.Replace(fieldName, \"$\", \"$%d\", 1)\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tfields = append(fields, f)\n\t\t}\n\t}\n\n\t// TODO: fix models-to-go to return fields in input order.\n\tsort.Slice(fields, func(i, j int) bool {\n\t\treturn fields[i].GoName < fields[j].GoName\n\t})\n\n\treturn fields, nil\n}", "func (f *File) genDecl(node ast.Node) bool {\n\tdecl, ok := node.(*ast.GenDecl)\n\tif !ok || decl.Tok != token.TYPE { // We only care about Type declarations.\n\t\treturn true\n\t}\n\t// The name of the type of the constants we are declaring.\n\t// Can change if this is a multi-element declaration.\n\ttyp := \"\"\n\t// Loop over the elements of the declaration. Each element is a ValueSpec:\n\t// a list of names possibly followed by a type, possibly followed by values.\n\t// If the type and value are both missing, we carry down the type (and value,\n\t// but the \"go/types\" package takes care of that).\n\tfor _, spec := range decl.Specs {\n\t\ttspec := spec.(*ast.TypeSpec) // Guaranteed to succeed as this is TYPE.\n\t\tif tspec.Type != nil {\n\t\t\t// \"X T\". We have a type. 
Remember it.\n\t\t\ttyp = tspec.Name.Name\n\t\t}\n\t\tif typ != f.typeName {\n\t\t\t// This is not the type we're looking for.\n\t\t\tcontinue\n\t\t}\n\t\t// We now have a list of names (from one line of source code) all being\n\t\t// declared with the desired type.\n\n\t\tstructType, ok := tspec.Type.(*ast.StructType)\n\t\tif !ok {\n\t\t\t//not a struct type\n\t\t\tcontinue\n\t\t}\n\n\t\ttypesObj, typeObjOk := f.pkg.defs[tspec.Name]\n\t\tif !typeObjOk {\n\t\t\tlog.Fatalf(\"no type info found for struct %s\", typ)\n\t\t}\n\n\t\tfor _, fieldLine := range structType.Fields.List {\n\t\t\tfor _, field := range fieldLine.Names {\n\t\t\t\t//skip struct padding\n\t\t\t\tif field.Name == \"_\" {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tfieldObj, _, _ := types.LookupFieldOrMethod(typesObj.Type(), false, f.pkg.typesPkg, field.Name)\n\n\t\t\t\ttypeStr := fieldObj.Type().String()\n\t\t\t\ttags := parseFieldTags(fieldLine.Tag)\n\n\t\t\t\t//Skip here so we don't include rubbish import lines\n\t\t\t\tif tags[\"exclude_dao\"].Value == \"true\" {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tprocessedTypeStr, importPath := processTypeStr(typeStr)\n\t\t\t\t//log.Printf(\"processedTypeStr: %s, importPath: %s\", processedTypeStr, importPath)\n\n\t\t\t\tif importPath != \"\" && !importExists(importPath, f.imports) {\n\n\t\t\t\t\tf.imports = append(f.imports, Import{importPath})\n\n\t\t\t\t}\n\n\t\t\t\tv := Field{\n\t\t\t\t\tName: field.Name,\n\t\t\t\t\tTags: tags,\n\t\t\t\t\tTypeName: processedTypeStr,\n\t\t\t\t}\n\t\t\t\tf.fields = append(f.fields, v)\n\t\t\t}\n\t\t}\n\t}\n\treturn false\n}", "func (f *File) genDecl(node ast.Node) bool {\n\tdecl, ok := node.(*ast.GenDecl)\n\n\tif !ok || decl.Tok != token.TYPE {\n\t\t// We only care about types declarations.\n\t\treturn true\n\t}\n\n\t// Loop over the elements of the declaration. 
Each element is a ValueSpec:\n\t// a list of names possibly followed by a type, possibly followed by values.\n\t// If the type and value are both missing, we carry down the type (and value,\n\t// but the \"go/types\" package takes care of that).\n\tfor _, spec := range decl.Specs {\n\t\ttspec := spec.(*ast.TypeSpec) // Guaranteed to succeed as this is TYPE.\n\n\t\tif tspec.Name.Name != f.typeName {\n\t\t\t// Not the type we're looking for.\n\t\t\tcontinue\n\t\t}\n\n\t\tlog.Printf(\"Type spec: %v name: %s\\n\", tspec.Type, tspec.Name.Name)\n\n\t\tif structType, ok := tspec.Type.(*ast.StructType); ok {\n\t\t\tlog.Printf(\"Located the struct type: %v\\n\", structType)\n\n\t\t\tfor _, field := range structType.Fields.List {\n\t\t\t\tlog.Printf(\"Field: %v\\n\", field)\n\n\t\t\t\tif ident, ok := field.Type.(*ast.Ident); ok {\n\t\t\t\t\t// Look at list of known types and determine if we have a translation.\n\t\t\t\t\ttp := KNOWN_SOURCE_TYPES[ident.Name]\n\n\t\t\t\t\tif tp != ST_UNKNOWN {\n\t\t\t\t\t\tlog.Printf(\"Primitive or local type found: %v => %s\\n\", ident.Name, tp.String())\n\t\t\t\t\t} else {\n\t\t\t\t\t\t// TODO: We should probably consider all of these fields as local objects and add\n\t\t\t\t\t\t// foreign key links.\n\t\t\t\t\t\tlog.Printf(\"UNRECOGNIZED LOCAL TYPE seen: %v\\n\", ident.Name)\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t}\n\n\t\t\t\t\tif len(field.Names) == 1 {\n\t\t\t\t\t\tfieldName := field.Names[0].Name\n\t\t\t\t\t\tisPK := false\n\n\t\t\t\t\t\tif strings.ToLower(fieldName) == \"id\" {\n\t\t\t\t\t\t\tisPK = true\n\t\t\t\t\t\t}\n\n\t\t\t\t\t\tf.fields = append(f.fields,\n\t\t\t\t\t\t\tField{\n\t\t\t\t\t\t\t\tsrcName: fieldName,\n\t\t\t\t\t\t\t\tdbName: strings.ToLower(fieldName), // TODO: Override with annotations\n\t\t\t\t\t\t\t\tisPK: isPK,\n\t\t\t\t\t\t\t\tsrcType: ident.Name,\n\t\t\t\t\t\t\t\tdbType: \"string\",\n\t\t\t\t\t\t\t})\n\t\t\t\t\t}\n\t\t\t\t} else if selector, ok := field.Type.(*ast.SelectorExpr); ok {\n\t\t\t\t\t// TODO: This likely 
means an object in another package. Foreign link?\n\t\t\t\t\tlog.Printf(\"Found selector: %s :: %s\\n\", selector.X, selector.Sel.Name)\n\t\t\t\t\ttypeName := fmt.Sprintf(\"%s.%s\", selector.X, selector.Sel.Name)\n\n\t\t\t\t\ttp := KNOWN_SOURCE_TYPES[typeName]\n\n\t\t\t\t\tif tp != ST_UNKNOWN {\n\t\t\t\t\t\tlog.Printf(\"Primitive or local type found: %v => %s\\n\", typeName, tp.String())\n\t\t\t\t\t\tf.additionalImports = append(f.additionalImports, fmt.Sprintf(\"%s\", selector.X))\n\t\t\t\t\t} else {\n\t\t\t\t\t\t// TODO: We should probably consider all of these fields as local objects and add\n\t\t\t\t\t\t// foreign key links.\n\t\t\t\t\t\tlog.Printf(\"UNRECOGNIZED LOCAL TYPE seen: %v\\n\", typeName)\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t}\n\n\t\t\t\t\tif len(field.Names) == 1 {\n\t\t\t\t\t\tfieldName := field.Names[0].Name\n\t\t\t\t\t\tisPK := false\n\n\t\t\t\t\t\tif strings.ToLower(fieldName) == \"id\" {\n\t\t\t\t\t\t\tisPK = true\n\t\t\t\t\t\t}\n\n\t\t\t\t\t\tf.fields = append(f.fields,\n\t\t\t\t\t\t\tField{\n\t\t\t\t\t\t\t\tsrcName: fieldName,\n\t\t\t\t\t\t\t\tdbName: strings.ToLower(fieldName), // TODO: Override with annotations\n\t\t\t\t\t\t\t\tisPK: isPK,\n\t\t\t\t\t\t\t\tsrcType: typeName,\n\t\t\t\t\t\t\t\tdbType: \"string\",\n\t\t\t\t\t\t\t})\n\t\t\t\t\t}\n\t\t\t\t} else {\n\t\t\t\t\t// TODO: Enumerate all different possible types here.\n\t\t\t\t\tlog.Printf(\"UNKNOWN TYPE seen: %v\\n\", field.Type)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\treturn false\n}", "func (c *TypeConverter) genStructConverter(\n\tkeyPrefix string,\n\tfromPrefix string,\n\tindent string,\n\tfromFields []*compile.FieldSpec,\n\ttoFields []*compile.FieldSpec,\n\tfieldMap map[string]FieldMapperEntry,\n\tprevKeyPrefixes []string,\n) error {\n\n\tfor i := 0; i < len(toFields); i++ {\n\t\ttoField := toFields[i]\n\n\t\t// Check for same named field\n\t\tvar fromField *compile.FieldSpec\n\t\tfor j := 0; j < len(fromFields); j++ {\n\t\t\tif fromFields[j].Name == toField.Name {\n\t\t\t\tfromField = 
fromFields[j]\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\n\t\ttoSubIdentifier := keyPrefix + PascalCase(toField.Name)\n\t\ttoIdentifier := \"out.\" + toSubIdentifier\n\t\toverriddenIdentifier := \"\"\n\t\tfromIdentifier := \"\"\n\n\t\t// Check for mapped field\n\t\tvar overriddenField *compile.FieldSpec\n\n\t\t// check if this toField satisfies a fieldMap transform\n\t\ttransformFrom, ok := fieldMap[toSubIdentifier]\n\t\tif ok {\n\t\t\t// no existing direct fromField, just assign the transform\n\t\t\tif fromField == nil {\n\t\t\t\tfromField = transformFrom.Field\n\t\t\t\tif c.useRecurGen {\n\t\t\t\t\tfromIdentifier = \"inOriginal.\" + transformFrom.QualifiedName\n\t\t\t\t} else {\n\t\t\t\t\tfromIdentifier = \"in.\" + transformFrom.QualifiedName\n\t\t\t\t}\n\t\t\t\t// else there is a conflicting direct fromField\n\t\t\t} else {\n\t\t\t\t// depending on Override flag either the direct fromField or transformFrom is the OverrideField\n\t\t\t\tif transformFrom.Override {\n\t\t\t\t\t// check for required/optional setting\n\t\t\t\t\tif !transformFrom.Field.Required {\n\t\t\t\t\t\toverriddenField = fromField\n\t\t\t\t\t\toverriddenIdentifier = \"in.\" + fromPrefix +\n\t\t\t\t\t\t\tPascalCase(overriddenField.Name)\n\t\t\t\t\t}\n\t\t\t\t\t// If override is true and the new field is required,\n\t\t\t\t\t// there's a default instantiation value and will always\n\t\t\t\t\t// overwrite.\n\t\t\t\t\tfromField = transformFrom.Field\n\t\t\t\t\tif c.useRecurGen {\n\t\t\t\t\t\tfromIdentifier = \"inOriginal.\" + transformFrom.QualifiedName\n\t\t\t\t\t} else {\n\t\t\t\t\t\tfromIdentifier = \"in.\" + transformFrom.QualifiedName\n\t\t\t\t\t}\n\t\t\t\t} else {\n\t\t\t\t\t// If override is false and the from field is required,\n\t\t\t\t\t// From is always populated and will never be overwritten.\n\t\t\t\t\tif !fromField.Required {\n\t\t\t\t\t\toverriddenField = transformFrom.Field\n\t\t\t\t\t\tif c.useRecurGen {\n\t\t\t\t\t\t\tfromIdentifier = \"inOriginal.\" + 
transformFrom.QualifiedName\n\t\t\t\t\t\t} else {\n\t\t\t\t\t\t\toverriddenIdentifier = \"in.\" + transformFrom.QualifiedName\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\t// neither direct or transform fromField was found\n\t\tif fromField == nil {\n\t\t\t// search the fieldMap toField identifiers for matching identifier prefix\n\t\t\t// e.g. the current toField is a struct and something within it has a transform\n\t\t\t// a full match identifiers for transform non-struct types would have been caught above\n\t\t\thasStructFieldMapping := false\n\t\t\tfor toID := range fieldMap {\n\t\t\t\tif strings.HasPrefix(toID, toSubIdentifier) {\n\t\t\t\t\thasStructFieldMapping = true\n\t\t\t\t}\n\t\t\t}\n\n\t\t\t// if there's no fromField and no fieldMap transform that could be applied\n\t\t\tif !hasStructFieldMapping {\n\t\t\t\tvar bypass bool\n\t\t\t\t// check if required field is filled from other resources\n\t\t\t\t// it can be used to set system default (customized tracing /auth required for clients),\n\t\t\t\t// or header propagating\n\t\t\t\tif c.optionalEntries != nil {\n\t\t\t\t\tfor toID := range c.optionalEntries {\n\t\t\t\t\t\tif strings.HasPrefix(toID, toSubIdentifier) {\n\t\t\t\t\t\t\tbypass = true\n\t\t\t\t\t\t\tbreak\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\t// the toField is either covered by optionalEntries, or optional and\n\t\t\t\t// there's nothing that maps to it or its sub-fields so we should skip it\n\t\t\t\tif bypass || !toField.Required {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\n\t\t\t\t// unrecoverable error\n\t\t\t\treturn errors.Errorf(\n\t\t\t\t\t\"required toField %s does not have a valid fromField mapping\",\n\t\t\t\t\ttoField.Name,\n\t\t\t\t)\n\t\t\t}\n\t\t}\n\n\t\tif fromIdentifier == \"\" && fromField != nil {\n\t\t\t// should we set this if no fromField ??\n\t\t\tfromIdentifier = \"in.\" + fromPrefix + PascalCase(fromField.Name)\n\t\t}\n\n\t\tif prevKeyPrefixes == nil {\n\t\t\tprevKeyPrefixes = []string{}\n\t\t}\n\n\t\tvar 
overriddenFieldName string\n\t\tvar overriddenFieldType compile.TypeSpec\n\t\tif overriddenField != nil {\n\t\t\toverriddenFieldName = overriddenField.Name\n\t\t\toverriddenFieldType = overriddenField.Type\n\t\t}\n\n\t\t// Override thrift type names to avoid naming collisions between endpoint\n\t\t// and client types.\n\t\tswitch toFieldType := compile.RootTypeSpec(toField.Type).(type) {\n\t\tcase\n\t\t\t*compile.BoolSpec,\n\t\t\t*compile.I8Spec,\n\t\t\t*compile.I16Spec,\n\t\t\t*compile.I32Spec,\n\t\t\t*compile.EnumSpec,\n\t\t\t*compile.I64Spec,\n\t\t\t*compile.DoubleSpec,\n\t\t\t*compile.StringSpec:\n\n\t\t\terr := c.genConverterForPrimitive(\n\t\t\t\ttoField,\n\t\t\t\ttoIdentifier,\n\t\t\t\tfromField,\n\t\t\t\tfromIdentifier,\n\t\t\t\toverriddenField,\n\t\t\t\toverriddenIdentifier,\n\t\t\t\tindent,\n\t\t\t\tprevKeyPrefixes,\n\t\t\t)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\tcase *compile.BinarySpec:\n\t\t\tfor _, line := range checkOptionalNil(indent, c.uninitialized, toIdentifier, prevKeyPrefixes, c.useRecurGen) {\n\t\t\t\tc.append(line)\n\t\t\t}\n\t\t\tc.append(toIdentifier, \" = []byte(\", fromIdentifier, \")\")\n\t\tcase *compile.StructSpec:\n\t\t\tvar (\n\t\t\t\tstFromPrefix = fromPrefix\n\t\t\t\tstFromType compile.TypeSpec\n\t\t\t\tfromTypeName string\n\t\t\t)\n\t\t\tif fromField != nil {\n\t\t\t\tstFromType = fromField.Type\n\t\t\t\tstFromPrefix = fromPrefix + PascalCase(fromField.Name)\n\n\t\t\t\tfromTypeName, _ = c.getIdentifierName(stFromType)\n\t\t\t}\n\n\t\t\ttoTypeName, err := c.getIdentifierName(toFieldType)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\tif converterMethodName, ok := c.convStructMap[toFieldType.Name]; ok {\n\t\t\t\t// the converter for this struct has already been generated, so just use it\n\t\t\t\tc.append(indent, \"out.\", keyPrefix+PascalCase(toField.Name), \" = \", converterMethodName, \"(\", fromIdentifier, \")\")\n\t\t\t} else if c.useRecurGen && fromTypeName != \"\" {\n\t\t\t\t// generate a 
callable converter inside function literal\n\t\t\t\terr = c.genConverterForStructWrapped(\n\t\t\t\t\ttoField,\n\t\t\t\t\ttoFieldType,\n\t\t\t\t\ttoTypeName,\n\t\t\t\t\ttoSubIdentifier,\n\t\t\t\t\tfromTypeName,\n\t\t\t\t\tfromIdentifier,\n\t\t\t\t\tstFromType,\n\t\t\t\t\tfieldMap,\n\t\t\t\t\tprevKeyPrefixes,\n\t\t\t\t\tindent,\n\t\t\t\t)\n\t\t\t} else {\n\t\t\t\terr = c.genConverterForStruct(\n\t\t\t\t\ttoField.Name,\n\t\t\t\t\ttoFieldType,\n\t\t\t\t\ttoField.Required,\n\t\t\t\t\tstFromType,\n\t\t\t\t\tfromIdentifier,\n\t\t\t\t\tkeyPrefix+PascalCase(toField.Name),\n\t\t\t\t\tstFromPrefix,\n\t\t\t\t\tindent,\n\t\t\t\t\tfieldMap,\n\t\t\t\t\tprevKeyPrefixes,\n\t\t\t\t)\n\t\t\t}\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\tcase *compile.ListSpec:\n\t\t\terr := c.genConverterForList(\n\t\t\t\ttoFieldParam{\n\t\t\t\t\ttoFieldType,\n\t\t\t\t\ttoField.Name,\n\t\t\t\t\ttoField.Required,\n\t\t\t\t\ttoIdentifier,\n\t\t\t\t},\n\t\t\t\tfromFieldParam{\n\t\t\t\t\tfromField.Type,\n\t\t\t\t\tfromField.Name,\n\t\t\t\t\tfromIdentifier,\n\t\t\t\t\tfromIdentifier,\n\t\t\t\t},\n\t\t\t\toverriddenFieldParam{\n\t\t\t\t\toverriddenFieldType,\n\t\t\t\t\toverriddenFieldName,\n\t\t\t\t\toverriddenIdentifier,\n\t\t\t\t},\n\t\t\t\tindent,\n\t\t\t)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\tcase *compile.MapSpec:\n\t\t\terr := c.genConverterForMap(\n\t\t\t\ttoFieldParam{\n\t\t\t\t\ttoFieldType,\n\t\t\t\t\ttoField.Name,\n\t\t\t\t\ttoField.Required,\n\t\t\t\t\ttoIdentifier,\n\t\t\t\t},\n\t\t\t\tfromFieldParam{\n\t\t\t\t\tfromField.Type,\n\t\t\t\t\tfromField.Name,\n\t\t\t\t\tfromIdentifier,\n\t\t\t\t\tfromIdentifier,\n\t\t\t\t},\n\t\t\t\toverriddenFieldParam{\n\t\t\t\t\toverriddenFieldType,\n\t\t\t\t\toverriddenFieldName,\n\t\t\t\t\toverriddenIdentifier,\n\t\t\t\t},\n\t\t\t\tindent,\n\t\t\t)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\tdefault:\n\t\t\t// fmt.Printf(\"Unknown type %s for field %s \\n\",\n\t\t\t// \ttoField.Type.TypeCode().String(), 
toField.Name,\n\t\t\t// )\n\n\t\t\t// pkgName, err := h.TypePackageName(toField.Type.IDLFile())\n\t\t\t// if err != nil {\n\t\t\t// \treturn nil, err\n\t\t\t// }\n\t\t\t// typeName := pkgName + \".\" + toField.Type.ThriftName()\n\t\t\t// line := toIdentifier + \"(*\" + typeName + \")\" + postfix\n\t\t\t// c.Lines = append(c.Lines, line)\n\t\t}\n\t}\n\n\treturn nil\n}", "func JsonFieldGenerator() gopter.Gen {\n\tif jsonFieldGenerator != nil {\n\t\treturn jsonFieldGenerator\n\t}\n\n\tgenerators := make(map[string]gopter.Gen)\n\tAddIndependentPropertyGeneratorsForJsonField(generators)\n\tjsonFieldGenerator = gen.Struct(reflect.TypeOf(JsonField{}), generators)\n\n\treturn jsonFieldGenerator\n}", "func NodesFromTypedef(pkg *packages.Package, f *ast.File, typed *ast.GenDecl) ([]models.EncodedNode, []string, []string) {\n\tpf := pkg.Fset.File(f.Pos())\n\n\tkind := KindTypename\n\tnodes := []models.EncodedNode{}\n\tstructs := []string{}\n\tifaces := []string{}\n\n\tfor _, spec := range typed.Specs {\n\t\ttspec, ok := spec.(*ast.TypeSpec)\n\t\tif !ok {\n\t\t\tpanic(fmt.Errorf(\"Unknown type for processing types: %#v\", spec))\n\t\t}\n\t\tdoc := \"\"\n\t\tif tspec.Comment != nil {\n\t\t\tdoc = tspec.Comment.Text()\n\t\t}\n\t\tpublic := true\n\t\tname := tspec.Name.Name\n\t\tif 'a' <= name[0] && name[0] <= 'z' {\n\t\t\tpublic = false\n\t\t}\n\n\t\tuid := fmt.Sprintf(\"%s.%s\", pkg.PkgPath, name)\n\t\tnodes = append(nodes, models.EncodedNode{\n\t\t\tComponent: models.Component{\n\t\t\t\tUID: uid,\n\t\t\t\tDisplayName: fmt.Sprintf(\"%s.%s\", pkg.Name, name),\n\t\t\t\tDescription: doc,\n\t\t\t\tKind: kind,\n\t\t\t\t// HACK one line for definition and one for closing curly brace\n\t\t\t\tLocation: pos2loc(pf.Name(), tspec.Name.NamePos - token.Pos(pf.Base()), uint(pf.Base()), spec, uint(2)),\n\t\t\t},\n\t\t\tPublic: public,\n\t\t})\n\t\tswitch typeTyped := tspec.Type.(type) {\n\t\tcase *ast.StructType:\n\t\t\tstructs = append(structs, uid)\n\t\t\tfor _, field := range 
typeTyped.Fields.List {\n\t\t\t\tfieldDoc := \"\"\n\t\t\t\tif field.Comment != nil {\n\t\t\t\t\tfieldDoc = field.Comment.Text()\n\t\t\t\t}\n\t\t\t\tfor _, fieldName := range field.Names {\n\t\t\t\t\tnodes = append(nodes, models.EncodedNode{\n\t\t\t\t\t\tComponent: models.Component{\n\t\t\t\t\t\t\tUID: fmt.Sprintf(\"%s.%s.%s\", pkg.PkgPath, name, fieldName.Name),\n\t\t\t\t\t\t\tDisplayName: fmt.Sprintf(\"%s.%s.%s\", pkg.Name, name, fieldName.Name),\n\t\t\t\t\t\t\tDescription: fieldDoc,\n\t\t\t\t\t\t\tKind: KindField,\n\t\t\t\t\t\t\t// NOTE for multiple fields on the same line this is ambiguous\n\t\t\t\t\t\t\tLocation: pos2loc(pf.Name(), fieldName.NamePos - token.Pos(pf.Base()), uint(pf.Base()), field, 1),\n\t\t\t\t\t\t},\n\t\t\t\t\t\tPublic: public,\n\t\t\t\t\t})\n\t\t\t\t}\n\t\t\t}\n\t\tcase *ast.InterfaceType:\n\t\t\tifaces = append(ifaces, uid)\n\t\t\tfor _, method := range typeTyped.Methods.List {\n\t\t\t\tmethodDoc := \"\"\n\t\t\t\tif method.Comment != nil {\n\t\t\t\t\tmethodDoc = method.Comment.Text()\n\t\t\t\t}\n\t\t\t\tfor _, methodName := range method.Names {\n\t\t\t\t\tnodes = append(nodes, models.EncodedNode{\n\t\t\t\t\t\tComponent: models.Component{\n\t\t\t\t\t\t\tUID: fmt.Sprintf(\"%s.%s.%s\", pkg.PkgPath, name, methodName.Name),\n\t\t\t\t\t\t\tDisplayName: fmt.Sprintf(\"%s.%s.%s\", pkg.Name, name, methodName.Name),\n\t\t\t\t\t\t\tDescription: methodDoc,\n\t\t\t\t\t\t\tKind: KindMethod,\n\t\t\t\t\t\t\tLocation: pos2loc(pf.Name(), methodName.NamePos - token.Pos(pf.Base()), uint(pf.Base()), method, 1),\n\t\t\t\t\t\t},\n\t\t\t\t\t\tPublic: public,\n\t\t\t\t\t})\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\treturn nodes, structs, ifaces\n}", "func (m *BgpConfiguration) GetFieldDeserializers()(map[string]func(i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode)(error)) {\n res := make(map[string]func(i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode)(error))\n res[\"asn\"] = func (n 
i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetInt32Value()\n if err != nil {\n return err\n }\n if val != nil {\n m.SetAsn(val)\n }\n return nil\n }\n res[\"ipAddress\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetStringValue()\n if err != nil {\n return err\n }\n if val != nil {\n m.SetIpAddress(val)\n }\n return nil\n }\n res[\"localIpAddress\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetStringValue()\n if err != nil {\n return err\n }\n if val != nil {\n m.SetLocalIpAddress(val)\n }\n return nil\n }\n res[\"@odata.type\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetStringValue()\n if err != nil {\n return err\n }\n if val != nil {\n m.SetOdataType(val)\n }\n return nil\n }\n res[\"peerIpAddress\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetStringValue()\n if err != nil {\n return err\n }\n if val != nil {\n m.SetPeerIpAddress(val)\n }\n return nil\n }\n return res\n}", "func (m *Directory) GetFieldDeserializers()(map[string]func(i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode)(error)) {\n res := m.Entity.GetFieldDeserializers()\n res[\"administrativeUnits\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetCollectionOfObjectValues(CreateAdministrativeUnitFromDiscriminatorValue)\n if err != nil {\n return err\n }\n if val != nil {\n res := make([]AdministrativeUnitable, len(val))\n for i, v := range val {\n if v != nil {\n res[i] = v.(AdministrativeUnitable)\n }\n }\n m.SetAdministrativeUnits(res)\n }\n return nil\n }\n res[\"attributeSets\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n 
val, err := n.GetCollectionOfObjectValues(CreateAttributeSetFromDiscriminatorValue)\n if err != nil {\n return err\n }\n if val != nil {\n res := make([]AttributeSetable, len(val))\n for i, v := range val {\n if v != nil {\n res[i] = v.(AttributeSetable)\n }\n }\n m.SetAttributeSets(res)\n }\n return nil\n }\n res[\"customSecurityAttributeDefinitions\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetCollectionOfObjectValues(CreateCustomSecurityAttributeDefinitionFromDiscriminatorValue)\n if err != nil {\n return err\n }\n if val != nil {\n res := make([]CustomSecurityAttributeDefinitionable, len(val))\n for i, v := range val {\n if v != nil {\n res[i] = v.(CustomSecurityAttributeDefinitionable)\n }\n }\n m.SetCustomSecurityAttributeDefinitions(res)\n }\n return nil\n }\n res[\"deletedItems\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetCollectionOfObjectValues(CreateDirectoryObjectFromDiscriminatorValue)\n if err != nil {\n return err\n }\n if val != nil {\n res := make([]DirectoryObjectable, len(val))\n for i, v := range val {\n if v != nil {\n res[i] = v.(DirectoryObjectable)\n }\n }\n m.SetDeletedItems(res)\n }\n return nil\n }\n res[\"federationConfigurations\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetCollectionOfObjectValues(CreateIdentityProviderBaseFromDiscriminatorValue)\n if err != nil {\n return err\n }\n if val != nil {\n res := make([]IdentityProviderBaseable, len(val))\n for i, v := range val {\n if v != nil {\n res[i] = v.(IdentityProviderBaseable)\n }\n }\n m.SetFederationConfigurations(res)\n }\n return nil\n }\n res[\"onPremisesSynchronization\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := 
n.GetCollectionOfObjectValues(CreateOnPremisesDirectorySynchronizationFromDiscriminatorValue)\n if err != nil {\n return err\n }\n if val != nil {\n res := make([]OnPremisesDirectorySynchronizationable, len(val))\n for i, v := range val {\n if v != nil {\n res[i] = v.(OnPremisesDirectorySynchronizationable)\n }\n }\n m.SetOnPremisesSynchronization(res)\n }\n return nil\n }\n return res\n}", "func (m *BusinessScenarioPlanner) GetFieldDeserializers()(map[string]func(i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode)(error)) {\n res := m.Entity.GetFieldDeserializers()\n res[\"planConfiguration\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetObjectValue(CreatePlannerPlanConfigurationFromDiscriminatorValue)\n if err != nil {\n return err\n }\n if val != nil {\n m.SetPlanConfiguration(val.(PlannerPlanConfigurationable))\n }\n return nil\n }\n res[\"taskConfiguration\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetObjectValue(CreatePlannerTaskConfigurationFromDiscriminatorValue)\n if err != nil {\n return err\n }\n if val != nil {\n m.SetTaskConfiguration(val.(PlannerTaskConfigurationable))\n }\n return nil\n }\n res[\"tasks\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetCollectionOfObjectValues(CreateBusinessScenarioTaskFromDiscriminatorValue)\n if err != nil {\n return err\n }\n if val != nil {\n res := make([]BusinessScenarioTaskable, len(val))\n for i, v := range val {\n if v != nil {\n res[i] = v.(BusinessScenarioTaskable)\n }\n }\n m.SetTasks(res)\n }\n return nil\n }\n return res\n}", "func AddIndependentPropertyGeneratorsForJsonField(gens map[string]gopter.Gen) {\n\tgens[\"SourceField\"] = gen.PtrOf(gen.AlphaString())\n}", "func (g *mapGen) genType() {\n\tg.P(\"type \", g.typeName, \" struct {\")\n\tg.P(\"m *map[\", 
getGoType(g.GeneratedFile, g.field.Message.Fields[0]), \"]\", getGoType(g.GeneratedFile, g.field.Message.Fields[1]))\n\tg.P(\"}\")\n\tg.P()\n}", "func (c *Core) generate(tab Table) (string, error) {\n\tref := []reflect.StructField{}\n\tfor _, col := range tab.Columns {\n\t\tv := reflect.StructField{\n\t\t\tName: strings.Title(col.Name),\n\t\t}\n\t\tif col.Annotations != \"\" {\n\t\t\tv.Tag = reflect.StructTag(col.Annotations)\n\t\t}\n\t\tswitch col.Type {\n\t\tcase \"float\":\n\t\t\tv.Type = reflect.TypeOf(float64(0))\n\t\tcase \"varchar\":\n\t\t\tv.Type = reflect.TypeOf(string(\"\"))\n\t\tcase \"integer\", \"int\", \"tinyint\":\n\t\t\tv.Type = reflect.TypeOf(int(0))\n\t\tcase \"bigint\":\n\t\t\tv.Type = reflect.TypeOf(int64(0))\n\t\tcase \"timestamp\":\n\t\t\tv.Type = reflect.TypeOf(time.Time{})\n\t\t}\n\t\tref = append(ref, v)\n\t}\n\treturn fmt.Sprintf(\"type %s %s\", strings.Title(tab.Name), reflect.StructOf(ref).String()), nil\n}", "func getNodeFields() []string {\n\trt := reflect.TypeOf((*tailcfg.Node)(nil)).Elem()\n\tret := make([]string, rt.NumField())\n\tfor i := 0; i < rt.NumField(); i++ {\n\t\tret[i] = rt.Field(i).Name\n\t}\n\treturn ret\n}", "func (p *Parser) parseTypes(file *ast.File) (ret []structConfig) {\n\tast.Inspect(file, func(n ast.Node) bool {\n\t\tdecl, ok := n.(*ast.GenDecl)\n\t\tif !ok || decl.Tok != token.TYPE {\n\t\t\treturn true\n\t\t}\n\n\t\tfor _, spec := range decl.Specs {\n\t\t\tvar (\n\t\t\t\tdata structConfig\n\t\t\t)\n\t\t\ttypeSpec, _ok := spec.(*ast.TypeSpec)\n\t\t\tif !_ok {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\t// We only care about struct declaration (for now)\n\t\t\tvar structType *ast.StructType\n\t\t\tif structType, ok = typeSpec.Type.(*ast.StructType); !ok {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tdata.StructName = typeSpec.Name.Name\n\t\t\tfor _, v := range structType.Fields.List {\n\t\t\t\tvar (\n\t\t\t\t\toptionField fieldConfig\n\t\t\t\t)\n\n\t\t\t\tif t, _ok := v.Type.(*ast.Ident); _ok {\n\t\t\t\t\toptionField.FieldType = 
t.String()\n\t\t\t\t} else {\n\t\t\t\t\tif v.Tag != nil {\n\t\t\t\t\t\tif strings.Contains(v.Tag.Value, \"gorm\") && strings.Contains(v.Tag.Value, \"time\") {\n\t\t\t\t\t\t\toptionField.FieldType = \"time.Time\"\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\tif len(v.Names) > 0 {\n\t\t\t\t\toptionField.FieldName = v.Names[0].String()\n\t\t\t\t\toptionField.ColumnName = gorm.ToDBName(optionField.FieldName)\n\t\t\t\t\toptionField.HumpName = SQLColumnToHumpStyle(optionField.ColumnName)\n\t\t\t\t}\n\n\t\t\t\tdata.OptionFields = append(data.OptionFields, optionField)\n\t\t\t}\n\n\t\t\tret = append(ret, data)\n\t\t}\n\t\treturn true\n\t})\n\treturn\n}", "func (a *Aggregate) makeFields(parts []string) map[string]string {\n\tfields := make(map[string]string, len(parts))\n\tfor _, part := range parts {\n\t\tkv := strings.SplitN(part, protocol.AggregateKVDelimiter, 2)\n\t\tif len(kv) != 2 {\n\t\t\tcontinue\n\t\t}\n\t\tfields[kv[0]] = kv[1]\n\t}\n\treturn fields\n}", "func TypeFields(t *Type) (fields []*Field) {\n\n\tif t == nil {\n\t\treturn\n\t}\n\n\tfor _, spec := range t.Decl.Specs {\n\n\t\ttypeSpec := spec.(*ast.TypeSpec)\n\n\t\t// struct type\n\t\tif str, ok := typeSpec.Type.(*ast.StructType); ok {\n\n\t\t\tfor _, f := range str.Fields.List {\n\t\t\t\tfields = append(fields, &Field{\n\t\t\t\t\tField: f,\n\t\t\t\t\tType: t,\n\t\t\t\t})\n\t\t\t}\n\n\t\t\treturn\n\t\t}\n\n\t\t// interface type methods\n\t\tif str, ok := typeSpec.Type.(*ast.InterfaceType); ok {\n\t\t\tfor _, field := range str.Methods.List {\n\t\t\t\tif ident, ok := field.Type.(*ast.Ident); ok && ident.Obj != nil {\n\t\t\t\t\tfield.Names = []*ast.Ident{ident}\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tfor _, f := range str.Methods.List {\n\t\t\t\tfields = append(fields, &Field{\n\t\t\t\t\tField: f,\n\t\t\t\t\tType: t,\n\t\t\t\t})\n\t\t\t}\n\n\t\t\treturn\n\t\t}\n\t}\n\n\treturn\n}", "func getFieldList(p *program.Program, f *ast.FunctionDecl, fieldTypes []string) (\n\t_ *goast.FieldList, err error) {\n\tdefer func() 
{\n\t\tif err != nil {\n\t\t\terr = fmt.Errorf(\"error in function field list. err = %v\", err)\n\t\t}\n\t}()\n\tr := []*goast.Field{}\n\tfor i := range fieldTypes {\n\t\tif len(f.Children()) <= i {\n\t\t\terr = fmt.Errorf(\"not correct type/children: %d, %d\",\n\t\t\t\tlen(f.Children()), len(fieldTypes))\n\t\t\treturn\n\t\t}\n\t\tn := f.Children()[i]\n\t\tif v, ok := n.(*ast.ParmVarDecl); ok {\n\t\t\tt, err := types.ResolveType(p, fieldTypes[i])\n\t\t\tif err != nil {\n\t\t\t\terr = fmt.Errorf(\"FieldList type: %s. %v\", fieldTypes[i], err)\n\t\t\t\tp.AddMessage(p.GenerateWarningMessage(err, f))\n\t\t\t\terr = nil // ignore error\n\t\t\t\tt = \"C4GO_UNDEFINE_TYPE\"\n\t\t\t}\n\n\t\t\tif t == \"\" {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tr = append(r, &goast.Field{\n\t\t\t\tNames: []*goast.Ident{util.NewIdent(v.Name)},\n\t\t\t\tType: goast.NewIdent(t),\n\t\t\t})\n\t\t}\n\t}\n\n\t// for function argument: ...\n\tif strings.Contains(f.Type, \"...\") {\n\t\tr = append(r, &goast.Field{\n\t\t\tNames: []*goast.Ident{util.NewIdent(\"c4goArgs\")},\n\t\t\tType: &goast.Ellipsis{\n\t\t\t\tEllipsis: 1,\n\t\t\t\tElt: &goast.InterfaceType{\n\t\t\t\t\tInterface: 1,\n\t\t\t\t\tMethods: &goast.FieldList{\n\t\t\t\t\t\tOpening: 1,\n\t\t\t\t\t},\n\t\t\t\t\tIncomplete: false,\n\t\t\t\t},\n\t\t\t},\n\t\t})\n\t}\n\n\treturn &goast.FieldList{\n\t\tList: r,\n\t}, nil\n}", "func (s VirtualNodeSpec) MarshalFields(e protocol.FieldEncoder) error {\n\tif s.BackendDefaults != nil {\n\t\tv := s.BackendDefaults\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetFields(protocol.BodyTarget, \"backendDefaults\", v, metadata)\n\t}\n\tif s.Backends != nil {\n\t\tv := s.Backends\n\n\t\tmetadata := protocol.Metadata{}\n\t\tls0 := e.List(protocol.BodyTarget, \"backends\", metadata)\n\t\tls0.Start()\n\t\tfor _, v1 := range v {\n\t\t\tls0.ListAddFields(v1)\n\t\t}\n\t\tls0.End()\n\n\t}\n\tif s.Listeners != nil {\n\t\tv := s.Listeners\n\n\t\tmetadata := protocol.Metadata{}\n\t\tls0 := e.List(protocol.BodyTarget, 
\"listeners\", metadata)\n\t\tls0.Start()\n\t\tfor _, v1 := range v {\n\t\t\tls0.ListAddFields(v1)\n\t\t}\n\t\tls0.End()\n\n\t}\n\tif s.Logging != nil {\n\t\tv := s.Logging\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetFields(protocol.BodyTarget, \"logging\", v, metadata)\n\t}\n\tif s.ServiceDiscovery != nil {\n\t\tv := s.ServiceDiscovery\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetFields(protocol.BodyTarget, \"serviceDiscovery\", v, metadata)\n\t}\n\treturn nil\n}", "func IterFields(t *Type) (*Field, Iter)", "func (b *basic) ToGoCode(n *ecsgen.Node) (string, error) {\n\t// we can only generate a Go struct definition for an Object, verify\n\t// we're not shooting ourselves in the foot\n\tif !n.IsObject() {\n\t\treturn \"\", fmt.Errorf(\"node %s is not an object\", n.Path)\n\t}\n\n\t// Now enumerate the Node's fields and sort the keys so the resulting Go code\n\t// is deterministically generated\n\tfieldKeys := []string{}\n\n\tfor key := range n.Children {\n\t\tfieldKeys = append(fieldKeys, key)\n\t}\n\n\tsort.Strings(fieldKeys)\n\n\t// Create a new buffer to write the struct definition to\n\tbuf := new(strings.Builder)\n\n\t// comment and type definition\n\tbuf.WriteString(fmt.Sprintf(\"// %s defines the object located at ECS path %s.\", n.TypeIdent().Pascal(), n.Path))\n\tbuf.WriteString(\"\\n\")\n\tbuf.WriteString(fmt.Sprintf(\"type %s struct {\", n.TypeIdent().Pascal()))\n\tbuf.WriteString(\"\\n\")\n\n\t// Enumerate the fields and generate their field definition, adding it\n\t// to the buffer as a line item.\n\tfor _, k := range fieldKeys {\n\t\tscalarField := n.Children[k]\n\t\tbuf.WriteString(\n\t\t\tfmt.Sprintf(\n\t\t\t\t\"\\t%s %s `json:\\\"%s,omitempty\\\" yaml:\\\"%s,omitempty\\\" ecs:\\\"%s\\\"`\",\n\t\t\t\tscalarField.FieldIdent().Pascal(),\n\t\t\t\tGoFieldType(scalarField),\n\t\t\t\tscalarField.Name,\n\t\t\t\tscalarField.Name,\n\t\t\t\tscalarField.Path,\n\t\t\t),\n\t\t)\n\t\tbuf.WriteString(\"\\n\")\n\t}\n\n\t// Close the type definition and 
return the result\n\tbuf.WriteString(\"}\")\n\tbuf.WriteString(\"\\n\")\n\n\t// if the user included the JSON operator flag, add the implementation\n\tif b.IncludeJSONMarshal {\n\t\t// Now we implement at json.Marshaler implementation for each specific type that\n\t\t// removes any nested JSON types that might exist.\n\t\t//\n\t\t// We do this by enumerating every field in the type and check to see\n\t\t// if it's got a zero value.\n\t\tbuf.WriteString(\"\\n\")\n\t\tbuf.WriteString(\"// MarshalJSON implements the json.Marshaler interface and removes zero values from returned JSON.\")\n\t\tbuf.WriteString(\"\\n\")\n\t\tbuf.WriteString(\n\t\t\tfmt.Sprintf(\n\t\t\t\t\"func (b %s) MarshalJSON() ([]byte, error) {\",\n\t\t\t\tn.TypeIdent().Pascal(),\n\t\t\t),\n\t\t)\n\t\tbuf.WriteString(\"\\n\")\n\n\t\t// Define the result struct we will populate non-zero fields with\n\t\tbuf.WriteString(\"\\tres := map[string]interface{}{}\")\n\t\tbuf.WriteString(\"\\n\")\n\t\tbuf.WriteString(\"\\n\")\n\n\t\t// enumerate the fields for the object fields\n\t\tfor _, fieldName := range fieldKeys {\n\t\t\tfield := n.Children[fieldName]\n\t\t\tbuf.WriteString(\n\t\t\t\tfmt.Sprintf(\n\t\t\t\t\t\"\\tif val := reflect.ValueOf(b.%s); !val.IsZero() {\", field.FieldIdent().Pascal(),\n\t\t\t\t),\n\t\t\t)\n\t\t\tbuf.WriteString(\n\t\t\t\tfmt.Sprintf(\n\t\t\t\t\t\"\\t\\tres[\\\"%s\\\"] = b.%s\",\n\t\t\t\t\tfield.Name,\n\t\t\t\t\tfield.FieldIdent().Pascal(),\n\t\t\t\t),\n\t\t\t)\n\t\t\tbuf.WriteString(\"\\t}\")\n\t\t\tbuf.WriteString(\"\\n\")\n\t\t\tbuf.WriteString(\"\\n\")\n\t\t}\n\n\t\t// add a line spacer and return the marshaled JSON result\n\t\tbuf.WriteString(\"\\n\")\n\t\tbuf.WriteString(\"\\treturn json.Marshal(res)\")\n\t\tbuf.WriteString(\"\\n\")\n\t\tbuf.WriteString(\"}\")\n\t\tbuf.WriteString(\"\\n\")\n\t}\n\n\treturn buf.String(), nil\n}", "func contextFields(lvl ...int) Fields {\n\tlevel := 2\n\tif len(lvl) == 1 {\n\t\tlevel = lvl[0]\n\t}\n\tpc, file, line, _ := 
runtime.Caller(level)\n\t_, fileName := path.Split(file)\n\tparts := strings.Split(runtime.FuncForPC(pc).Name(), \".\")\n\tpl := len(parts)\n\tpackageName := \"\"\n\tfuncName := parts[pl-1]\n\n\tif len(parts) >= 0 && pl-2 < len(parts) {\n\t\tif parts[pl-2][0] == '(' {\n\t\t\tfuncName = parts[pl-2] + \".\" + funcName\n\t\t\tpackageName = strings.Join(parts[0:pl-2], \".\")\n\t\t} else {\n\t\t\tpackageName = strings.Join(parts[0:pl-1], \".\")\n\t\t}\n\n\t\tpkgs := strings.Split(packageName, \"/sigma/\")\n\t\tif len(pkgs) > 1 {\n\t\t\tpackageName = pkgs[1]\n\t\t}\n\t}\n\n\treturn Fields{\n\t\t\"package\": packageName,\n\t\t\"file\": fileName,\n\t\t\"func\": funcName,\n\t\t\"line\": line,\n\t}\n}", "func _fields(args ...interface{}) *ast.FieldList {\n\tlist := []*ast.Field{}\n\tnames := []*ast.Ident{}\n\tlasti := interface{}(nil)\n\tmaybePop := func() {\n\t\tif len(names) > 0 {\n\t\t\tvar last ast.Expr\n\t\t\tif lastte_, ok := lasti.(string); ok {\n\t\t\t\tlast = _x(lastte_)\n\t\t\t} else {\n\t\t\t\tlast = lasti.(ast.Expr)\n\t\t\t}\n\t\t\tlist = append(list, &ast.Field{\n\t\t\t\tNames: names,\n\t\t\t\tType: last,\n\t\t\t})\n\t\t\tnames = []*ast.Ident{}\n\t\t}\n\t}\n\tfor i := 0; i < len(args); i++ {\n\t\tname, ok := args[i].(*ast.Ident)\n\t\tif !ok {\n\t\t\tname = _i(args[i].(string))\n\t\t}\n\t\tte_ := args[i+1]\n\t\ti += 1\n\t\t// NOTE: This comparison could be improved, to say, deep equality,\n\t\t// but is that the behavior we want?\n\t\tif lasti == te_ {\n\t\t\tnames = append(names, name)\n\t\t\tcontinue\n\t\t} else {\n\t\t\tmaybePop()\n\t\t\tnames = append(names, name)\n\t\t\tlasti = te_\n\t\t}\n\t}\n\tmaybePop()\n\treturn &ast.FieldList{\n\t\tList: list,\n\t}\n}", "func (m *AccessPackage) GetFieldDeserializers()(map[string]func(i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode)(error)) {\n res := m.Entity.GetFieldDeserializers()\n res[\"accessPackagesIncompatibleWith\"] = 
i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f.SetCollectionOfObjectValues(CreateAccessPackageFromDiscriminatorValue , m.SetAccessPackagesIncompatibleWith)\n res[\"assignmentPolicies\"] = i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f.SetCollectionOfObjectValues(CreateAccessPackageAssignmentPolicyFromDiscriminatorValue , m.SetAssignmentPolicies)\n res[\"catalog\"] = i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f.SetObjectValue(CreateAccessPackageCatalogFromDiscriminatorValue , m.SetCatalog)\n res[\"createdDateTime\"] = i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f.SetTimeValue(m.SetCreatedDateTime)\n res[\"description\"] = i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f.SetStringValue(m.SetDescription)\n res[\"displayName\"] = i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f.SetStringValue(m.SetDisplayName)\n res[\"incompatibleAccessPackages\"] = i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f.SetCollectionOfObjectValues(CreateAccessPackageFromDiscriminatorValue , m.SetIncompatibleAccessPackages)\n res[\"incompatibleGroups\"] = i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f.SetCollectionOfObjectValues(CreateGroupFromDiscriminatorValue , m.SetIncompatibleGroups)\n res[\"isHidden\"] = i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f.SetBoolValue(m.SetIsHidden)\n res[\"modifiedDateTime\"] = i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f.SetTimeValue(m.SetModifiedDateTime)\n return res\n}", "func genArguments(args []*ast.InputValueDefinition) *jen.Statement {\n\t//\n\t// Generate config for arguments\n\t//\n\t// == Example input SDL\n\t//\n\t// type Dog {\n\t// name(\n\t// \"style is stylish\"\n\t// style: NameComponentsStyle = SHORT,\n\t// ): String!\n\t// }\n\t//\n\t// == Example output\n\t//\n\t// FieldConfigArgument{\n\t// \"style\": &ArgumentConfig{ ... 
}\n\t// },\n\t//\n\treturn jen.Qual(defsPkg, \"FieldConfigArgument\").Values(\n\t\tjen.DictFunc(func(d jen.Dict) {\n\t\t\tfor _, arg := range args {\n\t\t\t\td[jen.Lit(arg.Name.Value)] = genArgument(arg)\n\t\t\t}\n\t\t}),\n\t)\n}", "func (*Base) ObjectFields(p ASTPass, fields *ast.ObjectFields, ctx Context) {\n\tfor i := range *fields {\n\t\tp.ObjectField(p, &(*fields)[i], ctx)\n\t}\n}", "func printStructField(t *reflect.Type) {\n fieldNum := (*t).NumField()\n for i := 0; i < fieldNum; i++ {\n fmt.Printf(\"conf's field: %s\\n\", (*t).Field(i).Name)\n }\n fmt.Println(\"\")\n}", "func MapFieldsToNameExpr(args ...*ast.Field) []ast.Expr {\n\tresult := make([]ast.Expr, 0, len(args))\n\tfor _, f := range args {\n\t\tresult = append(result, MapIdentToExpr(f.Names...)...)\n\t}\n\treturn result\n}", "func (g *Generator) generate(typeInfo typeInfo) {\n\t// <key, value>\n\tvalues := make([]Value, 0, 100)\n\tfor _, file := range g.pkg.files {\n\t\t// Set the state for this run of the walker.\n\t\tfile.typeInfo = typeInfo\n\t\tfile.values = nil\n\t\tif file.file != nil {\n\t\t\tast.Inspect(file.file, file.genDecl)\n\t\t\tvalues = append(values, file.values...)\n\t\t}\n\t}\n\n\tif len(values) == 0 {\n\t\tlog.Fatalf(\"no values defined for type %+v\", typeInfo)\n\t}\n\tg.transformValueNames(values, transformMethod)\n\t// Generate code that will fail if the constants change value.\n\tfor _, im := range checkImportPackages {\n\t\tg.Printf(stringImport, im)\n\t}\n\n\tif useNew {\n\t\tfor _, im := range newImportPackages {\n\t\t\tg.Printf(stringImport, im)\n\t\t}\n\t}\n\tif useBinary {\n\t\tfor _, im := range binaryImportPackages {\n\t\t\tg.Printf(stringImport, im)\n\t\t}\n\t}\n\tif useJson {\n\t\tfor _, im := range jsonImportPackages {\n\t\t\tg.Printf(stringImport, im)\n\t\t}\n\t}\n\tif useText {\n\t\tfor _, im := range textImportPackages {\n\t\t\tg.Printf(stringImport, im)\n\t\t}\n\t}\n\tif useYaml {\n\t\tfor _, im := range yamlImportPackages {\n\t\t\tg.Printf(stringImport, 
im)\n\t\t}\n\t}\n\tif useSql {\n\t\tfor _, im := range sqlImportPackages {\n\t\t\tg.Printf(stringImport, im)\n\t\t}\n\t}\n\n\tg.buildEnumRegenerateCheck(values)\n\n\truns := splitIntoRuns(values)\n\tthreshold := 10\n\n\tif useString {\n\t\t// The decision of which pattern to use depends on the number of\n\t\t// runs in the numbers. If there's only one, it's easy. For more than\n\t\t// one, there's a tradeoff between complexity and size of the data\n\t\t// and code vs. the simplicity of a map. A map takes more space,\n\t\t// but so does the code. The decision here (crossover at 10) is\n\t\t// arbitrary, but considers that for large numbers of runs the cost\n\t\t// of the linear scan in the switch might become important, and\n\t\t// rather than use yet another algorithm such as binary search,\n\t\t// we punt and use a map. In any case, the likelihood of a map\n\t\t// being necessary for any realistic example other than bitmasks\n\t\t// is very low. And bitmasks probably deserve their own analysis,\n\t\t// to be done some other day.\n\t\tswitch {\n\t\tcase len(runs) == 1:\n\t\t\tg.buildOneRun(runs, typeInfo)\n\t\tcase len(runs) <= threshold:\n\t\t\tg.buildMultipleRuns(runs, typeInfo)\n\t\tdefault:\n\t\t\tg.buildMap(runs, typeInfo)\n\t\t}\n\t}\n\n\tif useNew {\n\t\tg.Printf(newTemplate, typeInfo.Name)\n\t}\n\tif useBinary {\n\t\tg.buildCheck(runs, typeInfo.Name, threshold)\n\t\tg.Printf(binaryTemplate, typeInfo.Name)\n\t}\n\tif useJson {\n\t\tg.buildCheck(runs, typeInfo.Name, threshold)\n\t\tg.Printf(jsonTemplate, typeInfo.Name)\n\t}\n\tif useText {\n\t\tg.buildCheck(runs, typeInfo.Name, threshold)\n\t\tg.Printf(textTemplate, typeInfo.Name)\n\t}\n\tif useYaml {\n\t\tg.buildCheck(runs, typeInfo.Name, threshold)\n\t\tg.Printf(yamlTemplate, typeInfo.Name)\n\t}\n\tif useSql {\n\t\tg.buildCheck(runs, typeInfo.Name, threshold)\n\t\tg.Printf(sqpTemplate, typeInfo.Name)\n\t}\n\n\tif useContains {\n\t\tg.Printf(containsTemplate, typeInfo.Name)\n\t}\n}", "func compileField(sf 
reflect.StructField, name string) interface{} {\n\tf := field{sField: sf.Index[0]}\n\n\tf.name = []byte(name)\n\n\tswitch sf.Type.Kind() {\n\tcase reflect.Struct:\n\t\treturn fieldStruct{f.sField, f.name, compileStruct(sf.Type)}\n\tcase reflect.Bool:\n\t\tf.write = encodeBool\n\t\tf.read = decodeBool\n\t\tf.requiredType = 1\n\tcase reflect.Int8:\n\t\tf.write = encodeInt8\n\t\tf.read = decodeInt8\n\t\tf.requiredType = 1\n\tcase reflect.Int16:\n\t\tf.write = encodeInt16\n\t\tf.read = decodeInt16\n\t\tf.requiredType = 2\n\tcase reflect.Int32:\n\t\tf.write = encodeInt32\n\t\tf.read = decodeInt32\n\t\tf.requiredType = 3\n\tcase reflect.Int64:\n\t\tf.write = encodeInt64\n\t\tf.read = decodeInt64\n\t\tf.requiredType = 4\n\tcase reflect.String:\n\t\tf.write = encodeString\n\t\tf.read = decodeString\n\t\tf.requiredType = 8\n\tcase reflect.Map:\n\t\tf.requiredType = 10\n\t\telem := sf.Type.Elem()\n\t\tvar elemField interface{}\n\t\tname := \"map:\" + sf.Name\n\t\tif elem.Kind() != reflect.Interface {\n\t\t\telemField = compileField(reflect.StructField{Type: elem, Index: []int{0}}, name)\n\t\t}\n\t\tf.write = func(w io.Writer, en *msgEncoder, fi reflect.Value) error {\n\t\t\tkeys := fi.MapKeys()\n\t\t\tfor _, key := range keys {\n\t\t\t\tif f, ok := elemField.(field); ok {\n\t\t\t\t\tv := fi.MapIndex(key)\n\t\t\t\t\twritePrefix(en, w, []byte(key.String()), f.requiredType)\n\t\t\t\t\terr := f.write(w, en, v)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\treturn err\n\t\t\t\t\t}\n\t\t\t\t} else {\n\t\t\t\t\tif elemField == nil {\n\t\t\t\t\t\tv := fi.MapIndex(key).Elem()\n\t\t\t\t\t\ttemp := compileField(reflect.StructField{Type: v.Type(), Index: []int{0}}, \"\")\n\t\t\t\t\t\tif f, ok := temp.(field); ok {\n\t\t\t\t\t\t\twritePrefix(en, w, []byte(key.String()), f.requiredType)\n\t\t\t\t\t\t\terr := f.write(w, en, v)\n\t\t\t\t\t\t\tif err != nil {\n\t\t\t\t\t\t\t\treturn err\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t} else {\n\t\t\t\t\t\t\twritePrefix(en, w, []byte(key.String()), 
10)\n\t\t\t\t\t\t\tfs := temp.(fieldStruct)\n\t\t\t\t\t\t\terr := write(w, en, fs.m, v)\n\t\t\t\t\t\t\tif err != nil {\n\t\t\t\t\t\t\t\treturn err\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\t\t} else {\n\t\t\t\t\t\twritePrefix(en, w, []byte(key.String()), 10)\n\t\t\t\t\t\tfs := elemField.(fieldStruct)\n\t\t\t\t\t\tv := fi.MapIndex(key)\n\t\t\t\t\t\terr := write(w, en, fs.m, v)\n\t\t\t\t\t\tif err != nil {\n\t\t\t\t\t\t\treturn err\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\n\t\t\t\t}\n\t\t\t}\n\t\t\tbs := en.b[:1]\n\t\t\tbs[0] = 0\n\t\t\t_, err := w.Write(bs)\n\t\t\treturn err\n\t\t}\n\t\tf.read = func(r io.Reader, de *msgDecoder, fi reflect.Value) error {\n\n\t\t\tma := reflect.MakeMap(sf.Type)\n\n\t\t\tname, t, err := readPrefix(r, de)\n\t\t\tfor ; t != 0; name, t, err = readPrefix(r, de) {\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tkeyVal := reflect.ValueOf(name)\n\n\t\t\t\tvar val reflect.Value\n\t\t\t\tif f, ok := elemField.(field); ok {\n\t\t\t\t\tval = reflect.New(elem)\n\t\t\t\t\terr := f.read(r, de, val)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\treturn err\n\t\t\t\t\t}\n\t\t\t\t} else {\n\t\t\t\t\tif elemField == nil {\n\t\t\t\t\t\tv, err := fallbackRead(r, de)\n\t\t\t\t\t\tif err != nil {\n\t\t\t\t\t\t\treturn err\n\t\t\t\t\t\t}\n\t\t\t\t\t\tval = reflect.ValueOf(v)\n\t\t\t\t\t} else {\n\t\t\t\t\t\tval = reflect.New(elem)\n\t\t\t\t\t\tfs := elemField.(fieldStruct)\n\t\t\t\t\t\terr := read(r, de, fs.m, val)\n\t\t\t\t\t\tif err != nil {\n\t\t\t\t\t\t\treturn err\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tma.SetMapIndex(keyVal, val)\n\t\t\t}\n\t\t\tfi.Set(ma)\n\t\t\treturn nil\n\t\t}\n\tcase reflect.Slice:\n\t\tf.requiredType = 9\n\t\telem := sf.Type.Elem()\n\t\tswitch elem.Kind() {\n\t\tcase reflect.Uint8: //Short-cut for byte arrays\n\t\t\tf.requiredType = 7\n\t\t\tf.write = func(w io.Writer, en *msgEncoder, fi reflect.Value) error {\n\t\t\t\tl := fi.Len()\n\t\t\t\tbs := en.b[:4]\n\t\t\t\tbinary.BigEndian.PutUint32(bs, uint32(l))\n\t\t\t\t_, err 
:= w.Write(bs)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\t_, err = w.Write(fi.Bytes())\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tf.read = func(r io.Reader, de *msgDecoder, fi reflect.Value) error {\n\t\t\t\tbs := de.b[:4]\n\t\t\t\t_, err := r.Read(bs)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tl := binary.BigEndian.Uint32(bs)\n\t\t\t\tout := make([]byte, l)\n\t\t\t\t_, err = r.Read(out)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn nil\n\t\t\t\t}\n\t\t\t\tfi.SetBytes(out)\n\t\t\t\treturn nil\n\t\t\t}\n\t\tcase reflect.Int32: //Short-cut for int32 arrays\n\t\t\tf.requiredType = 11\n\t\t\tf.write = func(w io.Writer, en *msgEncoder, fi reflect.Value) error {\n\t\t\t\tl := fi.Len()\n\t\t\t\tbs := en.b[:4]\n\t\t\t\tbinary.BigEndian.PutUint32(bs, uint32(l))\n\t\t\t\t_, err := w.Write(bs)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tdata := fi.Interface().([]int32)\n\t\t\t\tfor i := range data {\n\t\t\t\t\tbinary.BigEndian.PutUint32(bs, uint32(data[i]))\n\t\t\t\t\t_, err := w.Write(bs)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\treturn err\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tf.read = func(r io.Reader, de *msgDecoder, fi reflect.Value) error {\n\t\t\t\tbs := de.b[:4]\n\t\t\t\t_, err := r.Read(bs)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tl := binary.BigEndian.Uint32(bs)\n\t\t\t\tout := make([]int32, l)\n\t\t\t\tfor i := range out {\n\t\t\t\t\t_, err := r.Read(bs)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\treturn err\n\t\t\t\t\t}\n\t\t\t\t\tout[i] = int32(binary.BigEndian.Uint32(bs))\n\t\t\t\t}\n\t\t\t\tfi.Set(reflect.ValueOf(out))\n\t\t\t\treturn nil\n\t\t\t}\n\t\tdefault:\n\t\t\tname := \"slice:\" + sf.Name\n\t\t\telemField := compileField(reflect.StructField{Type: elem, Index: []int{0}}, name)\n\t\t\tf.write = func(w io.Writer, en *msgEncoder, fi reflect.Value) error {\n\t\t\t\tl := fi.Len()\n\t\t\t\tbs := en.b[:5]\n\t\t\t\tbinary.BigEndian.PutUint32(bs[1:], 
uint32(l))\n\t\t\t\tif f, ok := elemField.(field); ok {\n\t\t\t\t\tbs[0] = f.requiredType\n\t\t\t\t} else {\n\t\t\t\t\tbs[0] = 10\n\t\t\t\t}\n\t\t\t\t_, err := w.Write(bs)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tif f, ok := elemField.(field); ok {\n\t\t\t\t\tfor i := 0; i < l; i++ {\n\t\t\t\t\t\tv := fi.Index(i)\n\t\t\t\t\t\terr := f.write(w, en, v)\n\t\t\t\t\t\tif err != nil {\n\t\t\t\t\t\t\treturn err\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t} else {\n\t\t\t\t\tf := elemField.(fieldStruct)\n\t\t\t\t\tfor i := 0; i < l; i++ {\n\t\t\t\t\t\tv := fi.Index(i)\n\t\t\t\t\t\terr := write(w, en, f.m, v)\n\t\t\t\t\t\tif err != nil {\n\t\t\t\t\t\t\treturn err\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\treturn nil\n\t\t\t}\n\t\t\tf.read = func(r io.Reader, de *msgDecoder, fi reflect.Value) error {\n\t\t\t\tbs := de.b[:5]\n\t\t\t\t_, err := r.Read(bs)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tif f, ok := elemField.(field); ok {\n\t\t\t\t\tif bs[0] != f.requiredType {\n\t\t\t\t\t\treturn ErrorIncorrectType\n\t\t\t\t\t}\n\t\t\t\t} else {\n\t\t\t\t\tif bs[0] != 10 {\n\t\t\t\t\t\treturn ErrorIncorrectType\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tl := int(binary.BigEndian.Uint32(bs[1:]))\n\t\t\t\tval := reflect.MakeSlice(sf.Type, l, l)\n\t\t\t\tif f, ok := elemField.(field); ok {\n\t\t\t\t\tfor i := 0; i < l; i++ {\n\t\t\t\t\t\tv := val.Index(i)\n\t\t\t\t\t\terr := f.read(r, de, v)\n\t\t\t\t\t\tif err != nil {\n\t\t\t\t\t\t\treturn err\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t} else {\n\t\t\t\t\tf := elemField.(fieldStruct)\n\t\t\t\t\tfor i := 0; i < l; i++ {\n\t\t\t\t\t\tv := val.Index(i)\n\t\t\t\t\t\terr := read(r, de, f.m, v)\n\t\t\t\t\t\tif err != nil {\n\t\t\t\t\t\t\treturn err\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tfi.Set(val)\n\t\t\t\treturn nil\n\t\t\t}\n\t\t}\n\tcase reflect.Float32:\n\t\tf.requiredType = 5\n\t\tf.write = encodeFloat32\n\t\tf.read = decodeFloat32\n\tcase reflect.Float64:\n\t\tf.requiredType = 6\n\t\tf.write 
= encodeFloat64\n\t\tf.read = decodeFloat64\n\tdefault:\n\t\tpanic(fmt.Errorf(\"Unhandled type %s for %s\", sf.Type.Kind().String(), sf.Name))\n\t}\n\treturn f\n}", "func (m *EdiscoverySearch) GetFieldDeserializers()(map[string]func(i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode)(error)) {\n res := m.Search.GetFieldDeserializers()\n res[\"additionalSources\"] = i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f.SetCollectionOfObjectValues(CreateDataSourceFromDiscriminatorValue , m.SetAdditionalSources)\n res[\"addToReviewSetOperation\"] = i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f.SetObjectValue(CreateEdiscoveryAddToReviewSetOperationFromDiscriminatorValue , m.SetAddToReviewSetOperation)\n res[\"custodianSources\"] = i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f.SetCollectionOfObjectValues(CreateDataSourceFromDiscriminatorValue , m.SetCustodianSources)\n res[\"dataSourceScopes\"] = i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f.SetEnumValue(ParseDataSourceScopes , m.SetDataSourceScopes)\n res[\"lastEstimateStatisticsOperation\"] = i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f.SetObjectValue(CreateEdiscoveryEstimateOperationFromDiscriminatorValue , m.SetLastEstimateStatisticsOperation)\n res[\"noncustodialSources\"] = i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f.SetCollectionOfObjectValues(CreateEdiscoveryNoncustodialDataSourceFromDiscriminatorValue , m.SetNoncustodialSources)\n return res\n}", "func Struct(rt reflect.Type, gens map[string]gopter.Gen) gopter.Gen {\n\tif rt.Kind() == reflect.Ptr {\n\t\trt = rt.Elem()\n\t}\n\tif rt.Kind() != reflect.Struct {\n\t\treturn Fail(rt)\n\t}\n\tfieldGens := []gopter.Gen{}\n\tfieldTypes := []reflect.Type{}\n\tassignable := reflect.New(rt).Elem()\n\tfor i := 0; i < rt.NumField(); i++ {\n\t\tfieldName := rt.Field(i).Name\n\t\tif !assignable.Field(i).CanSet() 
{\n\t\t\tcontinue\n\t\t}\n\n\t\tgen := gens[fieldName]\n\t\tif gen != nil {\n\t\t\tfieldGens = append(fieldGens, gen)\n\t\t\tfieldTypes = append(fieldTypes, rt.Field(i).Type)\n\t\t}\n\t}\n\n\tbuildStructType := reflect.FuncOf(fieldTypes, []reflect.Type{rt}, false)\n\tunbuildStructType := reflect.FuncOf([]reflect.Type{rt}, fieldTypes, false)\n\n\tbuildStructFunc := reflect.MakeFunc(buildStructType, func(args []reflect.Value) []reflect.Value {\n\t\tresult := reflect.New(rt)\n\t\tfor i := 0; i < rt.NumField(); i++ {\n\t\t\tif _, ok := gens[rt.Field(i).Name]; !ok {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif !assignable.Field(i).CanSet() {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tresult.Elem().Field(i).Set(args[0])\n\t\t\targs = args[1:]\n\t\t}\n\t\treturn []reflect.Value{result.Elem()}\n\t})\n\tunbuildStructFunc := reflect.MakeFunc(unbuildStructType, func(args []reflect.Value) []reflect.Value {\n\t\ts := args[0]\n\t\tresults := []reflect.Value{}\n\t\tfor i := 0; i < s.NumField(); i++ {\n\t\t\tif _, ok := gens[rt.Field(i).Name]; !ok {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif !assignable.Field(i).CanSet() {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tresults = append(results, s.Field(i))\n\t\t}\n\t\treturn results\n\t})\n\n\treturn gopter.DeriveGen(\n\t\tbuildStructFunc.Interface(),\n\t\tunbuildStructFunc.Interface(),\n\t\tfieldGens...,\n\t)\n}", "func typeFields(t reflect.Type) []field {\n\t// Anonymous fields to explore at the current level and the next.\n\tcurrent := []field{}\n\tnext := []field{{typ: t}}\n\n\t// Count of queued names for current level and the next.\n\tcount := map[reflect.Type]int{}\n\tnextCount := map[reflect.Type]int{}\n\n\t// Types already visited at an earlier level.\n\tvisited := map[reflect.Type]bool{}\n\n\t// Fields found.\n\tvar fields []field\n\n\tfor len(next) > 0 {\n\t\tcurrent, next = next, current[:0]\n\t\tcount, nextCount = nextCount, map[reflect.Type]int{}\n\n\t\tfor _, f := range current {\n\t\t\tif visited[f.typ] 
{\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tvisited[f.typ] = true\n\n\t\t\t// Scan f.typ for fields to include.\n\t\t\tfor i := 0; i < f.typ.NumField(); i++ {\n\t\t\t\tsf := f.typ.Field(i)\n\t\t\t\tisUnexported := sf.PkgPath != \"\"\n\t\t\t\tif sf.Anonymous {\n\t\t\t\t\tt := sf.Type\n\t\t\t\t\tif isUnexported && t.Kind() != reflect.Struct {\n\t\t\t\t\t\t// Ignore embedded fields of unexported non-struct types.\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t}\n\t\t\t\t\t// Do not ignore embedded fields of unexported struct types\n\t\t\t\t\t// since they may have exported fields.\n\t\t\t\t} else if isUnexported {\n\t\t\t\t\t// Ignore unexported non-embedded fields.\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\n\t\t\t\tindex := make([]int, len(f.index)+1)\n\t\t\t\tcopy(index, f.index)\n\t\t\t\tindex[len(f.index)] = i\n\n\t\t\t\tft := sf.Type\n\n\t\t\t\t// Record found field and index sequence.\n\t\t\t\tif !sf.Anonymous || ft.Kind() != reflect.Struct {\n\t\t\t\t\tfields = append(fields, field{\n\t\t\t\t\t\tname: sf.Name,\n\t\t\t\t\t\tindex: index,\n\t\t\t\t\t\ttyp: ft,\n\t\t\t\t\t})\n\t\t\t\t\tif count[f.typ] > 1 {\n\t\t\t\t\t\t// If there were multiple instances, add a second,\n\t\t\t\t\t\t// so that the annihilation code will see a duplicate.\n\t\t\t\t\t\t// It only cares about the distinction between 1 or 2,\n\t\t\t\t\t\t// so don't bother generating any more copies.\n\t\t\t\t\t\tfields = append(fields, fields[len(fields)-1])\n\t\t\t\t\t}\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\n\t\t\t\t// Record new anonymous struct to explore in next round.\n\t\t\t\tnextCount[ft]++\n\t\t\t\tif nextCount[ft] == 1 {\n\t\t\t\t\tnext = append(next, field{name: ft.Name(), index: index, typ: ft})\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\tsort.Sort(byIndex(fields))\n\n\treturn fields\n}", "func GenerateGoCode(preamble string, mainDefAddr string, includeDirectories []string, generate_tests bool) error {\n\n\toutDefs, version, err := XMLToFields(mainDefAddr, includeDirectories)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// merge 
enums together\n\tenums := make(map[string]*OutEnum)\n\tfor _, def := range outDefs {\n\t\tfor _, defEnum := range def.Enums {\n\t\t\tif _, ok := enums[defEnum.Name]; !ok {\n\t\t\t\tenums[defEnum.Name] = &OutEnum{\n\t\t\t\t\tName: defEnum.Name,\n\t\t\t\t\tDescription: defEnum.Description,\n\t\t\t\t}\n\t\t\t}\n\t\t\tenum := enums[defEnum.Name]\n\n\t\t\tfor _, v := range defEnum.Values {\n\t\t\t\tenum.Values = append(enum.Values, v)\n\t\t\t}\n\t\t}\n\t}\n\n\t// fill enum missing values\n\tfor _, enum := range enums {\n\t\tnextVal := 0\n\t\tfor _, v := range enum.Values {\n\t\t\tif v.Value != \"\" {\n\t\t\t\tnextVal, _ = strconv.Atoi(v.Value)\n\t\t\t\tnextVal++\n\t\t\t} else {\n\t\t\t\tv.Value = strconv.Itoa(nextVal)\n\t\t\t\tnextVal++\n\t\t\t}\n\t\t}\n\t}\n\n\t// get package name\n\t// remove underscores since they can lead to errors\n\t// (for instance, when package name ends with _test)\n\t_, inFile := filepath.Split(mainDefAddr)\n\tpkgName := strings.TrimSuffix(inFile, \".xml\")\n\n\t// dump\n\tif generate_tests {\n\t\treturn tplDialectTest.Execute(os.Stdout, map[string]interface{}{\n\t\t\t\"PkgName\": pkgName,\n\t\t\t\"Preamble\": preamble,\n\t\t\t\"Version\": func() int {\n\t\t\t\tret := int(version)\n\t\t\t\treturn ret\n\t\t\t}(),\n\t\t\t\"Defs\": outDefs,\n\t\t\t\"Enums\": enums,\n\t\t})\n\t} else {\n\t\treturn tplDialect.Execute(os.Stdout, map[string]interface{}{\n\t\t\t\"PkgName\": pkgName,\n\t\t\t\"Preamble\": preamble,\n\t\t\t\"Version\": func() int {\n\t\t\t\tret := int(version)\n\t\t\t\treturn ret\n\t\t\t}(),\n\t\t\t\"Defs\": outDefs,\n\t\t\t\"Enums\": enums,\n\t\t})\n\t}\n}", "func (s *DbRecorder) scanFields(ar Record) {\n\tv := reflect.Indirect(reflect.ValueOf(ar))\n\tt := v.Type()\n\tcount := t.NumField()\n\tkeys := make([]*field, 0, 2)\n\n\tfor i := 0; i < count; i++ {\n\t\tf := t.Field(i)\n\t\t// Skip fields with no tag.\n\t\tif len(f.Tag) == 0 {\n\t\t\tcontinue\n\t\t}\n\t\tsqtag := f.Tag.Get(\"stbl\")\n\t\tif len(sqtag) == 0 
{\n\t\t\tcontinue\n\t\t}\n\n\t\tparts := s.parseTag(f.Name, sqtag)\n\t\tfield := new(field)\n\t\tfield.name = f.Name\n\t\tfield.column = parts[0]\n\t\tfor _, part := range parts[1:] {\n\t\t\tpart = strings.TrimSpace(part)\n\t\t\tswitch part {\n\t\t\tcase \"PRIMARY_KEY\", \"PRIMARY KEY\":\n\t\t\t\tfield.isKey = true\n\t\t\t\tkeys = append(keys, field)\n\t\t\tcase \"AUTO_INCREMENT\", \"SERIAL\", \"AUTO INCREMENT\":\n\t\t\t\tfield.isAuto = true\n\t\t\t}\n\t\t}\n\t\ts.fields = append(s.fields, field)\n\t\ts.key = keys\n\t}\n}", "func (Builder) Fields() []ent.Field {\n\treturn nil\n}", "func (a *api) h_POST_orgs_orgId_fields(c *gin.Context) {\n\torgId, err := parseInt64Param(c, \"orgId\")\n\ta.logger.Debug(\"POST /orgs/\", orgId, \"/fields\")\n\tif a.errorResponse(c, err) {\n\t\treturn\n\t}\n\n\taCtx := a.getAuthContext(c)\n\tif a.errorResponse(c, aCtx.AuthZOrgAdmin(orgId)) {\n\t\treturn\n\t}\n\n\tvar mis OrgMetaInfoArr\n\tif a.errorResponse(c, bindAppJson(c, &mis)) {\n\t\treturn\n\t}\n\n\tfis := a.metaInfos2FieldInfos(mis, orgId)\n\tif a.errorResponse(c, a.Dc.InsertNewFields(orgId, fis)) {\n\t\treturn\n\t}\n\n\ta.logger.Info(\"New fields were added for orgId=\", orgId, \" \", fis)\n\tc.Status(http.StatusCreated)\n}", "func ReflectFieldsFq(\n\tt reflect.Type,\n\ttypeMap TypeMap,\n\texclude ExcludeFieldTag,\n) graphql.Fields {\n\tif t.Kind() != reflect.Struct {\n\t\tpanic(fmt.Sprintf(`ReflectFieldsFq can only work on struct types.\n\t\t\tReceived instead %s`, t.Kind()))\n\t}\n\tfields := make(graphql.Fields)\n\tfor i := 0; i < t.NumField(); i++ {\n\t\tf := t.Field(i)\n\t\tif includeField(f, exclude) {\n\t\t\tname := GqlName(GetFieldFirstTag(f, \"json\"))\n\t\t\tfields[string(name)] = ReflectFieldFq(name, f.Type, typeMap, exclude)\n\t\t}\n\t}\n\treturn fields\n}", "func (m *ProgramControl) GetFieldDeserializers()(map[string]func(i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode)(error)) {\n res := m.Entity.GetFieldDeserializers()\n 
res[\"controlId\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetStringValue()\n if err != nil {\n return err\n }\n if val != nil {\n m.SetControlId(val)\n }\n return nil\n }\n res[\"controlTypeId\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetStringValue()\n if err != nil {\n return err\n }\n if val != nil {\n m.SetControlTypeId(val)\n }\n return nil\n }\n res[\"createdDateTime\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetTimeValue()\n if err != nil {\n return err\n }\n if val != nil {\n m.SetCreatedDateTime(val)\n }\n return nil\n }\n res[\"displayName\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetStringValue()\n if err != nil {\n return err\n }\n if val != nil {\n m.SetDisplayName(val)\n }\n return nil\n }\n res[\"owner\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetObjectValue(CreateUserIdentityFromDiscriminatorValue)\n if err != nil {\n return err\n }\n if val != nil {\n m.SetOwner(val.(UserIdentityable))\n }\n return nil\n }\n res[\"program\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetObjectValue(CreateProgramFromDiscriminatorValue)\n if err != nil {\n return err\n }\n if val != nil {\n m.SetProgram(val.(Programable))\n }\n return nil\n }\n res[\"programId\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetStringValue()\n if err != nil {\n return err\n }\n if val != nil {\n m.SetProgramId(val)\n }\n return nil\n }\n res[\"resource\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := 
n.GetObjectValue(CreateProgramResourceFromDiscriminatorValue)\n if err != nil {\n return err\n }\n if val != nil {\n m.SetResource(val.(ProgramResourceable))\n }\n return nil\n }\n res[\"status\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetStringValue()\n if err != nil {\n return err\n }\n if val != nil {\n m.SetStatus(val)\n }\n return nil\n }\n return res\n}", "func (def *Definition) Fieldnames() []string {\n\ttypeList := make([]string, 0)\n\tt := TraverserMethods{EnterFunction: func(adaType IAdaType, parentType IAdaType, level int, x interface{}) error {\n\t\ttypeList = append(typeList, adaType.Name())\n\t\treturn nil\n\t}}\n\n\t_ = def.TraverseTypes(t, true, typeList)\n\treturn typeList\n}", "func (m *AccessPackageCatalog) GetFieldDeserializers()(map[string]func(i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode)(error)) {\n res := m.Entity.GetFieldDeserializers()\n res[\"accessPackages\"] = i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f.SetCollectionOfObjectValues(CreateAccessPackageFromDiscriminatorValue , m.SetAccessPackages)\n res[\"catalogType\"] = i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f.SetEnumValue(ParseAccessPackageCatalogType , m.SetCatalogType)\n res[\"createdDateTime\"] = i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f.SetTimeValue(m.SetCreatedDateTime)\n res[\"description\"] = i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f.SetStringValue(m.SetDescription)\n res[\"displayName\"] = i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f.SetStringValue(m.SetDisplayName)\n res[\"isExternallyVisible\"] = i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f.SetBoolValue(m.SetIsExternallyVisible)\n res[\"modifiedDateTime\"] = i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f.SetTimeValue(m.SetModifiedDateTime)\n res[\"state\"] = 
i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f.SetEnumValue(ParseAccessPackageCatalogState , m.SetState)\n return res\n}", "func generateMembers(fields []libtypes.Field) *C.GoMembers {\n\n\tmemberArray := make([]C.GoMember, 0)\n\tfor _, field := range fields {\n\n\t\t// Create a go member\n\t\tmember := C.GoMember{}\n\n\t\t// Name of field\n\t\tmemberName := C.CString(field.Name)\n\t\tdefer C.free(unsafe.Pointer(memberName))\n\n\t\t// Array information\n\t\tmember.is_array_ = C.bool(field.IsArray)\n\t\tmember.array_size_ = C.size_t(field.ArrayLen)\n\n\t\tswitch field.Type {\n\t\tcase \"int8\":\n\t\t\tmember.type_id_ = C.rosidl_typesupport_introspection_c__ROS_TYPE_INT8\n\t\tcase \"uint8\":\n\t\t\tmember.type_id_ = C.rosidl_typesupport_introspection_c__ROS_TYPE_UINT8\n\t\tcase \"int16\":\n\t\t\tmember.type_id_ = C.rosidl_typesupport_introspection_c__ROS_TYPE_INT16\n\t\tcase \"uint16\":\n\t\t\tmember.type_id_ = C.rosidl_typesupport_introspection_c__ROS_TYPE_UINT16\n\t\tcase \"int32\":\n\t\t\tmember.type_id_ = C.rosidl_typesupport_introspection_c__ROS_TYPE_INT32\n\t\tcase \"uint32\":\n\t\t\tmember.type_id_ = C.rosidl_typesupport_introspection_c__ROS_TYPE_UINT32\n\t\tcase \"int64\":\n\t\t\tmember.type_id_ = C.rosidl_typesupport_introspection_c__ROS_TYPE_INT64\n\t\tcase \"uint64\":\n\t\t\tmember.type_id_ = C.rosidl_typesupport_introspection_c__ROS_TYPE_UINT64\n\t\tcase \"float32\":\n\t\t\tmember.type_id_ = C.rosidl_typesupport_introspection_c__ROS_TYPE_FLOAT32\n\t\tcase \"float64\":\n\t\t\tmember.type_id_ = C.rosidl_typesupport_introspection_c__ROS_TYPE_DOUBLE\n\t\tcase \"string\":\n\t\t\tmember.type_id_ = C.rosidl_typesupport_introspection_c__ROS_TYPE_STRING\n\t\tcase \"bool\":\n\t\t\tmember.type_id_ = C.rosidl_typesupport_introspection_c__ROS_TYPE_BOOL\n\t\tcase \"char\":\n\t\t\tmember.type_id_ = C.rosidl_typesupport_introspection_c__ROS_TYPE_CHAR\n\t\tcase \"byte\":\n\t\t\tmember.type_id_ = 
C.rosidl_typesupport_introspection_c__ROS_TYPE_BYTE\n\t\t// Note: Time and Duration are builtin MESSAGE types\n\t\tdefault:\n\t\t\t// We need to generated nested fields\n\t\t\tmsgType, _ := newDynamicMessageTypeNested(field.Type, field.Package)\n\t\t\tmember.type_id_ = C.rosidl_typesupport_introspection_c__ROS_TYPE_MESSAGE\n\t\t\t// Member field takes a typesupport definition\n\t\t\tmember.members_ = msgType.rosType\n\t\t}\n\n\t\tmemberArray = append(memberArray, member)\n\t}\n\n\tmembers := C.GoMembers{}\n\n\tmembers.member_array = memberArray\n\n\treturn &members\n}", "func (m *ReportRoot) GetFieldDeserializers()(map[string]func(i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode)(error)) {\n res := m.Entity.GetFieldDeserializers()\n res[\"appCredentialSignInActivities\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetCollectionOfObjectValues(CreateAppCredentialSignInActivityFromDiscriminatorValue)\n if err != nil {\n return err\n }\n if val != nil {\n res := make([]AppCredentialSignInActivityable, len(val))\n for i, v := range val {\n if v != nil {\n res[i] = v.(AppCredentialSignInActivityable)\n }\n }\n m.SetAppCredentialSignInActivities(res)\n }\n return nil\n }\n res[\"applicationSignInDetailedSummary\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetCollectionOfObjectValues(CreateApplicationSignInDetailedSummaryFromDiscriminatorValue)\n if err != nil {\n return err\n }\n if val != nil {\n res := make([]ApplicationSignInDetailedSummaryable, len(val))\n for i, v := range val {\n if v != nil {\n res[i] = v.(ApplicationSignInDetailedSummaryable)\n }\n }\n m.SetApplicationSignInDetailedSummary(res)\n }\n return nil\n }\n res[\"authenticationMethods\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := 
n.GetObjectValue(CreateAuthenticationMethodsRootFromDiscriminatorValue)\n if err != nil {\n return err\n }\n if val != nil {\n m.SetAuthenticationMethods(val.(AuthenticationMethodsRootable))\n }\n return nil\n }\n res[\"credentialUserRegistrationDetails\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetCollectionOfObjectValues(CreateCredentialUserRegistrationDetailsFromDiscriminatorValue)\n if err != nil {\n return err\n }\n if val != nil {\n res := make([]CredentialUserRegistrationDetailsable, len(val))\n for i, v := range val {\n if v != nil {\n res[i] = v.(CredentialUserRegistrationDetailsable)\n }\n }\n m.SetCredentialUserRegistrationDetails(res)\n }\n return nil\n }\n res[\"dailyPrintUsage\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetCollectionOfObjectValues(CreatePrintUsageFromDiscriminatorValue)\n if err != nil {\n return err\n }\n if val != nil {\n res := make([]PrintUsageable, len(val))\n for i, v := range val {\n if v != nil {\n res[i] = v.(PrintUsageable)\n }\n }\n m.SetDailyPrintUsage(res)\n }\n return nil\n }\n res[\"dailyPrintUsageByPrinter\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetCollectionOfObjectValues(CreatePrintUsageByPrinterFromDiscriminatorValue)\n if err != nil {\n return err\n }\n if val != nil {\n res := make([]PrintUsageByPrinterable, len(val))\n for i, v := range val {\n if v != nil {\n res[i] = v.(PrintUsageByPrinterable)\n }\n }\n m.SetDailyPrintUsageByPrinter(res)\n }\n return nil\n }\n res[\"dailyPrintUsageByUser\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetCollectionOfObjectValues(CreatePrintUsageByUserFromDiscriminatorValue)\n if err != nil {\n return err\n }\n if val != nil {\n res := make([]PrintUsageByUserable, len(val))\n for i, v := range val {\n 
if v != nil {\n res[i] = v.(PrintUsageByUserable)\n }\n }\n m.SetDailyPrintUsageByUser(res)\n }\n return nil\n }\n res[\"dailyPrintUsageSummariesByPrinter\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetCollectionOfObjectValues(CreatePrintUsageByPrinterFromDiscriminatorValue)\n if err != nil {\n return err\n }\n if val != nil {\n res := make([]PrintUsageByPrinterable, len(val))\n for i, v := range val {\n if v != nil {\n res[i] = v.(PrintUsageByPrinterable)\n }\n }\n m.SetDailyPrintUsageSummariesByPrinter(res)\n }\n return nil\n }\n res[\"dailyPrintUsageSummariesByUser\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetCollectionOfObjectValues(CreatePrintUsageByUserFromDiscriminatorValue)\n if err != nil {\n return err\n }\n if val != nil {\n res := make([]PrintUsageByUserable, len(val))\n for i, v := range val {\n if v != nil {\n res[i] = v.(PrintUsageByUserable)\n }\n }\n m.SetDailyPrintUsageSummariesByUser(res)\n }\n return nil\n }\n res[\"monthlyPrintUsageByPrinter\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetCollectionOfObjectValues(CreatePrintUsageByPrinterFromDiscriminatorValue)\n if err != nil {\n return err\n }\n if val != nil {\n res := make([]PrintUsageByPrinterable, len(val))\n for i, v := range val {\n if v != nil {\n res[i] = v.(PrintUsageByPrinterable)\n }\n }\n m.SetMonthlyPrintUsageByPrinter(res)\n }\n return nil\n }\n res[\"monthlyPrintUsageByUser\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetCollectionOfObjectValues(CreatePrintUsageByUserFromDiscriminatorValue)\n if err != nil {\n return err\n }\n if val != nil {\n res := make([]PrintUsageByUserable, len(val))\n for i, v := range val {\n if v != nil {\n res[i] = v.(PrintUsageByUserable)\n }\n }\n 
m.SetMonthlyPrintUsageByUser(res)\n }\n return nil\n }\n res[\"monthlyPrintUsageSummariesByPrinter\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetCollectionOfObjectValues(CreatePrintUsageByPrinterFromDiscriminatorValue)\n if err != nil {\n return err\n }\n if val != nil {\n res := make([]PrintUsageByPrinterable, len(val))\n for i, v := range val {\n if v != nil {\n res[i] = v.(PrintUsageByPrinterable)\n }\n }\n m.SetMonthlyPrintUsageSummariesByPrinter(res)\n }\n return nil\n }\n res[\"monthlyPrintUsageSummariesByUser\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetCollectionOfObjectValues(CreatePrintUsageByUserFromDiscriminatorValue)\n if err != nil {\n return err\n }\n if val != nil {\n res := make([]PrintUsageByUserable, len(val))\n for i, v := range val {\n if v != nil {\n res[i] = v.(PrintUsageByUserable)\n }\n }\n m.SetMonthlyPrintUsageSummariesByUser(res)\n }\n return nil\n }\n res[\"security\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetObjectValue(CreateSecurityReportsRootFromDiscriminatorValue)\n if err != nil {\n return err\n }\n if val != nil {\n m.SetSecurity(val.(SecurityReportsRootable))\n }\n return nil\n }\n res[\"servicePrincipalSignInActivities\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetCollectionOfObjectValues(CreateServicePrincipalSignInActivityFromDiscriminatorValue)\n if err != nil {\n return err\n }\n if val != nil {\n res := make([]ServicePrincipalSignInActivityable, len(val))\n for i, v := range val {\n if v != nil {\n res[i] = v.(ServicePrincipalSignInActivityable)\n }\n }\n m.SetServicePrincipalSignInActivities(res)\n }\n return nil\n }\n res[\"sla\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := 
n.GetObjectValue(CreateServiceLevelAgreementRootFromDiscriminatorValue)\n if err != nil {\n return err\n }\n if val != nil {\n m.SetSla(val.(ServiceLevelAgreementRootable))\n }\n return nil\n }\n res[\"userCredentialUsageDetails\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetCollectionOfObjectValues(CreateUserCredentialUsageDetailsFromDiscriminatorValue)\n if err != nil {\n return err\n }\n if val != nil {\n res := make([]UserCredentialUsageDetailsable, len(val))\n for i, v := range val {\n if v != nil {\n res[i] = v.(UserCredentialUsageDetailsable)\n }\n }\n m.SetUserCredentialUsageDetails(res)\n }\n return nil\n }\n return res\n}", "func (m *Store) GetFieldDeserializers()(map[string]func(i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode)(error)) {\n res := m.Entity.GetFieldDeserializers()\n res[\"defaultLanguageTag\"] = i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f.SetStringValue(m.SetDefaultLanguageTag)\n res[\"groups\"] = i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f.SetCollectionOfObjectValues(CreateGroupFromDiscriminatorValue , m.SetGroups)\n res[\"languageTags\"] = i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f.SetCollectionOfPrimitiveValues(\"string\" , m.SetLanguageTags)\n res[\"sets\"] = i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f.SetCollectionOfObjectValues(CreateSetFromDiscriminatorValue , m.SetSets)\n return res\n}", "func generateGoTestBlock_Func(file *File, fe *feparser.FEFunc) []*StatementAndName {\n\t// Seed the random number generator with the hash of the\n\t// FEFunc, so that the numbers in the variable names\n\t// will stay the same as long as the FEFunc is the same.\n\t//rand.Seed(int64(MustHashAnyWithJSON(fe.CodeQL.Blocks)))\n\n\tchildren := make([]*StatementAndName, 0)\n\tfor blockIndex, block := range fe.CodeQL.Blocks {\n\t\tinps, outps, err := 
getIdentitiesByBlock_FEFunc(fe, block)\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\n\t\tfor inpIndex, inp := range inps {\n\t\t\tfor outpIndex, outp := range outps {\n\n\t\t\t\tchildBlock := generateGoChildBlock_Func(\n\t\t\t\t\tfile,\n\t\t\t\t\tfe,\n\t\t\t\t\tinp,\n\t\t\t\t\toutp,\n\t\t\t\t)\n\t\t\t\t{\n\t\t\t\t\tif childBlock != nil {\n\n\t\t\t\t\t\ttestFuncID := \"TaintStepTest_\" + feparser.FormatCodeQlName(fe.PkgPath+\"-\"+fe.Name) + Sf(\"_B%vI%vO%v\", blockIndex, inpIndex, outpIndex)\n\t\t\t\t\t\tenclosed := Func().Id(testFuncID).\n\t\t\t\t\t\t\tParamsFunc(\n\t\t\t\t\t\t\t\tfunc(group *Group) {\n\t\t\t\t\t\t\t\t\tgroup.Add(Id(\"sourceCQL\").Interface())\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t).\n\t\t\t\t\t\t\tInterface().\n\t\t\t\t\t\t\tAdd(childBlock)\n\n\t\t\t\t\t\tchildren = append(children, &StatementAndName{\n\t\t\t\t\t\t\tStatement: enclosed,\n\t\t\t\t\t\t\tTestFuncName: testFuncID,\n\t\t\t\t\t\t})\n\t\t\t\t\t} else {\n\t\t\t\t\t\tWarnf(Sf(\"NOTHING GENERATED; block %v, inp %v, outp %v\", blockIndex, inpIndex, outpIndex))\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\treturn children\n}", "func parseFields(criteria *measurev1.QueryRequest, metadata *commonv1.Metadata, groupByEntity bool) logical.UnresolvedPlan {\n\tprojFields := make([]*logical.Field, len(criteria.GetFieldProjection().GetNames()))\n\tfor i, fieldNameProj := range criteria.GetFieldProjection().GetNames() {\n\t\tprojFields[i] = logical.NewField(fieldNameProj)\n\t}\n\ttimeRange := criteria.GetTimeRange()\n\treturn indexScan(timeRange.GetBegin().AsTime(), timeRange.GetEnd().AsTime(), metadata,\n\t\tlogical.ToTags(criteria.GetTagProjection()), projFields, groupByEntity, criteria.GetCriteria())\n}", "func (op *metadataLookup) buildField() {\n\tlengthOfFields := len(op.fields)\n\top.executeCtx.Fields = make(field.Metas, lengthOfFields)\n\n\tidx := 0\n\tfor fieldID := range op.fields {\n\t\tf := op.fields[fieldID]\n\t\top.executeCtx.Fields[idx] = field.Meta{\n\t\t\tID: fieldID,\n\t\t\tType: 
f.DownSampling.GetFieldType(),\n\t\t\tName: f.DownSampling.FieldName(),\n\t\t}\n\t\tidx++\n\t}\n\t// first sort field by field id\n\top.executeCtx.SortFields()\n\t// after sort filed, build aggregation spec\n\top.executeCtx.DownSamplingSpecs = make(aggregation.AggregatorSpecs, lengthOfFields)\n\top.executeCtx.AggregatorSpecs = make(aggregation.AggregatorSpecs, lengthOfFields)\n\tfor fieldIdx, fieldMeta := range op.executeCtx.Fields {\n\t\tf := op.fields[fieldMeta.ID]\n\t\top.executeCtx.DownSamplingSpecs[fieldIdx] = f.DownSampling\n\t\top.executeCtx.AggregatorSpecs[fieldIdx] = f.Aggregator\n\t}\n}", "func handleSpecRecursive(module *common.Module, astFiles *AstFiles, spec interface{}, prefix, aliasPrefix, event string, iterator *common.StructField, dejavu map[string]bool) {\n\tif verbose {\n\t\tfmt.Printf(\"handleSpec spec: %+v, prefix: %s, aliasPrefix %s, event %s, iterator %+v\\n\", spec, prefix, aliasPrefix, event, iterator)\n\t}\n\n\tvar typeSpec *ast.TypeSpec\n\tvar structType *ast.StructType\n\tvar ok bool\n\tif typeSpec, ok = spec.(*ast.TypeSpec); !ok {\n\t\treturn\n\t}\n\tif structType, ok = typeSpec.Type.(*ast.StructType); !ok {\n\t\tlog.Printf(\"Don't know what to do with %s (%s)\", typeSpec.Name, spew.Sdump(typeSpec))\n\t\treturn\n\t}\n\n\tfor _, field := range structType.Fields.List {\n\t\tfieldCommentText := field.Comment.Text()\n\t\tfieldIterator := iterator\n\n\t\tvar tag reflect.StructTag\n\t\tif field.Tag != nil {\n\t\t\ttag = reflect.StructTag(field.Tag.Value[1 : len(field.Tag.Value)-1])\n\t\t}\n\n\t\tif e, ok := tag.Lookup(\"event\"); ok {\n\t\t\tevent = e\n\t\t\tif _, ok = module.EventTypes[e]; !ok {\n\t\t\t\tmodule.EventTypes[e] = common.NewEventTypeMetada()\n\t\t\t\tdejavu = make(map[string]bool) // clear dejavu map when it's a new event type\n\t\t\t}\n\t\t\tif e != \"*\" {\n\t\t\t\tmodule.EventTypes[e].Doc = fieldCommentText\n\t\t\t}\n\t\t}\n\n\t\tif isEmbedded := len(field.Names) == 0; isEmbedded {\n\t\t\tif fieldTag, found := 
tag.Lookup(\"field\"); found && fieldTag == \"-\" {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tident, _ := field.Type.(*ast.Ident)\n\t\t\tif ident == nil {\n\t\t\t\tif starExpr, ok := field.Type.(*ast.StarExpr); ok {\n\t\t\t\t\tident, _ = starExpr.X.(*ast.Ident)\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tif ident != nil {\n\t\t\t\tname := ident.Name\n\t\t\t\tif prefix != \"\" {\n\t\t\t\t\tname = prefix + \".\" + ident.Name\n\t\t\t\t}\n\n\t\t\t\tembedded := astFiles.LookupSymbol(ident.Name)\n\t\t\t\tif embedded != nil {\n\t\t\t\t\thandleEmbedded(module, ident.Name, prefix, event, field.Type)\n\t\t\t\t\thandleSpecRecursive(module, astFiles, embedded.Decl, name, aliasPrefix, event, fieldIterator, dejavu)\n\t\t\t\t} else {\n\t\t\t\t\tlog.Printf(\"failed to resolve symbol for %+v in %s\", ident.Name, pkgname)\n\t\t\t\t}\n\t\t\t}\n\t\t} else {\n\t\t\tfieldBasename := field.Names[0].Name\n\t\t\tif !unicode.IsUpper(rune(fieldBasename[0])) {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tif dejavu[fieldBasename] {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tvar opOverrides string\n\t\t\tvar fields []seclField\n\t\t\tif tags, err := structtag.Parse(string(tag)); err == nil && len(tags.Tags()) != 0 {\n\t\t\t\topOverrides, fields = parseTags(tags, typeSpec.Name.Name)\n\n\t\t\t\tif opOverrides == \"\" && fields == nil {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\n\t\t\t} else {\n\t\t\t\tfields = append(fields, seclField{name: fieldBasename})\n\t\t\t}\n\n\t\t\tfieldType, isPointer, isArray := getFieldIdentName(field.Type)\n\n\t\t\tprefixedFieldName := fieldBasename\n\t\t\tif prefix != \"\" {\n\t\t\t\tprefixedFieldName = fmt.Sprintf(\"%s.%s\", prefix, fieldBasename)\n\t\t\t}\n\n\t\t\tfor _, seclField := range fields {\n\t\t\t\thandleNonEmbedded(module, seclField, prefixedFieldName, event, fieldType, isPointer, isArray)\n\n\t\t\t\tif seclFieldIterator := seclField.iterator; seclFieldIterator != \"\" {\n\t\t\t\t\tfieldIterator = handleIterator(module, seclField, fieldType, seclFieldIterator, aliasPrefix, prefixedFieldName, event, 
fieldCommentText, opOverrides, isPointer, isArray)\n\t\t\t\t}\n\n\t\t\t\tif handler := seclField.handler; handler != \"\" {\n\n\t\t\t\t\thandleFieldWithHandler(module, seclField, aliasPrefix, prefix, prefixedFieldName, fieldType, seclField.containerStructName, event, fieldCommentText, opOverrides, handler, isPointer, isArray, fieldIterator)\n\n\t\t\t\t\tdelete(dejavu, fieldBasename)\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\n\t\t\t\tif verbose {\n\t\t\t\t\tlog.Printf(\"Don't know what to do with %s: %s\", fieldBasename, spew.Sdump(field.Type))\n\t\t\t\t}\n\n\t\t\t\tdejavu[fieldBasename] = true\n\n\t\t\t\tif len(fieldType) == 0 {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\n\t\t\t\talias := seclField.name\n\t\t\t\tif isBasicType(fieldType) {\n\t\t\t\t\thandleBasic(module, seclField, fieldBasename, alias, aliasPrefix, prefix, fieldType, event, opOverrides, fieldCommentText, seclField.containerStructName, fieldIterator, isArray)\n\t\t\t\t} else {\n\t\t\t\t\tspec := astFiles.LookupSymbol(fieldType)\n\t\t\t\t\tif spec != nil {\n\t\t\t\t\t\tnewPrefix, newAliasPrefix := fieldBasename, alias\n\n\t\t\t\t\t\tif prefix != \"\" {\n\t\t\t\t\t\t\tnewPrefix = prefix + \".\" + fieldBasename\n\t\t\t\t\t\t}\n\n\t\t\t\t\t\tif aliasPrefix != \"\" {\n\t\t\t\t\t\t\tnewAliasPrefix = aliasPrefix + \".\" + alias\n\t\t\t\t\t\t}\n\n\t\t\t\t\t\thandleSpecRecursive(module, astFiles, spec.Decl, newPrefix, newAliasPrefix, event, fieldIterator, dejavu)\n\t\t\t\t\t} else {\n\t\t\t\t\t\tlog.Printf(\"failed to resolve symbol for %+v in %s\", fieldType, pkgname)\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\tif !seclField.exposedAtEventRootOnly {\n\t\t\t\t\tdelete(dejavu, fieldBasename)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n}", "func (b *Builder) QueryFields(source reflect.Value, parent reflect.Value) (graphql.Fields, error) {\n\tresult := make(graphql.Fields, 0)\n\tif source.IsValid() && source.IsZero() {\n\t\tsource = reflect.New(source.Type())\n\t}\n\tnodes := b.buildObject(source, parent)\n\tfor _, node := range nodes {\n\t\tif 
node.skip {\n\t\t\tcontinue\n\t\t}\n\t\tif !node.source.CanSet() {\n\t\t\tcontinue\n\t\t}\n\t\tif node.inputOnly {\n\t\t\tcontinue\n\t\t}\n\t\tname := node.alias\n\t\tif name == \"\" {\n\t\t\tname = strcase.ToLowerCamel(node.name)\n\t\t}\n\t\tvar gType graphql.Type\n\t\tif node.isRelay {\n\t\t\tgType = b.buildConnection(node.source, parent)\n\t\t} else {\n\t\t\tgType = b.mapOutput(node.source, parent)\n\t\t}\n\t\tif gType == nil {\n\t\t\tcontinue\n\t\t}\n\t\tif node.required {\n\t\t\tgType = graphql.NewNonNull(gType)\n\t\t}\n\n\t\tfield := &graphql.Field{\n\t\t\tName: name,\n\t\t\tType: gType,\n\t\t\tDescription: node.description,\n\t\t\tResolve: node.resolver,\n\t\t\tArgs: node.resolverArgs,\n\t\t}\n\t\tresult[name] = field\n\t}\n\treturn result, nil\n}", "func XMLToFields(filePathXML string, includeDirectories []string) ([]*OutDefinition, uint, error) {\n\toutDefs, version, err := do(\"\", filePathXML, includeDirectories)\n\tversionInt, _ := strconv.Atoi(version)\n\tif err != nil {\n\t\treturn nil, 0, err\n\t}\n\treturn outDefs, uint(versionInt), nil\n}", "func (m *ParentLabelDetails) GetFieldDeserializers()(map[string]func(i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode)(error)) {\n res := make(map[string]func(i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode)(error))\n res[\"color\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetStringValue()\n if err != nil {\n return err\n }\n if val != nil {\n m.SetColor(val)\n }\n return nil\n }\n res[\"description\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetStringValue()\n if err != nil {\n return err\n }\n if val != nil {\n m.SetDescription(val)\n }\n return nil\n }\n res[\"id\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetStringValue()\n if err != nil {\n 
return err\n }\n if val != nil {\n m.SetId(val)\n }\n return nil\n }\n res[\"isActive\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetBoolValue()\n if err != nil {\n return err\n }\n if val != nil {\n m.SetIsActive(val)\n }\n return nil\n }\n res[\"name\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetStringValue()\n if err != nil {\n return err\n }\n if val != nil {\n m.SetName(val)\n }\n return nil\n }\n res[\"@odata.type\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetStringValue()\n if err != nil {\n return err\n }\n if val != nil {\n m.SetOdataType(val)\n }\n return nil\n }\n res[\"parent\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetObjectValue(CreateParentLabelDetailsFromDiscriminatorValue)\n if err != nil {\n return err\n }\n if val != nil {\n m.SetParent(val.(ParentLabelDetailsable))\n }\n return nil\n }\n res[\"sensitivity\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetInt32Value()\n if err != nil {\n return err\n }\n if val != nil {\n m.SetSensitivity(val)\n }\n return nil\n }\n res[\"tooltip\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetStringValue()\n if err != nil {\n return err\n }\n if val != nil {\n m.SetTooltip(val)\n }\n return nil\n }\n return res\n}", "func (m *AttackSimulationRoot) GetFieldDeserializers()(map[string]func(i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode)(error)) {\n res := m.Entity.GetFieldDeserializers()\n res[\"simulationAutomations\"] = i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f.SetCollectionOfObjectValues(CreateSimulationAutomationFromDiscriminatorValue , 
m.SetSimulationAutomations)\n res[\"simulations\"] = i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f.SetCollectionOfObjectValues(CreateSimulationFromDiscriminatorValue , m.SetSimulations)\n return res\n}", "func init() {\n\tgroupFields := schema.Group{}.Fields()\n\t_ = groupFields\n\t// groupDescTenant is the schema descriptor for tenant field.\n\tgroupDescTenant := groupFields[0].Descriptor()\n\t// group.TenantValidator is a validator for the \"tenant\" field. It is called by the builders before save.\n\tgroup.TenantValidator = groupDescTenant.Validators[0].(func(string) error)\n\t// groupDescName is the schema descriptor for name field.\n\tgroupDescName := groupFields[1].Descriptor()\n\t// group.NameValidator is a validator for the \"name\" field. It is called by the builders before save.\n\tgroup.NameValidator = groupDescName.Validators[0].(func(string) error)\n\t// groupDescType is the schema descriptor for type field.\n\tgroupDescType := groupFields[2].Descriptor()\n\t// group.TypeValidator is a validator for the \"type\" field. 
It is called by the builders before save.\n\tgroup.TypeValidator = groupDescType.Validators[0].(func(string) error)\n\t// groupDescCreatedAt is the schema descriptor for created_at field.\n\tgroupDescCreatedAt := groupFields[5].Descriptor()\n\t// group.DefaultCreatedAt holds the default value on creation for the created_at field.\n\tgroup.DefaultCreatedAt = groupDescCreatedAt.Default.(func() time.Time)\n\t// groupDescUpdatedAt is the schema descriptor for updated_at field.\n\tgroupDescUpdatedAt := groupFields[6].Descriptor()\n\t// group.DefaultUpdatedAt holds the default value on creation for the updated_at field.\n\tgroup.DefaultUpdatedAt = groupDescUpdatedAt.Default.(func() time.Time)\n\t// group.UpdateDefaultUpdatedAt holds the default value on update for the updated_at field.\n\tgroup.UpdateDefaultUpdatedAt = groupDescUpdatedAt.UpdateDefault.(func() time.Time)\n\tnodeFields := schema.Node{}.Fields()\n\t_ = nodeFields\n\t// nodeDescTenant is the schema descriptor for tenant field.\n\tnodeDescTenant := nodeFields[0].Descriptor()\n\t// node.TenantValidator is a validator for the \"tenant\" field. It is called by the builders before save.\n\tnode.TenantValidator = nodeDescTenant.Validators[0].(func(string) error)\n\t// nodeDescName is the schema descriptor for name field.\n\tnodeDescName := nodeFields[1].Descriptor()\n\t// node.NameValidator is a validator for the \"name\" field. It is called by the builders before save.\n\tnode.NameValidator = nodeDescName.Validators[0].(func(string) error)\n\t// nodeDescType is the schema descriptor for type field.\n\tnodeDescType := nodeFields[2].Descriptor()\n\t// node.TypeValidator is a validator for the \"type\" field. 
It is called by the builders before save.\n\tnode.TypeValidator = nodeDescType.Validators[0].(func(string) error)\n\t// nodeDescCreatedAt is the schema descriptor for created_at field.\n\tnodeDescCreatedAt := nodeFields[5].Descriptor()\n\t// node.DefaultCreatedAt holds the default value on creation for the created_at field.\n\tnode.DefaultCreatedAt = nodeDescCreatedAt.Default.(func() time.Time)\n\t// nodeDescUpdatedAt is the schema descriptor for updated_at field.\n\tnodeDescUpdatedAt := nodeFields[6].Descriptor()\n\t// node.DefaultUpdatedAt holds the default value on creation for the updated_at field.\n\tnode.DefaultUpdatedAt = nodeDescUpdatedAt.Default.(func() time.Time)\n\t// node.UpdateDefaultUpdatedAt holds the default value on update for the updated_at field.\n\tnode.UpdateDefaultUpdatedAt = nodeDescUpdatedAt.UpdateDefault.(func() time.Time)\n\tpermissionFields := schema.Permission{}.Fields()\n\t_ = permissionFields\n\t// permissionDescTenant is the schema descriptor for tenant field.\n\tpermissionDescTenant := permissionFields[0].Descriptor()\n\t// permission.TenantValidator is a validator for the \"tenant\" field. It is called by the builders before save.\n\tpermission.TenantValidator = permissionDescTenant.Validators[0].(func(string) error)\n\t// permissionDescName is the schema descriptor for name field.\n\tpermissionDescName := permissionFields[1].Descriptor()\n\t// permission.NameValidator is a validator for the \"name\" field. 
It is called by the builders before save.\n\tpermission.NameValidator = permissionDescName.Validators[0].(func(string) error)\n\t// permissionDescCreatedAt is the schema descriptor for created_at field.\n\tpermissionDescCreatedAt := permissionFields[3].Descriptor()\n\t// permission.DefaultCreatedAt holds the default value on creation for the created_at field.\n\tpermission.DefaultCreatedAt = permissionDescCreatedAt.Default.(func() time.Time)\n\t// permissionDescUpdatedAt is the schema descriptor for updated_at field.\n\tpermissionDescUpdatedAt := permissionFields[4].Descriptor()\n\t// permission.DefaultUpdatedAt holds the default value on creation for the updated_at field.\n\tpermission.DefaultUpdatedAt = permissionDescUpdatedAt.Default.(func() time.Time)\n\t// permission.UpdateDefaultUpdatedAt holds the default value on update for the updated_at field.\n\tpermission.UpdateDefaultUpdatedAt = permissionDescUpdatedAt.UpdateDefault.(func() time.Time)\n\trouteFields := schema.Route{}.Fields()\n\t_ = routeFields\n\t// routeDescTenant is the schema descriptor for tenant field.\n\trouteDescTenant := routeFields[0].Descriptor()\n\t// route.TenantValidator is a validator for the \"tenant\" field. It is called by the builders before save.\n\troute.TenantValidator = routeDescTenant.Validators[0].(func(string) error)\n\t// routeDescName is the schema descriptor for name field.\n\trouteDescName := routeFields[1].Descriptor()\n\t// route.NameValidator is a validator for the \"name\" field. It is called by the builders before save.\n\troute.NameValidator = routeDescName.Validators[0].(func(string) error)\n\t// routeDescURI is the schema descriptor for uri field.\n\trouteDescURI := routeFields[2].Descriptor()\n\t// route.URIValidator is a validator for the \"uri\" field. 
It is called by the builders before save.\n\troute.URIValidator = routeDescURI.Validators[0].(func(string) error)\n\t// routeDescCreatedAt is the schema descriptor for created_at field.\n\trouteDescCreatedAt := routeFields[5].Descriptor()\n\t// route.DefaultCreatedAt holds the default value on creation for the created_at field.\n\troute.DefaultCreatedAt = routeDescCreatedAt.Default.(func() time.Time)\n\t// routeDescUpdatedAt is the schema descriptor for updated_at field.\n\trouteDescUpdatedAt := routeFields[6].Descriptor()\n\t// route.DefaultUpdatedAt holds the default value on creation for the updated_at field.\n\troute.DefaultUpdatedAt = routeDescUpdatedAt.Default.(func() time.Time)\n\t// route.UpdateDefaultUpdatedAt holds the default value on update for the updated_at field.\n\troute.UpdateDefaultUpdatedAt = routeDescUpdatedAt.UpdateDefault.(func() time.Time)\n\tuserFields := schema.User{}.Fields()\n\t_ = userFields\n\t// userDescTenant is the schema descriptor for tenant field.\n\tuserDescTenant := userFields[0].Descriptor()\n\t// user.TenantValidator is a validator for the \"tenant\" field. It is called by the builders before save.\n\tuser.TenantValidator = userDescTenant.Validators[0].(func(string) error)\n\t// userDescUUID is the schema descriptor for uuid field.\n\tuserDescUUID := userFields[1].Descriptor()\n\t// user.UUIDValidator is a validator for the \"uuid\" field. 
It is called by the builders before save.\n\tuser.UUIDValidator = userDescUUID.Validators[0].(func(string) error)\n\t// userDescIsSuper is the schema descriptor for is_super field.\n\tuserDescIsSuper := userFields[3].Descriptor()\n\t// user.DefaultIsSuper holds the default value on creation for the is_super field.\n\tuser.DefaultIsSuper = userDescIsSuper.Default.(bool)\n\t// userDescCreatedAt is the schema descriptor for created_at field.\n\tuserDescCreatedAt := userFields[5].Descriptor()\n\t// user.DefaultCreatedAt holds the default value on creation for the created_at field.\n\tuser.DefaultCreatedAt = userDescCreatedAt.Default.(func() time.Time)\n\t// userDescUpdatedAt is the schema descriptor for updated_at field.\n\tuserDescUpdatedAt := userFields[6].Descriptor()\n\t// user.DefaultUpdatedAt holds the default value on creation for the updated_at field.\n\tuser.DefaultUpdatedAt = userDescUpdatedAt.Default.(func() time.Time)\n\t// user.UpdateDefaultUpdatedAt holds the default value on update for the updated_at field.\n\tuser.UpdateDefaultUpdatedAt = userDescUpdatedAt.UpdateDefault.(func() time.Time)\n}", "func (m *SolutionsRoot) GetFieldDeserializers()(map[string]func(i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode)(error)) {\n res := make(map[string]func(i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode)(error))\n res[\"businessScenarios\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetCollectionOfObjectValues(CreateBusinessScenarioFromDiscriminatorValue)\n if err != nil {\n return err\n }\n if val != nil {\n res := make([]BusinessScenarioable, len(val))\n for i, v := range val {\n if v != nil {\n res[i] = v.(BusinessScenarioable)\n }\n }\n m.SetBusinessScenarios(res)\n }\n return nil\n }\n res[\"@odata.type\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetStringValue()\n if 
err != nil {\n return err\n }\n if val != nil {\n m.SetOdataType(val)\n }\n return nil\n }\n res[\"virtualEvents\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetObjectValue(CreateVirtualEventsRootFromDiscriminatorValue)\n if err != nil {\n return err\n }\n if val != nil {\n m.SetVirtualEvents(val.(VirtualEventsRootable))\n }\n return nil\n }\n return res\n}", "func (s *Struct) generateEnums(dir string) error {\n\tfor _, f := range s.Fields {\n\t\tif f.Enum == nil {\n\t\t\tcontinue\n\t\t}\n\t\tif err := f.Enum.generate(dir); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}", "func IterFields(recurse bool, st reflect.Type, v reflect.Value, callback func(f reflect.Value, sf reflect.StructField, cmd ...string)) {\n\t// NOTE: if we're passed something that isn't a struct, then the program will\n\t// panic when we call NumField() as this is the reality of using reflection.\n\t//\n\t// we are relying on the consumer of this package to follow the instructions\n\t// given and to provide us with what we are expecting.\n\t//\n\t// so if we're not careful, then we violate the language type safety.\n\t// but we protect against this in the calling function by checking for a\n\t// struct before calling IterFields.\n\t//\n\tfor i := 0; i < v.NumField(); i++ {\n\t\tfield := v.Field(i)\n\n\t\t// we call Field() on the struct type so we can get a StructField type,\n\t\t// which we have to do in order to access the struct 'tags' on the field.\n\t\t//\n\t\t// it also gives us access to the field name so we can create the various\n\t\t// flags necessary (as well as determine the command that a user runs).\n\t\t//\n\t\tsf := st.Field(i)\n\n\t\tif field.Kind() == reflect.Struct {\n\t\t\t// when we see a struct we expect by convention for this to be a\n\t\t\t// 'command' that will have its own set of flags.\n\t\t\t//\n\t\t\tcmd := strings.ToLower(sf.Name)\n\t\t\tif _, ok := cmds[cmd]; !ok {\n\t\t\t\tcmds[cmd] = 
true\n\t\t\t}\n\n\t\t\t// we use CanInterface() because otherise if we were to call Interface()\n\t\t\t// on a field that was unexported, then the program would panic.\n\t\t\t//\n\t\t\tif recurse && field.CanInterface() {\n\t\t\t\t// we use Interface() to get the nested struct value as an interface{}.\n\t\t\t\t// this is done because if we called TypeOf on the field variable, then\n\t\t\t\t// we would end up with reflect.Value when really we need the nested\n\t\t\t\t// struct's concrete type definition (e.g. struct {...}).\n\t\t\t\t//\n\t\t\t\tst := reflect.TypeOf(field.Interface())\n\n\t\t\t\tfor i := 0; i < field.NumField(); i++ {\n\t\t\t\t\t// again, we get the field from the nested struct, as well as acquire\n\t\t\t\t\t// its StructField type for purposes already explained above.\n\t\t\t\t\t//\n\t\t\t\t\tfield := field.Field(i)\n\t\t\t\t\tst := st.Field(i)\n\n\t\t\t\t\t// because our callback function is going to attempt to set values on\n\t\t\t\t\t// these struct fields, we need to be sure they are 'settable' first.\n\t\t\t\t\t//\n\t\t\t\t\tif field.CanSet() {\n\t\t\t\t\t\tcallback(field, st, cmd)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t} else {\n\t\t\t// we check if recurse is false because we don't want our nested commands\n\t\t\t// to accidentally add the top-level fields into our command flagset and\n\t\t\t// thus -h/--help would show the top-level fields in the help output.\n\t\t\t//\n\t\t\t// also, because our callback function is going to attempt to set values\n\t\t\t// on these struct fields, we need to be sure they are 'settable' first.\n\t\t\t//\n\t\t\t//\n\t\t\tif !recurse && field.CanSet() {\n\t\t\t\tcallback(field, sf)\n\t\t\t}\n\t\t}\n\t}\n}", "func fieldsWithNames(f *ast.Field) (fields []*ast.Field) {\n\tif f == nil {\n\t\treturn nil\n\t}\n\n\tif len(f.Names) == 0 {\n\t\tfields = append(fields, &ast.Field{\n\t\t\tDoc: f.Doc,\n\t\t\tNames: []*ast.Ident{{Name: printIdentField(f)}},\n\t\t\tType: f.Type,\n\t\t\tTag: f.Tag,\n\t\t\tComment: 
f.Comment,\n\t\t})\n\t\treturn\n\t}\n\tfor _, ident := range f.Names {\n\t\tfields = append(fields, &ast.Field{\n\t\t\tDoc: f.Doc,\n\t\t\tNames: []*ast.Ident{ident},\n\t\t\tType: f.Type,\n\t\t\tTag: f.Tag,\n\t\t\tComment: f.Comment,\n\t\t})\n\t}\n\treturn\n}", "func fields(t reflect.Type) map[string]interface{} {\n\tfieldCache.RLock()\n\tfs := fieldCache.m[t]\n\tfieldCache.RUnlock()\n\n\t//Cached version exists\n\tif fs != nil {\n\t\treturn fs\n\t}\n\t//This is to prevent multiple goroutines computing the same thing\n\tfieldCache.Lock()\n\tvar sy *sync.WaitGroup\n\tif sy, ok := fieldCache.create[t]; ok {\n\t\tfieldCache.Unlock()\n\t\tsy.Wait()\n\t\treturn fields(t)\n\t}\n\tsy = &sync.WaitGroup{}\n\tfieldCache.create[t] = sy\n\tsy.Add(1)\n\tfieldCache.Unlock()\n\n\tfs = compileStruct(t)\n\n\tfieldCache.Lock()\n\tfieldCache.m[t] = fs\n\tfieldCache.Unlock()\n\tsy.Done()\n\treturn fs\n}", "func (m *Planner) GetFieldDeserializers()(map[string]func(i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode)(error)) {\n res := m.Entity.GetFieldDeserializers()\n res[\"buckets\"] = i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f.SetCollectionOfObjectValues(CreatePlannerBucketFromDiscriminatorValue , m.SetBuckets)\n res[\"plans\"] = i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f.SetCollectionOfObjectValues(CreatePlannerPlanFromDiscriminatorValue , m.SetPlans)\n res[\"tasks\"] = i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f.SetCollectionOfObjectValues(CreatePlannerTaskFromDiscriminatorValue , m.SetTasks)\n return res\n}", "func getFieldConstructor(e ast.Expr) string {\n\tswitch t := e.(type) {\n\tcase *ast.StarExpr:\n\t\tswitch t.X.(type) {\n\t\tcase *ast.StarExpr:\n\t\t\tpanic(\"Ponter on pointers is not supported in annotation struct\")\n\t\tcase *ast.ArrayType:\n\t\t\tpanic(\"Pointer on arrays is not supported in annotation struct\")\n\t\tdefault:\n\t\t\treturn \"&\" + 
getFieldConstructor(t.X)\n\t\t}\n\tcase *ast.ArrayType:\n\t\tswitch elemType := t.Elt.(type) {\n\t\tcase *ast.StarExpr:\n\t\t\tpanic(\"Array of pointers is not supported in annotation struct\")\n\t\tcase *ast.ArrayType:\n\t\t\tpanic(\"Array of arrays is not supported in annotation struct\")\n\t\tdefault:\n\t\t\treturn \"[]\" + getFieldConstructor(elemType)\n\t\t}\n\tcase *ast.Ident:\n\t\tswitch t.Name {\n\t\tcase \"int\", \"int8\", \"int16\", \"int32\", \"int64\",\n\t\t\t\"uint\", \"uint8\", \"uint16\", \"uint32\", \"uint64\",\n\t\t\t\"float32\", \"float64\", \"byte\", \"rune\", \"string\":\n\t\t\treturn t.Name + \"{\"\n\t\tcase \"complex64\", \"complex128\", \"uintptr\":\n\t\t\tpanic(\"Type '\" + t.Name + \"' is not supported in annotation struct\")\n\t\tdefault:\n\t\t\treturn t.Name + \"{\"\n\t\t}\n\tdefault:\n\t\tpanic(\"Unsupported field type in annotation\")\n\t}\n}", "func GenORMSetup(db *gorm.DB) {\n\n\t// relative to the models package, swith to ../controlers package\n\tfilename := filepath.Join(OrmPkgGenPath, \"setup.go\")\n\n\t// we should use go generate\n\tlog.Println(\"generating orm setup file : \" + filename)\n\n\tf, err := os.Create(filename)\n\tif err != nil {\n\t\tlog.Panic(err)\n\t}\n\n\t// create the list of structs\n\tvar structs []models.Struct\n\tdb.Find(&structs)\n\n\tLISTOFSTRUCT := \"\\n\"\n\n\tdeleteCalls := \"\"\n\n\tfor idx, _struct := range structs {\n\t\tif idx != 0 {\n\t\t\tLISTOFSTRUCT += \",\\n\"\n\t\t}\n\t\tLISTOFSTRUCT += fmt.Sprintf(\"\\t\\t&%sDB{}\", _struct.Name)\n\n\t\tdeleteCalls += fmt.Sprintf(\"\\tdb.Delete(&%sDB{})\\n\", _struct.Name)\n\n\t\tfmt.Printf(\"\t\torm.LoadDB%s(%ss, db)\\n\", _struct.Name, _struct.Name)\n\t}\n\tres := strings.ReplaceAll(template, \"{{LISTOFSTRUCT}}\", LISTOFSTRUCT)\n\n\tres = strings.ReplaceAll(res, \"{{Deletes}}\", deleteCalls)\n\n\tfmt.Fprintf(f, \"%s\", res)\n\n\tdefer f.Close()\n}", "func (g *CodeGenerator) Generate() error {\n\tif len(g.opts.FilePath) == 0 {\n\t\treturn errors.New(\"invalid 
file path\")\n\t}\n\n\tif len(g.opts.PackageName) == 0 {\n\t\treturn errors.New(\"invalid package name\")\n\t}\n\n\t// generate package\n\tg.P(\"package \", g.opts.PackageName)\n\tg.P()\n\n\t// generate import path\n\tg.P(\"import (\")\n\tfor _, path := range g.opts.ImportPath {\n\t\tg.P(\"\\t\\\"\", path, \"\\\"\")\n\t}\n\tg.P(\")\")\n\tg.P()\n\n\t// generate variables\n\tfor _, v := range g.opts.Variables {\n\t\tvariableLine := fmt.Sprintf(\"var\\t%-15s\\t%-15s\\t//%-15s\", v.name, v.tp, v.comment)\n\t\tg.P(variableLine)\n\t\tg.P()\n\t}\n\n\t// generate structs\n\tfor _, s := range g.opts.Structs {\n\t\t// struct comment\n\t\tif len(s.comment) > 0 {\n\t\t\tg.P(\"// \", s.comment)\n\t\t}\n\n\t\t// struct begin\n\t\tg.P(\"type \", s.name, \" struct {\")\n\n\t\t// struct fields\n\t\tfieldLines := make([]string, s.fieldRaw.Size())\n\t\tit := s.fieldRaw.Iterator()\n\t\tfor it.Next() {\n\t\t\tfieldRaw := it.Value().(*ExcelFieldRaw)\n\n\t\t\t// don't need import\n\t\t\tif !fieldRaw.imp {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tfieldLine := fmt.Sprintf(\"\\t%-15s\\t%-20s\\t%-20s\\t//%-10s\", it.Key(), fieldRaw.tp, fieldRaw.tag, fieldRaw.desc)\n\t\t\tfieldLines[fieldRaw.idx] = fieldLine\n\t\t}\n\n\t\t// print struct field in sort\n\t\tfor _, v := range fieldLines {\n\t\t\tif len(v) == 0 {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tg.P(v)\n\t\t}\n\n\t\t// struct end\n\t\tg.P(\"}\")\n\t\tg.P()\n\t}\n\n\t// generate functions\n\tfor _, f := range g.opts.Functions {\n\t\t// function comment\n\t\tif len(f.comment) > 0 {\n\t\t\tg.P(\"// \", f.comment)\n\t\t}\n\n\t\t// function receiver\n\t\tvar receiver string\n\t\tif len(f.receiver) > 0 {\n\t\t\treceiver = fmt.Sprintf(\"(e *%s)\", f.receiver)\n\t\t}\n\n\t\t// function parameters\n\t\tparameters := strings.Join(f.parameters, \", \")\n\n\t\t// function begin\n\t\tg.P(\"func \", receiver, \" \", f.name, \"(\", parameters, \") \", f.retType, \" {\")\n\n\t\t// function body\n\t\tg.P(\"\\t\", f.body)\n\n\t\t// function 
end\n\t\tg.P(\"}\")\n\t\tg.P()\n\t}\n\n\treturn ioutil.WriteFile(g.opts.FilePath, g.buf.Bytes(), 0666)\n}", "func (m *DeviceConfigurationAssignment) GetFieldDeserializers()(map[string]func(i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode)(error)) {\n res := m.Entity.GetFieldDeserializers()\n res[\"intent\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetEnumValue(ParseDeviceConfigAssignmentIntent)\n if err != nil {\n return err\n }\n if val != nil {\n m.SetIntent(val.(*DeviceConfigAssignmentIntent))\n }\n return nil\n }\n res[\"source\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetEnumValue(ParseDeviceAndAppManagementAssignmentSource)\n if err != nil {\n return err\n }\n if val != nil {\n m.SetSource(val.(*DeviceAndAppManagementAssignmentSource))\n }\n return nil\n }\n res[\"sourceId\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetStringValue()\n if err != nil {\n return err\n }\n if val != nil {\n m.SetSourceId(val)\n }\n return nil\n }\n res[\"target\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetObjectValue(CreateDeviceAndAppManagementAssignmentTargetFromDiscriminatorValue)\n if err != nil {\n return err\n }\n if val != nil {\n m.SetTarget(val.(DeviceAndAppManagementAssignmentTargetable))\n }\n return nil\n }\n return res\n}", "func (m *DeviceManagementConfigurationPolicy) GetFieldDeserializers()(map[string]func(i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode)(error)) {\n res := m.Entity.GetFieldDeserializers()\n res[\"assignments\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := 
n.GetCollectionOfObjectValues(CreateDeviceManagementConfigurationPolicyAssignmentFromDiscriminatorValue)\n if err != nil {\n return err\n }\n if val != nil {\n res := make([]DeviceManagementConfigurationPolicyAssignmentable, len(val))\n for i, v := range val {\n if v != nil {\n res[i] = v.(DeviceManagementConfigurationPolicyAssignmentable)\n }\n }\n m.SetAssignments(res)\n }\n return nil\n }\n res[\"createdDateTime\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetTimeValue()\n if err != nil {\n return err\n }\n if val != nil {\n m.SetCreatedDateTime(val)\n }\n return nil\n }\n res[\"creationSource\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetStringValue()\n if err != nil {\n return err\n }\n if val != nil {\n m.SetCreationSource(val)\n }\n return nil\n }\n res[\"description\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetStringValue()\n if err != nil {\n return err\n }\n if val != nil {\n m.SetDescription(val)\n }\n return nil\n }\n res[\"isAssigned\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetBoolValue()\n if err != nil {\n return err\n }\n if val != nil {\n m.SetIsAssigned(val)\n }\n return nil\n }\n res[\"lastModifiedDateTime\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetTimeValue()\n if err != nil {\n return err\n }\n if val != nil {\n m.SetLastModifiedDateTime(val)\n }\n return nil\n }\n res[\"name\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetStringValue()\n if err != nil {\n return err\n }\n if val != nil {\n m.SetName(val)\n }\n return nil\n }\n res[\"platforms\"] = func (n 
i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetEnumValue(ParseDeviceManagementConfigurationPlatforms)\n if err != nil {\n return err\n }\n if val != nil {\n m.SetPlatforms(val.(*DeviceManagementConfigurationPlatforms))\n }\n return nil\n }\n res[\"priorityMetaData\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetObjectValue(CreateDeviceManagementPriorityMetaDataFromDiscriminatorValue)\n if err != nil {\n return err\n }\n if val != nil {\n m.SetPriorityMetaData(val.(DeviceManagementPriorityMetaDataable))\n }\n return nil\n }\n res[\"roleScopeTagIds\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetCollectionOfPrimitiveValues(\"string\")\n if err != nil {\n return err\n }\n if val != nil {\n res := make([]string, len(val))\n for i, v := range val {\n if v != nil {\n res[i] = *(v.(*string))\n }\n }\n m.SetRoleScopeTagIds(res)\n }\n return nil\n }\n res[\"settingCount\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetInt32Value()\n if err != nil {\n return err\n }\n if val != nil {\n m.SetSettingCount(val)\n }\n return nil\n }\n res[\"settings\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetCollectionOfObjectValues(CreateDeviceManagementConfigurationSettingFromDiscriminatorValue)\n if err != nil {\n return err\n }\n if val != nil {\n res := make([]DeviceManagementConfigurationSettingable, len(val))\n for i, v := range val {\n if v != nil {\n res[i] = v.(DeviceManagementConfigurationSettingable)\n }\n }\n m.SetSettings(res)\n }\n return nil\n }\n res[\"technologies\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetEnumValue(ParseDeviceManagementConfigurationTechnologies)\n if err != 
nil {\n return err\n }\n if val != nil {\n m.SetTechnologies(val.(*DeviceManagementConfigurationTechnologies))\n }\n return nil\n }\n res[\"templateReference\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetObjectValue(CreateDeviceManagementConfigurationPolicyTemplateReferenceFromDiscriminatorValue)\n if err != nil {\n return err\n }\n if val != nil {\n m.SetTemplateReference(val.(DeviceManagementConfigurationPolicyTemplateReferenceable))\n }\n return nil\n }\n return res\n}", "func dumpFields(documentType IDocumentBase) {\n\thandleType := reflect.TypeOf(documentType)\n\tlog.Printf(\"reflect.Kind: %s\", handleType.Kind())\n\tlog.Printf(\"reflect.Kind: %s\", handleType.Elem().Kind())\n\n\thandleStructType := reflect.TypeOf(documentType)\n\tif handleStructType.Kind() == reflect.Ptr {\n\t\thandleStructType = handleType.Elem()\n\t}\n\tlog.Printf(\"reflect.handleStructType.Kind: %s\", handleStructType.Kind())\n\n\tfor i := 0; i < handleStructType.NumField(); i++ {\n\t\tfield := handleStructType.Field(i) // Get the field, returns https://golang.org/pkg/reflect/#StructField\n\t\tlog.Printf(\"dumpFields Name: %s\", field.Name)\n\t\tlog.Printf(\"dumpFields Tags: %s\", field.Tag)\n\t}\n}", "func (s CreateRouteOutput) MarshalFields(e protocol.FieldEncoder) error {\n\tif s.ApiKeyRequired != nil {\n\t\tv := *s.ApiKeyRequired\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"apiKeyRequired\", protocol.BoolValue(v), metadata)\n\t}\n\tif len(s.AuthorizationScopes) > 0 {\n\t\tv := s.AuthorizationScopes\n\n\t\tmetadata := protocol.Metadata{}\n\t\tls0 := e.List(protocol.BodyTarget, \"authorizationScopes\", metadata)\n\t\tls0.Start()\n\t\tfor _, v1 := range v {\n\t\t\tls0.ListAddValue(protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)})\n\t\t}\n\t\tls0.End()\n\n\t}\n\tif len(s.AuthorizationType) > 0 {\n\t\tv := s.AuthorizationType\n\n\t\tmetadata := 
protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"authorizationType\", protocol.QuotedValue{ValueMarshaler: v}, metadata)\n\t}\n\tif s.AuthorizerId != nil {\n\t\tv := *s.AuthorizerId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"authorizerId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.ModelSelectionExpression != nil {\n\t\tv := *s.ModelSelectionExpression\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"modelSelectionExpression\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.OperationName != nil {\n\t\tv := *s.OperationName\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"operationName\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif len(s.RequestModels) > 0 {\n\t\tv := s.RequestModels\n\n\t\tmetadata := protocol.Metadata{}\n\t\tms0 := e.Map(protocol.BodyTarget, \"requestModels\", metadata)\n\t\tms0.Start()\n\t\tfor k1, v1 := range v {\n\t\t\tms0.MapSetValue(k1, protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)})\n\t\t}\n\t\tms0.End()\n\n\t}\n\tif len(s.RequestParameters) > 0 {\n\t\tv := s.RequestParameters\n\n\t\tmetadata := protocol.Metadata{}\n\t\tms0 := e.Map(protocol.BodyTarget, \"requestParameters\", metadata)\n\t\tms0.Start()\n\t\tfor k1, v1 := range v {\n\t\t\tms0.MapSetFields(k1, v1)\n\t\t}\n\t\tms0.End()\n\n\t}\n\tif s.RouteId != nil {\n\t\tv := *s.RouteId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"routeId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.RouteKey != nil {\n\t\tv := *s.RouteKey\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"routeKey\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.RouteResponseSelectionExpression != nil {\n\t\tv := 
*s.RouteResponseSelectionExpression\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"routeResponseSelectionExpression\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.Target != nil {\n\t\tv := *s.Target\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"target\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\treturn nil\n}", "func (n *Node) Fields() []uintptr {\n\t// we store the offsets for the fields in type properties\n\ttprops := kit.Types.Properties(n.Type(), true) // true = makeNew\n\tpnm := \"__FieldOffs\"\n\tif foff, ok := tprops[pnm]; ok {\n\t\treturn foff.([]uintptr)\n\t}\n\tfoff := make([]uintptr, 0)\n\tkitype := KiType()\n\tFlatFieldsValueFunc(n.This, func(stru interface{}, typ reflect.Type, field reflect.StructField, fieldVal reflect.Value) bool {\n\t\tif fieldVal.Kind() == reflect.Struct && kit.EmbeddedTypeImplements(field.Type, kitype) {\n\t\t\tfoff = append(foff, field.Offset)\n\t\t}\n\t\treturn true\n\t})\n\ttprops[pnm] = foff\n\treturn foff\n}", "func (s GetFunctionConfigurationOutput) MarshalFields(e protocol.FieldEncoder) error {\n\tif s.CodeSha256 != nil {\n\t\tv := *s.CodeSha256\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"CodeSha256\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.CodeSize != nil {\n\t\tv := *s.CodeSize\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"CodeSize\", protocol.Int64Value(v), metadata)\n\t}\n\tif s.DeadLetterConfig != nil {\n\t\tv := s.DeadLetterConfig\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetFields(protocol.BodyTarget, \"DeadLetterConfig\", v, metadata)\n\t}\n\tif s.Description != nil {\n\t\tv := *s.Description\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"Description\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, 
metadata)\n\t}\n\tif s.Environment != nil {\n\t\tv := s.Environment\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetFields(protocol.BodyTarget, \"Environment\", v, metadata)\n\t}\n\tif s.FunctionArn != nil {\n\t\tv := *s.FunctionArn\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"FunctionArn\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.FunctionName != nil {\n\t\tv := *s.FunctionName\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"FunctionName\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.Handler != nil {\n\t\tv := *s.Handler\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"Handler\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.KMSKeyArn != nil {\n\t\tv := *s.KMSKeyArn\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"KMSKeyArn\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.LastModified != nil {\n\t\tv := *s.LastModified\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"LastModified\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif len(s.LastUpdateStatus) > 0 {\n\t\tv := s.LastUpdateStatus\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"LastUpdateStatus\", protocol.QuotedValue{ValueMarshaler: v}, metadata)\n\t}\n\tif s.LastUpdateStatusReason != nil {\n\t\tv := *s.LastUpdateStatusReason\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"LastUpdateStatusReason\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif len(s.LastUpdateStatusReasonCode) > 0 {\n\t\tv := s.LastUpdateStatusReasonCode\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"LastUpdateStatusReasonCode\", protocol.QuotedValue{ValueMarshaler: 
v}, metadata)\n\t}\n\tif s.Layers != nil {\n\t\tv := s.Layers\n\n\t\tmetadata := protocol.Metadata{}\n\t\tls0 := e.List(protocol.BodyTarget, \"Layers\", metadata)\n\t\tls0.Start()\n\t\tfor _, v1 := range v {\n\t\t\tls0.ListAddFields(v1)\n\t\t}\n\t\tls0.End()\n\n\t}\n\tif s.MasterArn != nil {\n\t\tv := *s.MasterArn\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"MasterArn\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.MemorySize != nil {\n\t\tv := *s.MemorySize\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"MemorySize\", protocol.Int64Value(v), metadata)\n\t}\n\tif s.RevisionId != nil {\n\t\tv := *s.RevisionId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"RevisionId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.Role != nil {\n\t\tv := *s.Role\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"Role\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif len(s.Runtime) > 0 {\n\t\tv := s.Runtime\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"Runtime\", protocol.QuotedValue{ValueMarshaler: v}, metadata)\n\t}\n\tif len(s.State) > 0 {\n\t\tv := s.State\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"State\", protocol.QuotedValue{ValueMarshaler: v}, metadata)\n\t}\n\tif s.StateReason != nil {\n\t\tv := *s.StateReason\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"StateReason\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif len(s.StateReasonCode) > 0 {\n\t\tv := s.StateReasonCode\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"StateReasonCode\", protocol.QuotedValue{ValueMarshaler: v}, metadata)\n\t}\n\tif s.Timeout != nil {\n\t\tv := *s.Timeout\n\n\t\tmetadata := 
protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"Timeout\", protocol.Int64Value(v), metadata)\n\t}\n\tif s.TracingConfig != nil {\n\t\tv := s.TracingConfig\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetFields(protocol.BodyTarget, \"TracingConfig\", v, metadata)\n\t}\n\tif s.Version != nil {\n\t\tv := *s.Version\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"Version\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.VpcConfig != nil {\n\t\tv := s.VpcConfig\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetFields(protocol.BodyTarget, \"VpcConfig\", v, metadata)\n\t}\n\treturn nil\n}", "func (c *TypeConverter) GenStructConverter(\n\tfromFields []*compile.FieldSpec,\n\ttoFields []*compile.FieldSpec,\n\tfieldMap map[string]FieldMapperEntry,\n) error {\n\t// Add compiled FieldSpecs to the FieldMapperEntry\n\tfieldMap = addSpecToMap(fieldMap, fromFields, \"\")\n\t// Check for vlaues not populated recursively by addSpecToMap\n\tfor k, v := range fieldMap {\n\t\tif fieldMap[k].Field == nil {\n\t\t\treturn errors.Errorf(\n\t\t\t\t\"Failed to find field ( %s ) for transform.\",\n\t\t\t\tv.QualifiedName,\n\t\t\t)\n\t\t}\n\t}\n\n\tc.useRecurGen = c.isRecursiveStruct(toFields) || c.isRecursiveStruct(fromFields)\n\n\tif c.useRecurGen && len(fieldMap) != 0 {\n\t\tc.append(\"inOriginal := in; _ = inOriginal\")\n\t\tc.append(\"outOriginal := out; _ = outOriginal\")\n\t}\n\n\terr := c.genStructConverter(\"\", \"\", \"\", fromFields, toFields, fieldMap, nil)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}", "func loadFields(fieldBucket *bbolt.Bucket) (fields []field.Meta) {\n\tcursor := fieldBucket.Cursor()\n\tfor k, v := cursor.First(); k != nil; k, v = cursor.Next() {\n\t\tfields = append(fields, field.Meta{\n\t\t\tName: field.Name(k),\n\t\t\tID: field.ID(v[0]),\n\t\t\tType: field.Type(v[1]),\n\t\t})\n\t}\n\treturn\n}", "func compileStruct(t reflect.Type) map[string]interface{} {\n\tfs := 
map[string]interface{}{}\n\tcount := t.NumField()\n\tfor i := 0; i < count; i++ {\n\t\tf := t.Field(i)\n\t\tvar name string\n\t\tif !f.Anonymous {\n\t\t\tname = f.Name\n\t\t\tif tName := f.Tag.Get(\"nbt\"); len(tName) > 0 {\n\t\t\t\tname = tName\n\t\t\t}\n\t\t\tif name == \"ignore\" || f.Tag.Get(\"ignore\") == \"true\" {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t} else {\n\t\t\tname = f.Type.Name()\n\t\t\tif tName := f.Tag.Get(\"nbt\"); len(tName) > 0 {\n\t\t\t\tname = tName\n\t\t\t}\n\t\t\tif name == \"ignore\" || f.Tag.Get(\"ignore\") == \"true\" {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t}\n\t\tfs[name] = compileField(f, name)\n\t}\n\treturn fs\n}", "func (s Route) MarshalFields(e protocol.FieldEncoder) error {\n\tif s.ApiGatewayManaged != nil {\n\t\tv := *s.ApiGatewayManaged\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"apiGatewayManaged\", protocol.BoolValue(v), metadata)\n\t}\n\tif s.ApiKeyRequired != nil {\n\t\tv := *s.ApiKeyRequired\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"apiKeyRequired\", protocol.BoolValue(v), metadata)\n\t}\n\tif s.AuthorizationScopes != nil {\n\t\tv := s.AuthorizationScopes\n\n\t\tmetadata := protocol.Metadata{}\n\t\tls0 := e.List(protocol.BodyTarget, \"authorizationScopes\", metadata)\n\t\tls0.Start()\n\t\tfor _, v1 := range v {\n\t\t\tls0.ListAddValue(protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)})\n\t\t}\n\t\tls0.End()\n\n\t}\n\tif len(s.AuthorizationType) > 0 {\n\t\tv := s.AuthorizationType\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"authorizationType\", protocol.QuotedValue{ValueMarshaler: v}, metadata)\n\t}\n\tif s.AuthorizerId != nil {\n\t\tv := *s.AuthorizerId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"authorizerId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.ModelSelectionExpression != nil {\n\t\tv := *s.ModelSelectionExpression\n\n\t\tmetadata := 
protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"modelSelectionExpression\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.OperationName != nil {\n\t\tv := *s.OperationName\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"operationName\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.RequestModels != nil {\n\t\tv := s.RequestModels\n\n\t\tmetadata := protocol.Metadata{}\n\t\tms0 := e.Map(protocol.BodyTarget, \"requestModels\", metadata)\n\t\tms0.Start()\n\t\tfor k1, v1 := range v {\n\t\t\tms0.MapSetValue(k1, protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)})\n\t\t}\n\t\tms0.End()\n\n\t}\n\tif s.RequestParameters != nil {\n\t\tv := s.RequestParameters\n\n\t\tmetadata := protocol.Metadata{}\n\t\tms0 := e.Map(protocol.BodyTarget, \"requestParameters\", metadata)\n\t\tms0.Start()\n\t\tfor k1, v1 := range v {\n\t\t\tms0.MapSetFields(k1, v1)\n\t\t}\n\t\tms0.End()\n\n\t}\n\tif s.RouteId != nil {\n\t\tv := *s.RouteId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"routeId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.RouteKey != nil {\n\t\tv := *s.RouteKey\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"routeKey\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.RouteResponseSelectionExpression != nil {\n\t\tv := *s.RouteResponseSelectionExpression\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"routeResponseSelectionExpression\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.Target != nil {\n\t\tv := *s.Target\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"target\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\treturn nil\n}", "func (s Route) MarshalFields(e 
protocol.FieldEncoder) error {\n\tif s.ApiKeyRequired != nil {\n\t\tv := *s.ApiKeyRequired\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"apiKeyRequired\", protocol.BoolValue(v), metadata)\n\t}\n\tif len(s.AuthorizationScopes) > 0 {\n\t\tv := s.AuthorizationScopes\n\n\t\tmetadata := protocol.Metadata{}\n\t\tls0 := e.List(protocol.BodyTarget, \"authorizationScopes\", metadata)\n\t\tls0.Start()\n\t\tfor _, v1 := range v {\n\t\t\tls0.ListAddValue(protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)})\n\t\t}\n\t\tls0.End()\n\n\t}\n\tif len(s.AuthorizationType) > 0 {\n\t\tv := s.AuthorizationType\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"authorizationType\", protocol.QuotedValue{ValueMarshaler: v}, metadata)\n\t}\n\tif s.AuthorizerId != nil {\n\t\tv := *s.AuthorizerId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"authorizerId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.ModelSelectionExpression != nil {\n\t\tv := *s.ModelSelectionExpression\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"modelSelectionExpression\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.OperationName != nil {\n\t\tv := *s.OperationName\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"operationName\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif len(s.RequestModels) > 0 {\n\t\tv := s.RequestModels\n\n\t\tmetadata := protocol.Metadata{}\n\t\tms0 := e.Map(protocol.BodyTarget, \"requestModels\", metadata)\n\t\tms0.Start()\n\t\tfor k1, v1 := range v {\n\t\t\tms0.MapSetValue(k1, protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)})\n\t\t}\n\t\tms0.End()\n\n\t}\n\tif len(s.RequestParameters) > 0 {\n\t\tv := s.RequestParameters\n\n\t\tmetadata := protocol.Metadata{}\n\t\tms0 := e.Map(protocol.BodyTarget, 
\"requestParameters\", metadata)\n\t\tms0.Start()\n\t\tfor k1, v1 := range v {\n\t\t\tms0.MapSetFields(k1, v1)\n\t\t}\n\t\tms0.End()\n\n\t}\n\tif s.RouteId != nil {\n\t\tv := *s.RouteId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"routeId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.RouteKey != nil {\n\t\tv := *s.RouteKey\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"routeKey\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.RouteResponseSelectionExpression != nil {\n\t\tv := *s.RouteResponseSelectionExpression\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"routeResponseSelectionExpression\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.Target != nil {\n\t\tv := *s.Target\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"target\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\treturn nil\n}", "func StructFields() {\n\tv := vertex{1, 2}\n\tv.X = 11\n\n\tfmt.Println(v.X, v.Y)\n}", "func (m *ManagementTemplateStep) GetFieldDeserializers()(map[string]func(i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode)(error)) {\n res := m.Entity.GetFieldDeserializers()\n res[\"acceptedVersion\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetObjectValue(CreateManagementTemplateStepVersionFromDiscriminatorValue)\n if err != nil {\n return err\n }\n if val != nil {\n m.SetAcceptedVersion(val.(ManagementTemplateStepVersionable))\n }\n return nil\n }\n res[\"category\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetEnumValue(ParseManagementCategory)\n if err != nil {\n return err\n }\n if val != nil {\n m.SetCategory(val.(*ManagementCategory))\n }\n 
return nil\n }\n res[\"createdByUserId\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetStringValue()\n if err != nil {\n return err\n }\n if val != nil {\n m.SetCreatedByUserId(val)\n }\n return nil\n }\n res[\"createdDateTime\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetTimeValue()\n if err != nil {\n return err\n }\n if val != nil {\n m.SetCreatedDateTime(val)\n }\n return nil\n }\n res[\"description\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetStringValue()\n if err != nil {\n return err\n }\n if val != nil {\n m.SetDescription(val)\n }\n return nil\n }\n res[\"displayName\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetStringValue()\n if err != nil {\n return err\n }\n if val != nil {\n m.SetDisplayName(val)\n }\n return nil\n }\n res[\"lastActionByUserId\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetStringValue()\n if err != nil {\n return err\n }\n if val != nil {\n m.SetLastActionByUserId(val)\n }\n return nil\n }\n res[\"lastActionDateTime\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetTimeValue()\n if err != nil {\n return err\n }\n if val != nil {\n m.SetLastActionDateTime(val)\n }\n return nil\n }\n res[\"managementTemplate\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetObjectValue(CreateManagementTemplateFromDiscriminatorValue)\n if err != nil {\n return err\n }\n if val != nil {\n m.SetManagementTemplate(val.(ManagementTemplateable))\n }\n return nil\n }\n res[\"portalLink\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error 
{\n val, err := n.GetObjectValue(ie233ee762e29b4ba6970aa2a2efce4b7fde11697ca9ea81099d0f8269309c1be.CreateActionUrlFromDiscriminatorValue)\n if err != nil {\n return err\n }\n if val != nil {\n m.SetPortalLink(val.(ie233ee762e29b4ba6970aa2a2efce4b7fde11697ca9ea81099d0f8269309c1be.ActionUrlable))\n }\n return nil\n }\n res[\"priority\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetInt32Value()\n if err != nil {\n return err\n }\n if val != nil {\n m.SetPriority(val)\n }\n return nil\n }\n res[\"versions\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetCollectionOfObjectValues(CreateManagementTemplateStepVersionFromDiscriminatorValue)\n if err != nil {\n return err\n }\n if val != nil {\n res := make([]ManagementTemplateStepVersionable, len(val))\n for i, v := range val {\n if v != nil {\n res[i] = v.(ManagementTemplateStepVersionable)\n }\n }\n m.SetVersions(res)\n }\n return nil\n }\n return res\n}", "func (m *TemplateParameter) GetFieldDeserializers()(map[string]func(i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode)(error)) {\n res := make(map[string]func(i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode)(error))\n res[\"description\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetStringValue()\n if err != nil {\n return err\n }\n if val != nil {\n m.SetDescription(val)\n }\n return nil\n }\n res[\"displayName\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetStringValue()\n if err != nil {\n return err\n }\n if val != nil {\n m.SetDisplayName(val)\n }\n return nil\n }\n res[\"jsonAllowedValues\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetStringValue()\n if err != nil {\n 
return err\n }\n if val != nil {\n m.SetJsonAllowedValues(val)\n }\n return nil\n }\n res[\"jsonDefaultValue\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetStringValue()\n if err != nil {\n return err\n }\n if val != nil {\n m.SetJsonDefaultValue(val)\n }\n return nil\n }\n res[\"@odata.type\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetStringValue()\n if err != nil {\n return err\n }\n if val != nil {\n m.SetOdataType(val)\n }\n return nil\n }\n res[\"valueType\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetEnumValue(ParseManagementParameterValueType)\n if err != nil {\n return err\n }\n if val != nil {\n m.SetValueType(val.(*ManagementParameterValueType))\n }\n return nil\n }\n return res\n}", "func (s GetRouteOutput) MarshalFields(e protocol.FieldEncoder) error {\n\tif s.ApiKeyRequired != nil {\n\t\tv := *s.ApiKeyRequired\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"apiKeyRequired\", protocol.BoolValue(v), metadata)\n\t}\n\tif len(s.AuthorizationScopes) > 0 {\n\t\tv := s.AuthorizationScopes\n\n\t\tmetadata := protocol.Metadata{}\n\t\tls0 := e.List(protocol.BodyTarget, \"authorizationScopes\", metadata)\n\t\tls0.Start()\n\t\tfor _, v1 := range v {\n\t\t\tls0.ListAddValue(protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)})\n\t\t}\n\t\tls0.End()\n\n\t}\n\tif len(s.AuthorizationType) > 0 {\n\t\tv := s.AuthorizationType\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"authorizationType\", protocol.QuotedValue{ValueMarshaler: v}, metadata)\n\t}\n\tif s.AuthorizerId != nil {\n\t\tv := *s.AuthorizerId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"authorizerId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif 
s.ModelSelectionExpression != nil {\n\t\tv := *s.ModelSelectionExpression\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"modelSelectionExpression\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.OperationName != nil {\n\t\tv := *s.OperationName\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"operationName\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif len(s.RequestModels) > 0 {\n\t\tv := s.RequestModels\n\n\t\tmetadata := protocol.Metadata{}\n\t\tms0 := e.Map(protocol.BodyTarget, \"requestModels\", metadata)\n\t\tms0.Start()\n\t\tfor k1, v1 := range v {\n\t\t\tms0.MapSetValue(k1, protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)})\n\t\t}\n\t\tms0.End()\n\n\t}\n\tif len(s.RequestParameters) > 0 {\n\t\tv := s.RequestParameters\n\n\t\tmetadata := protocol.Metadata{}\n\t\tms0 := e.Map(protocol.BodyTarget, \"requestParameters\", metadata)\n\t\tms0.Start()\n\t\tfor k1, v1 := range v {\n\t\t\tms0.MapSetFields(k1, v1)\n\t\t}\n\t\tms0.End()\n\n\t}\n\tif s.RouteId != nil {\n\t\tv := *s.RouteId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"routeId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.RouteKey != nil {\n\t\tv := *s.RouteKey\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"routeKey\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.RouteResponseSelectionExpression != nil {\n\t\tv := *s.RouteResponseSelectionExpression\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"routeResponseSelectionExpression\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.Target != nil {\n\t\tv := *s.Target\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"target\", protocol.QuotedValue{ValueMarshaler: 
protocol.StringValue(v)}, metadata)\n\t}\n\treturn nil\n}", "func StructFields(v interface{}) (vType reflect.Type, vFields []*TField) {\n\tvar (\n\t\tfield reflect.StructField\n\t)\n\tvType = reflect.Indirect(reflect.ValueOf(v)).Type()\n\tnumFields := vType.NumField()\n\tvFields = make([]*TField, 0, numFields)\n\tfor i := 0; i < numFields; i++ {\n\t\tfield = vType.Field(i)\n\t\tfieldInfo := &TField{\n\t\t\tFname: field.Name,\n\t\t\tFtype: field.Type.String(),\n\t\t\tFkind: field.Type.Kind(),\n\t\t\tFtags: field.Tag,\n\t\t}\n\t\tif field.PkgPath == \"\" {\n\t\t\tfieldInfo.Fexported = true\n\t\t}\n\t\tvFields = append(vFields, fieldInfo)\n\t}\n\treturn\n}", "func dbFields(obj interface{}, skipKey bool) (table, key, fields string) {\n\tt := reflect.TypeOf(obj)\n\tlist := make([]string, 0, t.NumField())\n\tfor i := 0; i < t.NumField(); i++ {\n\t\tf := t.Field(i)\n\t\tif isTable := f.Tag.Get(\"table\"); len(isTable) > 0 {\n\t\t\ttable = isTable\n\t\t}\n\t\tk := f.Tag.Get(\"sql\")\n\t\tif f.Tag.Get(\"key\") == \"true\" {\n\t\t\tkey = k\n\t\t\tif skipKey {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t}\n\t\tif len(k) > 0 {\n\t\t\tlist = append(list, k)\n\t\t}\n\t}\n\tfields = strings.Join(list, \",\")\n\treturn\n}", "func init() {\n\tcodegen.RegisterPlugin(\"types\", \"gen\", nil, Generate)\n}", "func StructFields(t reflect.Type) string {\n\tfields := make([]string, 0)\n\tif t.Kind() == reflect.Struct {\n\t\tfor i := 0; i < t.NumField(); i ++ {\n\t\t\tname := t.Field(i).Name\n\t\t\tif t.Field(i).Type.Kind() == reflect.Struct {\n\t\t\t\ts := StructFields(t.Field(i).Type)\n\t\t\t\tf := strings.Split(s, \", \")\n\t\t\t\tleft := FirstLower(name)\n\t\t\t\tfor _, v := range f {\n\t\t\t\t\tfields = append(fields, fmt.Sprintf(\"%s.%s\", left, FirstLower(v)))\n\t\t\t\t}\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tfields = append(fields, FirstLower(name))\n\t\t}\n\t}\n\n\treturn strings.Join(fields, \", \")\n}" ]
[ "0.62704337", "0.6208788", "0.61161774", "0.588618", "0.5744872", "0.5581054", "0.556311", "0.55467135", "0.5487712", "0.5455513", "0.54416054", "0.54179704", "0.5410756", "0.54015136", "0.53891087", "0.5364398", "0.5253189", "0.5208028", "0.52043945", "0.5097442", "0.5096631", "0.50294745", "0.50262094", "0.5002781", "0.49794614", "0.49494854", "0.49378783", "0.4932478", "0.4906218", "0.4894362", "0.48918608", "0.48858726", "0.48758674", "0.48757905", "0.4873638", "0.48704875", "0.48592275", "0.48588952", "0.48574054", "0.484549", "0.48419362", "0.48415983", "0.48413324", "0.4841029", "0.48327285", "0.4829235", "0.48267707", "0.48250237", "0.48143035", "0.48040703", "0.48031336", "0.47986373", "0.47876844", "0.4775719", "0.47700143", "0.4756326", "0.47468916", "0.4741483", "0.4740756", "0.4738409", "0.47368366", "0.473303", "0.47219402", "0.472103", "0.4720735", "0.47144935", "0.4708526", "0.4703656", "0.46961218", "0.46927565", "0.46846217", "0.46844286", "0.4682837", "0.46777657", "0.46691662", "0.46686304", "0.46638605", "0.46600878", "0.46583408", "0.46303263", "0.46222466", "0.46187404", "0.46060646", "0.46004346", "0.45963866", "0.45916227", "0.45889625", "0.45851684", "0.45793283", "0.4576849", "0.4572388", "0.45655093", "0.4563145", "0.45609367", "0.45592597", "0.45538422", "0.45524934", "0.45482114", "0.45459345", "0.45450222" ]
0.7535413
0
genField generates field config for given AST
genField генерирует конфигурацию поля для заданного AST
func genField(field *ast.FieldDefinition) *jen.Statement { // // Generate config for field // // == Example input SDL // // interface Pet { // "name of the pet" // name(style: NameComponentsStyle = SHORT): String! // """ // givenName of the pet ★ // """ // givenName: String @deprecated(reason: "No longer supported; please use name field.") // } // // == Example output // // &graphql.Field{ // Name: "name", // Type: graphql.NonNull(graphql.String), // Description: "name of the pet", // DeprecationReason: "", // Args: FieldConfigArgument{ ... }, // } // // &graphql.Field{ // Name: "givenName", // Type: graphql.String, // Description: "givenName of the pet", // DeprecationReason: "No longer supported; please use name field.", // Args: FieldConfigArgument{ ... }, // } // return jen.Op("&").Qual(defsPkg, "Field").Values(jen.Dict{ jen.Id("Args"): genArguments(field.Arguments), jen.Id("DeprecationReason"): genDeprecationReason(field.Directives), jen.Id("Description"): genDescription(field), jen.Id("Name"): jen.Lit(field.Name.Value), jen.Id("Type"): genOutputTypeReference(field.Type), }) }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func genFields(fs []*ast.FieldDefinition) *jen.Statement {\n\t//\n\t// Generate config for fields\n\t//\n\t// == Example input SDL\n\t//\n\t// type Dog {\n\t// name(style: NameComponentsStyle = SHORT): String!\n\t// givenName: String @deprecated(reason: \"No longer supported; please use name field.\")\n\t// }\n\t//\n\t// == Example output\n\t//\n\t// graphql.Fields{\n\t// \"name\": graphql.Field{ ... },\n\t// \"givenName\": graphql.Field{ ... },\n\t// }\n\t//\n\treturn jen.Qual(defsPkg, \"Fields\").Values(jen.DictFunc(func(d jen.Dict) {\n\t\tfor _, f := range fs {\n\t\t\td[jen.Lit(f.Name.Value)] = genField(f)\n\t\t}\n\t}))\n}", "func (g *Generator) generate(typeName string) {\n\tfields := make([]Field, 0, 100)\n\tfor _, file := range g.pkg.files {\n\t\t// Set the state for this run of the walker.\n\t\tfile.typeName = typeName\n\t\tif file.file != nil {\n\t\t\tast.Inspect(file.file, file.genDecl)\n\t\t\tfields = append(fields, file.fields...)\n\t\t}\n\t}\n\tif len(fields) == 0 {\n\t\tlog.Fatalf(\"no fields defined for type %s\", typeName)\n\t}\n\t// TODO: for now we remove Default from the start (maybe move that to an option)\n\tlogicalTypeName := \"\\\"\" + strings.TrimPrefix(typeName, \"Default\") + \"\\\"\"\n\n\t// Generate code that will fail if the constants change value.\n\tg.Printf(\"func (d *%s) Serialize() ([]byte, error) {\\n\", typeName)\n\tg.Printf(\"wb := utils.NewWriteBufferByteBased(utils.WithByteOrderForByteBasedBuffer(binary.BigEndian))\\n\")\n\tg.Printf(\"\\tif err := d.SerializeWithWriteBuffer(context.Background(), wb); err != nil {\\n\")\n\tg.Printf(\"\\t\\treturn nil, err\\n\")\n\tg.Printf(\"\\t}\\n\")\n\tg.Printf(\"\\treturn wb.GetBytes(), nil\\n\")\n\tg.Printf(\"}\\n\\n\")\n\tg.Printf(\"func (d *%s) SerializeWithWriteBuffer(ctx context.Context, writeBuffer utils.WriteBuffer) error {\\n\", typeName)\n\tg.Printf(\"\\tif err := writeBuffer.PushContext(%s); err != nil {\\n\", logicalTypeName)\n\tg.Printf(\"\\t\\treturn 
err\\n\")\n\tg.Printf(\"\\t}\\n\")\n\tfor _, field := range fields {\n\t\tfieldType := field.fieldType\n\t\tif field.isDelegate {\n\t\t\tg.Printf(\"\\t\\t\\tif err := d.%s.SerializeWithWriteBuffer(ctx, writeBuffer); err != nil {\\n\", fieldType.(*ast.Ident).Name)\n\t\t\tg.Printf(\"\\t\\t\\t\\treturn err\\n\")\n\t\t\tg.Printf(\"\\t\\t\\t}\\n\")\n\t\t\tcontinue\n\t\t}\n\t\tfieldName := field.name\n\t\tfieldNameUntitled := \"\\\"\" + unTitle(fieldName) + \"\\\"\"\n\t\tif field.hasLocker != \"\" {\n\t\t\tg.Printf(\"if err := func()error {\\n\")\n\t\t\tg.Printf(\"\\td.\" + field.hasLocker + \".Lock()\\n\")\n\t\t\tg.Printf(\"\\tdefer d.\" + field.hasLocker + \".Unlock()\\n\")\n\t\t}\n\t\tneedsDereference := false\n\t\tif starFieldType, ok := fieldType.(*ast.StarExpr); ok {\n\t\t\tfieldType = starFieldType.X\n\t\t\tneedsDereference = true\n\t\t}\n\t\tif field.isStringer {\n\t\t\tif needsDereference {\n\t\t\t\tg.Printf(\"if d.%s != nil {\", field.name)\n\t\t\t}\n\t\t\tg.Printf(stringFieldSerialize, \"d.\"+field.name+\".String()\", fieldNameUntitled)\n\t\t\tif field.hasLocker != \"\" {\n\t\t\t\tg.Printf(\"\\treturn nil\\n\")\n\t\t\t\tg.Printf(\"}(); err != nil {\\n\")\n\t\t\t\tg.Printf(\"\\treturn err\\n\")\n\t\t\t\tg.Printf(\"}\\n\")\n\t\t\t}\n\t\t\tif needsDereference {\n\t\t\t\tg.Printf(\"}\\n\")\n\t\t\t}\n\t\t\tcontinue\n\t\t}\n\t\tswitch fieldType := fieldType.(type) {\n\t\tcase *ast.SelectorExpr:\n\t\t\t{\n\t\t\t\t// TODO: bit hacky but not sure how else we catch those ones\n\t\t\t\tx := fieldType.X\n\t\t\t\tsel := fieldType.Sel\n\t\t\t\txIdent, xIsIdent := x.(*ast.Ident)\n\t\t\t\tif xIsIdent {\n\t\t\t\t\tif xIdent.Name == \"atomic\" {\n\t\t\t\t\t\tif sel.Name == \"Uint32\" {\n\t\t\t\t\t\t\tg.Printf(uint32FieldSerialize, \"d.\"+field.name+\".Load()\", fieldNameUntitled)\n\t\t\t\t\t\t\tif field.hasLocker != \"\" {\n\t\t\t\t\t\t\t\tg.Printf(\"\\treturn nil\\n\")\n\t\t\t\t\t\t\t\tg.Printf(\"}(); err != nil {\\n\")\n\t\t\t\t\t\t\t\tg.Printf(\"\\treturn 
err\\n\")\n\t\t\t\t\t\t\t\tg.Printf(\"}\\n\")\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\tcontinue\n\t\t\t\t\t\t}\n\t\t\t\t\t\tif sel.Name == \"Uint64\" {\n\t\t\t\t\t\t\tg.Printf(uint64FieldSerialize, \"d.\"+field.name+\".Load()\", fieldNameUntitled)\n\t\t\t\t\t\t\tif field.hasLocker != \"\" {\n\t\t\t\t\t\t\t\tg.Printf(\"\\treturn nil\\n\")\n\t\t\t\t\t\t\t\tg.Printf(\"}(); err != nil {\\n\")\n\t\t\t\t\t\t\t\tg.Printf(\"\\treturn err\\n\")\n\t\t\t\t\t\t\t\tg.Printf(\"}\\n\")\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\tcontinue\n\t\t\t\t\t\t}\n\t\t\t\t\t\tif sel.Name == \"Int32\" {\n\t\t\t\t\t\t\tg.Printf(int32FieldSerialize, \"d.\"+field.name+\".Load()\", fieldNameUntitled)\n\t\t\t\t\t\t\tif field.hasLocker != \"\" {\n\t\t\t\t\t\t\t\tg.Printf(\"\\treturn nil\\n\")\n\t\t\t\t\t\t\t\tg.Printf(\"}(); err != nil {\\n\")\n\t\t\t\t\t\t\t\tg.Printf(\"\\treturn err\\n\")\n\t\t\t\t\t\t\t\tg.Printf(\"}\\n\")\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\tcontinue\n\t\t\t\t\t\t}\n\t\t\t\t\t\tif sel.Name == \"Bool\" {\n\t\t\t\t\t\t\tg.Printf(boolFieldSerialize, \"d.\"+field.name+\".Load()\", fieldNameUntitled)\n\t\t\t\t\t\t\tif field.hasLocker != \"\" {\n\t\t\t\t\t\t\t\tg.Printf(\"\\treturn nil\\n\")\n\t\t\t\t\t\t\t\tg.Printf(\"}(); err != nil {\\n\")\n\t\t\t\t\t\t\t\tg.Printf(\"\\treturn err\\n\")\n\t\t\t\t\t\t\t\tg.Printf(\"}\\n\")\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\tcontinue\n\t\t\t\t\t\t}\n\t\t\t\t\t\tif sel.Name == \"Value\" {\n\t\t\t\t\t\t\tg.Printf(serializableFieldTemplate, \"d.\"+field.name+\".Load()\", fieldNameUntitled)\n\t\t\t\t\t\t\tif field.hasLocker != \"\" {\n\t\t\t\t\t\t\t\tg.Printf(\"\\treturn nil\\n\")\n\t\t\t\t\t\t\t\tg.Printf(\"}(); err != nil {\\n\")\n\t\t\t\t\t\t\t\tg.Printf(\"\\treturn err\\n\")\n\t\t\t\t\t\t\t\tg.Printf(\"}\\n\")\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\tcontinue\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t\tif xIdent.Name == \"sync\" {\n\t\t\t\t\t\tfmt.Printf(\"\\t skipping field %s because it is %v.%v\\n\", fieldName, x, sel)\n\t\t\t\t\t\tif field.hasLocker != \"\" 
{\n\t\t\t\t\t\t\tg.Printf(\"\\treturn nil\\n\")\n\t\t\t\t\t\t\tg.Printf(\"}(); err != nil {\\n\")\n\t\t\t\t\t\t\tg.Printf(\"\\treturn err\\n\")\n\t\t\t\t\t\t\tg.Printf(\"}\\n\")\n\t\t\t\t\t\t}\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t\tg.Printf(serializableFieldTemplate, \"d.\"+field.name, fieldNameUntitled)\n\t\tcase *ast.IndexExpr:\n\t\t\tx := fieldType.X\n\t\t\tif fieldType, isxFieldSelector := x.(*ast.SelectorExpr); isxFieldSelector { // TODO: we need to refactor this so we can reuse...\n\t\t\t\txIdent, xIsIdent := fieldType.X.(*ast.Ident)\n\t\t\t\tsel := fieldType.Sel\n\t\t\t\tif xIsIdent && xIdent.Name == \"atomic\" && sel.Name == \"Pointer\" {\n\t\t\t\t\tg.Printf(atomicPointerFieldTemplate, \"d.\"+field.name, field.name, fieldNameUntitled)\n\t\t\t\t\tif field.hasLocker != \"\" {\n\t\t\t\t\t\tg.Printf(\"\\treturn nil\\n\")\n\t\t\t\t\t\tg.Printf(\"}(); err != nil {\\n\")\n\t\t\t\t\t\tg.Printf(\"\\treturn err\\n\")\n\t\t\t\t\t\tg.Printf(\"}\\n\")\n\t\t\t\t\t}\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t}\n\t\t\tfmt.Printf(\"no support yet for %#q\\n\", fieldType)\n\t\t\tcontinue\n\t\tcase *ast.Ident:\n\t\t\tswitch fieldType.Name {\n\t\t\tcase \"byte\":\n\t\t\t\tg.Printf(byteFieldSerialize, \"d.\"+field.name, fieldNameUntitled)\n\t\t\tcase \"int\":\n\t\t\t\tg.Printf(int64FieldSerialize, \"int64(d.\"+field.name+\")\", fieldNameUntitled)\n\t\t\tcase \"int32\":\n\t\t\t\tg.Printf(int32FieldSerialize, \"int32(d.\"+field.name+\")\", fieldNameUntitled)\n\t\t\tcase \"uint32\":\n\t\t\t\tg.Printf(uint32FieldSerialize, \"d.\"+field.name, fieldNameUntitled)\n\t\t\tcase \"bool\":\n\t\t\t\tg.Printf(boolFieldSerialize, \"d.\"+field.name, fieldNameUntitled)\n\t\t\tcase \"string\":\n\t\t\t\tg.Printf(stringFieldSerialize, \"d.\"+field.name, fieldNameUntitled)\n\t\t\tcase \"error\":\n\t\t\t\tg.Printf(errorFieldSerialize, \"d.\"+field.name, fieldNameUntitled)\n\t\t\tdefault:\n\t\t\t\tfmt.Printf(\"\\t no support implemented for Ident with type %v\\n\", 
fieldType)\n\t\t\t\tg.Printf(\"{\\n\")\n\t\t\t\tg.Printf(\"_value := fmt.Sprintf(\\\"%%v\\\", d.%s)\\n\", fieldName)\n\t\t\t\tg.Printf(stringFieldSerialize, \"_value\", fieldNameUntitled)\n\t\t\t\tg.Printf(\"}\\n\")\n\t\t\t}\n\t\tcase *ast.ArrayType:\n\t\t\tif eltType, ok := fieldType.Elt.(*ast.Ident); ok && eltType.Name == \"byte\" {\n\t\t\t\tg.Printf(\"if err := writeBuffer.WriteByteArray(%s, d.%s); err != nil {\\n\", fieldNameUntitled, field.name)\n\t\t\t\tg.Printf(\"\\treturn err\\n\")\n\t\t\t\tg.Printf(\"}\\n\")\n\t\t\t} else {\n\t\t\t\tg.Printf(\"if err := writeBuffer.PushContext(%s, utils.WithRenderAsList(true)); err != nil {\\n\\t\\treturn err\\n\\t}\\n\", fieldNameUntitled)\n\t\t\t\tg.Printf(\"for _, elem := range d.%s {\", field.name)\n\t\t\t\tswitch eltType := fieldType.Elt.(type) {\n\t\t\t\tcase *ast.SelectorExpr, *ast.StarExpr:\n\t\t\t\t\tg.Printf(\"\\n\\t\\tvar elem any = elem\\n\")\n\t\t\t\t\tg.Printf(serializableFieldTemplate, \"elem\", \"\\\"value\\\"\")\n\t\t\t\tcase *ast.Ident:\n\t\t\t\t\tswitch eltType.Name {\n\t\t\t\t\tcase \"int\":\n\t\t\t\t\t\tg.Printf(int64FieldSerialize, \"int64(d.\"+field.name+\")\", fieldNameUntitled)\n\t\t\t\t\tcase \"uint32\":\n\t\t\t\t\t\tg.Printf(uint32FieldSerialize, \"d.\"+field.name, fieldNameUntitled)\n\t\t\t\t\tcase \"bool\":\n\t\t\t\t\t\tg.Printf(boolFieldSerialize, \"elem\", \"\\\"\\\"\")\n\t\t\t\t\tcase \"string\":\n\t\t\t\t\t\tg.Printf(stringFieldSerialize, \"elem\", \"\\\"\\\"\")\n\t\t\t\t\tcase \"error\":\n\t\t\t\t\t\tg.Printf(errorFieldSerialize, \"elem\", \"\\\"\\\"\")\n\t\t\t\t\tdefault:\n\t\t\t\t\t\tfmt.Printf(\"\\t no support implemented for Ident within ArrayType for %v\\n\", fieldType)\n\t\t\t\t\t\tg.Printf(\"_value := fmt.Sprintf(\\\"%%v\\\", elem)\\n\")\n\t\t\t\t\t\tg.Printf(stringFieldSerialize, \"_value\", fieldNameUntitled)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tg.Printf(\"}\\n\")\n\t\t\t\tg.Printf(\"if err := writeBuffer.PopContext(%s, utils.WithRenderAsList(true)); err != nil {\\n\\t\\treturn 
err\\n\\t}\\n\", fieldNameUntitled)\n\t\t\t}\n\t\tcase *ast.MapType:\n\t\t\tg.Printf(\"if err := writeBuffer.PushContext(%s, utils.WithRenderAsList(true)); err != nil {\\n\\t\\treturn err\\n\\t}\\n\", fieldNameUntitled)\n\t\t\t// TODO: we use serializable or strings as we don't want to over-complex this\n\t\t\tg.Printf(\"for _name, elem := range d.%s {\\n\", fieldName)\n\t\t\tswitch keyType := fieldType.Key.(type) {\n\t\t\tcase *ast.Ident:\n\t\t\t\tswitch keyType.Name {\n\t\t\t\tcase \"uint\", \"uint8\", \"uint16\", \"uint32\", \"uint64\", \"int\", \"int8\", \"int16\", \"int32\", \"int64\": // TODO: add other types\n\t\t\t\t\tg.Printf(\"\\t\\tname := fmt.Sprintf(\\\"%s\\\", _name)\\n\", \"%v\")\n\t\t\t\tcase \"string\":\n\t\t\t\t\tg.Printf(\"\\t\\tname := _name\\n\")\n\t\t\t\tdefault:\n\t\t\t\t\tg.Printf(\"\\t\\tname := fmt.Sprintf(\\\"%s\\\", &_name)\\n\", \"%v\")\n\t\t\t\t}\n\t\t\tdefault:\n\t\t\t\tg.Printf(\"\\t\\tname := fmt.Sprintf(\\\"%s\\\", &_name)\\n\", \"%v\")\n\t\t\t}\n\t\t\tswitch eltType := fieldType.Value.(type) {\n\t\t\tcase *ast.StarExpr, *ast.SelectorExpr:\n\t\t\t\tg.Printf(\"\\n\\t\\tvar elem any = elem\\n\")\n\t\t\t\tg.Printf(\"\\t\\tif serializable, ok := elem.(utils.Serializable); ok {\\n\")\n\t\t\t\tg.Printf(\"\\t\\t\\tif err := writeBuffer.PushContext(name); err != nil {\\n\")\n\t\t\t\tg.Printf(\"\\t\\t\\t\\treturn err\\n\")\n\t\t\t\tg.Printf(\"\\t\\t\\t}\\n\")\n\t\t\t\tg.Printf(\"\\t\\t\\tif err := serializable.SerializeWithWriteBuffer(ctx, writeBuffer); err != nil {\\n\")\n\t\t\t\tg.Printf(\"\\t\\t\\t\\treturn err\\n\")\n\t\t\t\tg.Printf(\"\\t\\t\\t}\\n\")\n\t\t\t\tg.Printf(\"\\t\\t\\tif err := writeBuffer.PopContext(name); err != nil {\\n\")\n\t\t\t\tg.Printf(\"\\t\\t\\t\\treturn err\\n\")\n\t\t\t\tg.Printf(\"\\t\\t\\t}\\n\")\n\t\t\t\tg.Printf(\"\\t\\t} else {\\n\")\n\t\t\t\tg.Printf(\"\\t\\t\\telemAsString := fmt.Sprintf(\\\"%%v\\\", elem)\\n\")\n\t\t\t\tg.Printf(\"\\t\\t\\tif err := writeBuffer.WriteString(name, 
uint32(len(elemAsString)*8), \\\"UTF-8\\\", elemAsString); err != nil {\\n\")\n\t\t\t\tg.Printf(\"\\t\\t\\t\\treturn err\\n\")\n\t\t\t\tg.Printf(\"\\t\\t\\t}\\n\")\n\t\t\t\tg.Printf(\"\\t\\t}\\n\")\n\t\t\tcase *ast.Ident:\n\t\t\t\tswitch eltType.Name {\n\t\t\t\tcase \"bool\":\n\t\t\t\t\tg.Printf(boolFieldSerialize, \"elem\", \"name\")\n\t\t\t\tcase \"string\":\n\t\t\t\t\tg.Printf(stringFieldSerialize, \"elem\", \"name\")\n\t\t\t\tcase \"error\":\n\t\t\t\t\tg.Printf(errorFieldSerialize, \"elem\", \"name\")\n\t\t\t\tdefault:\n\t\t\t\t\tfmt.Printf(\"\\t no support implemented for Ident within MapType for %v\\n\", fieldType)\n\t\t\t\t\tg.Printf(\"\\t\\t_value := fmt.Sprintf(\\\"%%v\\\", elem)\\n\")\n\t\t\t\t\tg.Printf(stringFieldSerialize, \"_value\", \"name\")\n\t\t\t\t}\n\t\t\tdefault:\n\t\t\t\tfmt.Printf(\"\\t no support implemented within MapType %v\\n\", fieldType.Value)\n\t\t\t\tg.Printf(\"\\t\\t_value := fmt.Sprintf(\\\"%%v\\\", elem)\\n\")\n\t\t\t\tg.Printf(stringFieldSerialize, \"_value\", \"name\")\n\t\t\t}\n\t\t\tg.Printf(\"\\t}\\n\")\n\t\t\tg.Printf(\"if err := writeBuffer.PopContext(%s, utils.WithRenderAsList(true)); err != nil {\\n\\t\\treturn err\\n\\t}\\n\", fieldNameUntitled)\n\t\tcase *ast.ChanType:\n\t\t\tg.Printf(chanFieldSerialize, \"d.\"+field.name, fieldNameUntitled, field.name)\n\t\tcase *ast.FuncType:\n\t\t\tg.Printf(funcFieldSerialize, \"d.\"+field.name, fieldNameUntitled)\n\t\tdefault:\n\t\t\tfmt.Printf(\"no support implemented %#v\\n\", fieldType)\n\t\t}\n\t\tif field.hasLocker != \"\" {\n\t\t\tg.Printf(\"\\treturn nil\\n\")\n\t\t\tg.Printf(\"}(); err != nil {\\n\")\n\t\t\tg.Printf(\"\\treturn err\\n\")\n\t\t\tg.Printf(\"}\\n\")\n\t\t}\n\t}\n\tg.Printf(\"\\tif err := writeBuffer.PopContext(%s); err != nil {\\n\", logicalTypeName)\n\tg.Printf(\"\\t\\treturn err\\n\")\n\tg.Printf(\"\\t}\\n\")\n\tg.Printf(\"\\treturn nil\\n\")\n\tg.Printf(\"}\\n\")\n\tg.Printf(\"\\n\")\n\tg.Printf(stringerTemplate, typeName)\n}", "func (n ClassNode) 
Codegen(scope *Scope, c *Compiler) value.Value {\n\tstructDefn := scope.FindType(n.Name).Type.(*types.StructType)\n\n\tfieldnames := make([]string, 0, len(n.Variables))\n\tfields := make([]types.Type, 0, len(n.Variables))\n\n\tnames := map[string]bool{}\n\n\tfor _, f := range n.Variables {\n\t\tt := f.Type.Name\n\t\tname := f.Name.String()\n\t\tif _, found := names[name]; found {\n\t\t\tlog.Fatal(\"Class '%s' has two fields/methods named '%s'\\n\", n.Name, f.Name)\n\t\t}\n\t\tnames[name] = true\n\t\tty := scope.FindType(t).Type\n\t\tty = f.Type.BuildPointerType(ty)\n\t\tfields = append(fields, ty)\n\t\tfieldnames = append(fieldnames, name)\n\t}\n\n\tthisArg := VariableDefnNode{}\n\tthisArg.Name = NewNamedReference(\"this\")\n\tthisArg.Type = GeodeTypeRef{}\n\tthisArg.Type.Array = false\n\tthisArg.Type.Name = n.Name\n\tthisArg.Type.PointerLevel = 1\n\n\tstructDefn.Fields = fields\n\tstructDefn.Names = fieldnames\n\n\tmethodBaseArgs := []VariableDefnNode{thisArg}\n\tfor _, m := range n.Methods {\n\t\tm.Name.Value = fmt.Sprintf(\"class.%s.%s\", n.Name, m.Name)\n\t\tif _, found := names[m.Name.String()]; found {\n\t\t\tlog.Fatal(\"Class '%s' has two fields/methods named '%s'\\n\", n.Name, m.Name)\n\t\t}\n\t\tnames[m.Name.String()] = true\n\t\tm.Args = append(methodBaseArgs, m.Args...)\n\t\tm.Declare(scope, c)\n\t\tm.Codegen(scope, c)\n\t}\n\n\treturn nil\n}", "func JsonFieldGenerator() gopter.Gen {\n\tif jsonFieldGenerator != nil {\n\t\treturn jsonFieldGenerator\n\t}\n\n\tgenerators := make(map[string]gopter.Gen)\n\tAddIndependentPropertyGeneratorsForJsonField(generators)\n\tjsonFieldGenerator = gen.Struct(reflect.TypeOf(JsonField{}), generators)\n\n\treturn jsonFieldGenerator\n}", "func (op *metadataLookup) buildField() {\n\tlengthOfFields := len(op.fields)\n\top.executeCtx.Fields = make(field.Metas, lengthOfFields)\n\n\tidx := 0\n\tfor fieldID := range op.fields {\n\t\tf := op.fields[fieldID]\n\t\top.executeCtx.Fields[idx] = field.Meta{\n\t\t\tID: 
fieldID,\n\t\t\tType: f.DownSampling.GetFieldType(),\n\t\t\tName: f.DownSampling.FieldName(),\n\t\t}\n\t\tidx++\n\t}\n\t// first sort field by field id\n\top.executeCtx.SortFields()\n\t// after sort filed, build aggregation spec\n\top.executeCtx.DownSamplingSpecs = make(aggregation.AggregatorSpecs, lengthOfFields)\n\top.executeCtx.AggregatorSpecs = make(aggregation.AggregatorSpecs, lengthOfFields)\n\tfor fieldIdx, fieldMeta := range op.executeCtx.Fields {\n\t\tf := op.fields[fieldMeta.ID]\n\t\top.executeCtx.DownSamplingSpecs[fieldIdx] = f.DownSampling\n\t\top.executeCtx.AggregatorSpecs[fieldIdx] = f.Aggregator\n\t}\n}", "func TraceFieldGenerator(ctx context.Context) []zapcore.Field {\n\tspanCtx := trace.FromContext(ctx).SpanContext()\n\n\treturn []zapcore.Field{\n\t\tzap.Uint64(\"dd.trace_id\", binary.BigEndian.Uint64(spanCtx.TraceID[8:])),\n\t\tzap.Uint64(\"dd.span_id\", binary.BigEndian.Uint64(spanCtx.SpanID[:])),\n\t}\n}", "func compileField(sf reflect.StructField, name string) interface{} {\n\tf := field{sField: sf.Index[0]}\n\n\tf.name = []byte(name)\n\n\tswitch sf.Type.Kind() {\n\tcase reflect.Struct:\n\t\treturn fieldStruct{f.sField, f.name, compileStruct(sf.Type)}\n\tcase reflect.Bool:\n\t\tf.write = encodeBool\n\t\tf.read = decodeBool\n\t\tf.requiredType = 1\n\tcase reflect.Int8:\n\t\tf.write = encodeInt8\n\t\tf.read = decodeInt8\n\t\tf.requiredType = 1\n\tcase reflect.Int16:\n\t\tf.write = encodeInt16\n\t\tf.read = decodeInt16\n\t\tf.requiredType = 2\n\tcase reflect.Int32:\n\t\tf.write = encodeInt32\n\t\tf.read = decodeInt32\n\t\tf.requiredType = 3\n\tcase reflect.Int64:\n\t\tf.write = encodeInt64\n\t\tf.read = decodeInt64\n\t\tf.requiredType = 4\n\tcase reflect.String:\n\t\tf.write = encodeString\n\t\tf.read = decodeString\n\t\tf.requiredType = 8\n\tcase reflect.Map:\n\t\tf.requiredType = 10\n\t\telem := sf.Type.Elem()\n\t\tvar elemField interface{}\n\t\tname := \"map:\" + sf.Name\n\t\tif elem.Kind() != reflect.Interface {\n\t\t\telemField = 
compileField(reflect.StructField{Type: elem, Index: []int{0}}, name)\n\t\t}\n\t\tf.write = func(w io.Writer, en *msgEncoder, fi reflect.Value) error {\n\t\t\tkeys := fi.MapKeys()\n\t\t\tfor _, key := range keys {\n\t\t\t\tif f, ok := elemField.(field); ok {\n\t\t\t\t\tv := fi.MapIndex(key)\n\t\t\t\t\twritePrefix(en, w, []byte(key.String()), f.requiredType)\n\t\t\t\t\terr := f.write(w, en, v)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\treturn err\n\t\t\t\t\t}\n\t\t\t\t} else {\n\t\t\t\t\tif elemField == nil {\n\t\t\t\t\t\tv := fi.MapIndex(key).Elem()\n\t\t\t\t\t\ttemp := compileField(reflect.StructField{Type: v.Type(), Index: []int{0}}, \"\")\n\t\t\t\t\t\tif f, ok := temp.(field); ok {\n\t\t\t\t\t\t\twritePrefix(en, w, []byte(key.String()), f.requiredType)\n\t\t\t\t\t\t\terr := f.write(w, en, v)\n\t\t\t\t\t\t\tif err != nil {\n\t\t\t\t\t\t\t\treturn err\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t} else {\n\t\t\t\t\t\t\twritePrefix(en, w, []byte(key.String()), 10)\n\t\t\t\t\t\t\tfs := temp.(fieldStruct)\n\t\t\t\t\t\t\terr := write(w, en, fs.m, v)\n\t\t\t\t\t\t\tif err != nil {\n\t\t\t\t\t\t\t\treturn err\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\t\t} else {\n\t\t\t\t\t\twritePrefix(en, w, []byte(key.String()), 10)\n\t\t\t\t\t\tfs := elemField.(fieldStruct)\n\t\t\t\t\t\tv := fi.MapIndex(key)\n\t\t\t\t\t\terr := write(w, en, fs.m, v)\n\t\t\t\t\t\tif err != nil {\n\t\t\t\t\t\t\treturn err\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\n\t\t\t\t}\n\t\t\t}\n\t\t\tbs := en.b[:1]\n\t\t\tbs[0] = 0\n\t\t\t_, err := w.Write(bs)\n\t\t\treturn err\n\t\t}\n\t\tf.read = func(r io.Reader, de *msgDecoder, fi reflect.Value) error {\n\n\t\t\tma := reflect.MakeMap(sf.Type)\n\n\t\t\tname, t, err := readPrefix(r, de)\n\t\t\tfor ; t != 0; name, t, err = readPrefix(r, de) {\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tkeyVal := reflect.ValueOf(name)\n\n\t\t\t\tvar val reflect.Value\n\t\t\t\tif f, ok := elemField.(field); ok {\n\t\t\t\t\tval = reflect.New(elem)\n\t\t\t\t\terr := f.read(r, de, 
val)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\treturn err\n\t\t\t\t\t}\n\t\t\t\t} else {\n\t\t\t\t\tif elemField == nil {\n\t\t\t\t\t\tv, err := fallbackRead(r, de)\n\t\t\t\t\t\tif err != nil {\n\t\t\t\t\t\t\treturn err\n\t\t\t\t\t\t}\n\t\t\t\t\t\tval = reflect.ValueOf(v)\n\t\t\t\t\t} else {\n\t\t\t\t\t\tval = reflect.New(elem)\n\t\t\t\t\t\tfs := elemField.(fieldStruct)\n\t\t\t\t\t\terr := read(r, de, fs.m, val)\n\t\t\t\t\t\tif err != nil {\n\t\t\t\t\t\t\treturn err\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tma.SetMapIndex(keyVal, val)\n\t\t\t}\n\t\t\tfi.Set(ma)\n\t\t\treturn nil\n\t\t}\n\tcase reflect.Slice:\n\t\tf.requiredType = 9\n\t\telem := sf.Type.Elem()\n\t\tswitch elem.Kind() {\n\t\tcase reflect.Uint8: //Short-cut for byte arrays\n\t\t\tf.requiredType = 7\n\t\t\tf.write = func(w io.Writer, en *msgEncoder, fi reflect.Value) error {\n\t\t\t\tl := fi.Len()\n\t\t\t\tbs := en.b[:4]\n\t\t\t\tbinary.BigEndian.PutUint32(bs, uint32(l))\n\t\t\t\t_, err := w.Write(bs)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\t_, err = w.Write(fi.Bytes())\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tf.read = func(r io.Reader, de *msgDecoder, fi reflect.Value) error {\n\t\t\t\tbs := de.b[:4]\n\t\t\t\t_, err := r.Read(bs)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tl := binary.BigEndian.Uint32(bs)\n\t\t\t\tout := make([]byte, l)\n\t\t\t\t_, err = r.Read(out)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn nil\n\t\t\t\t}\n\t\t\t\tfi.SetBytes(out)\n\t\t\t\treturn nil\n\t\t\t}\n\t\tcase reflect.Int32: //Short-cut for int32 arrays\n\t\t\tf.requiredType = 11\n\t\t\tf.write = func(w io.Writer, en *msgEncoder, fi reflect.Value) error {\n\t\t\t\tl := fi.Len()\n\t\t\t\tbs := en.b[:4]\n\t\t\t\tbinary.BigEndian.PutUint32(bs, uint32(l))\n\t\t\t\t_, err := w.Write(bs)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tdata := fi.Interface().([]int32)\n\t\t\t\tfor i := range data {\n\t\t\t\t\tbinary.BigEndian.PutUint32(bs, 
uint32(data[i]))\n\t\t\t\t\t_, err := w.Write(bs)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\treturn err\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tf.read = func(r io.Reader, de *msgDecoder, fi reflect.Value) error {\n\t\t\t\tbs := de.b[:4]\n\t\t\t\t_, err := r.Read(bs)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tl := binary.BigEndian.Uint32(bs)\n\t\t\t\tout := make([]int32, l)\n\t\t\t\tfor i := range out {\n\t\t\t\t\t_, err := r.Read(bs)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\treturn err\n\t\t\t\t\t}\n\t\t\t\t\tout[i] = int32(binary.BigEndian.Uint32(bs))\n\t\t\t\t}\n\t\t\t\tfi.Set(reflect.ValueOf(out))\n\t\t\t\treturn nil\n\t\t\t}\n\t\tdefault:\n\t\t\tname := \"slice:\" + sf.Name\n\t\t\telemField := compileField(reflect.StructField{Type: elem, Index: []int{0}}, name)\n\t\t\tf.write = func(w io.Writer, en *msgEncoder, fi reflect.Value) error {\n\t\t\t\tl := fi.Len()\n\t\t\t\tbs := en.b[:5]\n\t\t\t\tbinary.BigEndian.PutUint32(bs[1:], uint32(l))\n\t\t\t\tif f, ok := elemField.(field); ok {\n\t\t\t\t\tbs[0] = f.requiredType\n\t\t\t\t} else {\n\t\t\t\t\tbs[0] = 10\n\t\t\t\t}\n\t\t\t\t_, err := w.Write(bs)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tif f, ok := elemField.(field); ok {\n\t\t\t\t\tfor i := 0; i < l; i++ {\n\t\t\t\t\t\tv := fi.Index(i)\n\t\t\t\t\t\terr := f.write(w, en, v)\n\t\t\t\t\t\tif err != nil {\n\t\t\t\t\t\t\treturn err\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t} else {\n\t\t\t\t\tf := elemField.(fieldStruct)\n\t\t\t\t\tfor i := 0; i < l; i++ {\n\t\t\t\t\t\tv := fi.Index(i)\n\t\t\t\t\t\terr := write(w, en, f.m, v)\n\t\t\t\t\t\tif err != nil {\n\t\t\t\t\t\t\treturn err\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\treturn nil\n\t\t\t}\n\t\t\tf.read = func(r io.Reader, de *msgDecoder, fi reflect.Value) error {\n\t\t\t\tbs := de.b[:5]\n\t\t\t\t_, err := r.Read(bs)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tif f, ok := elemField.(field); ok {\n\t\t\t\t\tif bs[0] != 
f.requiredType {\n\t\t\t\t\t\treturn ErrorIncorrectType\n\t\t\t\t\t}\n\t\t\t\t} else {\n\t\t\t\t\tif bs[0] != 10 {\n\t\t\t\t\t\treturn ErrorIncorrectType\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tl := int(binary.BigEndian.Uint32(bs[1:]))\n\t\t\t\tval := reflect.MakeSlice(sf.Type, l, l)\n\t\t\t\tif f, ok := elemField.(field); ok {\n\t\t\t\t\tfor i := 0; i < l; i++ {\n\t\t\t\t\t\tv := val.Index(i)\n\t\t\t\t\t\terr := f.read(r, de, v)\n\t\t\t\t\t\tif err != nil {\n\t\t\t\t\t\t\treturn err\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t} else {\n\t\t\t\t\tf := elemField.(fieldStruct)\n\t\t\t\t\tfor i := 0; i < l; i++ {\n\t\t\t\t\t\tv := val.Index(i)\n\t\t\t\t\t\terr := read(r, de, f.m, v)\n\t\t\t\t\t\tif err != nil {\n\t\t\t\t\t\t\treturn err\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tfi.Set(val)\n\t\t\t\treturn nil\n\t\t\t}\n\t\t}\n\tcase reflect.Float32:\n\t\tf.requiredType = 5\n\t\tf.write = encodeFloat32\n\t\tf.read = decodeFloat32\n\tcase reflect.Float64:\n\t\tf.requiredType = 6\n\t\tf.write = encodeFloat64\n\t\tf.read = decodeFloat64\n\tdefault:\n\t\tpanic(fmt.Errorf(\"Unhandled type %s for %s\", sf.Type.Kind().String(), sf.Name))\n\t}\n\treturn f\n}", "func (g *Generator) generate(typeName string) {\n\tfields := make([]Field, 0, 100)\n\tfor _, file := range g.pkg.files {\n\t\t// Set the state for this run of the walker.\n\t\tfile.typeName = typeName\n\t\tfile.fields = nil\n\t\tif file.file != nil {\n\t\t\tast.Inspect(file.file, file.genDecl)\n\t\t\tg.additionalImports = append(g.additionalImports, file.additionalImports...)\n\t\t\tfields = append(fields, file.fields...)\n\t\t}\n\t}\n\n\tif len(fields) == 0 {\n\t\tlog.Fatalf(\"no values defined for type %s\", typeName)\n\t}\n\n\tg.build(fields, typeName)\n}", "func (p *Planner) addField(ref int) {\n\tfieldName := p.visitor.Operation.FieldNameString(ref)\n\n\talias := ast.Alias{\n\t\tIsDefined: p.visitor.Operation.FieldAliasIsDefined(ref),\n\t}\n\n\tif alias.IsDefined {\n\t\taliasBytes := 
p.visitor.Operation.FieldAliasBytes(ref)\n\t\talias.Name = p.upstreamOperation.Input.AppendInputBytes(aliasBytes)\n\t}\n\n\ttypeName := p.visitor.Walker.EnclosingTypeDefinition.NameString(p.visitor.Definition)\n\tfor i := range p.visitor.Config.Fields {\n\t\tisDesiredField := p.visitor.Config.Fields[i].TypeName == typeName &&\n\t\t\tp.visitor.Config.Fields[i].FieldName == fieldName\n\n\t\t// chech that we are on a desired field and field path contains a single element - mapping is plain\n\t\tif isDesiredField && len(p.visitor.Config.Fields[i].Path) == 1 {\n\t\t\t// define alias when mapping path differs from fieldName and no alias has been defined\n\t\t\tif p.visitor.Config.Fields[i].Path[0] != fieldName && !alias.IsDefined {\n\t\t\t\talias.IsDefined = true\n\t\t\t\taliasBytes := p.visitor.Operation.FieldNameBytes(ref)\n\t\t\t\talias.Name = p.upstreamOperation.Input.AppendInputBytes(aliasBytes)\n\t\t\t}\n\n\t\t\t// override fieldName with mapping path value\n\t\t\tfieldName = p.visitor.Config.Fields[i].Path[0]\n\n\t\t\t// when provided field is a root type field save new field name\n\t\t\tif ref == p.rootFieldRef {\n\t\t\t\tp.rootFieldName = fieldName\n\t\t\t}\n\n\t\t\tbreak\n\t\t}\n\t}\n\n\tfield := p.upstreamOperation.AddField(ast.Field{\n\t\tName: p.upstreamOperation.Input.AppendInputString(fieldName),\n\t\tAlias: alias,\n\t})\n\n\tselection := ast.Selection{\n\t\tKind: ast.SelectionKindField,\n\t\tRef: field.Ref,\n\t}\n\n\tp.upstreamOperation.AddSelection(p.nodes[len(p.nodes)-1].Ref, selection)\n\tp.nodes = append(p.nodes, field)\n}", "func (dr *defaultRender) OnParseField(out *jen.File, methodDefinition *jen.Group, field *atool.Arg, file *atool.File) {\n}", "func (sb *schemaBuilder) buildField(field reflect.StructField) (*graphql.Field, error) {\n\tretType, err := sb.getType(field.Type)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &graphql.Field{\n\t\tResolve: func(ctx context.Context, source, args interface{}, selectionSet *graphql.SelectionSet) 
(interface{}, error) {\n\t\t\tvalue := reflect.ValueOf(source)\n\t\t\tif value.Kind() == reflect.Ptr {\n\t\t\t\tvalue = value.Elem()\n\t\t\t}\n\t\t\treturn value.FieldByIndex(field.Index).Interface(), nil\n\t\t},\n\t\tType: retType,\n\t\tParseArguments: nilParseArguments,\n\t}, nil\n}", "func (f *File) genDecl(node ast.Node) bool {\n\tdecl, ok := node.(*ast.GenDecl)\n\tif !ok || decl.Tok != token.TYPE {\n\t\t// We only care about type declarations.\n\t\treturn true\n\t}\n\tfor _, spec := range decl.Specs {\n\t\ttypeSpec := spec.(*ast.TypeSpec)\n\t\tstructDecl, ok := typeSpec.Type.(*ast.StructType)\n\t\tif !ok {\n\t\t\tcontinue\n\t\t}\n\t\tif typeSpec.Name.Name != f.typeName {\n\t\t\tcontinue\n\t\t}\n\t\tfmt.Printf(\"Handling %s\\n\", typeSpec.Name.Name)\n\t\tfor _, field := range structDecl.Fields.List {\n\t\t\tif field.Tag != nil && field.Tag.Value == \"`ignore:\\\"true\\\"`\" {\n\t\t\t\tvar name string\n\t\t\t\tif len(field.Names) != 0 {\n\t\t\t\t\tname = field.Names[0].Name\n\t\t\t\t} else {\n\t\t\t\t\tname = \"<delegate>\"\n\t\t\t\t}\n\t\t\t\tfmt.Printf(\"\\t ignoring field %s %v\\n\", name, field.Type)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tisStringer := false\n\t\t\tif field.Tag != nil && field.Tag.Value == \"`stringer:\\\"true\\\"`\" { // TODO: Check if we do that a bit smarter\n\t\t\t\tisStringer = true\n\t\t\t}\n\t\t\thasLocker := \"\"\n\t\t\tif field.Tag != nil && strings.HasPrefix(field.Tag.Value, \"`hasLocker:\\\"\") { // TODO: Check if we do that a bit smarter\n\t\t\t\thasLocker = strings.TrimPrefix(field.Tag.Value, \"`hasLocker:\\\"\")\n\t\t\t\thasLocker = strings.TrimSuffix(hasLocker, \"\\\"`\")\n\t\t\t}\n\t\t\tif len(field.Names) == 0 {\n\t\t\t\tfmt.Printf(\"\\t adding delegate\\n\")\n\t\t\t\tswitch ft := field.Type.(type) {\n\t\t\t\tcase *ast.Ident:\n\t\t\t\t\tf.fields = append(f.fields, Field{\n\t\t\t\t\t\tfieldType: ft,\n\t\t\t\t\t\tisDelegate: true,\n\t\t\t\t\t\tisStringer: isStringer,\n\t\t\t\t\t\thasLocker: 
hasLocker,\n\t\t\t\t\t})\n\t\t\t\t\tcontinue\n\t\t\t\tcase *ast.StarExpr:\n\t\t\t\t\tswitch set := ft.X.(type) {\n\t\t\t\t\tcase *ast.Ident:\n\t\t\t\t\t\tf.fields = append(f.fields, Field{\n\t\t\t\t\t\t\tfieldType: set,\n\t\t\t\t\t\t\tisDelegate: true,\n\t\t\t\t\t\t\tisStringer: isStringer,\n\t\t\t\t\t\t\thasLocker: hasLocker,\n\t\t\t\t\t\t})\n\t\t\t\t\t\tcontinue\n\t\t\t\t\tdefault:\n\t\t\t\t\t\tpanic(fmt.Sprintf(\"Only pointer to struct delegates supported now. Type %T\", field.Type))\n\t\t\t\t\t}\n\t\t\t\tcase *ast.SelectorExpr:\n\t\t\t\t\tf.fields = append(f.fields, Field{\n\t\t\t\t\t\tfieldType: ft.Sel,\n\t\t\t\t\t\tisDelegate: true,\n\t\t\t\t\t\tisStringer: isStringer,\n\t\t\t\t\t\thasLocker: hasLocker,\n\t\t\t\t\t})\n\t\t\t\t\tcontinue\n\t\t\t\tdefault:\n\t\t\t\t\tpanic(fmt.Sprintf(\"Only struct delegates supported now. Type %T\", field.Type))\n\t\t\t\t}\n\t\t\t}\n\t\t\tfmt.Printf(\"\\t adding field %s %v\\n\", field.Names[0].Name, field.Type)\n\t\t\tf.fields = append(f.fields, Field{\n\t\t\t\tname: field.Names[0].Name,\n\t\t\t\tfieldType: field.Type,\n\t\t\t\tisStringer: isStringer,\n\t\t\t\thasLocker: hasLocker,\n\t\t\t})\n\t\t}\n\t}\n\treturn false\n}", "func AddIndependentPropertyGeneratorsForJsonField(gens map[string]gopter.Gen) {\n\tgens[\"SourceField\"] = gen.PtrOf(gen.AlphaString())\n}", "func (c *TypeConverter) genStructConverter(\n\tkeyPrefix string,\n\tfromPrefix string,\n\tindent string,\n\tfromFields []*compile.FieldSpec,\n\ttoFields []*compile.FieldSpec,\n\tfieldMap map[string]FieldMapperEntry,\n\tprevKeyPrefixes []string,\n) error {\n\n\tfor i := 0; i < len(toFields); i++ {\n\t\ttoField := toFields[i]\n\n\t\t// Check for same named field\n\t\tvar fromField *compile.FieldSpec\n\t\tfor j := 0; j < len(fromFields); j++ {\n\t\t\tif fromFields[j].Name == toField.Name {\n\t\t\t\tfromField = fromFields[j]\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\n\t\ttoSubIdentifier := keyPrefix + PascalCase(toField.Name)\n\t\ttoIdentifier := \"out.\" + 
toSubIdentifier\n\t\toverriddenIdentifier := \"\"\n\t\tfromIdentifier := \"\"\n\n\t\t// Check for mapped field\n\t\tvar overriddenField *compile.FieldSpec\n\n\t\t// check if this toField satisfies a fieldMap transform\n\t\ttransformFrom, ok := fieldMap[toSubIdentifier]\n\t\tif ok {\n\t\t\t// no existing direct fromField, just assign the transform\n\t\t\tif fromField == nil {\n\t\t\t\tfromField = transformFrom.Field\n\t\t\t\tif c.useRecurGen {\n\t\t\t\t\tfromIdentifier = \"inOriginal.\" + transformFrom.QualifiedName\n\t\t\t\t} else {\n\t\t\t\t\tfromIdentifier = \"in.\" + transformFrom.QualifiedName\n\t\t\t\t}\n\t\t\t\t// else there is a conflicting direct fromField\n\t\t\t} else {\n\t\t\t\t// depending on Override flag either the direct fromField or transformFrom is the OverrideField\n\t\t\t\tif transformFrom.Override {\n\t\t\t\t\t// check for required/optional setting\n\t\t\t\t\tif !transformFrom.Field.Required {\n\t\t\t\t\t\toverriddenField = fromField\n\t\t\t\t\t\toverriddenIdentifier = \"in.\" + fromPrefix +\n\t\t\t\t\t\t\tPascalCase(overriddenField.Name)\n\t\t\t\t\t}\n\t\t\t\t\t// If override is true and the new field is required,\n\t\t\t\t\t// there's a default instantiation value and will always\n\t\t\t\t\t// overwrite.\n\t\t\t\t\tfromField = transformFrom.Field\n\t\t\t\t\tif c.useRecurGen {\n\t\t\t\t\t\tfromIdentifier = \"inOriginal.\" + transformFrom.QualifiedName\n\t\t\t\t\t} else {\n\t\t\t\t\t\tfromIdentifier = \"in.\" + transformFrom.QualifiedName\n\t\t\t\t\t}\n\t\t\t\t} else {\n\t\t\t\t\t// If override is false and the from field is required,\n\t\t\t\t\t// From is always populated and will never be overwritten.\n\t\t\t\t\tif !fromField.Required {\n\t\t\t\t\t\toverriddenField = transformFrom.Field\n\t\t\t\t\t\tif c.useRecurGen {\n\t\t\t\t\t\t\tfromIdentifier = \"inOriginal.\" + transformFrom.QualifiedName\n\t\t\t\t\t\t} else {\n\t\t\t\t\t\t\toverriddenIdentifier = \"in.\" + 
transformFrom.QualifiedName\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\t// neither direct or transform fromField was found\n\t\tif fromField == nil {\n\t\t\t// search the fieldMap toField identifiers for matching identifier prefix\n\t\t\t// e.g. the current toField is a struct and something within it has a transform\n\t\t\t// a full match identifiers for transform non-struct types would have been caught above\n\t\t\thasStructFieldMapping := false\n\t\t\tfor toID := range fieldMap {\n\t\t\t\tif strings.HasPrefix(toID, toSubIdentifier) {\n\t\t\t\t\thasStructFieldMapping = true\n\t\t\t\t}\n\t\t\t}\n\n\t\t\t// if there's no fromField and no fieldMap transform that could be applied\n\t\t\tif !hasStructFieldMapping {\n\t\t\t\tvar bypass bool\n\t\t\t\t// check if required field is filled from other resources\n\t\t\t\t// it can be used to set system default (customized tracing /auth required for clients),\n\t\t\t\t// or header propagating\n\t\t\t\tif c.optionalEntries != nil {\n\t\t\t\t\tfor toID := range c.optionalEntries {\n\t\t\t\t\t\tif strings.HasPrefix(toID, toSubIdentifier) {\n\t\t\t\t\t\t\tbypass = true\n\t\t\t\t\t\t\tbreak\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\t// the toField is either covered by optionalEntries, or optional and\n\t\t\t\t// there's nothing that maps to it or its sub-fields so we should skip it\n\t\t\t\tif bypass || !toField.Required {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\n\t\t\t\t// unrecoverable error\n\t\t\t\treturn errors.Errorf(\n\t\t\t\t\t\"required toField %s does not have a valid fromField mapping\",\n\t\t\t\t\ttoField.Name,\n\t\t\t\t)\n\t\t\t}\n\t\t}\n\n\t\tif fromIdentifier == \"\" && fromField != nil {\n\t\t\t// should we set this if no fromField ??\n\t\t\tfromIdentifier = \"in.\" + fromPrefix + PascalCase(fromField.Name)\n\t\t}\n\n\t\tif prevKeyPrefixes == nil {\n\t\t\tprevKeyPrefixes = []string{}\n\t\t}\n\n\t\tvar overriddenFieldName string\n\t\tvar overriddenFieldType compile.TypeSpec\n\t\tif overriddenField 
!= nil {\n\t\t\toverriddenFieldName = overriddenField.Name\n\t\t\toverriddenFieldType = overriddenField.Type\n\t\t}\n\n\t\t// Override thrift type names to avoid naming collisions between endpoint\n\t\t// and client types.\n\t\tswitch toFieldType := compile.RootTypeSpec(toField.Type).(type) {\n\t\tcase\n\t\t\t*compile.BoolSpec,\n\t\t\t*compile.I8Spec,\n\t\t\t*compile.I16Spec,\n\t\t\t*compile.I32Spec,\n\t\t\t*compile.EnumSpec,\n\t\t\t*compile.I64Spec,\n\t\t\t*compile.DoubleSpec,\n\t\t\t*compile.StringSpec:\n\n\t\t\terr := c.genConverterForPrimitive(\n\t\t\t\ttoField,\n\t\t\t\ttoIdentifier,\n\t\t\t\tfromField,\n\t\t\t\tfromIdentifier,\n\t\t\t\toverriddenField,\n\t\t\t\toverriddenIdentifier,\n\t\t\t\tindent,\n\t\t\t\tprevKeyPrefixes,\n\t\t\t)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\tcase *compile.BinarySpec:\n\t\t\tfor _, line := range checkOptionalNil(indent, c.uninitialized, toIdentifier, prevKeyPrefixes, c.useRecurGen) {\n\t\t\t\tc.append(line)\n\t\t\t}\n\t\t\tc.append(toIdentifier, \" = []byte(\", fromIdentifier, \")\")\n\t\tcase *compile.StructSpec:\n\t\t\tvar (\n\t\t\t\tstFromPrefix = fromPrefix\n\t\t\t\tstFromType compile.TypeSpec\n\t\t\t\tfromTypeName string\n\t\t\t)\n\t\t\tif fromField != nil {\n\t\t\t\tstFromType = fromField.Type\n\t\t\t\tstFromPrefix = fromPrefix + PascalCase(fromField.Name)\n\n\t\t\t\tfromTypeName, _ = c.getIdentifierName(stFromType)\n\t\t\t}\n\n\t\t\ttoTypeName, err := c.getIdentifierName(toFieldType)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\tif converterMethodName, ok := c.convStructMap[toFieldType.Name]; ok {\n\t\t\t\t// the converter for this struct has already been generated, so just use it\n\t\t\t\tc.append(indent, \"out.\", keyPrefix+PascalCase(toField.Name), \" = \", converterMethodName, \"(\", fromIdentifier, \")\")\n\t\t\t} else if c.useRecurGen && fromTypeName != \"\" {\n\t\t\t\t// generate a callable converter inside function literal\n\t\t\t\terr = 
c.genConverterForStructWrapped(\n\t\t\t\t\ttoField,\n\t\t\t\t\ttoFieldType,\n\t\t\t\t\ttoTypeName,\n\t\t\t\t\ttoSubIdentifier,\n\t\t\t\t\tfromTypeName,\n\t\t\t\t\tfromIdentifier,\n\t\t\t\t\tstFromType,\n\t\t\t\t\tfieldMap,\n\t\t\t\t\tprevKeyPrefixes,\n\t\t\t\t\tindent,\n\t\t\t\t)\n\t\t\t} else {\n\t\t\t\terr = c.genConverterForStruct(\n\t\t\t\t\ttoField.Name,\n\t\t\t\t\ttoFieldType,\n\t\t\t\t\ttoField.Required,\n\t\t\t\t\tstFromType,\n\t\t\t\t\tfromIdentifier,\n\t\t\t\t\tkeyPrefix+PascalCase(toField.Name),\n\t\t\t\t\tstFromPrefix,\n\t\t\t\t\tindent,\n\t\t\t\t\tfieldMap,\n\t\t\t\t\tprevKeyPrefixes,\n\t\t\t\t)\n\t\t\t}\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\tcase *compile.ListSpec:\n\t\t\terr := c.genConverterForList(\n\t\t\t\ttoFieldParam{\n\t\t\t\t\ttoFieldType,\n\t\t\t\t\ttoField.Name,\n\t\t\t\t\ttoField.Required,\n\t\t\t\t\ttoIdentifier,\n\t\t\t\t},\n\t\t\t\tfromFieldParam{\n\t\t\t\t\tfromField.Type,\n\t\t\t\t\tfromField.Name,\n\t\t\t\t\tfromIdentifier,\n\t\t\t\t\tfromIdentifier,\n\t\t\t\t},\n\t\t\t\toverriddenFieldParam{\n\t\t\t\t\toverriddenFieldType,\n\t\t\t\t\toverriddenFieldName,\n\t\t\t\t\toverriddenIdentifier,\n\t\t\t\t},\n\t\t\t\tindent,\n\t\t\t)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\tcase *compile.MapSpec:\n\t\t\terr := c.genConverterForMap(\n\t\t\t\ttoFieldParam{\n\t\t\t\t\ttoFieldType,\n\t\t\t\t\ttoField.Name,\n\t\t\t\t\ttoField.Required,\n\t\t\t\t\ttoIdentifier,\n\t\t\t\t},\n\t\t\t\tfromFieldParam{\n\t\t\t\t\tfromField.Type,\n\t\t\t\t\tfromField.Name,\n\t\t\t\t\tfromIdentifier,\n\t\t\t\t\tfromIdentifier,\n\t\t\t\t},\n\t\t\t\toverriddenFieldParam{\n\t\t\t\t\toverriddenFieldType,\n\t\t\t\t\toverriddenFieldName,\n\t\t\t\t\toverriddenIdentifier,\n\t\t\t\t},\n\t\t\t\tindent,\n\t\t\t)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\tdefault:\n\t\t\t// fmt.Printf(\"Unknown type %s for field %s \\n\",\n\t\t\t// \ttoField.Type.TypeCode().String(), toField.Name,\n\t\t\t// )\n\n\t\t\t// pkgName, err := 
h.TypePackageName(toField.Type.IDLFile())\n\t\t\t// if err != nil {\n\t\t\t// \treturn nil, err\n\t\t\t// }\n\t\t\t// typeName := pkgName + \".\" + toField.Type.ThriftName()\n\t\t\t// line := toIdentifier + \"(*\" + typeName + \")\" + postfix\n\t\t\t// c.Lines = append(c.Lines, line)\n\t\t}\n\t}\n\n\treturn nil\n}", "func (b *Builder) Field(keypath string) *Builder {\n\tb.p.RegisterTransformation(impl.Field(keypath))\n\treturn b\n}", "func (b *basic) ToGoCode(n *ecsgen.Node) (string, error) {\n\t// we can only generate a Go struct definition for an Object, verify\n\t// we're not shooting ourselves in the foot\n\tif !n.IsObject() {\n\t\treturn \"\", fmt.Errorf(\"node %s is not an object\", n.Path)\n\t}\n\n\t// Now enumerate the Node's fields and sort the keys so the resulting Go code\n\t// is deterministically generated\n\tfieldKeys := []string{}\n\n\tfor key := range n.Children {\n\t\tfieldKeys = append(fieldKeys, key)\n\t}\n\n\tsort.Strings(fieldKeys)\n\n\t// Create a new buffer to write the struct definition to\n\tbuf := new(strings.Builder)\n\n\t// comment and type definition\n\tbuf.WriteString(fmt.Sprintf(\"// %s defines the object located at ECS path %s.\", n.TypeIdent().Pascal(), n.Path))\n\tbuf.WriteString(\"\\n\")\n\tbuf.WriteString(fmt.Sprintf(\"type %s struct {\", n.TypeIdent().Pascal()))\n\tbuf.WriteString(\"\\n\")\n\n\t// Enumerate the fields and generate their field definition, adding it\n\t// to the buffer as a line item.\n\tfor _, k := range fieldKeys {\n\t\tscalarField := n.Children[k]\n\t\tbuf.WriteString(\n\t\t\tfmt.Sprintf(\n\t\t\t\t\"\\t%s %s `json:\\\"%s,omitempty\\\" yaml:\\\"%s,omitempty\\\" ecs:\\\"%s\\\"`\",\n\t\t\t\tscalarField.FieldIdent().Pascal(),\n\t\t\t\tGoFieldType(scalarField),\n\t\t\t\tscalarField.Name,\n\t\t\t\tscalarField.Name,\n\t\t\t\tscalarField.Path,\n\t\t\t),\n\t\t)\n\t\tbuf.WriteString(\"\\n\")\n\t}\n\n\t// Close the type definition and return the result\n\tbuf.WriteString(\"}\")\n\tbuf.WriteString(\"\\n\")\n\n\t// if the 
user included the JSON operator flag, add the implementation\n\tif b.IncludeJSONMarshal {\n\t\t// Now we implement at json.Marshaler implementation for each specific type that\n\t\t// removes any nested JSON types that might exist.\n\t\t//\n\t\t// We do this by enumerating every field in the type and check to see\n\t\t// if it's got a zero value.\n\t\tbuf.WriteString(\"\\n\")\n\t\tbuf.WriteString(\"// MarshalJSON implements the json.Marshaler interface and removes zero values from returned JSON.\")\n\t\tbuf.WriteString(\"\\n\")\n\t\tbuf.WriteString(\n\t\t\tfmt.Sprintf(\n\t\t\t\t\"func (b %s) MarshalJSON() ([]byte, error) {\",\n\t\t\t\tn.TypeIdent().Pascal(),\n\t\t\t),\n\t\t)\n\t\tbuf.WriteString(\"\\n\")\n\n\t\t// Define the result struct we will populate non-zero fields with\n\t\tbuf.WriteString(\"\\tres := map[string]interface{}{}\")\n\t\tbuf.WriteString(\"\\n\")\n\t\tbuf.WriteString(\"\\n\")\n\n\t\t// enumerate the fields for the object fields\n\t\tfor _, fieldName := range fieldKeys {\n\t\t\tfield := n.Children[fieldName]\n\t\t\tbuf.WriteString(\n\t\t\t\tfmt.Sprintf(\n\t\t\t\t\t\"\\tif val := reflect.ValueOf(b.%s); !val.IsZero() {\", field.FieldIdent().Pascal(),\n\t\t\t\t),\n\t\t\t)\n\t\t\tbuf.WriteString(\n\t\t\t\tfmt.Sprintf(\n\t\t\t\t\t\"\\t\\tres[\\\"%s\\\"] = b.%s\",\n\t\t\t\t\tfield.Name,\n\t\t\t\t\tfield.FieldIdent().Pascal(),\n\t\t\t\t),\n\t\t\t)\n\t\t\tbuf.WriteString(\"\\t}\")\n\t\t\tbuf.WriteString(\"\\n\")\n\t\t\tbuf.WriteString(\"\\n\")\n\t\t}\n\n\t\t// add a line spacer and return the marshaled JSON result\n\t\tbuf.WriteString(\"\\n\")\n\t\tbuf.WriteString(\"\\treturn json.Marshal(res)\")\n\t\tbuf.WriteString(\"\\n\")\n\t\tbuf.WriteString(\"}\")\n\t\tbuf.WriteString(\"\\n\")\n\t}\n\n\treturn buf.String(), nil\n}", "func (fs *FileStat) GenerateFields() (string, error) {\n\ttb, e := fs.modTime.MarshalBinary()\n\tif e != nil {\n\t\treturn \"\", e\n\t}\n\tcb, e := fs.compressedBytes()\n\tif e != nil {\n\t\treturn \"\", e\n\t}\n\n\tformat := `\"%s\", 
\"%s\", %d, 0%o, binfs.MustHexDecode(\"%x\"), %t, binfs.MustHexDecode(\"%x\")`\n\treturn fmt.Sprintf(format,\n\t\tfs.path,\n\t\tfs.name,\n\t\tfs.size,\n\t\tfs.mode,\n\t\ttb,\n\t\tfs.isDir,\n\t\tcb,\n\t), nil\n}", "func GenerateBaseFields(conf CurveConfig) error {\n\tif err := goff.GenerateFF(\"fr\", \"Element\", conf.RTorsion, filepath.Join(conf.OutputDir, \"fr\"), false); err != nil {\n\t\treturn err\n\t}\n\tif err := goff.GenerateFF(\"fp\", \"Element\", conf.FpModulus, filepath.Join(conf.OutputDir, \"fp\"), false); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}", "func (n DependencyNode) Codegen(prog *Program) (value.Value, error) { return nil, nil }", "func (g *mapGen) genType() {\n\tg.P(\"type \", g.typeName, \" struct {\")\n\tg.P(\"m *map[\", getGoType(g.GeneratedFile, g.field.Message.Fields[0]), \"]\", getGoType(g.GeneratedFile, g.field.Message.Fields[1]))\n\tg.P(\"}\")\n\tg.P()\n}", "func (ec *executionContext) ___Field(ctx context.Context, sel []query.Selection, obj *introspection.Field) graphql.Marshaler {\n\tfields := graphql.CollectFields(ec.Doc, sel, __FieldImplementors, ec.Variables)\n\n\tout := graphql.NewOrderedMap(len(fields))\n\tfor i, field := range fields {\n\t\tout.Keys[i] = field.Alias\n\n\t\tswitch field.Name {\n\t\tcase \"__typename\":\n\t\t\tout.Values[i] = graphql.MarshalString(\"__Field\")\n\t\tcase \"name\":\n\t\t\tout.Values[i] = ec.___Field_name(ctx, field, obj)\n\t\tcase \"description\":\n\t\t\tout.Values[i] = ec.___Field_description(ctx, field, obj)\n\t\tcase \"args\":\n\t\t\tout.Values[i] = ec.___Field_args(ctx, field, obj)\n\t\tcase \"type\":\n\t\t\tout.Values[i] = ec.___Field_type(ctx, field, obj)\n\t\tcase \"isDeprecated\":\n\t\t\tout.Values[i] = ec.___Field_isDeprecated(ctx, field, obj)\n\t\tcase \"deprecationReason\":\n\t\t\tout.Values[i] = ec.___Field_deprecationReason(ctx, field, obj)\n\t\tdefault:\n\t\t\tpanic(\"unknown field \" + strconv.Quote(field.Name))\n\t\t}\n\t}\n\n\treturn out\n}", "func (m *BgpConfiguration) 
GetFieldDeserializers()(map[string]func(i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode)(error)) {\n res := make(map[string]func(i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode)(error))\n res[\"asn\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetInt32Value()\n if err != nil {\n return err\n }\n if val != nil {\n m.SetAsn(val)\n }\n return nil\n }\n res[\"ipAddress\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetStringValue()\n if err != nil {\n return err\n }\n if val != nil {\n m.SetIpAddress(val)\n }\n return nil\n }\n res[\"localIpAddress\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetStringValue()\n if err != nil {\n return err\n }\n if val != nil {\n m.SetLocalIpAddress(val)\n }\n return nil\n }\n res[\"@odata.type\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetStringValue()\n if err != nil {\n return err\n }\n if val != nil {\n m.SetOdataType(val)\n }\n return nil\n }\n res[\"peerIpAddress\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetStringValue()\n if err != nil {\n return err\n }\n if val != nil {\n m.SetPeerIpAddress(val)\n }\n return nil\n }\n return res\n}", "func (x StructField) Generate(r *rand.Rand, size int) reflect.Value {\n\tname, _ := quick.Value(reflect.TypeOf(\"\"), r)\n\tfor {\n\t\treturn reflect.ValueOf(StructField{\n\t\t\tName: name.String(),\n\t\t\tValue: Generate(r, size, false, false).Interface().(Value),\n\t\t})\n\t}\n}", "func (g *Generator) collectAndGenerate(typeName string, genFn GeneratorFunc) {\n\tfields := make([]Field, 0, 100)\n\timports := make([]Import, 0, 100)\n\tfor _, file := range g.pkg.files {\n\t\t// Set the state for this 
run of the walker.\n\t\tfile.typeName = typeName\n\t\tfile.fields = nil\n\t\tif file.file != nil {\n\t\t\tast.Inspect(file.file, file.genDecl)\n\t\t\tfields = append(fields, file.fields...)\n\t\t\timports = append(imports, file.imports...)\n\t\t}\n\t}\n\n\tgenFn(typeName, fields, imports)\n\n}", "func execNewField(_ int, p *gop.Context) {\n\targs := p.GetArgs(5)\n\tret := types.NewField(token.Pos(args[0].(int)), args[1].(*types.Package), args[2].(string), args[3].(types.Type), args[4].(bool))\n\tp.Ret(5, ret)\n}", "func CreateField(prefix string, field *typast.Field) *Field {\n\t// NOTE: mimic kelseyhightower/envconfig struct tags\n\n\tname := field.Get(\"envconfig\")\n\tif name == \"\" {\n\t\tname = strings.ToUpper(field.Names[0])\n\t}\n\n\treturn &Field{\n\t\tKey: fmt.Sprintf(\"%s_%s\", prefix, name),\n\t\tDefault: field.Get(\"default\"),\n\t\tRequired: field.Get(\"required\") == \"true\",\n\t}\n}", "func (p *Plugin) generateRedisHashFieldFunc(data *generateData) {\n\n\ttype FiledType struct {\n\t\t*generateField\n\t\t*generateData\n\t}\n\n\tfor _, field := range data.Fields {\n\n\t\tfieldData := &FiledType{}\n\t\tfieldData.generateData = data\n\t\tfieldData.generateField = field\n\n\t\tgetTemplateName := \"\"\n\t\tsetTemplateName := \"\"\n\t\ttpy := descriptor.FieldDescriptorProto_Type_value[field.Type]\n\t\tswitch descriptor.FieldDescriptorProto_Type(tpy) {\n\t\tcase 
descriptor.FieldDescriptorProto_TYPE_DOUBLE,\n\t\t\tdescriptor.FieldDescriptorProto_TYPE_FLOAT,\n\t\t\tdescriptor.FieldDescriptorProto_TYPE_INT64,\n\t\t\tdescriptor.FieldDescriptorProto_TYPE_UINT64,\n\t\t\tdescriptor.FieldDescriptorProto_TYPE_INT32,\n\t\t\tdescriptor.FieldDescriptorProto_TYPE_UINT32,\n\t\t\tdescriptor.FieldDescriptorProto_TYPE_FIXED64,\n\t\t\tdescriptor.FieldDescriptorProto_TYPE_SFIXED64,\n\t\t\tdescriptor.FieldDescriptorProto_TYPE_FIXED32,\n\t\t\tdescriptor.FieldDescriptorProto_TYPE_SFIXED32,\n\t\t\tdescriptor.FieldDescriptorProto_TYPE_BOOL,\n\t\t\tdescriptor.FieldDescriptorProto_TYPE_STRING:\n\t\t\tgetTemplateName = getBasicTypeFromRedisHashFuncTemplate\n\t\t\tsetTemplateName = setBasicTypeFromRedisHashFuncTemplate\n\t\tcase descriptor.FieldDescriptorProto_TYPE_ENUM:\n\t\t\tgetTemplateName = getBasicTypeFromRedisHashFuncTemplate\n\t\t\tfieldData.RedisType = \"Int64\"\n\t\t\tfieldData.RedisTypeReplace = true\n\t\t\tsetTemplateName = setBasicTypeFromRedisHashFuncTemplate\n\t\tcase descriptor.FieldDescriptorProto_TYPE_MESSAGE:\n\t\t\tgetTemplateName = getMessageTypeFromRedisHashFuncTemplate\n\t\t\tfieldData.RedisType = \"Bytes\"\n\t\t\tsetTemplateName = setMessageTypeFromRedisHashFuncTemplate\n\t\tdefault:\n\t\t\treturn\n\t\t}\n\n\t\tif field.Getter {\n\t\t\tif getTemplateName != \"\" {\n\t\t\t\ttmpl, _ := template.New(\"hash-get\").Parse(getTemplateName)\n\t\t\t\tif err := tmpl.Execute(p.Buffer, fieldData); err != nil {\n\t\t\t\t\tlog.Println(getTemplateName, fieldData)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\tif field.Setter {\n\t\t\tif setTemplateName != \"\" {\n\t\t\t\ttmpl, _ := template.New(\"hash-set\").Parse(setTemplateName)\n\t\t\t\tif err := tmpl.Execute(p.Buffer, fieldData); err != nil {\n\t\t\t\t\tlog.Println(setTemplateName, fieldData)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n}", "func (ec *executionContext) ___Field(ctx context.Context, sel ast.SelectionSet, obj *introspection.Field) graphql.Marshaler {\n\tfields := graphql.CollectFields(ctx, sel, 
__FieldImplementors)\n\n\tout := graphql.NewOrderedMap(len(fields))\n\tfor i, field := range fields {\n\t\tout.Keys[i] = field.Alias\n\n\t\tswitch field.Name {\n\t\tcase \"__typename\":\n\t\t\tout.Values[i] = graphql.MarshalString(\"__Field\")\n\t\tcase \"name\":\n\t\t\tout.Values[i] = ec.___Field_name(ctx, field, obj)\n\t\tcase \"description\":\n\t\t\tout.Values[i] = ec.___Field_description(ctx, field, obj)\n\t\tcase \"args\":\n\t\t\tout.Values[i] = ec.___Field_args(ctx, field, obj)\n\t\tcase \"type\":\n\t\t\tout.Values[i] = ec.___Field_type(ctx, field, obj)\n\t\tcase \"isDeprecated\":\n\t\t\tout.Values[i] = ec.___Field_isDeprecated(ctx, field, obj)\n\t\tcase \"deprecationReason\":\n\t\t\tout.Values[i] = ec.___Field_deprecationReason(ctx, field, obj)\n\t\tdefault:\n\t\t\tpanic(\"unknown field \" + strconv.Quote(field.Name))\n\t\t}\n\t}\n\n\treturn out\n}", "func getFieldConstructor(e ast.Expr) string {\n\tswitch t := e.(type) {\n\tcase *ast.StarExpr:\n\t\tswitch t.X.(type) {\n\t\tcase *ast.StarExpr:\n\t\t\tpanic(\"Ponter on pointers is not supported in annotation struct\")\n\t\tcase *ast.ArrayType:\n\t\t\tpanic(\"Pointer on arrays is not supported in annotation struct\")\n\t\tdefault:\n\t\t\treturn \"&\" + getFieldConstructor(t.X)\n\t\t}\n\tcase *ast.ArrayType:\n\t\tswitch elemType := t.Elt.(type) {\n\t\tcase *ast.StarExpr:\n\t\t\tpanic(\"Array of pointers is not supported in annotation struct\")\n\t\tcase *ast.ArrayType:\n\t\t\tpanic(\"Array of arrays is not supported in annotation struct\")\n\t\tdefault:\n\t\t\treturn \"[]\" + getFieldConstructor(elemType)\n\t\t}\n\tcase *ast.Ident:\n\t\tswitch t.Name {\n\t\tcase \"int\", \"int8\", \"int16\", \"int32\", \"int64\",\n\t\t\t\"uint\", \"uint8\", \"uint16\", \"uint32\", \"uint64\",\n\t\t\t\"float32\", \"float64\", \"byte\", \"rune\", \"string\":\n\t\t\treturn t.Name + \"{\"\n\t\tcase \"complex64\", \"complex128\", \"uintptr\":\n\t\t\tpanic(\"Type '\" + t.Name + \"' is not supported in annotation 
struct\")\n\t\tdefault:\n\t\t\treturn t.Name + \"{\"\n\t\t}\n\tdefault:\n\t\tpanic(\"Unsupported field type in annotation\")\n\t}\n}", "func (ec *executionContext) ___Field(sel []query.Selection, obj *introspection.Field) graphql.Marshaler {\n\tfields := graphql.CollectFields(ec.doc, sel, __FieldImplementors, ec.variables)\n\tout := graphql.NewOrderedMap(len(fields))\n\tfor i, field := range fields {\n\t\tout.Keys[i] = field.Alias\n\n\t\tswitch field.Name {\n\t\tcase \"__typename\":\n\t\t\tout.Values[i] = graphql.MarshalString(\"__Field\")\n\t\tcase \"name\":\n\t\t\tout.Values[i] = ec.___Field_name(field, obj)\n\t\tcase \"description\":\n\t\t\tout.Values[i] = ec.___Field_description(field, obj)\n\t\tcase \"args\":\n\t\t\tout.Values[i] = ec.___Field_args(field, obj)\n\t\tcase \"type\":\n\t\t\tout.Values[i] = ec.___Field_type(field, obj)\n\t\tcase \"isDeprecated\":\n\t\t\tout.Values[i] = ec.___Field_isDeprecated(field, obj)\n\t\tcase \"deprecationReason\":\n\t\t\tout.Values[i] = ec.___Field_deprecationReason(field, obj)\n\t\tdefault:\n\t\t\tpanic(\"unknown field \" + strconv.Quote(field.Name))\n\t\t}\n\t}\n\n\treturn out\n}", "func (ec *executionContext) ___Field(ctx context.Context, sel ast.SelectionSet, obj *introspection.Field) graphql.Marshaler {\n\tfields := graphql.CollectFields(ctx, sel, __FieldImplementors)\n\n\tout := graphql.NewOrderedMap(len(fields))\n\tinvalid := false\n\tfor i, field := range fields {\n\t\tout.Keys[i] = field.Alias\n\n\t\tswitch field.Name {\n\t\tcase \"__typename\":\n\t\t\tout.Values[i] = graphql.MarshalString(\"__Field\")\n\t\tcase \"name\":\n\t\t\tout.Values[i] = ec.___Field_name(ctx, field, obj)\n\t\t\tif out.Values[i] == graphql.Null {\n\t\t\t\tinvalid = true\n\t\t\t}\n\t\tcase \"description\":\n\t\t\tout.Values[i] = ec.___Field_description(ctx, field, obj)\n\t\tcase \"args\":\n\t\t\tout.Values[i] = ec.___Field_args(ctx, field, obj)\n\t\t\tif out.Values[i] == graphql.Null {\n\t\t\t\tinvalid = true\n\t\t\t}\n\t\tcase 
\"type\":\n\t\t\tout.Values[i] = ec.___Field_type(ctx, field, obj)\n\t\t\tif out.Values[i] == graphql.Null {\n\t\t\t\tinvalid = true\n\t\t\t}\n\t\tcase \"isDeprecated\":\n\t\t\tout.Values[i] = ec.___Field_isDeprecated(ctx, field, obj)\n\t\t\tif out.Values[i] == graphql.Null {\n\t\t\t\tinvalid = true\n\t\t\t}\n\t\tcase \"deprecationReason\":\n\t\t\tout.Values[i] = ec.___Field_deprecationReason(ctx, field, obj)\n\t\tdefault:\n\t\t\tpanic(\"unknown field \" + strconv.Quote(field.Name))\n\t\t}\n\t}\n\n\tif invalid {\n\t\treturn graphql.Null\n\t}\n\treturn out\n}", "func (ec *executionContext) ___Field(ctx context.Context, sel ast.SelectionSet, obj *introspection.Field) graphql.Marshaler {\n\tfields := graphql.CollectFields(ctx, sel, __FieldImplementors)\n\n\tout := graphql.NewOrderedMap(len(fields))\n\tinvalid := false\n\tfor i, field := range fields {\n\t\tout.Keys[i] = field.Alias\n\n\t\tswitch field.Name {\n\t\tcase \"__typename\":\n\t\t\tout.Values[i] = graphql.MarshalString(\"__Field\")\n\t\tcase \"name\":\n\t\t\tout.Values[i] = ec.___Field_name(ctx, field, obj)\n\t\t\tif out.Values[i] == graphql.Null {\n\t\t\t\tinvalid = true\n\t\t\t}\n\t\tcase \"description\":\n\t\t\tout.Values[i] = ec.___Field_description(ctx, field, obj)\n\t\tcase \"args\":\n\t\t\tout.Values[i] = ec.___Field_args(ctx, field, obj)\n\t\t\tif out.Values[i] == graphql.Null {\n\t\t\t\tinvalid = true\n\t\t\t}\n\t\tcase \"type\":\n\t\t\tout.Values[i] = ec.___Field_type(ctx, field, obj)\n\t\t\tif out.Values[i] == graphql.Null {\n\t\t\t\tinvalid = true\n\t\t\t}\n\t\tcase \"isDeprecated\":\n\t\t\tout.Values[i] = ec.___Field_isDeprecated(ctx, field, obj)\n\t\t\tif out.Values[i] == graphql.Null {\n\t\t\t\tinvalid = true\n\t\t\t}\n\t\tcase \"deprecationReason\":\n\t\t\tout.Values[i] = ec.___Field_deprecationReason(ctx, field, obj)\n\t\tdefault:\n\t\t\tpanic(\"unknown field \" + strconv.Quote(field.Name))\n\t\t}\n\t}\n\n\tif invalid {\n\t\treturn graphql.Null\n\t}\n\treturn out\n}", "func (ec 
*executionContext) ___Field(ctx context.Context, sel ast.SelectionSet, obj *introspection.Field) graphql.Marshaler {\n\tfields := graphql.CollectFields(ctx, sel, __FieldImplementors)\n\n\tout := graphql.NewOrderedMap(len(fields))\n\tinvalid := false\n\tfor i, field := range fields {\n\t\tout.Keys[i] = field.Alias\n\n\t\tswitch field.Name {\n\t\tcase \"__typename\":\n\t\t\tout.Values[i] = graphql.MarshalString(\"__Field\")\n\t\tcase \"name\":\n\t\t\tout.Values[i] = ec.___Field_name(ctx, field, obj)\n\t\t\tif out.Values[i] == graphql.Null {\n\t\t\t\tinvalid = true\n\t\t\t}\n\t\tcase \"description\":\n\t\t\tout.Values[i] = ec.___Field_description(ctx, field, obj)\n\t\tcase \"args\":\n\t\t\tout.Values[i] = ec.___Field_args(ctx, field, obj)\n\t\t\tif out.Values[i] == graphql.Null {\n\t\t\t\tinvalid = true\n\t\t\t}\n\t\tcase \"type\":\n\t\t\tout.Values[i] = ec.___Field_type(ctx, field, obj)\n\t\t\tif out.Values[i] == graphql.Null {\n\t\t\t\tinvalid = true\n\t\t\t}\n\t\tcase \"isDeprecated\":\n\t\t\tout.Values[i] = ec.___Field_isDeprecated(ctx, field, obj)\n\t\t\tif out.Values[i] == graphql.Null {\n\t\t\t\tinvalid = true\n\t\t\t}\n\t\tcase \"deprecationReason\":\n\t\t\tout.Values[i] = ec.___Field_deprecationReason(ctx, field, obj)\n\t\tdefault:\n\t\t\tpanic(\"unknown field \" + strconv.Quote(field.Name))\n\t\t}\n\t}\n\n\tif invalid {\n\t\treturn graphql.Null\n\t}\n\treturn out\n}", "func (f *File) genDecl(node ast.Node) bool {\n\tdecl, ok := node.(*ast.GenDecl)\n\tif !ok || decl.Tok != token.TYPE { // We only care about Type declarations.\n\t\treturn true\n\t}\n\t// The name of the type of the constants we are declaring.\n\t// Can change if this is a multi-element declaration.\n\ttyp := \"\"\n\t// Loop over the elements of the declaration. 
Each element is a ValueSpec:\n\t// a list of names possibly followed by a type, possibly followed by values.\n\t// If the type and value are both missing, we carry down the type (and value,\n\t// but the \"go/types\" package takes care of that).\n\tfor _, spec := range decl.Specs {\n\t\ttspec := spec.(*ast.TypeSpec) // Guaranteed to succeed as this is TYPE.\n\t\tif tspec.Type != nil {\n\t\t\t// \"X T\". We have a type. Remember it.\n\t\t\ttyp = tspec.Name.Name\n\t\t}\n\t\tif typ != f.typeName {\n\t\t\t// This is not the type we're looking for.\n\t\t\tcontinue\n\t\t}\n\t\t// We now have a list of names (from one line of source code) all being\n\t\t// declared with the desired type.\n\n\t\tstructType, ok := tspec.Type.(*ast.StructType)\n\t\tif !ok {\n\t\t\t//not a struct type\n\t\t\tcontinue\n\t\t}\n\n\t\ttypesObj, typeObjOk := f.pkg.defs[tspec.Name]\n\t\tif !typeObjOk {\n\t\t\tlog.Fatalf(\"no type info found for struct %s\", typ)\n\t\t}\n\n\t\tfor _, fieldLine := range structType.Fields.List {\n\t\t\tfor _, field := range fieldLine.Names {\n\t\t\t\t//skip struct padding\n\t\t\t\tif field.Name == \"_\" {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tfieldObj, _, _ := types.LookupFieldOrMethod(typesObj.Type(), false, f.pkg.typesPkg, field.Name)\n\n\t\t\t\ttypeStr := fieldObj.Type().String()\n\t\t\t\ttags := parseFieldTags(fieldLine.Tag)\n\n\t\t\t\t//Skip here so we don't include rubbish import lines\n\t\t\t\tif tags[\"exclude_dao\"].Value == \"true\" {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tprocessedTypeStr, importPath := processTypeStr(typeStr)\n\t\t\t\t//log.Printf(\"processedTypeStr: %s, importPath: %s\", processedTypeStr, importPath)\n\n\t\t\t\tif importPath != \"\" && !importExists(importPath, f.imports) {\n\n\t\t\t\t\tf.imports = append(f.imports, Import{importPath})\n\n\t\t\t\t}\n\n\t\t\t\tv := Field{\n\t\t\t\t\tName: field.Name,\n\t\t\t\t\tTags: tags,\n\t\t\t\t\tTypeName: processedTypeStr,\n\t\t\t\t}\n\t\t\t\tf.fields = append(f.fields, 
v)\n\t\t\t}\n\t\t}\n\t}\n\treturn false\n}", "func newField(\n\tcrd *CRD,\n\tfieldNames names.Names,\n\tshapeRef *awssdkmodel.ShapeRef,\n\tcfg *ackgenconfig.FieldConfig,\n) *Field {\n\tvar gte, gt, gtwp string\n\tvar shape *awssdkmodel.Shape\n\tif shapeRef != nil {\n\t\tshape = shapeRef.Shape\n\t}\n\tif shape != nil {\n\t\tgte, gt, gtwp = cleanGoType(crd.sdkAPI, crd.cfg, shape)\n\t} else {\n\t\tgte = \"string\"\n\t\tgt = \"*string\"\n\t\tgtwp = \"*string\"\n\t}\n\treturn &Field{\n\t\tCRD: crd,\n\t\tNames: fieldNames,\n\t\tShapeRef: shapeRef,\n\t\tGoType: gt,\n\t\tGoTypeElem: gte,\n\t\tGoTypeWithPkgName: gtwp,\n\t\tFieldConfig: cfg,\n\t}\n}", "func Generate(fields map[string]*indexer.Field) map[string]interface{} {\n\treturn mapFields(fields)\n}", "func (f *File) genDecl(node ast.Node) bool {\n\tdecl, ok := node.(*ast.GenDecl)\n\n\tif !ok || decl.Tok != token.TYPE {\n\t\t// We only care about types declarations.\n\t\treturn true\n\t}\n\n\t// Loop over the elements of the declaration. Each element is a ValueSpec:\n\t// a list of names possibly followed by a type, possibly followed by values.\n\t// If the type and value are both missing, we carry down the type (and value,\n\t// but the \"go/types\" package takes care of that).\n\tfor _, spec := range decl.Specs {\n\t\ttspec := spec.(*ast.TypeSpec) // Guaranteed to succeed as this is TYPE.\n\n\t\tif tspec.Name.Name != f.typeName {\n\t\t\t// Not the type we're looking for.\n\t\t\tcontinue\n\t\t}\n\n\t\tlog.Printf(\"Type spec: %v name: %s\\n\", tspec.Type, tspec.Name.Name)\n\n\t\tif structType, ok := tspec.Type.(*ast.StructType); ok {\n\t\t\tlog.Printf(\"Located the struct type: %v\\n\", structType)\n\n\t\t\tfor _, field := range structType.Fields.List {\n\t\t\t\tlog.Printf(\"Field: %v\\n\", field)\n\n\t\t\t\tif ident, ok := field.Type.(*ast.Ident); ok {\n\t\t\t\t\t// Look at list of known types and determine if we have a translation.\n\t\t\t\t\ttp := KNOWN_SOURCE_TYPES[ident.Name]\n\n\t\t\t\t\tif tp != ST_UNKNOWN 
{\n\t\t\t\t\t\tlog.Printf(\"Primitive or local type found: %v => %s\\n\", ident.Name, tp.String())\n\t\t\t\t\t} else {\n\t\t\t\t\t\t// TODO: We should probably consider all of these fields as local objects and add\n\t\t\t\t\t\t// foreign key links.\n\t\t\t\t\t\tlog.Printf(\"UNRECOGNIZED LOCAL TYPE seen: %v\\n\", ident.Name)\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t}\n\n\t\t\t\t\tif len(field.Names) == 1 {\n\t\t\t\t\t\tfieldName := field.Names[0].Name\n\t\t\t\t\t\tisPK := false\n\n\t\t\t\t\t\tif strings.ToLower(fieldName) == \"id\" {\n\t\t\t\t\t\t\tisPK = true\n\t\t\t\t\t\t}\n\n\t\t\t\t\t\tf.fields = append(f.fields,\n\t\t\t\t\t\t\tField{\n\t\t\t\t\t\t\t\tsrcName: fieldName,\n\t\t\t\t\t\t\t\tdbName: strings.ToLower(fieldName), // TODO: Override with annotations\n\t\t\t\t\t\t\t\tisPK: isPK,\n\t\t\t\t\t\t\t\tsrcType: ident.Name,\n\t\t\t\t\t\t\t\tdbType: \"string\",\n\t\t\t\t\t\t\t})\n\t\t\t\t\t}\n\t\t\t\t} else if selector, ok := field.Type.(*ast.SelectorExpr); ok {\n\t\t\t\t\t// TODO: This likely means an object in another package. 
Foreign link?\n\t\t\t\t\tlog.Printf(\"Found selector: %s :: %s\\n\", selector.X, selector.Sel.Name)\n\t\t\t\t\ttypeName := fmt.Sprintf(\"%s.%s\", selector.X, selector.Sel.Name)\n\n\t\t\t\t\ttp := KNOWN_SOURCE_TYPES[typeName]\n\n\t\t\t\t\tif tp != ST_UNKNOWN {\n\t\t\t\t\t\tlog.Printf(\"Primitive or local type found: %v => %s\\n\", typeName, tp.String())\n\t\t\t\t\t\tf.additionalImports = append(f.additionalImports, fmt.Sprintf(\"%s\", selector.X))\n\t\t\t\t\t} else {\n\t\t\t\t\t\t// TODO: We should probably consider all of these fields as local objects and add\n\t\t\t\t\t\t// foreign key links.\n\t\t\t\t\t\tlog.Printf(\"UNRECOGNIZED LOCAL TYPE seen: %v\\n\", typeName)\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t}\n\n\t\t\t\t\tif len(field.Names) == 1 {\n\t\t\t\t\t\tfieldName := field.Names[0].Name\n\t\t\t\t\t\tisPK := false\n\n\t\t\t\t\t\tif strings.ToLower(fieldName) == \"id\" {\n\t\t\t\t\t\t\tisPK = true\n\t\t\t\t\t\t}\n\n\t\t\t\t\t\tf.fields = append(f.fields,\n\t\t\t\t\t\t\tField{\n\t\t\t\t\t\t\t\tsrcName: fieldName,\n\t\t\t\t\t\t\t\tdbName: strings.ToLower(fieldName), // TODO: Override with annotations\n\t\t\t\t\t\t\t\tisPK: isPK,\n\t\t\t\t\t\t\t\tsrcType: typeName,\n\t\t\t\t\t\t\t\tdbType: \"string\",\n\t\t\t\t\t\t\t})\n\t\t\t\t\t}\n\t\t\t\t} else {\n\t\t\t\t\t// TODO: Enumerate all different possible types here.\n\t\t\t\t\tlog.Printf(\"UNKNOWN TYPE seen: %v\\n\", field.Type)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\treturn false\n}", "func (p TreeWriter) getFields(leaf *yaml.RNode) (treeFields, error) {\n\tfieldsByName := map[string]*treeField{}\n\n\t// index nested and non-nested fields\n\tfor i := range p.Fields {\n\t\tf := p.Fields[i]\n\t\tseq, err := leaf.Pipe(&f)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tif seq == nil {\n\t\t\tcontinue\n\t\t}\n\n\t\tif fieldsByName[f.Name] == nil {\n\t\t\tfieldsByName[f.Name] = &treeField{name: f.Name}\n\t\t}\n\n\t\t// non-nested field -- add directly to the treeFields list\n\t\tif f.SubName == \"\" {\n\t\t\t// non-nested 
field -- only 1 element\n\t\t\tval, err := yaml.String(seq.Content()[0], yaml.Trim, yaml.Flow)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tfieldsByName[f.Name].value = val\n\t\t\tcontinue\n\t\t}\n\n\t\t// nested-field -- create a parent elem, and index by the 'match' value\n\t\tif fieldsByName[f.Name].subFieldByMatch == nil {\n\t\t\tfieldsByName[f.Name].subFieldByMatch = map[string]treeFields{}\n\t\t}\n\t\tindex := fieldsByName[f.Name].subFieldByMatch\n\t\tfor j := range seq.Content() {\n\t\t\telem := seq.Content()[j]\n\t\t\tmatches := f.Matches[elem]\n\t\t\tstr, err := yaml.String(elem, yaml.Trim, yaml.Flow)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\n\t\t\t// map the field by the name of the element\n\t\t\t// index the subfields by the matching element so we can put all the fields for the\n\t\t\t// same element under the same branch\n\t\t\tmatchKey := strings.Join(matches, \"/\")\n\t\t\tindex[matchKey] = append(index[matchKey], &treeField{name: f.SubName, value: str})\n\t\t}\n\t}\n\n\t// iterate over collection of all queried fields in the Resource\n\tfor _, field := range fieldsByName {\n\t\t// iterate over collection of elements under the field -- indexed by element name\n\t\tfor match, subFields := range field.subFieldByMatch {\n\t\t\t// create a new element for this collection of fields\n\t\t\t// note: we will convert name to an index later, but keep the match for sorting\n\t\t\telem := &treeField{name: match}\n\t\t\tfield.matchingElementsAndFields = append(field.matchingElementsAndFields, elem)\n\n\t\t\t// iterate over collection of queried fields for the element\n\t\t\tfor i := range subFields {\n\t\t\t\t// add to the list of fields for this element\n\t\t\t\telem.matchingElementsAndFields = append(elem.matchingElementsAndFields, subFields[i])\n\t\t\t}\n\t\t}\n\t\t// clear this cached data\n\t\tfield.subFieldByMatch = nil\n\t}\n\n\t// put the fields in a list so they are ordered\n\tfieldList := treeFields{}\n\tfor _, v := 
range fieldsByName {\n\t\tfieldList = append(fieldList, v)\n\t}\n\n\t// sort the fields\n\tsort.Sort(fieldList)\n\tfor i := range fieldList {\n\t\tfield := fieldList[i]\n\t\t// sort the elements under this field\n\t\tsort.Sort(field.matchingElementsAndFields)\n\n\t\tfor i := range field.matchingElementsAndFields {\n\t\t\telement := field.matchingElementsAndFields[i]\n\t\t\t// sort the elements under a list field by their name\n\t\t\tsort.Sort(element.matchingElementsAndFields)\n\t\t\t// set the name of the element to its index\n\t\t\telement.name = fmt.Sprintf(\"%d\", i)\n\t\t}\n\t}\n\n\treturn fieldList, nil\n}", "func genNodeDev(id nodes.ID, n *nodes.Node) (nodeDev, error) {\n\tr, ok := typesMap[reflect.TypeOf(n.Config).Elem()]\n\tif !ok {\n\t\treturn nil, fmt.Errorf(\"unknown type for %T\", n.Config)\n\t}\n\tv := reflect.New(r)\n\te := v.Elem()\n\te.Field(0).Set(reflect.ValueOf(NodeBase{id: id, name: n.Name, typ: n.Type()}))\n\te.Field(1).Set(reflect.ValueOf(n.Config))\n\t/*\n\t\tswitch v := n.Config.(type) {\n\t\tcase *nodes.Anim1D:\n\t\t\td.nodes[id] = &anim1DDev{NodeBase: b, cfg: v}\n\t\tcase *nodes.Button:\n\t\t\td.nodes[id] = &buttonDev{nodeBase: b, cfg: v}\n\t\tcase *nodes.Display:\n\t\t\td.nodes[id] = &displayDev{nodeBase: b, cfg: v}\n\t\tcase *nodes.IR:\n\t\t\td.nodes[id] = &irDev{nodeBase: b, cfg: v}\n\t\tcase *nodes.PIR:\n\t\t\td.nodes[id] = &pirDev{nodeBase: b, cfg: v}\n\t\tcase *nodes.Sound:\n\t\t\td.nodes[id] = &soundDev{nodeBase: b, cfg: v}\n\t\tdefault:\n\t\t\tpubErr(dbus, \"failed to initialize: unknown node %q: %T\", id, n)\n\t\t\treturn fmt.Errorf(\"unknown node %q: %T\", id, n)\n\t\t}\n\t*/\n\treturn v.Interface().(nodeDev), nil\n}", "func generatePerNodeConfigSnippet(pathStructName string, nodeData *ypathgen.NodeData, fakeRootTypeName, schemaStructPkgAccessor string, preferShadowPath bool) (GoPerNodeCodeSnippet, goTypeData, util.Errors) {\n\t// TODO: See if a float32 -> binary helper should be provided\n\t// for setting a float32 leaf.\n\tvar 
errs util.Errors\n\ts := struct {\n\t\tPathStructName string\n\t\tGoType goTypeData\n\t\tGoFieldName string\n\t\tGoStructTypeName string\n\t\tYANGPath string\n\t\tFakeRootTypeName string\n\t\tIsScalarField bool\n\t\tIsRoot bool\n\t\tSchemaStructPkgAccessor string\n\t\tWildcardSuffix string\n\t\tSpecialConversionFn string\n\t\tPreferShadowPath bool\n\t}{\n\t\tPathStructName: pathStructName,\n\t\tGoType: goTypeData{\n\t\t\tGoTypeName: nodeData.GoTypeName,\n\t\t\tTransformedGoTypeName: transformGoTypeName(nodeData),\n\t\t\tIsLeaf: nodeData.IsLeaf,\n\t\t\tHasDefault: nodeData.HasDefault,\n\t\t},\n\t\tGoFieldName: nodeData.GoFieldName,\n\t\tGoStructTypeName: nodeData.SubsumingGoStructName,\n\t\tYANGPath: nodeData.YANGPath,\n\t\tFakeRootTypeName: fakeRootTypeName,\n\t\tIsScalarField: nodeData.IsScalarField,\n\t\tIsRoot: nodeData.YANGPath == \"/\",\n\t\tWildcardSuffix: ypathgen.WildcardSuffix,\n\t\tSchemaStructPkgAccessor: schemaStructPkgAccessor,\n\t\tPreferShadowPath: preferShadowPath,\n\t}\n\tvar getMethod, replaceMethod, convertHelper strings.Builder\n\tif nodeData.IsLeaf {\n\t\t// Leaf types use their parent GoStruct to unmarshal, before\n\t\t// being retrieved out when returned to the user.\n\t\tif err := goLeafConvertTemplate.Execute(&convertHelper, s); err != nil {\n\t\t\tutil.AppendErr(errs, err)\n\t\t}\n\t}\n\tif err := goNodeSetTemplate.Execute(&replaceMethod, s); err != nil {\n\t\tutil.AppendErr(errs, err)\n\t}\n\tif err := goNodeGetTemplate.Execute(&getMethod, s); err != nil {\n\t\tutil.AppendErr(errs, err)\n\t}\n\n\treturn GoPerNodeCodeSnippet{\n\t\tPathStructName: pathStructName,\n\t\tGetMethod: getMethod.String(),\n\t\tConvertHelper: convertHelper.String(),\n\t\tReplaceMethod: replaceMethod.String(),\n\t}, s.GoType, errs\n}", "func fieldToSchema(prog *Program, fName, tagName string, ref Reference, f *ast.Field) (*Schema, error) {\n\tvar p Schema\n\n\tif f.Doc != nil {\n\t\tp.Description = f.Doc.Text()\n\t} else if f.Comment != nil {\n\t\tp.Description = 
f.Comment.Text()\n\t}\n\tp.Description = strings.TrimSpace(p.Description)\n\n\tvar tags []string\n\tp.Description, tags = parseTags(p.Description)\n\terr := setTags(fName, ref.File, &p, tags)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Don't need to carry on if we're loading our own schema.\n\tif p.CustomSchema != \"\" {\n\t\treturn &p, nil\n\t}\n\n\tpkg := ref.Package\n\tvar name *ast.Ident\n\n\tdbg(\"fieldToSchema: %v\", f.Names)\n\n\tsw := f.Type\nstart:\n\tswitch typ := sw.(type) {\n\n\t// Interface, only useful for its description.\n\tcase *ast.InterfaceType:\n\t\tif len(f.Names) == 0 {\n\t\t\treturn nil, fmt.Errorf(\"field has no Names: %#v\", f)\n\t\t}\n\n\t\tfield := f.Names[0].Obj.Decl.(*ast.Field)\n\t\tswitch typ := field.Type.(type) {\n\t\tcase *ast.SelectorExpr:\n\t\t\tpkgSel, ok := typ.X.(*ast.Ident)\n\t\t\tif !ok {\n\t\t\t\treturn nil, fmt.Errorf(\"typ.X is not ast.Ident: %#v\", typ.X)\n\t\t\t}\n\t\t\tpkg = pkgSel.Name\n\t\t\tname = typ.Sel\n\n\t\t\tlookup := pkg + \".\" + name.Name\n\t\t\tif _, err := GetReference(prog, ref.Context, false, lookup, ref.File); err != nil {\n\t\t\t\treturn nil, fmt.Errorf(\"GetReference: %v\", err)\n\t\t\t}\n\t\tcase *ast.Ident:\n\t\t\tname = typ\n\t\t}\n\n\t// Pointer type; we don't really care about this for now, so just read over\n\t// it.\n\tcase *ast.StarExpr:\n\t\tsw = typ.X\n\t\tgoto start\n\n\t// Simple identifiers such as \"string\", \"int\", \"MyType\", etc.\n\tcase *ast.Ident:\n\t\tmappedType, mappedFormat := MapType(prog, pkg+\".\"+typ.Name)\n\t\tif mappedType == \"\" {\n\t\t\t// Only check for canonicalType if this isn't mapped.\n\t\t\tcanon, err := canonicalType(ref.File, pkg, typ)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, fmt.Errorf(\"cannot get canonical type: %v\", err)\n\t\t\t}\n\t\t\tif canon != nil {\n\t\t\t\tsw = canon\n\t\t\t\tgoto start\n\t\t\t}\n\t\t}\n\t\tif mappedType != \"\" {\n\t\t\tp.Type = JSONSchemaType(mappedType)\n\t\t} else {\n\t\t\tp.Type = 
JSONSchemaType(typ.Name)\n\t\t}\n\t\tif mappedFormat != \"\" {\n\t\t\tp.Format = mappedFormat\n\t\t}\n\n\t\t// e.g. string, int64, etc.: don't need to look up.\n\t\tif isPrimitive(p.Type) {\n\t\t\treturn &p, nil\n\t\t}\n\n\t\tp.Type = \"\"\n\t\tname = typ\n\n\t// Anonymous struct\n\tcase *ast.StructType:\n\t\tp.Type = \"object\"\n\t\tp.Properties = map[string]*Schema{}\n\t\tfor _, f := range typ.Fields.List {\n\t\t\tpropName := goutil.TagName(f, tagName)\n\t\t\tprop, err := fieldToSchema(prog, propName, tagName, ref, f)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, fmt.Errorf(\"anon struct: %v\", err)\n\t\t\t}\n\n\t\t\tp.Properties[propName] = prop\n\t\t}\n\n\t// An expression followed by a selector, e.g. \"pkg.foo\"\n\tcase *ast.SelectorExpr:\n\t\tpkgSel, ok := typ.X.(*ast.Ident)\n\t\tif !ok {\n\t\t\treturn nil, fmt.Errorf(\"typ.X is not ast.Ident: %#v\", typ.X)\n\t\t}\n\n\t\tpkg = pkgSel.Name\n\t\tname = typ.Sel\n\n\t\tlookup := pkg + \".\" + name.Name\n\t\tt, f := MapType(prog, lookup)\n\t\tif t == \"\" {\n\t\t\t// Only check for canonicalType if this isn't mapped.\n\t\t\tcanon, err := canonicalType(ref.File, pkgSel.Name, typ.Sel)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, fmt.Errorf(\"cannot get canonical type: %v\", err)\n\t\t\t}\n\t\t\tif canon != nil {\n\t\t\t\tsw = canon\n\t\t\t\tgoto start\n\t\t\t}\n\t\t}\n\n\t\tp.Format = f\n\t\tif t != \"\" {\n\t\t\tp.Type = JSONSchemaType(t)\n\t\t\treturn &p, nil\n\t\t}\n\n\t\t// Deal with array.\n\t\t// TODO: don't do this inline but at the end. 
Reason it doesn't work not\n\t\t// is because we always use GetReference().\n\t\tts, _, importPath, err := findType(ref.File, pkg, name.Name)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tif !strings.HasSuffix(importPath, pkg) { // import alias\n\t\t\tpkg = importPath\n\t\t}\n\n\t\tswitch resolvType := ts.Type.(type) {\n\t\tcase *ast.ArrayType:\n\t\t\tp.Type = \"array\"\n\t\t\terr := resolveArray(prog, ref, pkg, &p, resolvType.Elt)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\n\t\t\treturn &p, nil\n\t\t}\n\n\t// Maps\n\tcase *ast.MapType:\n\t\t// As far as I can find there is no obvious/elegant way to represent\n\t\t// this in JSON schema, so it's just an object.\n\t\tp.Type = \"object\"\n\t\tvtyp, vpkg, err := findTypeIdent(typ.Value, pkg)\n\t\tif err != nil {\n\t\t\t// we cannot find a mapping to a concrete type,\n\t\t\t// so we cannot define the type of the maps -> ?\n\t\t\tdbg(\"ERR FOUND MapType: %s\", err.Error())\n\t\t\treturn &p, nil\n\t\t}\n\t\tif isPrimitive(vtyp.Name) {\n\t\t\t// we are done, no need for a lookup of a custom type\n\t\t\tp.AdditionalProperties = &Schema{Type: JSONSchemaType(vtyp.Name)}\n\t\t\treturn &p, nil\n\t\t}\n\n\t\t_, lref, err := lookupTypeAndRef(ref.File, vpkg, vtyp.Name)\n\t\tif err == nil {\n\t\t\t// found additional properties\n\t\t\tp.AdditionalProperties = &Schema{Reference: lref}\n\t\t\t// Make sure the reference is added to `prog.References`:\n\t\t\t_, err := GetReference(prog, ref.Context, false, lref, ref.File)\n\t\t\tif err != nil {\n\t\t\t\tdbg(\"ERR, Could not find additionalProperties Reference: %s\", err.Error())\n\t\t\t}\n\t\t} else {\n\t\t\tdbg(\"ERR, Could not find additionalProperties: %s\", err.Error())\n\t\t}\n\t\treturn &p, nil\n\n\t// Array and slices.\n\tcase *ast.ArrayType:\n\t\tp.Type = \"array\"\n\n\t\terr := resolveArray(prog, ref, pkg, &p, typ.Elt)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\treturn &p, nil\n\n\tdefault:\n\t\treturn nil, fmt.Errorf(\"fieldToSchema: 
unknown type: %T\", typ)\n\t}\n\n\tif name == nil {\n\t\treturn &p, nil\n\t}\n\n\t// Check if the type resolves to a Go primitive.\n\tlookup := pkg + \".\" + name.Name\n\tt, err := getTypeInfo(prog, lookup, ref.File)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif t != \"\" {\n\t\tp.Type = t\n\t\tif isPrimitive(p.Type) {\n\t\t\treturn &p, nil\n\t\t}\n\t}\n\n\tif i := strings.LastIndex(lookup, \"/\"); i > -1 {\n\t\tlookup = pkg[i+1:] + \".\" + name.Name\n\t}\n\n\tp.Description = \"\" // SwaggerHub will complain if both Description and $ref are set.\n\tp.Reference = lookup\n\n\treturn &p, nil\n}", "func generateStruct(a *AnnotationDoc, packageName string, imports []string, indent string) (string, []string) {\n\tvar allAnnotationsPackages []string\n\tpossiblePackagesForA := combinePackages(imports, []string{packageName})\n\tts, foundPackageOfA, foundImportsOfA := getAnnotationStruct(a.Name, possiblePackagesForA)\n\tallAnnotationsPackages = combinePackages(allAnnotationsPackages, []string{foundPackageOfA})\n\tstr, _ := ts.Type.(*ast.StructType)\n\tvar b bytes.Buffer\n\tb.WriteString(indent)\n\tb.WriteString(foundPackageOfA)\n\tb.WriteString(\".\")\n\tb.WriteString(a.Name)\n\tb.WriteString(\"{\\n\")\n\tchildIndent := indent + \" \"\n\tfor _, f := range str.Fields.List {\n\t\tfieldName := getFieldName(f)\n\t\tdefValue := getDefaultValue(f)\n\t\tfieldKey := fieldName\n\t\t// consider special case when only default parameter is specified\n\t\tif len(str.Fields.List) == 1 && len(a.Content) == 1 {\n\t\t\tfor key := range a.Content {\n\t\t\t\tif key == DEFAULT_PARAM {\n\t\t\t\t\tfieldKey = DEFAULT_PARAM\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\tvalue, found := a.Content[fieldKey]\n\t\tif found {\n\t\t\tswitch t := value.(type) {\n\t\t\tcase string:\n\t\t\t\tb.WriteString(childIndent)\n\t\t\t\tb.WriteString(getLiteral(f.Type, t, false))\n\t\t\t\tb.WriteString(\",\\n\")\n\t\t\tcase 
[]string:\n\t\t\t\tb.WriteString(childIndent)\n\t\t\t\tb.WriteString(getFieldConstructor(f.Type))\n\t\t\t\tb.WriteString(\"\\n\")\n\t\t\t\tfor _, elem := range t {\n\t\t\t\t\tb.WriteString(childIndent + \" \")\n\t\t\t\t\tb.WriteString(elem)\n\t\t\t\t\tb.WriteString(\",\\n\")\n\t\t\t\t}\n\t\t\t\tb.WriteString(childIndent)\n\t\t\t\tb.WriteString(\"}\")\n\t\t\tcase []AnnotationDoc:\n\t\t\t\t// calculate array's elements\n\t\t\t\tvar bb bytes.Buffer\n\t\t\t\tfor _, sa := range t {\n\t\t\t\t\tchildCode, foundImportsOfChild := generateStruct(&sa, foundPackageOfA, foundImportsOfA, childIndent+\" \")\n\t\t\t\t\tallAnnotationsPackages = combinePackages(allAnnotationsPackages, foundImportsOfChild)\n\t\t\t\t\tbb.WriteString(childCode)\n\t\t\t\t\tbb.WriteString(\",\\n\")\n\t\t\t\t}\n\t\t\t\tb.WriteString(childIndent)\n\t\t\t\t// insert array initialzer of child annotation type\n\t\t\t\ts := writeArrayInitializer(&b, bb.String())\n\t\t\t\t// append array of child annotations\n\t\t\t\tb.WriteString(\"{\\n\")\n\t\t\t\tb.WriteString(childIndent + \" \")\n\t\t\t\tb.WriteString(s)\n\t\t\t\tb.WriteString(childIndent)\n\t\t\t\tb.WriteString(\"},\\n\")\n\t\t\tcase AnnotationDoc:\n\t\t\t\tchildCode, foundImportsOfChild := generateStruct(&t, foundPackageOfA, foundImportsOfA, childIndent)\n\t\t\t\tallAnnotationsPackages = combinePackages(allAnnotationsPackages, foundImportsOfChild)\n\t\t\t\tb.WriteString(childIndent)\n\t\t\t\tif isOptional(f.Type) {\n\t\t\t\t\tb.WriteString(\"&\")\n\t\t\t\t}\n\t\t\t\tb.WriteString(strings.TrimLeft(childCode, \" \"))\n\t\t\t\tb.WriteString(\",\\n\")\n\t\t\tdefault:\n\t\t\t\tpanic(\"Unexpected annotation value type\")\n\t\t\t}\n\t\t} else {\n\t\t\tb.WriteString(childIndent)\n\t\t\tb.WriteString(defValue)\n\t\t\tb.WriteString(\",\\n\")\n\t\t}\n\t}\n\tb.WriteString(indent)\n\tb.WriteString(\"}\")\n\treturn b.String(), allAnnotationsPackages\n}", "func (v *validate) traverseField(ctx context.Context, parent reflect.Value, current reflect.Value, ns []byte, 
structNs []byte, cf *cField, ct *cTag) {\n\tvar typ reflect.Type\n\tvar kind reflect.Kind\n\n\tcurrent, kind, v.fldIsPointer = v.extractTypeInternal(current, false)\n\n\tswitch kind {\n\tcase reflect.Ptr, reflect.Interface, reflect.Invalid:\n\n\t\tif ct == nil {\n\t\t\treturn\n\t\t}\n\n\t\tif ct.typeof == typeOmitEmpty || ct.typeof == typeIsDefault {\n\t\t\treturn\n\t\t}\n\n\t\tif ct.hasTag {\n\t\t\tif kind == reflect.Invalid {\n\t\t\t\tv.str1 = string(append(ns, cf.altName...))\n\t\t\t\tif v.v.hasTagNameFunc {\n\t\t\t\t\tv.str2 = string(append(structNs, cf.name...))\n\t\t\t\t} else {\n\t\t\t\t\tv.str2 = v.str1\n\t\t\t\t}\n\t\t\t\tv.errs = append(v.errs,\n\t\t\t\t\t&fieldError{\n\t\t\t\t\t\tv: v.v,\n\t\t\t\t\t\ttag: ct.aliasTag,\n\t\t\t\t\t\tactualTag: ct.tag,\n\t\t\t\t\t\tns: v.str1,\n\t\t\t\t\t\tstructNs: v.str2,\n\t\t\t\t\t\tfieldLen: uint8(len(cf.altName)),\n\t\t\t\t\t\tstructfieldLen: uint8(len(cf.name)),\n\t\t\t\t\t\tparam: ct.param,\n\t\t\t\t\t\tkind: kind,\n\t\t\t\t\t},\n\t\t\t\t)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tv.str1 = string(append(ns, cf.altName...))\n\t\t\tif v.v.hasTagNameFunc {\n\t\t\t\tv.str2 = string(append(structNs, cf.name...))\n\t\t\t} else {\n\t\t\t\tv.str2 = v.str1\n\t\t\t}\n\t\t\tif !ct.runValidationWhenNil {\n\t\t\t\tv.errs = append(v.errs,\n\t\t\t\t\t&fieldError{\n\t\t\t\t\t\tv: v.v,\n\t\t\t\t\t\ttag: ct.aliasTag,\n\t\t\t\t\t\tactualTag: ct.tag,\n\t\t\t\t\t\tns: v.str1,\n\t\t\t\t\t\tstructNs: v.str2,\n\t\t\t\t\t\tfieldLen: uint8(len(cf.altName)),\n\t\t\t\t\t\tstructfieldLen: uint8(len(cf.name)),\n\t\t\t\t\t\tvalue: current.Interface(),\n\t\t\t\t\t\tparam: ct.param,\n\t\t\t\t\t\tkind: kind,\n\t\t\t\t\t\ttyp: current.Type(),\n\t\t\t\t\t},\n\t\t\t\t)\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\n\tcase reflect.Struct:\n\n\t\ttyp = current.Type()\n\n\t\tif !typ.ConvertibleTo(timeType) {\n\n\t\t\tif ct != nil {\n\n\t\t\t\tif ct.typeof == typeStructOnly {\n\t\t\t\t\tgoto CONTINUE\n\t\t\t\t} else if ct.typeof == typeIsDefault {\n\t\t\t\t\t// set Field 
Level fields\n\t\t\t\t\tv.slflParent = parent\n\t\t\t\t\tv.flField = current\n\t\t\t\t\tv.cf = cf\n\t\t\t\t\tv.ct = ct\n\n\t\t\t\t\tif !ct.fn(ctx, v) {\n\t\t\t\t\t\tv.str1 = string(append(ns, cf.altName...))\n\n\t\t\t\t\t\tif v.v.hasTagNameFunc {\n\t\t\t\t\t\t\tv.str2 = string(append(structNs, cf.name...))\n\t\t\t\t\t\t} else {\n\t\t\t\t\t\t\tv.str2 = v.str1\n\t\t\t\t\t\t}\n\n\t\t\t\t\t\tv.errs = append(v.errs,\n\t\t\t\t\t\t\t&fieldError{\n\t\t\t\t\t\t\t\tv: v.v,\n\t\t\t\t\t\t\t\ttag: ct.aliasTag,\n\t\t\t\t\t\t\t\tactualTag: ct.tag,\n\t\t\t\t\t\t\t\tns: v.str1,\n\t\t\t\t\t\t\t\tstructNs: v.str2,\n\t\t\t\t\t\t\t\tfieldLen: uint8(len(cf.altName)),\n\t\t\t\t\t\t\t\tstructfieldLen: uint8(len(cf.name)),\n\t\t\t\t\t\t\t\tvalue: current.Interface(),\n\t\t\t\t\t\t\t\tparam: ct.param,\n\t\t\t\t\t\t\t\tkind: kind,\n\t\t\t\t\t\t\t\ttyp: typ,\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t)\n\t\t\t\t\t\treturn\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\tct = ct.next\n\t\t\t}\n\n\t\t\tif ct != nil && ct.typeof == typeNoStructLevel {\n\t\t\t\treturn\n\t\t\t}\n\n\t\tCONTINUE:\n\t\t\t// if len == 0 then validating using 'Var' or 'VarWithValue'\n\t\t\t// Var - doesn't make much sense to do it that way, should call 'Struct', but no harm...\n\t\t\t// VarWithField - this allows for validating against each field within the struct against a specific value\n\t\t\t// pretty handy in certain situations\n\t\t\tif len(cf.name) > 0 {\n\t\t\t\tns = append(append(ns, cf.altName...), '.')\n\t\t\t\tstructNs = append(append(structNs, cf.name...), '.')\n\t\t\t}\n\n\t\t\tv.validateStruct(ctx, parent, current, typ, ns, structNs, ct)\n\t\t\treturn\n\t\t}\n\t}\n\n\tif ct == nil || !ct.hasTag {\n\t\treturn\n\t}\n\n\ttyp = current.Type()\n\nOUTER:\n\tfor {\n\t\tif ct == nil {\n\t\t\treturn\n\t\t}\n\n\t\tswitch ct.typeof {\n\n\t\tcase typeOmitEmpty:\n\n\t\t\t// set Field Level fields\n\t\t\tv.slflParent = parent\n\t\t\tv.flField = current\n\t\t\tv.cf = cf\n\t\t\tv.ct = ct\n\n\t\t\tif !hasValue(v) 
{\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tct = ct.next\n\t\t\tcontinue\n\n\t\tcase typeEndKeys:\n\t\t\treturn\n\n\t\tcase typeDive:\n\n\t\t\tct = ct.next\n\n\t\t\t// traverse slice or map here\n\t\t\t// or panic ;)\n\t\t\tswitch kind {\n\t\t\tcase reflect.Slice, reflect.Array:\n\n\t\t\t\tvar i64 int64\n\t\t\t\treusableCF := &cField{}\n\n\t\t\t\tfor i := 0; i < current.Len(); i++ {\n\n\t\t\t\t\ti64 = int64(i)\n\n\t\t\t\t\tv.misc = append(v.misc[0:0], cf.name...)\n\t\t\t\t\tv.misc = append(v.misc, '[')\n\t\t\t\t\tv.misc = strconv.AppendInt(v.misc, i64, 10)\n\t\t\t\t\tv.misc = append(v.misc, ']')\n\n\t\t\t\t\treusableCF.name = string(v.misc)\n\n\t\t\t\t\tif cf.namesEqual {\n\t\t\t\t\t\treusableCF.altName = reusableCF.name\n\t\t\t\t\t} else {\n\n\t\t\t\t\t\tv.misc = append(v.misc[0:0], cf.altName...)\n\t\t\t\t\t\tv.misc = append(v.misc, '[')\n\t\t\t\t\t\tv.misc = strconv.AppendInt(v.misc, i64, 10)\n\t\t\t\t\t\tv.misc = append(v.misc, ']')\n\n\t\t\t\t\t\treusableCF.altName = string(v.misc)\n\t\t\t\t\t}\n\t\t\t\t\tv.traverseField(ctx, parent, current.Index(i), ns, structNs, reusableCF, ct)\n\t\t\t\t}\n\n\t\t\tcase reflect.Map:\n\n\t\t\t\tvar pv string\n\t\t\t\treusableCF := &cField{}\n\n\t\t\t\tfor _, key := range current.MapKeys() {\n\n\t\t\t\t\tpv = fmt.Sprintf(\"%v\", key.Interface())\n\n\t\t\t\t\tv.misc = append(v.misc[0:0], cf.name...)\n\t\t\t\t\tv.misc = append(v.misc, '[')\n\t\t\t\t\tv.misc = append(v.misc, pv...)\n\t\t\t\t\tv.misc = append(v.misc, ']')\n\n\t\t\t\t\treusableCF.name = string(v.misc)\n\n\t\t\t\t\tif cf.namesEqual {\n\t\t\t\t\t\treusableCF.altName = reusableCF.name\n\t\t\t\t\t} else {\n\t\t\t\t\t\tv.misc = append(v.misc[0:0], cf.altName...)\n\t\t\t\t\t\tv.misc = append(v.misc, '[')\n\t\t\t\t\t\tv.misc = append(v.misc, pv...)\n\t\t\t\t\t\tv.misc = append(v.misc, ']')\n\n\t\t\t\t\t\treusableCF.altName = string(v.misc)\n\t\t\t\t\t}\n\n\t\t\t\t\tif ct != nil && ct.typeof == typeKeys && ct.keys != nil {\n\t\t\t\t\t\tv.traverseField(ctx, parent, key, ns, 
structNs, reusableCF, ct.keys)\n\t\t\t\t\t\t// can be nil when just keys being validated\n\t\t\t\t\t\tif ct.next != nil {\n\t\t\t\t\t\t\tv.traverseField(ctx, parent, current.MapIndex(key), ns, structNs, reusableCF, ct.next)\n\t\t\t\t\t\t}\n\t\t\t\t\t} else {\n\t\t\t\t\t\tv.traverseField(ctx, parent, current.MapIndex(key), ns, structNs, reusableCF, ct)\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\tdefault:\n\t\t\t\t// throw error, if not a slice or map then should not have gotten here\n\t\t\t\t// bad dive tag\n\t\t\t\tpanic(\"dive error! can't dive on a non slice or map\")\n\t\t\t}\n\n\t\t\treturn\n\n\t\tcase typeOr:\n\n\t\t\tv.misc = v.misc[0:0]\n\n\t\t\tfor {\n\n\t\t\t\t// set Field Level fields\n\t\t\t\tv.slflParent = parent\n\t\t\t\tv.flField = current\n\t\t\t\tv.cf = cf\n\t\t\t\tv.ct = ct\n\n\t\t\t\tif ct.fn(ctx, v) {\n\t\t\t\t\tif ct.isBlockEnd {\n\t\t\t\t\t\tct = ct.next\n\t\t\t\t\t\tcontinue OUTER\n\t\t\t\t\t}\n\n\t\t\t\t\t// drain rest of the 'or' values, then continue or leave\n\t\t\t\t\tfor {\n\n\t\t\t\t\t\tct = ct.next\n\n\t\t\t\t\t\tif ct == nil {\n\t\t\t\t\t\t\treturn\n\t\t\t\t\t\t}\n\n\t\t\t\t\t\tif ct.typeof != typeOr {\n\t\t\t\t\t\t\tcontinue OUTER\n\t\t\t\t\t\t}\n\n\t\t\t\t\t\tif ct.isBlockEnd {\n\t\t\t\t\t\t\tct = ct.next\n\t\t\t\t\t\t\tcontinue OUTER\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\tv.misc = append(v.misc, '|')\n\t\t\t\tv.misc = append(v.misc, ct.tag...)\n\n\t\t\t\tif ct.hasParam {\n\t\t\t\t\tv.misc = append(v.misc, '=')\n\t\t\t\t\tv.misc = append(v.misc, ct.param...)\n\t\t\t\t}\n\n\t\t\t\tif ct.isBlockEnd || ct.next == nil {\n\t\t\t\t\t// if we get here, no valid 'or' value and no more tags\n\t\t\t\t\tv.str1 = string(append(ns, cf.altName...))\n\n\t\t\t\t\tif v.v.hasTagNameFunc {\n\t\t\t\t\t\tv.str2 = string(append(structNs, cf.name...))\n\t\t\t\t\t} else {\n\t\t\t\t\t\tv.str2 = v.str1\n\t\t\t\t\t}\n\n\t\t\t\t\tif ct.hasAlias {\n\n\t\t\t\t\t\tv.errs = append(v.errs,\n\t\t\t\t\t\t\t&fieldError{\n\t\t\t\t\t\t\t\tv: v.v,\n\t\t\t\t\t\t\t\ttag: 
ct.aliasTag,\n\t\t\t\t\t\t\t\tactualTag: ct.actualAliasTag,\n\t\t\t\t\t\t\t\tns: v.str1,\n\t\t\t\t\t\t\t\tstructNs: v.str2,\n\t\t\t\t\t\t\t\tfieldLen: uint8(len(cf.altName)),\n\t\t\t\t\t\t\t\tstructfieldLen: uint8(len(cf.name)),\n\t\t\t\t\t\t\t\tvalue: current.Interface(),\n\t\t\t\t\t\t\t\tparam: ct.param,\n\t\t\t\t\t\t\t\tkind: kind,\n\t\t\t\t\t\t\t\ttyp: typ,\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t)\n\n\t\t\t\t\t} else {\n\n\t\t\t\t\t\ttVal := string(v.misc)[1:]\n\n\t\t\t\t\t\tv.errs = append(v.errs,\n\t\t\t\t\t\t\t&fieldError{\n\t\t\t\t\t\t\t\tv: v.v,\n\t\t\t\t\t\t\t\ttag: tVal,\n\t\t\t\t\t\t\t\tactualTag: tVal,\n\t\t\t\t\t\t\t\tns: v.str1,\n\t\t\t\t\t\t\t\tstructNs: v.str2,\n\t\t\t\t\t\t\t\tfieldLen: uint8(len(cf.altName)),\n\t\t\t\t\t\t\t\tstructfieldLen: uint8(len(cf.name)),\n\t\t\t\t\t\t\t\tvalue: current.Interface(),\n\t\t\t\t\t\t\t\tparam: ct.param,\n\t\t\t\t\t\t\t\tkind: kind,\n\t\t\t\t\t\t\t\ttyp: typ,\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t)\n\t\t\t\t\t}\n\n\t\t\t\t\treturn\n\t\t\t\t}\n\n\t\t\t\tct = ct.next\n\t\t\t}\n\n\t\tdefault:\n\n\t\t\t// set Field Level fields\n\t\t\tv.slflParent = parent\n\t\t\tv.flField = current\n\t\t\tv.cf = cf\n\t\t\tv.ct = ct\n\n\t\t\tif !ct.fn(ctx, v) {\n\t\t\t\tv.str1 = string(append(ns, cf.altName...))\n\n\t\t\t\tif v.v.hasTagNameFunc {\n\t\t\t\t\tv.str2 = string(append(structNs, cf.name...))\n\t\t\t\t} else {\n\t\t\t\t\tv.str2 = v.str1\n\t\t\t\t}\n\n\t\t\t\tv.errs = append(v.errs,\n\t\t\t\t\t&fieldError{\n\t\t\t\t\t\tv: v.v,\n\t\t\t\t\t\ttag: ct.aliasTag,\n\t\t\t\t\t\tactualTag: ct.tag,\n\t\t\t\t\t\tns: v.str1,\n\t\t\t\t\t\tstructNs: v.str2,\n\t\t\t\t\t\tfieldLen: uint8(len(cf.altName)),\n\t\t\t\t\t\tstructfieldLen: uint8(len(cf.name)),\n\t\t\t\t\t\tvalue: current.Interface(),\n\t\t\t\t\t\tparam: ct.param,\n\t\t\t\t\t\tkind: kind,\n\t\t\t\t\t\ttyp: typ,\n\t\t\t\t\t},\n\t\t\t\t)\n\n\t\t\t\treturn\n\t\t\t}\n\t\t\tct = ct.next\n\t\t}\n\t}\n\n}", "func (m *ParentLabelDetails) 
GetFieldDeserializers()(map[string]func(i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode)(error)) {\n res := make(map[string]func(i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode)(error))\n res[\"color\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetStringValue()\n if err != nil {\n return err\n }\n if val != nil {\n m.SetColor(val)\n }\n return nil\n }\n res[\"description\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetStringValue()\n if err != nil {\n return err\n }\n if val != nil {\n m.SetDescription(val)\n }\n return nil\n }\n res[\"id\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetStringValue()\n if err != nil {\n return err\n }\n if val != nil {\n m.SetId(val)\n }\n return nil\n }\n res[\"isActive\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetBoolValue()\n if err != nil {\n return err\n }\n if val != nil {\n m.SetIsActive(val)\n }\n return nil\n }\n res[\"name\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetStringValue()\n if err != nil {\n return err\n }\n if val != nil {\n m.SetName(val)\n }\n return nil\n }\n res[\"@odata.type\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetStringValue()\n if err != nil {\n return err\n }\n if val != nil {\n m.SetOdataType(val)\n }\n return nil\n }\n res[\"parent\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetObjectValue(CreateParentLabelDetailsFromDiscriminatorValue)\n if err != nil {\n return err\n }\n if val != nil {\n m.SetParent(val.(ParentLabelDetailsable))\n }\n return nil\n }\n 
res[\"sensitivity\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetInt32Value()\n if err != nil {\n return err\n }\n if val != nil {\n m.SetSensitivity(val)\n }\n return nil\n }\n res[\"tooltip\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetStringValue()\n if err != nil {\n return err\n }\n if val != nil {\n m.SetTooltip(val)\n }\n return nil\n }\n return res\n}", "func (m *EdiscoverySearch) GetFieldDeserializers()(map[string]func(i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode)(error)) {\n res := m.Search.GetFieldDeserializers()\n res[\"additionalSources\"] = i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f.SetCollectionOfObjectValues(CreateDataSourceFromDiscriminatorValue , m.SetAdditionalSources)\n res[\"addToReviewSetOperation\"] = i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f.SetObjectValue(CreateEdiscoveryAddToReviewSetOperationFromDiscriminatorValue , m.SetAddToReviewSetOperation)\n res[\"custodianSources\"] = i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f.SetCollectionOfObjectValues(CreateDataSourceFromDiscriminatorValue , m.SetCustodianSources)\n res[\"dataSourceScopes\"] = i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f.SetEnumValue(ParseDataSourceScopes , m.SetDataSourceScopes)\n res[\"lastEstimateStatisticsOperation\"] = i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f.SetObjectValue(CreateEdiscoveryEstimateOperationFromDiscriminatorValue , m.SetLastEstimateStatisticsOperation)\n res[\"noncustodialSources\"] = i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f.SetCollectionOfObjectValues(CreateEdiscoveryNoncustodialDataSourceFromDiscriminatorValue , m.SetNoncustodialSources)\n return res\n}", "func VisitField(nodes []Node, field string, callback func(value string, negated bool, 
annotation Annotation)) {\n\tvisitor := &FieldVisitor{callback: callback, field: field}\n\tvisitor.VisitNodes(visitor, nodes)\n}", "func (w *State) GenerateFlowField(destination DestinationID) error {\n\tlog.Println(\"find shorted path\")\n\tFindShortestPath(w, destination)\n\tlog.Println(\"compute directions\")\n\tw.computeDirections(destination)\n\n\treturn nil\n\n}", "func (x *fastReflection_EventCreateBatch) NewField(fd protoreflect.FieldDescriptor) protoreflect.Value {\n\tswitch fd.FullName() {\n\tcase \"regen.ecocredit.v1alpha1.EventCreateBatch.class_id\":\n\t\treturn protoreflect.ValueOfString(\"\")\n\tcase \"regen.ecocredit.v1alpha1.EventCreateBatch.batch_denom\":\n\t\treturn protoreflect.ValueOfString(\"\")\n\tcase \"regen.ecocredit.v1alpha1.EventCreateBatch.issuer\":\n\t\treturn protoreflect.ValueOfString(\"\")\n\tcase \"regen.ecocredit.v1alpha1.EventCreateBatch.total_amount\":\n\t\treturn protoreflect.ValueOfString(\"\")\n\tcase \"regen.ecocredit.v1alpha1.EventCreateBatch.start_date\":\n\t\treturn protoreflect.ValueOfString(\"\")\n\tcase \"regen.ecocredit.v1alpha1.EventCreateBatch.end_date\":\n\t\treturn protoreflect.ValueOfString(\"\")\n\tcase \"regen.ecocredit.v1alpha1.EventCreateBatch.project_location\":\n\t\treturn protoreflect.ValueOfString(\"\")\n\tdefault:\n\t\tif fd.IsExtension() {\n\t\t\tpanic(fmt.Errorf(\"proto3 declared messages do not support extensions: regen.ecocredit.v1alpha1.EventCreateBatch\"))\n\t\t}\n\t\tpanic(fmt.Errorf(\"message regen.ecocredit.v1alpha1.EventCreateBatch does not contain field %s\", fd.FullName()))\n\t}\n}", "func (m *ProgramControl) GetFieldDeserializers()(map[string]func(i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode)(error)) {\n res := m.Entity.GetFieldDeserializers()\n res[\"controlId\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetStringValue()\n if err != nil {\n return err\n }\n if val != nil {\n 
m.SetControlId(val)\n }\n return nil\n }\n res[\"controlTypeId\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetStringValue()\n if err != nil {\n return err\n }\n if val != nil {\n m.SetControlTypeId(val)\n }\n return nil\n }\n res[\"createdDateTime\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetTimeValue()\n if err != nil {\n return err\n }\n if val != nil {\n m.SetCreatedDateTime(val)\n }\n return nil\n }\n res[\"displayName\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetStringValue()\n if err != nil {\n return err\n }\n if val != nil {\n m.SetDisplayName(val)\n }\n return nil\n }\n res[\"owner\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetObjectValue(CreateUserIdentityFromDiscriminatorValue)\n if err != nil {\n return err\n }\n if val != nil {\n m.SetOwner(val.(UserIdentityable))\n }\n return nil\n }\n res[\"program\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetObjectValue(CreateProgramFromDiscriminatorValue)\n if err != nil {\n return err\n }\n if val != nil {\n m.SetProgram(val.(Programable))\n }\n return nil\n }\n res[\"programId\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetStringValue()\n if err != nil {\n return err\n }\n if val != nil {\n m.SetProgramId(val)\n }\n return nil\n }\n res[\"resource\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetObjectValue(CreateProgramResourceFromDiscriminatorValue)\n if err != nil {\n return err\n }\n if val != nil {\n m.SetResource(val.(ProgramResourceable))\n }\n return nil\n }\n res[\"status\"] = func (n 
i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetStringValue()\n if err != nil {\n return err\n }\n if val != nil {\n m.SetStatus(val)\n }\n return nil\n }\n return res\n}", "func (m *BusinessScenarioPlanner) GetFieldDeserializers()(map[string]func(i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode)(error)) {\n res := m.Entity.GetFieldDeserializers()\n res[\"planConfiguration\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetObjectValue(CreatePlannerPlanConfigurationFromDiscriminatorValue)\n if err != nil {\n return err\n }\n if val != nil {\n m.SetPlanConfiguration(val.(PlannerPlanConfigurationable))\n }\n return nil\n }\n res[\"taskConfiguration\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetObjectValue(CreatePlannerTaskConfigurationFromDiscriminatorValue)\n if err != nil {\n return err\n }\n if val != nil {\n m.SetTaskConfiguration(val.(PlannerTaskConfigurationable))\n }\n return nil\n }\n res[\"tasks\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetCollectionOfObjectValues(CreateBusinessScenarioTaskFromDiscriminatorValue)\n if err != nil {\n return err\n }\n if val != nil {\n res := make([]BusinessScenarioTaskable, len(val))\n for i, v := range val {\n if v != nil {\n res[i] = v.(BusinessScenarioTaskable)\n }\n }\n m.SetTasks(res)\n }\n return nil\n }\n return res\n}", "func (op *metadataLookup) field(parentFunc *stmt.CallExpr, expr stmt.Expr) {\n\tif op.err != nil {\n\t\treturn\n\t}\n\tswitch e := expr.(type) {\n\tcase *stmt.SelectItem:\n\t\top.field(nil, e.Expr)\n\tcase *stmt.CallExpr:\n\t\tif e.FuncType == function.Quantile {\n\t\t\top.planHistogramFields(e)\n\t\t\treturn\n\t\t}\n\t\tfor _, param := range e.Params {\n\t\t\top.field(e, param)\n\t\t}\n\tcase 
*stmt.ParenExpr:\n\t\top.field(nil, e.Expr)\n\tcase *stmt.BinaryExpr:\n\t\top.field(nil, e.Left)\n\t\top.field(nil, e.Right)\n\tcase *stmt.FieldExpr:\n\t\tqueryStmt := op.executeCtx.Query\n\t\tfieldMeta, err := op.metadata.GetField(queryStmt.Namespace, queryStmt.MetricName, field.Name(e.Name))\n\t\tif err != nil {\n\t\t\top.err = err\n\t\t\treturn\n\t\t}\n\n\t\top.planField(parentFunc, fieldMeta)\n\t}\n}", "func JsonFieldWithDefaultGenerator() gopter.Gen {\n\tif jsonFieldWithDefaultGenerator != nil {\n\t\treturn jsonFieldWithDefaultGenerator\n\t}\n\n\tgenerators := make(map[string]gopter.Gen)\n\tAddIndependentPropertyGeneratorsForJsonFieldWithDefault(generators)\n\tjsonFieldWithDefaultGenerator = gen.Struct(reflect.TypeOf(JsonFieldWithDefault{}), generators)\n\n\treturn jsonFieldWithDefaultGenerator\n}", "func (m *TemplateParameter) GetFieldDeserializers()(map[string]func(i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode)(error)) {\n res := make(map[string]func(i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode)(error))\n res[\"description\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetStringValue()\n if err != nil {\n return err\n }\n if val != nil {\n m.SetDescription(val)\n }\n return nil\n }\n res[\"displayName\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetStringValue()\n if err != nil {\n return err\n }\n if val != nil {\n m.SetDisplayName(val)\n }\n return nil\n }\n res[\"jsonAllowedValues\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetStringValue()\n if err != nil {\n return err\n }\n if val != nil {\n m.SetJsonAllowedValues(val)\n }\n return nil\n }\n res[\"jsonDefaultValue\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := 
n.GetStringValue()\n if err != nil {\n return err\n }\n if val != nil {\n m.SetJsonDefaultValue(val)\n }\n return nil\n }\n res[\"@odata.type\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetStringValue()\n if err != nil {\n return err\n }\n if val != nil {\n m.SetOdataType(val)\n }\n return nil\n }\n res[\"valueType\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetEnumValue(ParseManagementParameterValueType)\n if err != nil {\n return err\n }\n if val != nil {\n m.SetValueType(val.(*ManagementParameterValueType))\n }\n return nil\n }\n return res\n}", "func yangListFieldToGoType(listField *ygen.NodeDetails, listFieldName string, parent *ygen.ParsedDirectory, goStructElements map[string]*ygen.ParsedDirectory, generateOrderedMaps bool) (string, *generatedGoMultiKeyListStruct, *generatedGoListMethod, *generatedOrderedMapStruct, error) {\n\t// The list itself, since it is a container, has a struct associated with it. 
Retrieve\n\t// this from the set of Directory structs for which code (a Go struct) will be\n\t// generated such that additional details can be used in the code generation.\n\tlistElem, ok := goStructElements[listField.YANGDetails.Path]\n\tif !ok {\n\t\treturn \"\", nil, nil, nil, fmt.Errorf(\"struct for %s did not exist\", listField.YANGDetails.Path)\n\t}\n\n\tif len(listElem.ListKeys) == 0 {\n\t\t// Keyless list therefore represent this as a slice of pointers to\n\t\t// the struct that represents the list element itself.\n\t\treturn fmt.Sprintf(\"[]*%s\", listElem.Name), nil, nil, nil, nil\n\t}\n\n\tlistType, keyType, _, err := UnorderedMapTypeName(listField.YANGDetails.Path, listFieldName, parent.Name, goStructElements)\n\tif err != nil {\n\t\treturn \"\", nil, nil, nil, err\n\t}\n\tvar multiListKey *generatedGoMultiKeyListStruct\n\tvar listKeys []goStructField\n\n\tshortestPath := func(ss [][]string) [][]string {\n\t\tvar shortest []string\n\t\tfor _, s := range ss {\n\t\t\tif shortest == nil {\n\t\t\t\tshortest = s\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif len(s) < len(shortest) {\n\t\t\t\tshortest = s\n\t\t\t}\n\t\t}\n\t\treturn [][]string{shortest}\n\t}\n\n\tusedKeyElemNames := make(map[string]bool)\n\tfor _, keName := range listElem.ListKeyYANGNames {\n\t\tkeyType, ok := listElem.Fields[keName]\n\t\tif !ok {\n\t\t\treturn \"\", nil, nil, nil, fmt.Errorf(\"did not find type for key %s\", keName)\n\t\t}\n\n\t\tkeyField := goStructField{\n\t\t\tYANGName: keName,\n\t\t\tName: genutil.MakeNameUnique(listElem.ListKeys[keName].Name, usedKeyElemNames),\n\t\t\tType: listElem.ListKeys[keName].LangType.NativeType,\n\t\t\t// The shortest mapped path for a list key must be the path to the key.\n\t\t\tTags: mappedPathTag(shortestPath(keyType.MappedPaths), \"\"),\n\t\t}\n\t\tkeyField.IsScalarField = IsScalarField(keyType)\n\t\tlistKeys = append(listKeys, keyField)\n\t}\n\n\tswitch {\n\tcase len(listElem.ListKeys) != 1:\n\t\t// This is a list with multiple keys, so we need to 
generate a new structure\n\t\t// that represents the list key itself - this struct is described in a\n\t\t// generatedGoMultiKeyListStruct struct, which is then expanded by a template to the struct\n\t\t// definition.\n\t\tmultiListKey = &generatedGoMultiKeyListStruct{\n\t\t\tKeyStructName: keyType,\n\t\t\tParentPath: parent.Path,\n\t\t\tListName: listFieldName,\n\t\t\tKeys: listKeys,\n\t\t}\n\t}\n\n\tvar listMethodSpec *generatedGoListMethod\n\tvar orderedMapSpec *generatedOrderedMapStruct\n\n\tif listField.YANGDetails.OrderedByUser && generateOrderedMaps {\n\t\tstructName := OrderedMapTypeName(listElem.Name)\n\t\tlistType = fmt.Sprintf(\"*%s\", structName)\n\t\t// Create spec for generating ordered maps.\n\t\torderedMapSpec = &generatedOrderedMapStruct{\n\t\t\tStructName: structName,\n\t\t\tKeyName: keyType,\n\t\t\tListTypeName: listElem.Name,\n\t\t\tListFieldName: listFieldName,\n\t\t\tKeys: listKeys,\n\t\t\tParentStructName: parent.Name,\n\t\t\tYANGPath: listField.YANGDetails.Path,\n\t\t}\n\t} else {\n\t\t// Generate the specification for the methods that should be generated for this\n\t\t// list, such that this can be handed to the relevant templates to generate code.\n\t\tlistMethodSpec = &generatedGoListMethod{\n\t\t\tListName: listFieldName,\n\t\t\tListType: listElem.Name,\n\t\t\tKeys: listKeys,\n\t\t\tReceiver: parent.Name,\n\t\t}\n\t\tif multiListKey != nil {\n\t\t\tlistMethodSpec.KeyStruct = keyType\n\t\t}\n\t}\n\n\treturn listType, multiListKey, listMethodSpec, orderedMapSpec, nil\n}", "func (b *PlanBuilder) buildProjectionField(ctx context.Context, p LogicalPlan, field *ast.SelectField, expr expression.Expression) (*expression.Column, *types.FieldName, error) {\n\tvar origTblName, tblName, colName, dbName parser_model.CIStr\n\tinnerNode := getInnerFromParenthesesAndUnaryPlus(field.Expr)\n\tcol, isCol := expr.(*expression.Column)\n\t// Correlated column won't affect the final output names. 
So we can put it in any of the three logic block.\n\t// Don't put it into the first block just for simplifying the codes.\n\tif colNameField, ok := innerNode.(*ast.ColumnNameExpr); ok && isCol {\n\t\t// Field is a column reference.\n\t\tidx := p.Schema().ColumnIndex(col)\n\t\tvar name *types.FieldName\n\t\t// The column maybe the one from join's redundant part.\n\t\t// TODO: Fully support USING/NATURAL JOIN, refactor here.\n\t\tif idx != -1 {\n\t\t\tname = p.OutputNames()[idx]\n\t\t}\n\t\tcolName, _, tblName, origTblName, dbName = b.buildProjectionFieldNameFromColumns(field, colNameField, name)\n\t} else if field.AsName.L != \"\" {\n\t\t// Field has alias.\n\t\tcolName = field.AsName\n\t} else {\n\t\t// Other: field is an expression.\n\t\tvar err error\n\t\tif colName, err = b.buildProjectionFieldNameFromExpressions(ctx, field); err != nil {\n\t\t\treturn nil, nil, err\n\t\t}\n\t}\n\tname := &types.FieldName{\n\t\tTblName: tblName,\n\t\tOrigTblName: origTblName,\n\t\tColName: colName,\n\t\tOrigColName: colName,\n\t\tDBName: dbName,\n\t}\n\tif isCol {\n\t\treturn col, name, nil\n\t}\n\tnewCol := &expression.Column{\n\t\tUniqueID: b.ctx.GetSessionVars().AllocPlanColumnID(),\n\t\tRetType: expr.GetType(),\n\t\tOrigName: colName.L,\n\t}\n\treturn newCol, name, nil\n}", "func (node *selfNode) packToStructByFieldName(st reflect.Value) (err error) {\n\n\tnodeName := node.head.String()\n\tfor _, n := range node.values {\n\t\tif _, ok := n.(*selfNode); !ok {\n\t\t\treturn n.newPackError(\"field `\" + nodeName + \"` should be only made of lists\")\n\t\t}\n\t\tvalueNode := n.(*selfNode)\n\t\tfieldName := publicName(valueNode.head.String())\n\t\ttargetField := st.FieldByName(fieldName)\n\t\tif !targetField.IsValid() {\n\t\t\treturn valueNode.newPackError(\"undefined field `\" + fieldName + \"` for node `\" + nodeName + \"`\")\n\t\t}\n\n\t\tif err = valueNode.packIntoField(fieldName, targetField); err != nil {\n\t\t\treturn\n\t\t}\n\t}\n\treturn nil\n}", "func genArguments(args 
[]*ast.InputValueDefinition) *jen.Statement {\n\t//\n\t// Generate config for arguments\n\t//\n\t// == Example input SDL\n\t//\n\t// type Dog {\n\t// name(\n\t// \"style is stylish\"\n\t// style: NameComponentsStyle = SHORT,\n\t// ): String!\n\t// }\n\t//\n\t// == Example output\n\t//\n\t// FieldConfigArgument{\n\t// \"style\": &ArgumentConfig{ ... }\n\t// },\n\t//\n\treturn jen.Qual(defsPkg, \"FieldConfigArgument\").Values(\n\t\tjen.DictFunc(func(d jen.Dict) {\n\t\t\tfor _, arg := range args {\n\t\t\t\td[jen.Lit(arg.Name.Value)] = genArgument(arg)\n\t\t\t}\n\t\t}),\n\t)\n}", "func (*Base) ObjectField(p ASTPass, field *ast.ObjectField, ctx Context) {\n\tswitch field.Kind {\n\tcase ast.ObjectLocal:\n\t\tp.Fodder(p, &field.Fodder1, ctx)\n\t\tp.Fodder(p, &field.Fodder2, ctx)\n\t\tp.FieldParams(p, field, ctx)\n\t\tp.Fodder(p, &field.OpFodder, ctx)\n\t\tp.Visit(p, &field.Expr2, ctx)\n\n\tcase ast.ObjectFieldID:\n\t\tp.Fodder(p, &field.Fodder1, ctx)\n\t\tp.FieldParams(p, field, ctx)\n\t\tp.Fodder(p, &field.OpFodder, ctx)\n\t\tp.Visit(p, &field.Expr2, ctx)\n\n\tcase ast.ObjectFieldStr:\n\t\tp.Visit(p, &field.Expr1, ctx)\n\t\tp.FieldParams(p, field, ctx)\n\t\tp.Fodder(p, &field.OpFodder, ctx)\n\t\tp.Visit(p, &field.Expr2, ctx)\n\n\tcase ast.ObjectFieldExpr:\n\t\tp.Fodder(p, &field.Fodder1, ctx)\n\t\tp.Visit(p, &field.Expr1, ctx)\n\t\tp.Fodder(p, &field.Fodder2, ctx)\n\t\tp.FieldParams(p, field, ctx)\n\t\tp.Fodder(p, &field.OpFodder, ctx)\n\t\tp.Visit(p, &field.Expr2, ctx)\n\n\tcase ast.ObjectAssert:\n\t\tp.Fodder(p, &field.Fodder1, ctx)\n\t\tp.Visit(p, &field.Expr2, ctx)\n\t\tif field.Expr3 != nil {\n\t\t\tp.Fodder(p, &field.OpFodder, ctx)\n\t\t\tp.Visit(p, &field.Expr3, ctx)\n\t\t}\n\t}\n\n\tp.Fodder(p, &field.CommaFodder, ctx)\n}", "func Struct(rt reflect.Type, gens map[string]gopter.Gen) gopter.Gen {\n\tif rt.Kind() == reflect.Ptr {\n\t\trt = rt.Elem()\n\t}\n\tif rt.Kind() != reflect.Struct {\n\t\treturn Fail(rt)\n\t}\n\tfieldGens := []gopter.Gen{}\n\tfieldTypes := 
[]reflect.Type{}\n\tassignable := reflect.New(rt).Elem()\n\tfor i := 0; i < rt.NumField(); i++ {\n\t\tfieldName := rt.Field(i).Name\n\t\tif !assignable.Field(i).CanSet() {\n\t\t\tcontinue\n\t\t}\n\n\t\tgen := gens[fieldName]\n\t\tif gen != nil {\n\t\t\tfieldGens = append(fieldGens, gen)\n\t\t\tfieldTypes = append(fieldTypes, rt.Field(i).Type)\n\t\t}\n\t}\n\n\tbuildStructType := reflect.FuncOf(fieldTypes, []reflect.Type{rt}, false)\n\tunbuildStructType := reflect.FuncOf([]reflect.Type{rt}, fieldTypes, false)\n\n\tbuildStructFunc := reflect.MakeFunc(buildStructType, func(args []reflect.Value) []reflect.Value {\n\t\tresult := reflect.New(rt)\n\t\tfor i := 0; i < rt.NumField(); i++ {\n\t\t\tif _, ok := gens[rt.Field(i).Name]; !ok {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif !assignable.Field(i).CanSet() {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tresult.Elem().Field(i).Set(args[0])\n\t\t\targs = args[1:]\n\t\t}\n\t\treturn []reflect.Value{result.Elem()}\n\t})\n\tunbuildStructFunc := reflect.MakeFunc(unbuildStructType, func(args []reflect.Value) []reflect.Value {\n\t\ts := args[0]\n\t\tresults := []reflect.Value{}\n\t\tfor i := 0; i < s.NumField(); i++ {\n\t\t\tif _, ok := gens[rt.Field(i).Name]; !ok {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif !assignable.Field(i).CanSet() {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tresults = append(results, s.Field(i))\n\t\t}\n\t\treturn results\n\t})\n\n\treturn gopter.DeriveGen(\n\t\tbuildStructFunc.Interface(),\n\t\tunbuildStructFunc.Interface(),\n\t\tfieldGens...,\n\t)\n}", "func (m *DeviceConfigurationAssignment) GetFieldDeserializers()(map[string]func(i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode)(error)) {\n res := m.Entity.GetFieldDeserializers()\n res[\"intent\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetEnumValue(ParseDeviceConfigAssignmentIntent)\n if err != nil {\n return err\n }\n if val != nil {\n m.SetIntent(val.(*DeviceConfigAssignmentIntent))\n 
}\n return nil\n }\n res[\"source\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetEnumValue(ParseDeviceAndAppManagementAssignmentSource)\n if err != nil {\n return err\n }\n if val != nil {\n m.SetSource(val.(*DeviceAndAppManagementAssignmentSource))\n }\n return nil\n }\n res[\"sourceId\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetStringValue()\n if err != nil {\n return err\n }\n if val != nil {\n m.SetSourceId(val)\n }\n return nil\n }\n res[\"target\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetObjectValue(CreateDeviceAndAppManagementAssignmentTargetFromDiscriminatorValue)\n if err != nil {\n return err\n }\n if val != nil {\n m.SetTarget(val.(DeviceAndAppManagementAssignmentTargetable))\n }\n return nil\n }\n return res\n}", "func generate(copyrights string, collector *collector, templateBuilder templateBuilder) {\n\tfor _, pkg := range collector.Packages {\n\t\tfileTemplate := fileTpl{\n\t\t\tCopyright: copyrights,\n\n\t\t\tStandardImports: []string{\n\t\t\t\t\"fmt\",\n\t\t\t\t\"unicode\",\n\t\t\t\t\"unicode/utf8\",\n\t\t\t},\n\n\t\t\tCustomImports: []string{\n\t\t\t\t\"github.com/google/uuid\",\n\t\t\t},\n\t\t}\n\t\tfor _, f := range pkg.Files {\n\t\t\tfor _, d := range f.Decls {\n\t\t\t\tg, ok := d.(*ast.GenDecl)\n\t\t\t\tif !ok {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\n\t\t\t\tstructs := structSearch(g)\n\t\t\t\tif len(structs) == 0 {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\n\t\t\t\tfor _, s := range structs {\n\t\t\t\t\tatLeastOneField := false\n\n\t\t\t\t\tfor _, field := range s.Type.Fields.List {\n\n\t\t\t\t\t\tpos := collector.FileSet.Position(field.Type.Pos())\n\t\t\t\t\t\ttyp := collector.Info.TypeOf(field.Type)\n\n\t\t\t\t\t\tcomposedType := \"\"\n\t\t\t\t\t\tbaseName := getType(typ, &composedType)\n\t\t\t\t\t\tfmt.Println(\"Add validation: \", pos, \": 
\", baseName, \"/\", composedType)\n\n\t\t\t\t\t\tif err := templateBuilder.generateCheck(field, s.Name, baseName, composedType); err != nil {\n\t\t\t\t\t\t\tfmt.Printf(\"struct %s: %s\\n\", s.Name, err)\n\t\t\t\t\t\t\tcontinue\n\t\t\t\t\t\t}\n\n\t\t\t\t\t\tatLeastOneField = true\n\t\t\t\t\t}\n\n\t\t\t\t\tif !atLeastOneField {\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t}\n\n\t\t\t\t\terr := templateBuilder.generateMethod(s.Name)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\tfmt.Printf(\"struct gen %s: %s\\n\", s.Name, err)\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\tfileTemplate.Package = pkg.Name\n\t\terr := templateBuilder.generateFile(pkg.Path, fileTemplate)\n\t\tif err != nil {\n\t\t\tfmt.Println(\"Generation error\", err)\n\t\t}\n\t}\n}", "func (p *Planner) configureObjectFieldSource(upstreamFieldRef, downstreamFieldRef int, fieldConfiguration plan.FieldConfiguration, argumentConfiguration plan.ArgumentConfiguration) {\n\tif len(argumentConfiguration.SourcePath) < 1 {\n\t\treturn\n\t}\n\n\tfieldName := p.visitor.Operation.FieldNameUnsafeString(downstreamFieldRef)\n\n\tif len(fieldConfiguration.Path) == 1 {\n\t\tfieldName = fieldConfiguration.Path[0]\n\t}\n\n\tqueryTypeDefinition, exists := p.visitor.Definition.Index.FirstNodeByNameBytes(p.visitor.Definition.Index.QueryTypeName)\n\tif !exists {\n\t\treturn\n\t}\n\targumentDefinition := p.visitor.Definition.NodeFieldDefinitionArgumentDefinitionByName(queryTypeDefinition, []byte(fieldName), []byte(argumentConfiguration.Name))\n\tif argumentDefinition == -1 {\n\t\treturn\n\t}\n\n\targumentType := p.visitor.Definition.InputValueDefinitionType(argumentDefinition)\n\tvariableName := p.upstreamOperation.GenerateUnusedVariableDefinitionName(p.nodes[0].Ref)\n\tvariableValue, argument := p.upstreamOperation.AddVariableValueArgument([]byte(argumentConfiguration.Name), variableName)\n\tp.upstreamOperation.AddArgumentToField(upstreamFieldRef, argument)\n\timportedType := p.visitor.Importer.ImportType(argumentType, 
p.visitor.Definition, p.upstreamOperation)\n\tp.upstreamOperation.AddVariableDefinitionToOperationDefinition(p.nodes[0].Ref, variableValue, importedType)\n\n\tobjectVariableName, exists := p.variables.AddVariable(&resolve.ObjectVariable{\n\t\tPath: argumentConfiguration.SourcePath,\n\t\tRenderAsGraphQLValue: true,\n\t})\n\tif !exists {\n\t\tp.upstreamVariables, _ = sjson.SetRawBytes(p.upstreamVariables, string(variableName), []byte(objectVariableName))\n\t}\n}", "func (s *BasePlSqlParserListener) EnterField_spec(ctx *Field_specContext) {}", "func (c *TypeConverter) GenStructConverter(\n\tfromFields []*compile.FieldSpec,\n\ttoFields []*compile.FieldSpec,\n\tfieldMap map[string]FieldMapperEntry,\n) error {\n\t// Add compiled FieldSpecs to the FieldMapperEntry\n\tfieldMap = addSpecToMap(fieldMap, fromFields, \"\")\n\t// Check for vlaues not populated recursively by addSpecToMap\n\tfor k, v := range fieldMap {\n\t\tif fieldMap[k].Field == nil {\n\t\t\treturn errors.Errorf(\n\t\t\t\t\"Failed to find field ( %s ) for transform.\",\n\t\t\t\tv.QualifiedName,\n\t\t\t)\n\t\t}\n\t}\n\n\tc.useRecurGen = c.isRecursiveStruct(toFields) || c.isRecursiveStruct(fromFields)\n\n\tif c.useRecurGen && len(fieldMap) != 0 {\n\t\tc.append(\"inOriginal := in; _ = inOriginal\")\n\t\tc.append(\"outOriginal := out; _ = outOriginal\")\n\t}\n\n\terr := c.genStructConverter(\"\", \"\", \"\", fromFields, toFields, fieldMap, nil)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}", "func (c *Core) generate(tab Table) (string, error) {\n\tref := []reflect.StructField{}\n\tfor _, col := range tab.Columns {\n\t\tv := reflect.StructField{\n\t\t\tName: strings.Title(col.Name),\n\t\t}\n\t\tif col.Annotations != \"\" {\n\t\t\tv.Tag = reflect.StructTag(col.Annotations)\n\t\t}\n\t\tswitch col.Type {\n\t\tcase \"float\":\n\t\t\tv.Type = reflect.TypeOf(float64(0))\n\t\tcase \"varchar\":\n\t\t\tv.Type = reflect.TypeOf(string(\"\"))\n\t\tcase \"integer\", \"int\", \"tinyint\":\n\t\t\tv.Type = 
reflect.TypeOf(int(0))\n\t\tcase \"bigint\":\n\t\t\tv.Type = reflect.TypeOf(int64(0))\n\t\tcase \"timestamp\":\n\t\t\tv.Type = reflect.TypeOf(time.Time{})\n\t\t}\n\t\tref = append(ref, v)\n\t}\n\treturn fmt.Sprintf(\"type %s %s\", strings.Title(tab.Name), reflect.StructOf(ref).String()), nil\n}", "func GenerateGoCode(preamble string, mainDefAddr string, includeDirectories []string, generate_tests bool) error {\n\n\toutDefs, version, err := XMLToFields(mainDefAddr, includeDirectories)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// merge enums together\n\tenums := make(map[string]*OutEnum)\n\tfor _, def := range outDefs {\n\t\tfor _, defEnum := range def.Enums {\n\t\t\tif _, ok := enums[defEnum.Name]; !ok {\n\t\t\t\tenums[defEnum.Name] = &OutEnum{\n\t\t\t\t\tName: defEnum.Name,\n\t\t\t\t\tDescription: defEnum.Description,\n\t\t\t\t}\n\t\t\t}\n\t\t\tenum := enums[defEnum.Name]\n\n\t\t\tfor _, v := range defEnum.Values {\n\t\t\t\tenum.Values = append(enum.Values, v)\n\t\t\t}\n\t\t}\n\t}\n\n\t// fill enum missing values\n\tfor _, enum := range enums {\n\t\tnextVal := 0\n\t\tfor _, v := range enum.Values {\n\t\t\tif v.Value != \"\" {\n\t\t\t\tnextVal, _ = strconv.Atoi(v.Value)\n\t\t\t\tnextVal++\n\t\t\t} else {\n\t\t\t\tv.Value = strconv.Itoa(nextVal)\n\t\t\t\tnextVal++\n\t\t\t}\n\t\t}\n\t}\n\n\t// get package name\n\t// remove underscores since they can lead to errors\n\t// (for instance, when package name ends with _test)\n\t_, inFile := filepath.Split(mainDefAddr)\n\tpkgName := strings.TrimSuffix(inFile, \".xml\")\n\n\t// dump\n\tif generate_tests {\n\t\treturn tplDialectTest.Execute(os.Stdout, map[string]interface{}{\n\t\t\t\"PkgName\": pkgName,\n\t\t\t\"Preamble\": preamble,\n\t\t\t\"Version\": func() int {\n\t\t\t\tret := int(version)\n\t\t\t\treturn ret\n\t\t\t}(),\n\t\t\t\"Defs\": outDefs,\n\t\t\t\"Enums\": enums,\n\t\t})\n\t} else {\n\t\treturn tplDialect.Execute(os.Stdout, map[string]interface{}{\n\t\t\t\"PkgName\": pkgName,\n\t\t\t\"Preamble\": 
preamble,\n\t\t\t\"Version\": func() int {\n\t\t\t\tret := int(version)\n\t\t\t\treturn ret\n\t\t\t}(),\n\t\t\t\"Defs\": outDefs,\n\t\t\t\"Enums\": enums,\n\t\t})\n\t}\n}", "func (x *fastReflection_ModuleOptions) NewField(fd protoreflect.FieldDescriptor) protoreflect.Value {\n\tswitch fd.FullName() {\n\tcase \"cosmos.autocli.v1.ModuleOptions.tx\":\n\t\tm := new(ServiceCommandDescriptor)\n\t\treturn protoreflect.ValueOfMessage(m.ProtoReflect())\n\tcase \"cosmos.autocli.v1.ModuleOptions.query\":\n\t\tm := new(ServiceCommandDescriptor)\n\t\treturn protoreflect.ValueOfMessage(m.ProtoReflect())\n\tdefault:\n\t\tif fd.IsExtension() {\n\t\t\tpanic(fmt.Errorf(\"proto3 declared messages do not support extensions: cosmos.autocli.v1.ModuleOptions\"))\n\t\t}\n\t\tpanic(fmt.Errorf(\"message cosmos.autocli.v1.ModuleOptions does not contain field %s\", fd.FullName()))\n\t}\n}", "func (m *Directory) GetFieldDeserializers()(map[string]func(i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode)(error)) {\n res := m.Entity.GetFieldDeserializers()\n res[\"administrativeUnits\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetCollectionOfObjectValues(CreateAdministrativeUnitFromDiscriminatorValue)\n if err != nil {\n return err\n }\n if val != nil {\n res := make([]AdministrativeUnitable, len(val))\n for i, v := range val {\n if v != nil {\n res[i] = v.(AdministrativeUnitable)\n }\n }\n m.SetAdministrativeUnits(res)\n }\n return nil\n }\n res[\"attributeSets\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetCollectionOfObjectValues(CreateAttributeSetFromDiscriminatorValue)\n if err != nil {\n return err\n }\n if val != nil {\n res := make([]AttributeSetable, len(val))\n for i, v := range val {\n if v != nil {\n res[i] = v.(AttributeSetable)\n }\n }\n m.SetAttributeSets(res)\n }\n return nil\n }\n 
res[\"customSecurityAttributeDefinitions\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetCollectionOfObjectValues(CreateCustomSecurityAttributeDefinitionFromDiscriminatorValue)\n if err != nil {\n return err\n }\n if val != nil {\n res := make([]CustomSecurityAttributeDefinitionable, len(val))\n for i, v := range val {\n if v != nil {\n res[i] = v.(CustomSecurityAttributeDefinitionable)\n }\n }\n m.SetCustomSecurityAttributeDefinitions(res)\n }\n return nil\n }\n res[\"deletedItems\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetCollectionOfObjectValues(CreateDirectoryObjectFromDiscriminatorValue)\n if err != nil {\n return err\n }\n if val != nil {\n res := make([]DirectoryObjectable, len(val))\n for i, v := range val {\n if v != nil {\n res[i] = v.(DirectoryObjectable)\n }\n }\n m.SetDeletedItems(res)\n }\n return nil\n }\n res[\"federationConfigurations\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetCollectionOfObjectValues(CreateIdentityProviderBaseFromDiscriminatorValue)\n if err != nil {\n return err\n }\n if val != nil {\n res := make([]IdentityProviderBaseable, len(val))\n for i, v := range val {\n if v != nil {\n res[i] = v.(IdentityProviderBaseable)\n }\n }\n m.SetFederationConfigurations(res)\n }\n return nil\n }\n res[\"onPremisesSynchronization\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetCollectionOfObjectValues(CreateOnPremisesDirectorySynchronizationFromDiscriminatorValue)\n if err != nil {\n return err\n }\n if val != nil {\n res := make([]OnPremisesDirectorySynchronizationable, len(val))\n for i, v := range val {\n if v != nil {\n res[i] = v.(OnPremisesDirectorySynchronizationable)\n }\n }\n m.SetOnPremisesSynchronization(res)\n }\n return nil\n }\n return res\n}", "func 
printStructField(t *reflect.Type) {\n fieldNum := (*t).NumField()\n for i := 0; i < fieldNum; i++ {\n fmt.Printf(\"conf's field: %s\\n\", (*t).Field(i).Name)\n }\n fmt.Println(\"\")\n}", "func handleSpecRecursive(module *common.Module, astFiles *AstFiles, spec interface{}, prefix, aliasPrefix, event string, iterator *common.StructField, dejavu map[string]bool) {\n\tif verbose {\n\t\tfmt.Printf(\"handleSpec spec: %+v, prefix: %s, aliasPrefix %s, event %s, iterator %+v\\n\", spec, prefix, aliasPrefix, event, iterator)\n\t}\n\n\tvar typeSpec *ast.TypeSpec\n\tvar structType *ast.StructType\n\tvar ok bool\n\tif typeSpec, ok = spec.(*ast.TypeSpec); !ok {\n\t\treturn\n\t}\n\tif structType, ok = typeSpec.Type.(*ast.StructType); !ok {\n\t\tlog.Printf(\"Don't know what to do with %s (%s)\", typeSpec.Name, spew.Sdump(typeSpec))\n\t\treturn\n\t}\n\n\tfor _, field := range structType.Fields.List {\n\t\tfieldCommentText := field.Comment.Text()\n\t\tfieldIterator := iterator\n\n\t\tvar tag reflect.StructTag\n\t\tif field.Tag != nil {\n\t\t\ttag = reflect.StructTag(field.Tag.Value[1 : len(field.Tag.Value)-1])\n\t\t}\n\n\t\tif e, ok := tag.Lookup(\"event\"); ok {\n\t\t\tevent = e\n\t\t\tif _, ok = module.EventTypes[e]; !ok {\n\t\t\t\tmodule.EventTypes[e] = common.NewEventTypeMetada()\n\t\t\t\tdejavu = make(map[string]bool) // clear dejavu map when it's a new event type\n\t\t\t}\n\t\t\tif e != \"*\" {\n\t\t\t\tmodule.EventTypes[e].Doc = fieldCommentText\n\t\t\t}\n\t\t}\n\n\t\tif isEmbedded := len(field.Names) == 0; isEmbedded {\n\t\t\tif fieldTag, found := tag.Lookup(\"field\"); found && fieldTag == \"-\" {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tident, _ := field.Type.(*ast.Ident)\n\t\t\tif ident == nil {\n\t\t\t\tif starExpr, ok := field.Type.(*ast.StarExpr); ok {\n\t\t\t\t\tident, _ = starExpr.X.(*ast.Ident)\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tif ident != nil {\n\t\t\t\tname := ident.Name\n\t\t\t\tif prefix != \"\" {\n\t\t\t\t\tname = prefix + \".\" + 
ident.Name\n\t\t\t\t}\n\n\t\t\t\tembedded := astFiles.LookupSymbol(ident.Name)\n\t\t\t\tif embedded != nil {\n\t\t\t\t\thandleEmbedded(module, ident.Name, prefix, event, field.Type)\n\t\t\t\t\thandleSpecRecursive(module, astFiles, embedded.Decl, name, aliasPrefix, event, fieldIterator, dejavu)\n\t\t\t\t} else {\n\t\t\t\t\tlog.Printf(\"failed to resolve symbol for %+v in %s\", ident.Name, pkgname)\n\t\t\t\t}\n\t\t\t}\n\t\t} else {\n\t\t\tfieldBasename := field.Names[0].Name\n\t\t\tif !unicode.IsUpper(rune(fieldBasename[0])) {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tif dejavu[fieldBasename] {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tvar opOverrides string\n\t\t\tvar fields []seclField\n\t\t\tif tags, err := structtag.Parse(string(tag)); err == nil && len(tags.Tags()) != 0 {\n\t\t\t\topOverrides, fields = parseTags(tags, typeSpec.Name.Name)\n\n\t\t\t\tif opOverrides == \"\" && fields == nil {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\n\t\t\t} else {\n\t\t\t\tfields = append(fields, seclField{name: fieldBasename})\n\t\t\t}\n\n\t\t\tfieldType, isPointer, isArray := getFieldIdentName(field.Type)\n\n\t\t\tprefixedFieldName := fieldBasename\n\t\t\tif prefix != \"\" {\n\t\t\t\tprefixedFieldName = fmt.Sprintf(\"%s.%s\", prefix, fieldBasename)\n\t\t\t}\n\n\t\t\tfor _, seclField := range fields {\n\t\t\t\thandleNonEmbedded(module, seclField, prefixedFieldName, event, fieldType, isPointer, isArray)\n\n\t\t\t\tif seclFieldIterator := seclField.iterator; seclFieldIterator != \"\" {\n\t\t\t\t\tfieldIterator = handleIterator(module, seclField, fieldType, seclFieldIterator, aliasPrefix, prefixedFieldName, event, fieldCommentText, opOverrides, isPointer, isArray)\n\t\t\t\t}\n\n\t\t\t\tif handler := seclField.handler; handler != \"\" {\n\n\t\t\t\t\thandleFieldWithHandler(module, seclField, aliasPrefix, prefix, prefixedFieldName, fieldType, seclField.containerStructName, event, fieldCommentText, opOverrides, handler, isPointer, isArray, fieldIterator)\n\n\t\t\t\t\tdelete(dejavu, 
fieldBasename)\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\n\t\t\t\tif verbose {\n\t\t\t\t\tlog.Printf(\"Don't know what to do with %s: %s\", fieldBasename, spew.Sdump(field.Type))\n\t\t\t\t}\n\n\t\t\t\tdejavu[fieldBasename] = true\n\n\t\t\t\tif len(fieldType) == 0 {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\n\t\t\t\talias := seclField.name\n\t\t\t\tif isBasicType(fieldType) {\n\t\t\t\t\thandleBasic(module, seclField, fieldBasename, alias, aliasPrefix, prefix, fieldType, event, opOverrides, fieldCommentText, seclField.containerStructName, fieldIterator, isArray)\n\t\t\t\t} else {\n\t\t\t\t\tspec := astFiles.LookupSymbol(fieldType)\n\t\t\t\t\tif spec != nil {\n\t\t\t\t\t\tnewPrefix, newAliasPrefix := fieldBasename, alias\n\n\t\t\t\t\t\tif prefix != \"\" {\n\t\t\t\t\t\t\tnewPrefix = prefix + \".\" + fieldBasename\n\t\t\t\t\t\t}\n\n\t\t\t\t\t\tif aliasPrefix != \"\" {\n\t\t\t\t\t\t\tnewAliasPrefix = aliasPrefix + \".\" + alias\n\t\t\t\t\t\t}\n\n\t\t\t\t\t\thandleSpecRecursive(module, astFiles, spec.Decl, newPrefix, newAliasPrefix, event, fieldIterator, dejavu)\n\t\t\t\t\t} else {\n\t\t\t\t\t\tlog.Printf(\"failed to resolve symbol for %+v in %s\", fieldType, pkgname)\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\tif !seclField.exposedAtEventRootOnly {\n\t\t\t\t\tdelete(dejavu, fieldBasename)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n}", "func GenElem(in *ast.TypeSpec) gen.Elem {\n\t// handle supported types\n\tswitch in.Type.(type) {\n\n\tcase *ast.StructType:\n\t\tv := in.Type.(*ast.StructType)\n\t\tfmt.Printf(chalk.Green.Color(\"parsing %s...\"), in.Name.Name)\n\t\tp := &gen.Ptr{\n\t\t\tValue: &gen.Struct{\n\t\t\t\tName: in.Name.Name, // ast.Ident\n\t\t\t\tFields: parseFieldList(v.Fields),\n\t\t\t},\n\t\t}\n\n\t\t// mark type as processed\n\t\tglobalProcessed[in.Name.Name] = struct{}{}\n\n\t\tif len(p.Value.(*gen.Struct).Fields) == 0 {\n\t\t\tfmt.Printf(chalk.Red.Color(\" has no exported fields \\u2717\\n\")) // X\n\t\t\treturn nil\n\t\t}\n\t\tfmt.Print(chalk.Green.Color(\" \\u2713\\n\")) // 
check\n\t\treturn p\n\n\tdefault:\n\t\treturn nil\n\n\t}\n}", "func (node selfNode) packIntoField(name string, field reflect.Value) (err error) {\n\n\tfieldKind := field.Kind()\n\n\tif isScalarKind(fieldKind) {\n\t\tif len(node.values) != 1 {\n\t\t\treturn node.newPackError(\"bad number of values for scalar field `\" + name + \"`\")\n\t\t}\n\t\tif _, ok := node.values[0].(selfString); !ok {\n\t\t\treturn node.newPackError(\"expected a string element for scalar field `\" + name + \"`\")\n\t\t}\n\t\tstrValue := node.values[0].(selfString)\n\t\treturn strValue.packIntoField(name, field)\n\n\t} else if fieldKind == reflect.Struct {\n\t\treturn node.packToStruct(field)\n\n\t} else if fieldKind == reflect.Array {\n\t\treturn node.packToArray(field)\n\n\t} else if fieldKind == reflect.Slice {\n\t\treturn node.packToSlice(field)\n\n\t} else if fieldKind == reflect.Map {\n\t\tfield.Set(reflect.MakeMap(field.Type())) // Map requires initialization.\n\t\treturn node.packToMap(field)\n\n\t} else {\n\t\treturn node.newPackError(\"unsupported field kind \" + fieldKind.String())\n\t}\n\n\treturn\n}", "func (m *DeviceManagementConfigurationSettingGroupDefinition) GetFieldDeserializers()(map[string]func(i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode)(error)) {\n res := m.DeviceManagementConfigurationSettingDefinition.GetFieldDeserializers()\n res[\"childIds\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetCollectionOfPrimitiveValues(\"string\")\n if err != nil {\n return err\n }\n if val != nil {\n res := make([]string, len(val))\n for i, v := range val {\n if v != nil {\n res[i] = *(v.(*string))\n }\n }\n m.SetChildIds(res)\n }\n return nil\n }\n res[\"dependedOnBy\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetCollectionOfObjectValues(CreateDeviceManagementConfigurationSettingDependedOnByFromDiscriminatorValue)\n if 
err != nil {\n return err\n }\n if val != nil {\n res := make([]DeviceManagementConfigurationSettingDependedOnByable, len(val))\n for i, v := range val {\n if v != nil {\n res[i] = v.(DeviceManagementConfigurationSettingDependedOnByable)\n }\n }\n m.SetDependedOnBy(res)\n }\n return nil\n }\n res[\"dependentOn\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetCollectionOfObjectValues(CreateDeviceManagementConfigurationDependentOnFromDiscriminatorValue)\n if err != nil {\n return err\n }\n if val != nil {\n res := make([]DeviceManagementConfigurationDependentOnable, len(val))\n for i, v := range val {\n if v != nil {\n res[i] = v.(DeviceManagementConfigurationDependentOnable)\n }\n }\n m.SetDependentOn(res)\n }\n return nil\n }\n return res\n}", "func (m *LabelActionBase) GetFieldDeserializers()(map[string]func(i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode)(error)) {\n res := make(map[string]func(i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode)(error))\n res[\"name\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetStringValue()\n if err != nil {\n return err\n }\n if val != nil {\n m.SetName(val)\n }\n return nil\n }\n res[\"@odata.type\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetStringValue()\n if err != nil {\n return err\n }\n if val != nil {\n m.SetOdataType(val)\n }\n return nil\n }\n return res\n}", "func (s *state) evalField(dot reflect.Value, fieldName string, node parse.Node, args []parse.Node, final, receiver reflect.Value) reflect.Value {\n\tif !receiver.IsValid() {\n\t\treturn zero\n\t}\n\ttyp := receiver.Type()\n\treceiver, _ = indirect(receiver)\n\t// Unless it's an interface, need to get to a value of type *T to guarantee\n\t// we see all methods of T and *T.\n\tptr := receiver\n\tif 
ptr.Kind() != reflect.Interface && ptr.CanAddr() {\n\t\tptr = ptr.Addr()\n\t}\n\tif method := ptr.MethodByName(fieldName); method.IsValid() {\n\t\treturn s.evalCall(dot, method, node, fieldName, args, final)\n\t}\n\thasArgs := len(args) > 1 || final.IsValid()\n\t// It's not a method; must be a field of a struct or an element of a map. The receiver must not be nil.\n\treceiver, isNil := indirect(receiver)\n\tif isNil {\n\t\ts.errorf(\"nil pointer evaluating %s.%s\", typ, fieldName)\n\t}\n\tswitch receiver.Kind() {\n\tcase reflect.Struct:\n\t\ttField, ok := receiver.Type().FieldByName(fieldName)\n\t\tif ok {\n\t\t\tfield := receiver.FieldByIndex(tField.Index)\n\t\t\tif tField.PkgPath != \"\" { // field is unexported\n\t\t\t\ts.errorf(\"%s is an unexported field of struct type %s\", fieldName, typ)\n\t\t\t}\n\t\t\t// If it's a function, we must call it.\n\t\t\tif hasArgs {\n\t\t\t\ts.errorf(\"%s has arguments but cannot be invoked as function\", fieldName)\n\t\t\t}\n\t\t\treturn field\n\t\t}\n\t\ts.errorf(\"%s is not a field of struct type %s\", fieldName, typ)\n\tcase reflect.Map:\n\t\t// If it's a map, attempt to use the field name as a key.\n\t\tnameVal := reflect.ValueOf(fieldName)\n\t\tif nameVal.Type().AssignableTo(receiver.Type().Key()) {\n\t\t\tif hasArgs {\n\t\t\t\ts.errorf(\"%s is not a method but has arguments\", fieldName)\n\t\t\t}\n\t\t\treturn receiver.MapIndex(nameVal)\n\t\t}\n\t}\n\ts.errorf(\"can't evaluate field %s in type %s\", fieldName, typ)\n\tpanic(\"not reached\")\n}", "func (m *AccessPackage) GetFieldDeserializers()(map[string]func(i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode)(error)) {\n res := m.Entity.GetFieldDeserializers()\n res[\"accessPackagesIncompatibleWith\"] = i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f.SetCollectionOfObjectValues(CreateAccessPackageFromDiscriminatorValue , m.SetAccessPackagesIncompatibleWith)\n res[\"assignmentPolicies\"] = 
i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f.SetCollectionOfObjectValues(CreateAccessPackageAssignmentPolicyFromDiscriminatorValue , m.SetAssignmentPolicies)\n res[\"catalog\"] = i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f.SetObjectValue(CreateAccessPackageCatalogFromDiscriminatorValue , m.SetCatalog)\n res[\"createdDateTime\"] = i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f.SetTimeValue(m.SetCreatedDateTime)\n res[\"description\"] = i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f.SetStringValue(m.SetDescription)\n res[\"displayName\"] = i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f.SetStringValue(m.SetDisplayName)\n res[\"incompatibleAccessPackages\"] = i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f.SetCollectionOfObjectValues(CreateAccessPackageFromDiscriminatorValue , m.SetIncompatibleAccessPackages)\n res[\"incompatibleGroups\"] = i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f.SetCollectionOfObjectValues(CreateGroupFromDiscriminatorValue , m.SetIncompatibleGroups)\n res[\"isHidden\"] = i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f.SetBoolValue(m.SetIsHidden)\n res[\"modifiedDateTime\"] = i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f.SetTimeValue(m.SetModifiedDateTime)\n return res\n}", "func (m *VppToken) GetFieldDeserializers()(map[string]func(i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode)(error)) {\n res := m.Entity.GetFieldDeserializers()\n res[\"appleId\"] = i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f.SetStringValue(m.SetAppleId)\n res[\"automaticallyUpdateApps\"] = i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f.SetBoolValue(m.SetAutomaticallyUpdateApps)\n res[\"countryOrRegion\"] = i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f.SetStringValue(m.SetCountryOrRegion)\n 
res[\"expirationDateTime\"] = i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f.SetTimeValue(m.SetExpirationDateTime)\n res[\"lastModifiedDateTime\"] = i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f.SetTimeValue(m.SetLastModifiedDateTime)\n res[\"lastSyncDateTime\"] = i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f.SetTimeValue(m.SetLastSyncDateTime)\n res[\"lastSyncStatus\"] = i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f.SetEnumValue(ParseVppTokenSyncStatus , m.SetLastSyncStatus)\n res[\"organizationName\"] = i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f.SetStringValue(m.SetOrganizationName)\n res[\"state\"] = i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f.SetEnumValue(ParseVppTokenState , m.SetState)\n res[\"token\"] = i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f.SetStringValue(m.SetToken)\n res[\"vppTokenAccountType\"] = i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f.SetEnumValue(ParseVppTokenAccountType , m.SetVppTokenAccountType)\n return res\n}", "func parseStructField(cache structCache, key, sk, keytail string, values []string, target reflect.Value) {\n\tl, ok := cache[sk]\n\tif !ok {\n\t\tpanic(KeyError{\n\t\t\tFullKey: key,\n\t\t\tKey: kpath(key, keytail),\n\t\t\tType: target.Type(),\n\t\t\tField: sk,\n\t\t})\n\t}\n\tf := target.Field(l.offset)\n\n\tl.parse(key, keytail, values, f)\n}", "func getFieldMetadata(pkg *model.ModelPackage, column *model.Column) string {\n\tvar buffer bytes.Buffer\n\tbuffer.WriteString(\"sql:\\\"\")\n\tbuffer.WriteString(\"type:\")\n\tbuffer.WriteString(column.ColumnType)\n\tif !column.IsNullable {\n\t\tbuffer.WriteString(\";not null\")\n\t}\n\tif column.IsUnique {\n\t\tbuffer.WriteString(\";unique\")\n\t}\n\tif column.IsAutoIncrement {\n\t\tbuffer.WriteString(\";AUTO_INCREMENT\")\n\t}\n\tbuffer.WriteString(\"\\\"\")\n\treturn buffer.String()\n}", "func (p *Planner) planField(e 
*Executor, f *Field) (processor, error) {\n\treturn p.planExpr(e, f.Expr)\n}", "func Field(typ ast.Expr, names ...*ast.Ident) *ast.Field {\n\treturn &ast.Field{\n\t\tNames: names,\n\t\tType: typ,\n\t}\n}", "func generatePerNodeSnippet(pathStructName string, nodeData *ypathgen.NodeData, fakeRootTypeName, schemaStructPkgAccessor string, preferShadowPath bool) (GoPerNodeCodeSnippet, goTypeData, util.Errors) {\n\t// Special case: ieeefloat32 is represented as a 4-byte binary in YANG\n\t// and ygen, but float32 is more user-friendly.\n\tvar specialConversionFn string\n\tif nodeData.YANGTypeName == \"ieeefloat32\" {\n\t\tswitch nodeData.LocalGoTypeName {\n\t\tcase \"Binary\":\n\t\t\tnodeData.GoTypeName = \"float32\"\n\t\t\tnodeData.LocalGoTypeName = \"float32\"\n\t\t\tspecialConversionFn = \"ygot.BinaryToFloat32\"\n\t\tcase \"[]\" + \"Binary\":\n\t\t\tnodeData.GoTypeName = \"[]float32\"\n\t\t\tnodeData.LocalGoTypeName = \"[]float32\"\n\t\t\tspecialConversionFn = \"binarySliceToFloat32\"\n\t\tdefault:\n\t\t\treturn GoPerNodeCodeSnippet{}, goTypeData{}, util.NewErrs(\n\t\t\t\terrors.Errorf(\"ieeefloat32 is expected to be a binary, got %q\", nodeData.LocalGoTypeName))\n\t\t}\n\t}\n\n\tvar errs util.Errors\n\ts := struct {\n\t\tPathStructName string\n\t\tGoType goTypeData\n\t\tGoFieldName string\n\t\tGoStructTypeName string\n\t\tYANGPath string\n\t\tFakeRootTypeName string\n\t\t// IsScalarField indicates a leaf that is stored as a pointer\n\t\t// in its parent struct.\n\t\tIsScalarField bool\n\t\tIsRoot bool\n\t\tSchemaStructPkgAccessor string\n\t\t// WildcardSuffix is the suffix used to indicate that a path\n\t\t// node contains a wildcard.\n\t\tWildcardSuffix string\n\t\t// SpecialConversionFn is the special-case conversion function\n\t\t// to convert the field from the parent struct into the\n\t\t// qualified type returned to the user.\n\t\tSpecialConversionFn string\n\t\tPreferShadowPath bool\n\t}{\n\t\tPathStructName: pathStructName,\n\t\tGoType: 
goTypeData{\n\t\t\tGoTypeName: nodeData.GoTypeName,\n\t\t\tTransformedGoTypeName: transformGoTypeName(nodeData),\n\t\t\tIsLeaf: nodeData.IsLeaf,\n\t\t\tHasDefault: nodeData.HasDefault,\n\t\t},\n\t\tGoFieldName: nodeData.GoFieldName,\n\t\tGoStructTypeName: nodeData.SubsumingGoStructName,\n\t\tYANGPath: nodeData.YANGPath,\n\t\tFakeRootTypeName: fakeRootTypeName,\n\t\tIsScalarField: nodeData.IsScalarField,\n\t\tIsRoot: nodeData.YANGPath == \"/\",\n\t\tWildcardSuffix: ypathgen.WildcardSuffix,\n\t\tSpecialConversionFn: specialConversionFn,\n\t\tSchemaStructPkgAccessor: schemaStructPkgAccessor,\n\t\tPreferShadowPath: preferShadowPath,\n\t}\n\tvar getMethod, collectMethod, convertHelper strings.Builder\n\tif nodeData.IsLeaf {\n\t\t// Leaf types use their parent GoStruct to unmarshal, before\n\t\t// being retrieved out when returned to the user.\n\t\tif err := goLeafConvertTemplate.Execute(&convertHelper, s); err != nil {\n\t\t\tutil.AppendErr(errs, err)\n\t\t}\n\t\t// TODO: Collect methods for non-leaf nodes is not implemented.\n\t\tif err := goLeafCollectTemplate.Execute(&collectMethod, s); err != nil {\n\t\t\tutil.AppendErr(errs, err)\n\t\t}\n\t}\n\tif err := goNodeGetTemplate.Execute(&getMethod, s); err != nil {\n\t\tutil.AppendErr(errs, err)\n\t}\n\n\treturn GoPerNodeCodeSnippet{\n\t\tPathStructName: pathStructName,\n\t\tGetMethod: getMethod.String(),\n\t\tCollectMethod: collectMethod.String(),\n\t\tConvertHelper: convertHelper.String(),\n\t}, s.GoType, errs\n}", "func init() {\n\tgroupFields := schema.Group{}.Fields()\n\t_ = groupFields\n\t// groupDescTenant is the schema descriptor for tenant field.\n\tgroupDescTenant := groupFields[0].Descriptor()\n\t// group.TenantValidator is a validator for the \"tenant\" field. 
It is called by the builders before save.\n\tgroup.TenantValidator = groupDescTenant.Validators[0].(func(string) error)\n\t// groupDescName is the schema descriptor for name field.\n\tgroupDescName := groupFields[1].Descriptor()\n\t// group.NameValidator is a validator for the \"name\" field. It is called by the builders before save.\n\tgroup.NameValidator = groupDescName.Validators[0].(func(string) error)\n\t// groupDescType is the schema descriptor for type field.\n\tgroupDescType := groupFields[2].Descriptor()\n\t// group.TypeValidator is a validator for the \"type\" field. It is called by the builders before save.\n\tgroup.TypeValidator = groupDescType.Validators[0].(func(string) error)\n\t// groupDescCreatedAt is the schema descriptor for created_at field.\n\tgroupDescCreatedAt := groupFields[5].Descriptor()\n\t// group.DefaultCreatedAt holds the default value on creation for the created_at field.\n\tgroup.DefaultCreatedAt = groupDescCreatedAt.Default.(func() time.Time)\n\t// groupDescUpdatedAt is the schema descriptor for updated_at field.\n\tgroupDescUpdatedAt := groupFields[6].Descriptor()\n\t// group.DefaultUpdatedAt holds the default value on creation for the updated_at field.\n\tgroup.DefaultUpdatedAt = groupDescUpdatedAt.Default.(func() time.Time)\n\t// group.UpdateDefaultUpdatedAt holds the default value on update for the updated_at field.\n\tgroup.UpdateDefaultUpdatedAt = groupDescUpdatedAt.UpdateDefault.(func() time.Time)\n\tnodeFields := schema.Node{}.Fields()\n\t_ = nodeFields\n\t// nodeDescTenant is the schema descriptor for tenant field.\n\tnodeDescTenant := nodeFields[0].Descriptor()\n\t// node.TenantValidator is a validator for the \"tenant\" field. It is called by the builders before save.\n\tnode.TenantValidator = nodeDescTenant.Validators[0].(func(string) error)\n\t// nodeDescName is the schema descriptor for name field.\n\tnodeDescName := nodeFields[1].Descriptor()\n\t// node.NameValidator is a validator for the \"name\" field. 
It is called by the builders before save.\n\tnode.NameValidator = nodeDescName.Validators[0].(func(string) error)\n\t// nodeDescType is the schema descriptor for type field.\n\tnodeDescType := nodeFields[2].Descriptor()\n\t// node.TypeValidator is a validator for the \"type\" field. It is called by the builders before save.\n\tnode.TypeValidator = nodeDescType.Validators[0].(func(string) error)\n\t// nodeDescCreatedAt is the schema descriptor for created_at field.\n\tnodeDescCreatedAt := nodeFields[5].Descriptor()\n\t// node.DefaultCreatedAt holds the default value on creation for the created_at field.\n\tnode.DefaultCreatedAt = nodeDescCreatedAt.Default.(func() time.Time)\n\t// nodeDescUpdatedAt is the schema descriptor for updated_at field.\n\tnodeDescUpdatedAt := nodeFields[6].Descriptor()\n\t// node.DefaultUpdatedAt holds the default value on creation for the updated_at field.\n\tnode.DefaultUpdatedAt = nodeDescUpdatedAt.Default.(func() time.Time)\n\t// node.UpdateDefaultUpdatedAt holds the default value on update for the updated_at field.\n\tnode.UpdateDefaultUpdatedAt = nodeDescUpdatedAt.UpdateDefault.(func() time.Time)\n\tpermissionFields := schema.Permission{}.Fields()\n\t_ = permissionFields\n\t// permissionDescTenant is the schema descriptor for tenant field.\n\tpermissionDescTenant := permissionFields[0].Descriptor()\n\t// permission.TenantValidator is a validator for the \"tenant\" field. It is called by the builders before save.\n\tpermission.TenantValidator = permissionDescTenant.Validators[0].(func(string) error)\n\t// permissionDescName is the schema descriptor for name field.\n\tpermissionDescName := permissionFields[1].Descriptor()\n\t// permission.NameValidator is a validator for the \"name\" field. 
It is called by the builders before save.\n\tpermission.NameValidator = permissionDescName.Validators[0].(func(string) error)\n\t// permissionDescCreatedAt is the schema descriptor for created_at field.\n\tpermissionDescCreatedAt := permissionFields[3].Descriptor()\n\t// permission.DefaultCreatedAt holds the default value on creation for the created_at field.\n\tpermission.DefaultCreatedAt = permissionDescCreatedAt.Default.(func() time.Time)\n\t// permissionDescUpdatedAt is the schema descriptor for updated_at field.\n\tpermissionDescUpdatedAt := permissionFields[4].Descriptor()\n\t// permission.DefaultUpdatedAt holds the default value on creation for the updated_at field.\n\tpermission.DefaultUpdatedAt = permissionDescUpdatedAt.Default.(func() time.Time)\n\t// permission.UpdateDefaultUpdatedAt holds the default value on update for the updated_at field.\n\tpermission.UpdateDefaultUpdatedAt = permissionDescUpdatedAt.UpdateDefault.(func() time.Time)\n\trouteFields := schema.Route{}.Fields()\n\t_ = routeFields\n\t// routeDescTenant is the schema descriptor for tenant field.\n\trouteDescTenant := routeFields[0].Descriptor()\n\t// route.TenantValidator is a validator for the \"tenant\" field. It is called by the builders before save.\n\troute.TenantValidator = routeDescTenant.Validators[0].(func(string) error)\n\t// routeDescName is the schema descriptor for name field.\n\trouteDescName := routeFields[1].Descriptor()\n\t// route.NameValidator is a validator for the \"name\" field. It is called by the builders before save.\n\troute.NameValidator = routeDescName.Validators[0].(func(string) error)\n\t// routeDescURI is the schema descriptor for uri field.\n\trouteDescURI := routeFields[2].Descriptor()\n\t// route.URIValidator is a validator for the \"uri\" field. 
It is called by the builders before save.\n\troute.URIValidator = routeDescURI.Validators[0].(func(string) error)\n\t// routeDescCreatedAt is the schema descriptor for created_at field.\n\trouteDescCreatedAt := routeFields[5].Descriptor()\n\t// route.DefaultCreatedAt holds the default value on creation for the created_at field.\n\troute.DefaultCreatedAt = routeDescCreatedAt.Default.(func() time.Time)\n\t// routeDescUpdatedAt is the schema descriptor for updated_at field.\n\trouteDescUpdatedAt := routeFields[6].Descriptor()\n\t// route.DefaultUpdatedAt holds the default value on creation for the updated_at field.\n\troute.DefaultUpdatedAt = routeDescUpdatedAt.Default.(func() time.Time)\n\t// route.UpdateDefaultUpdatedAt holds the default value on update for the updated_at field.\n\troute.UpdateDefaultUpdatedAt = routeDescUpdatedAt.UpdateDefault.(func() time.Time)\n\tuserFields := schema.User{}.Fields()\n\t_ = userFields\n\t// userDescTenant is the schema descriptor for tenant field.\n\tuserDescTenant := userFields[0].Descriptor()\n\t// user.TenantValidator is a validator for the \"tenant\" field. It is called by the builders before save.\n\tuser.TenantValidator = userDescTenant.Validators[0].(func(string) error)\n\t// userDescUUID is the schema descriptor for uuid field.\n\tuserDescUUID := userFields[1].Descriptor()\n\t// user.UUIDValidator is a validator for the \"uuid\" field. 
It is called by the builders before save.\n\tuser.UUIDValidator = userDescUUID.Validators[0].(func(string) error)\n\t// userDescIsSuper is the schema descriptor for is_super field.\n\tuserDescIsSuper := userFields[3].Descriptor()\n\t// user.DefaultIsSuper holds the default value on creation for the is_super field.\n\tuser.DefaultIsSuper = userDescIsSuper.Default.(bool)\n\t// userDescCreatedAt is the schema descriptor for created_at field.\n\tuserDescCreatedAt := userFields[5].Descriptor()\n\t// user.DefaultCreatedAt holds the default value on creation for the created_at field.\n\tuser.DefaultCreatedAt = userDescCreatedAt.Default.(func() time.Time)\n\t// userDescUpdatedAt is the schema descriptor for updated_at field.\n\tuserDescUpdatedAt := userFields[6].Descriptor()\n\t// user.DefaultUpdatedAt holds the default value on creation for the updated_at field.\n\tuser.DefaultUpdatedAt = userDescUpdatedAt.Default.(func() time.Time)\n\t// user.UpdateDefaultUpdatedAt holds the default value on update for the updated_at field.\n\tuser.UpdateDefaultUpdatedAt = userDescUpdatedAt.UpdateDefault.(func() time.Time)\n}", "func (m *SolutionsRoot) GetFieldDeserializers()(map[string]func(i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode)(error)) {\n res := make(map[string]func(i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode)(error))\n res[\"businessScenarios\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetCollectionOfObjectValues(CreateBusinessScenarioFromDiscriminatorValue)\n if err != nil {\n return err\n }\n if val != nil {\n res := make([]BusinessScenarioable, len(val))\n for i, v := range val {\n if v != nil {\n res[i] = v.(BusinessScenarioable)\n }\n }\n m.SetBusinessScenarios(res)\n }\n return nil\n }\n res[\"@odata.type\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetStringValue()\n if 
err != nil {\n return err\n }\n if val != nil {\n m.SetOdataType(val)\n }\n return nil\n }\n res[\"virtualEvents\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetObjectValue(CreateVirtualEventsRootFromDiscriminatorValue)\n if err != nil {\n return err\n }\n if val != nil {\n m.SetVirtualEvents(val.(VirtualEventsRootable))\n }\n return nil\n }\n return res\n}", "func (m *DeviceManagementConfigurationSettingDefinition) GetFieldDeserializers()(map[string]func(i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode)(error)) {\n res := m.Entity.GetFieldDeserializers()\n res[\"accessTypes\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetEnumValue(ParseDeviceManagementConfigurationSettingAccessTypes)\n if err != nil {\n return err\n }\n if val != nil {\n m.SetAccessTypes(val.(*DeviceManagementConfigurationSettingAccessTypes))\n }\n return nil\n }\n res[\"applicability\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetObjectValue(CreateDeviceManagementConfigurationSettingApplicabilityFromDiscriminatorValue)\n if err != nil {\n return err\n }\n if val != nil {\n m.SetApplicability(val.(DeviceManagementConfigurationSettingApplicabilityable))\n }\n return nil\n }\n res[\"baseUri\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetStringValue()\n if err != nil {\n return err\n }\n if val != nil {\n m.SetBaseUri(val)\n }\n return nil\n }\n res[\"categoryId\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetStringValue()\n if err != nil {\n return err\n }\n if val != nil {\n m.SetCategoryId(val)\n }\n return nil\n }\n res[\"description\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error 
{\n val, err := n.GetStringValue()\n if err != nil {\n return err\n }\n if val != nil {\n m.SetDescription(val)\n }\n return nil\n }\n res[\"displayName\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetStringValue()\n if err != nil {\n return err\n }\n if val != nil {\n m.SetDisplayName(val)\n }\n return nil\n }\n res[\"helpText\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetStringValue()\n if err != nil {\n return err\n }\n if val != nil {\n m.SetHelpText(val)\n }\n return nil\n }\n res[\"infoUrls\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetCollectionOfPrimitiveValues(\"string\")\n if err != nil {\n return err\n }\n if val != nil {\n res := make([]string, len(val))\n for i, v := range val {\n if v != nil {\n res[i] = *(v.(*string))\n }\n }\n m.SetInfoUrls(res)\n }\n return nil\n }\n res[\"keywords\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetCollectionOfPrimitiveValues(\"string\")\n if err != nil {\n return err\n }\n if val != nil {\n res := make([]string, len(val))\n for i, v := range val {\n if v != nil {\n res[i] = *(v.(*string))\n }\n }\n m.SetKeywords(res)\n }\n return nil\n }\n res[\"name\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetStringValue()\n if err != nil {\n return err\n }\n if val != nil {\n m.SetName(val)\n }\n return nil\n }\n res[\"occurrence\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetObjectValue(CreateDeviceManagementConfigurationSettingOccurrenceFromDiscriminatorValue)\n if err != nil {\n return err\n }\n if val != nil {\n m.SetOccurrence(val.(DeviceManagementConfigurationSettingOccurrenceable))\n }\n return nil\n }\n 
res[\"offsetUri\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetStringValue()\n if err != nil {\n return err\n }\n if val != nil {\n m.SetOffsetUri(val)\n }\n return nil\n }\n res[\"referredSettingInformationList\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetCollectionOfObjectValues(CreateDeviceManagementConfigurationReferredSettingInformationFromDiscriminatorValue)\n if err != nil {\n return err\n }\n if val != nil {\n res := make([]DeviceManagementConfigurationReferredSettingInformationable, len(val))\n for i, v := range val {\n if v != nil {\n res[i] = v.(DeviceManagementConfigurationReferredSettingInformationable)\n }\n }\n m.SetReferredSettingInformationList(res)\n }\n return nil\n }\n res[\"rootDefinitionId\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetStringValue()\n if err != nil {\n return err\n }\n if val != nil {\n m.SetRootDefinitionId(val)\n }\n return nil\n }\n res[\"settingUsage\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetEnumValue(ParseDeviceManagementConfigurationSettingUsage)\n if err != nil {\n return err\n }\n if val != nil {\n m.SetSettingUsage(val.(*DeviceManagementConfigurationSettingUsage))\n }\n return nil\n }\n res[\"uxBehavior\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetEnumValue(ParseDeviceManagementConfigurationControlType)\n if err != nil {\n return err\n }\n if val != nil {\n m.SetUxBehavior(val.(*DeviceManagementConfigurationControlType))\n }\n return nil\n }\n res[\"version\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetStringValue()\n if err != nil {\n return err\n }\n if val != nil {\n m.SetVersion(val)\n }\n 
return nil\n }\n res[\"visibility\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetEnumValue(ParseDeviceManagementConfigurationSettingVisibility)\n if err != nil {\n return err\n }\n if val != nil {\n m.SetVisibility(val.(*DeviceManagementConfigurationSettingVisibility))\n }\n return nil\n }\n return res\n}", "func (m *CreatePostRequestBody) GetFieldDeserializers()(map[string]func(i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode)(error)) {\n res := make(map[string]func(i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode)(error))\n res[\"certificateSigningRequest\"] = i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f.SetObjectValue(iadcd81124412c61e647227ecfc4449d8bba17de0380ddda76f641a29edf2b242.CreatePrintCertificateSigningRequestFromDiscriminatorValue , m.SetCertificateSigningRequest)\n res[\"connectorId\"] = i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f.SetStringValue(m.SetConnectorId)\n res[\"displayName\"] = i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f.SetStringValue(m.SetDisplayName)\n res[\"hasPhysicalDevice\"] = i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f.SetBoolValue(m.SetHasPhysicalDevice)\n res[\"manufacturer\"] = i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f.SetStringValue(m.SetManufacturer)\n res[\"model\"] = i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f.SetStringValue(m.SetModel)\n res[\"physicalDeviceId\"] = i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f.SetStringValue(m.SetPhysicalDeviceId)\n return res\n}", "func (m *ManagementTemplateStep) GetFieldDeserializers()(map[string]func(i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode)(error)) {\n res := m.Entity.GetFieldDeserializers()\n res[\"acceptedVersion\"] = func (n 
i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetObjectValue(CreateManagementTemplateStepVersionFromDiscriminatorValue)\n if err != nil {\n return err\n }\n if val != nil {\n m.SetAcceptedVersion(val.(ManagementTemplateStepVersionable))\n }\n return nil\n }\n res[\"category\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetEnumValue(ParseManagementCategory)\n if err != nil {\n return err\n }\n if val != nil {\n m.SetCategory(val.(*ManagementCategory))\n }\n return nil\n }\n res[\"createdByUserId\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetStringValue()\n if err != nil {\n return err\n }\n if val != nil {\n m.SetCreatedByUserId(val)\n }\n return nil\n }\n res[\"createdDateTime\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetTimeValue()\n if err != nil {\n return err\n }\n if val != nil {\n m.SetCreatedDateTime(val)\n }\n return nil\n }\n res[\"description\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetStringValue()\n if err != nil {\n return err\n }\n if val != nil {\n m.SetDescription(val)\n }\n return nil\n }\n res[\"displayName\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetStringValue()\n if err != nil {\n return err\n }\n if val != nil {\n m.SetDisplayName(val)\n }\n return nil\n }\n res[\"lastActionByUserId\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetStringValue()\n if err != nil {\n return err\n }\n if val != nil {\n m.SetLastActionByUserId(val)\n }\n return nil\n }\n res[\"lastActionDateTime\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n 
val, err := n.GetTimeValue()\n if err != nil {\n return err\n }\n if val != nil {\n m.SetLastActionDateTime(val)\n }\n return nil\n }\n res[\"managementTemplate\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetObjectValue(CreateManagementTemplateFromDiscriminatorValue)\n if err != nil {\n return err\n }\n if val != nil {\n m.SetManagementTemplate(val.(ManagementTemplateable))\n }\n return nil\n }\n res[\"portalLink\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetObjectValue(ie233ee762e29b4ba6970aa2a2efce4b7fde11697ca9ea81099d0f8269309c1be.CreateActionUrlFromDiscriminatorValue)\n if err != nil {\n return err\n }\n if val != nil {\n m.SetPortalLink(val.(ie233ee762e29b4ba6970aa2a2efce4b7fde11697ca9ea81099d0f8269309c1be.ActionUrlable))\n }\n return nil\n }\n res[\"priority\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetInt32Value()\n if err != nil {\n return err\n }\n if val != nil {\n m.SetPriority(val)\n }\n return nil\n }\n res[\"versions\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetCollectionOfObjectValues(CreateManagementTemplateStepVersionFromDiscriminatorValue)\n if err != nil {\n return err\n }\n if val != nil {\n res := make([]ManagementTemplateStepVersionable, len(val))\n for i, v := range val {\n if v != nil {\n res[i] = v.(ManagementTemplateStepVersionable)\n }\n }\n m.SetVersions(res)\n }\n return nil\n }\n return res\n}", "func expandFields(compiled *lang.CompiledExpr, define *lang.DefineExpr) lang.DefineFieldsExpr {\n\tvar fields lang.DefineFieldsExpr\n\tfor _, field := range define.Fields {\n\t\tif isEmbeddedField(field) {\n\t\t\tembedded := expandFields(compiled, compiled.LookupDefine(string(field.Type)))\n\t\t\tfields = append(fields, embedded...)\n\t\t} else {\n\t\t\tfields = 
append(fields, field)\n\t\t}\n\t}\n\treturn fields\n}", "func (m *Planner) GetFieldDeserializers()(map[string]func(i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode)(error)) {\n res := m.Entity.GetFieldDeserializers()\n res[\"buckets\"] = i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f.SetCollectionOfObjectValues(CreatePlannerBucketFromDiscriminatorValue , m.SetBuckets)\n res[\"plans\"] = i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f.SetCollectionOfObjectValues(CreatePlannerPlanFromDiscriminatorValue , m.SetPlans)\n res[\"tasks\"] = i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f.SetCollectionOfObjectValues(CreatePlannerTaskFromDiscriminatorValue , m.SetTasks)\n return res\n}", "func parse(r io.Reader) ([]field, error) {\n\tinData, err := models.Unmarshal(r)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"unmarshalling models.yml: %w\", err)\n\t}\n\n\tvar fields []field\n\tfor collectionName, collection := range inData {\n\t\tfor fieldName, modelField := range collection.Fields {\n\t\t\tf := field{}\n\t\t\tf.Name = collectionName + \"/\" + fieldName\n\t\t\tf.GoName = goName(collectionName) + \"_\" + goName(fieldName)\n\t\t\tf.GoType = goType(modelField.Type)\n\t\t\tf.Collection = firstLower(goName(collectionName))\n\t\t\tf.FQField = collectionName + \"/%d/\" + fieldName\n\t\t\tf.Required = modelField.Required\n\n\t\t\tif modelField.Type == \"relation\" || modelField.Type == \"generic-relation\" {\n\t\t\t\tf.SingleRelation = true\n\t\t\t}\n\n\t\t\tif strings.Contains(fieldName, \"$\") {\n\t\t\t\tf.TemplateAttr = \"replacement\"\n\t\t\t\tf.TemplateAttrType = \"string\"\n\t\t\t\tf.TemplateFQField = collectionName + \"/%d/\" + strings.Replace(fieldName, \"$\", \"$%s\", 1)\n\t\t\t\tf.GoType = goType(modelField.Template.Fields.Type)\n\n\t\t\t\tif modelField.Template.Replacement != \"\" {\n\t\t\t\t\tf.TemplateAttr = modelField.Template.Replacement + \"ID\"\n\t\t\t\t\tf.TemplateAttrType = 
\"int\"\n\t\t\t\t\tf.TemplateFQField = collectionName + \"/%d/\" + strings.Replace(fieldName, \"$\", \"$%d\", 1)\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tfields = append(fields, f)\n\t\t}\n\t}\n\n\t// TODO: fix models-to-go to return fields in input order.\n\tsort.Slice(fields, func(i, j int) bool {\n\t\treturn fields[i].GoName < fields[j].GoName\n\t})\n\n\treturn fields, nil\n}", "func (m *Store) GetFieldDeserializers()(map[string]func(i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode)(error)) {\n res := m.Entity.GetFieldDeserializers()\n res[\"defaultLanguageTag\"] = i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f.SetStringValue(m.SetDefaultLanguageTag)\n res[\"groups\"] = i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f.SetCollectionOfObjectValues(CreateGroupFromDiscriminatorValue , m.SetGroups)\n res[\"languageTags\"] = i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f.SetCollectionOfPrimitiveValues(\"string\" , m.SetLanguageTags)\n res[\"sets\"] = i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f.SetCollectionOfObjectValues(CreateSetFromDiscriminatorValue , m.SetSets)\n return res\n}", "func (m *DeviceManagementConfigurationPolicy) GetFieldDeserializers()(map[string]func(i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode)(error)) {\n res := m.Entity.GetFieldDeserializers()\n res[\"assignments\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetCollectionOfObjectValues(CreateDeviceManagementConfigurationPolicyAssignmentFromDiscriminatorValue)\n if err != nil {\n return err\n }\n if val != nil {\n res := make([]DeviceManagementConfigurationPolicyAssignmentable, len(val))\n for i, v := range val {\n if v != nil {\n res[i] = v.(DeviceManagementConfigurationPolicyAssignmentable)\n }\n }\n m.SetAssignments(res)\n }\n return nil\n }\n res[\"createdDateTime\"] = func (n 
i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetTimeValue()\n if err != nil {\n return err\n }\n if val != nil {\n m.SetCreatedDateTime(val)\n }\n return nil\n }\n res[\"creationSource\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetStringValue()\n if err != nil {\n return err\n }\n if val != nil {\n m.SetCreationSource(val)\n }\n return nil\n }\n res[\"description\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetStringValue()\n if err != nil {\n return err\n }\n if val != nil {\n m.SetDescription(val)\n }\n return nil\n }\n res[\"isAssigned\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetBoolValue()\n if err != nil {\n return err\n }\n if val != nil {\n m.SetIsAssigned(val)\n }\n return nil\n }\n res[\"lastModifiedDateTime\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetTimeValue()\n if err != nil {\n return err\n }\n if val != nil {\n m.SetLastModifiedDateTime(val)\n }\n return nil\n }\n res[\"name\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetStringValue()\n if err != nil {\n return err\n }\n if val != nil {\n m.SetName(val)\n }\n return nil\n }\n res[\"platforms\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetEnumValue(ParseDeviceManagementConfigurationPlatforms)\n if err != nil {\n return err\n }\n if val != nil {\n m.SetPlatforms(val.(*DeviceManagementConfigurationPlatforms))\n }\n return nil\n }\n res[\"priorityMetaData\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := 
n.GetObjectValue(CreateDeviceManagementPriorityMetaDataFromDiscriminatorValue)\n if err != nil {\n return err\n }\n if val != nil {\n m.SetPriorityMetaData(val.(DeviceManagementPriorityMetaDataable))\n }\n return nil\n }\n res[\"roleScopeTagIds\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetCollectionOfPrimitiveValues(\"string\")\n if err != nil {\n return err\n }\n if val != nil {\n res := make([]string, len(val))\n for i, v := range val {\n if v != nil {\n res[i] = *(v.(*string))\n }\n }\n m.SetRoleScopeTagIds(res)\n }\n return nil\n }\n res[\"settingCount\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetInt32Value()\n if err != nil {\n return err\n }\n if val != nil {\n m.SetSettingCount(val)\n }\n return nil\n }\n res[\"settings\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetCollectionOfObjectValues(CreateDeviceManagementConfigurationSettingFromDiscriminatorValue)\n if err != nil {\n return err\n }\n if val != nil {\n res := make([]DeviceManagementConfigurationSettingable, len(val))\n for i, v := range val {\n if v != nil {\n res[i] = v.(DeviceManagementConfigurationSettingable)\n }\n }\n m.SetSettings(res)\n }\n return nil\n }\n res[\"technologies\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetEnumValue(ParseDeviceManagementConfigurationTechnologies)\n if err != nil {\n return err\n }\n if val != nil {\n m.SetTechnologies(val.(*DeviceManagementConfigurationTechnologies))\n }\n return nil\n }\n res[\"templateReference\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetObjectValue(CreateDeviceManagementConfigurationPolicyTemplateReferenceFromDiscriminatorValue)\n if err != nil {\n return err\n }\n if val != nil {\n 
m.SetTemplateReference(val.(DeviceManagementConfigurationPolicyTemplateReferenceable))\n }\n return nil\n }\n return res\n}", "func (x *fastReflection_Output) NewField(fd protoreflect.FieldDescriptor) protoreflect.Value {\n\tswitch fd.FullName() {\n\tcase \"cosmos.bank.v1beta1.Output.address\":\n\t\treturn protoreflect.ValueOfString(\"\")\n\tcase \"cosmos.bank.v1beta1.Output.coins\":\n\t\tlist := []*v1beta1.Coin{}\n\t\treturn protoreflect.ValueOfList(&_Output_2_list{list: &list})\n\tdefault:\n\t\tif fd.IsExtension() {\n\t\t\tpanic(fmt.Errorf(\"proto3 declared messages do not support extensions: cosmos.bank.v1beta1.Output\"))\n\t\t}\n\t\tpanic(fmt.Errorf(\"message cosmos.bank.v1beta1.Output does not contain field %s\", fd.FullName()))\n\t}\n}", "func (builder *RoomBuilder) BuildField(f *FieldProxyI) {\n\t*f = builder.field\n}", "func (g *Generator) genTypeDecoder(t reflect.Type, out string, tags fieldTags, indent int) error {\n\tws := strings.Repeat(\" \", indent)\n\n\tunmarshalerIface := reflect.TypeOf((*easyjson.Unmarshaler)(nil)).Elem()\n\tif reflect.PtrTo(t).Implements(unmarshalerIface) {\n\t\tfmt.Fprintln(g.out, ws+\"(\"+out+\").UnmarshalEasyJSON(in)\")\n\t\treturn nil\n\t}\n\n\tunmarshalerIface = reflect.TypeOf((*json.Unmarshaler)(nil)).Elem()\n\tif reflect.PtrTo(t).Implements(unmarshalerIface) {\n\t\tfmt.Fprintln(g.out, ws+\"if data := in.Raw(); in.Ok() {\")\n\t\tfmt.Fprintln(g.out, ws+\" in.AddError( (\"+out+\").UnmarshalJSON(data) )\")\n\t\tfmt.Fprintln(g.out, ws+\"}\")\n\t\treturn nil\n\t}\n\n\tunmarshalerIface = reflect.TypeOf((*encoding.TextUnmarshaler)(nil)).Elem()\n\tif reflect.PtrTo(t).Implements(unmarshalerIface) {\n\t\tfmt.Fprintln(g.out, ws+\"if data := in.UnsafeBytes(); in.Ok() {\")\n\t\tfmt.Fprintln(g.out, ws+\" in.AddError( (\"+out+\").UnmarshalText(data) )\")\n\t\tfmt.Fprintln(g.out, ws+\"}\")\n\t\treturn nil\n\t}\n\n\terr := g.genTypeDecoderNoCheck(t, out, tags, indent)\n\treturn err\n}", "func (x *fastReflection_Metadata) NewField(fd 
protoreflect.FieldDescriptor) protoreflect.Value {\n\tswitch fd.FullName() {\n\tcase \"cosmos.bank.v1beta1.Metadata.description\":\n\t\treturn protoreflect.ValueOfString(\"\")\n\tcase \"cosmos.bank.v1beta1.Metadata.denom_units\":\n\t\tlist := []*DenomUnit{}\n\t\treturn protoreflect.ValueOfList(&_Metadata_2_list{list: &list})\n\tcase \"cosmos.bank.v1beta1.Metadata.base\":\n\t\treturn protoreflect.ValueOfString(\"\")\n\tcase \"cosmos.bank.v1beta1.Metadata.display\":\n\t\treturn protoreflect.ValueOfString(\"\")\n\tcase \"cosmos.bank.v1beta1.Metadata.name\":\n\t\treturn protoreflect.ValueOfString(\"\")\n\tcase \"cosmos.bank.v1beta1.Metadata.symbol\":\n\t\treturn protoreflect.ValueOfString(\"\")\n\tcase \"cosmos.bank.v1beta1.Metadata.uri\":\n\t\treturn protoreflect.ValueOfString(\"\")\n\tcase \"cosmos.bank.v1beta1.Metadata.uri_hash\":\n\t\treturn protoreflect.ValueOfString(\"\")\n\tdefault:\n\t\tif fd.IsExtension() {\n\t\t\tpanic(fmt.Errorf(\"proto3 declared messages do not support extensions: cosmos.bank.v1beta1.Metadata\"))\n\t\t}\n\t\tpanic(fmt.Errorf(\"message cosmos.bank.v1beta1.Metadata does not contain field %s\", fd.FullName()))\n\t}\n}", "func (b *PlanBuilder) buildProjectionFieldNameFromExpressions(ctx context.Context, field *ast.SelectField) (parser_model.CIStr, error) {\n\tinnerExpr := getInnerFromParenthesesAndUnaryPlus(field.Expr)\n\tvalueExpr, isValueExpr := innerExpr.(*driver.ValueExpr)\n\n\t// Non-literal: Output as inputed, except that comments need to be removed.\n\tif !isValueExpr {\n\t\treturn parser_model.NewCIStr(parser.SpecFieldPattern.ReplaceAllStringFunc(field.Text(), parser.TrimComment)), nil\n\t}\n\n\t// Literal: Need special processing\n\tswitch valueExpr.Kind() {\n\tcase types.KindString:\n\t\tprojName := valueExpr.GetString()\n\t\tprojOffset := valueExpr.GetProjectionOffset()\n\t\tif projOffset >= 0 {\n\t\t\tprojName = projName[:projOffset]\n\t\t}\n\t\t// See #3686, #3994:\n\t\t// For string literals, string content is used as column name. 
Non-graph initial characters are trimmed.\n\t\tfieldName := strings.TrimLeftFunc(projName, func(r rune) bool {\n\t\t\treturn !unicode.IsOneOf(mysql.RangeGraph, r)\n\t\t})\n\t\treturn parser_model.NewCIStr(fieldName), nil\n\tcase types.KindNull:\n\t\t// See #4053, #3685\n\t\treturn parser_model.NewCIStr(\"NULL\"), nil\n\tcase types.KindBinaryLiteral:\n\t\t// Don't rewrite BIT literal or HEX literals\n\t\treturn parser_model.NewCIStr(field.Text()), nil\n\tcase types.KindInt64:\n\t\t// See #9683\n\t\t// TRUE or FALSE can be a int64\n\t\tif mysql.HasIsBooleanFlag(valueExpr.Type.Flag) {\n\t\t\tif i := valueExpr.GetValue().(int64); i == 0 {\n\t\t\t\treturn parser_model.NewCIStr(\"FALSE\"), nil\n\t\t\t}\n\t\t\treturn parser_model.NewCIStr(\"TRUE\"), nil\n\t\t}\n\t\tfallthrough\n\n\tdefault:\n\t\tfieldName := field.Text()\n\t\tfieldName = strings.TrimLeft(fieldName, \"\\t\\n +(\")\n\t\tfieldName = strings.TrimRight(fieldName, \"\\t\\n )\")\n\t\treturn parser_model.NewCIStr(fieldName), nil\n\t}\n}", "func mergeFieldDef(target, source *ast.FieldDefinition) {\n\tif target.Description == \"\" {\n\t\ttarget.Description = source.Description\n\t}\n\tif target.Name == \"\" {\n\t\ttarget.Name = source.Name\n\t}\n\tif target.ArgumentsDefinition == nil {\n\t\ttarget.ArgumentsDefinition = source.ArgumentsDefinition\n\t}\n\tif target.Type == nil {\n\t\ttarget.Type = source.Type\n\t}\n\tif target.Directives == nil {\n\t\ttarget.Directives = source.Directives\n\t}\n}", "func (m *AccessPackageCatalog) GetFieldDeserializers()(map[string]func(i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode)(error)) {\n res := m.Entity.GetFieldDeserializers()\n res[\"accessPackages\"] = i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f.SetCollectionOfObjectValues(CreateAccessPackageFromDiscriminatorValue , m.SetAccessPackages)\n res[\"catalogType\"] = i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f.SetEnumValue(ParseAccessPackageCatalogType , 
m.SetCatalogType)\n res[\"createdDateTime\"] = i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f.SetTimeValue(m.SetCreatedDateTime)\n res[\"description\"] = i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f.SetStringValue(m.SetDescription)\n res[\"displayName\"] = i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f.SetStringValue(m.SetDisplayName)\n res[\"isExternallyVisible\"] = i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f.SetBoolValue(m.SetIsExternallyVisible)\n res[\"modifiedDateTime\"] = i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f.SetTimeValue(m.SetModifiedDateTime)\n res[\"state\"] = i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f.SetEnumValue(ParseAccessPackageCatalogState , m.SetState)\n return res\n}", "func (m *PaymentTerm) GetFieldDeserializers()(map[string]func(i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode)(error)) {\n res := make(map[string]func(i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode)(error))\n res[\"calculateDiscountOnCreditMemos\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetBoolValue()\n if err != nil {\n return err\n }\n if val != nil {\n m.SetCalculateDiscountOnCreditMemos(val)\n }\n return nil\n }\n res[\"code\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetStringValue()\n if err != nil {\n return err\n }\n if val != nil {\n m.SetCode(val)\n }\n return nil\n }\n res[\"discountDateCalculation\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetStringValue()\n if err != nil {\n return err\n }\n if val != nil {\n m.SetDiscountDateCalculation(val)\n }\n return nil\n }\n res[\"discountPercent\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) 
error {\n val, err := n.GetFloat64Value()\n if err != nil {\n return err\n }\n if val != nil {\n m.SetDiscountPercent(val)\n }\n return nil\n }\n res[\"displayName\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetStringValue()\n if err != nil {\n return err\n }\n if val != nil {\n m.SetDisplayName(val)\n }\n return nil\n }\n res[\"dueDateCalculation\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetStringValue()\n if err != nil {\n return err\n }\n if val != nil {\n m.SetDueDateCalculation(val)\n }\n return nil\n }\n res[\"id\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetUUIDValue()\n if err != nil {\n return err\n }\n if val != nil {\n m.SetId(val)\n }\n return nil\n }\n res[\"lastModifiedDateTime\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetTimeValue()\n if err != nil {\n return err\n }\n if val != nil {\n m.SetLastModifiedDateTime(val)\n }\n return nil\n }\n res[\"@odata.type\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetStringValue()\n if err != nil {\n return err\n }\n if val != nil {\n m.SetOdataType(val)\n }\n return nil\n }\n return res\n}" ]
[ "0.6892996", "0.59916204", "0.57207865", "0.57188165", "0.56753683", "0.5636733", "0.5596574", "0.55853677", "0.5520538", "0.5467922", "0.5415466", "0.53430325", "0.53384626", "0.5321866", "0.52896065", "0.5278876", "0.5278473", "0.521161", "0.5197192", "0.5160622", "0.51567143", "0.51550436", "0.5147077", "0.51422423", "0.5134995", "0.5125008", "0.51133937", "0.5062007", "0.50596523", "0.50420433", "0.5037253", "0.5037253", "0.5037253", "0.5037191", "0.5032363", "0.5026841", "0.5018489", "0.49712357", "0.49637362", "0.49629217", "0.49525085", "0.494682", "0.49234253", "0.49165308", "0.48968995", "0.48819926", "0.48531297", "0.48460788", "0.4845097", "0.4843897", "0.48201433", "0.481769", "0.4807314", "0.47908702", "0.47879153", "0.47768614", "0.47643158", "0.47562444", "0.47546977", "0.4753293", "0.4748595", "0.47467503", "0.47423792", "0.47404823", "0.47370917", "0.4733257", "0.4729395", "0.47209784", "0.47155458", "0.4714776", "0.4691385", "0.46873227", "0.46867412", "0.4686594", "0.46796343", "0.4675671", "0.4673864", "0.46587044", "0.46586603", "0.46584782", "0.46528453", "0.46463683", "0.46432978", "0.4639629", "0.463718", "0.46325478", "0.46312508", "0.46269098", "0.46251613", "0.46191674", "0.46175572", "0.4611585", "0.46029124", "0.4602239", "0.45985088", "0.45904619", "0.4582061", "0.45726395", "0.45703688", "0.4566938" ]
0.757106
0
genArguments generates argument field config for given AST
genArguments генерирует конфигурацию поля аргументов для заданного AST
func genArguments(args []*ast.InputValueDefinition) *jen.Statement { // // Generate config for arguments // // == Example input SDL // // type Dog { // name( // "style is stylish" // style: NameComponentsStyle = SHORT, // ): String! // } // // == Example output // // FieldConfigArgument{ // "style": &ArgumentConfig{ ... } // }, // return jen.Qual(defsPkg, "FieldConfigArgument").Values( jen.DictFunc(func(d jen.Dict) { for _, arg := range args { d[jen.Lit(arg.Name.Value)] = genArgument(arg) } }), ) }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func genArgument(arg *ast.InputValueDefinition) *jen.Statement {\n\t//\n\t// Generate config for argument\n\t//\n\t// == Example input SDL\n\t//\n\t// type Dog {\n\t// name(\n\t// \"style is stylish\"\n\t// style: NameComponentsStyle = SHORT,\n\t// ): String!\n\t// }\n\t//\n\t// == Example output\n\t//\n\t// &ArgumentConfig{\n\t// Type: graphql.NonNull(graphql.String),\n\t// DefaultValue: \"SHORT\", // TODO: ???\n\t// Description: \"style is stylish\",\n\t// }\n\t//\n\treturn jen.Op(\"&\").Qual(defsPkg, \"ArgumentConfig\").Values(jen.Dict{\n\t\tjen.Id(\"DefaultValue\"): genValue(arg.DefaultValue),\n\t\tjen.Id(\"Description\"): genDescription(arg),\n\t\tjen.Id(\"Type\"): genInputTypeReference(arg.Type),\n\t})\n}", "func BindArg(obj interface{}, tags ...string) FieldConfigArgument {\n\tv := reflect.Indirect(reflect.ValueOf(obj))\n\tvar config = make(FieldConfigArgument)\n\tfor i := 0; i < v.NumField(); i++ {\n\t\tfield := v.Type().Field(i)\n\n\t\tmytag := extractTag(field.Tag)\n\t\tif inArray(tags, mytag) {\n\t\t\tconfig[mytag] = &ArgumentConfig{\n\t\t\t\tType: getGraphType(field.Type),\n\t\t\t}\n\t\t}\n\t}\n\treturn config\n}", "func (*Base) Arguments(p ASTPass, l *ast.Fodder, args *ast.Arguments, r *ast.Fodder, ctx Context) {\n\tp.Fodder(p, l, ctx)\n\tfor i := range args.Positional {\n\t\targ := &args.Positional[i]\n\t\tp.Visit(p, &arg.Expr, ctx)\n\t\tp.Fodder(p, &arg.CommaFodder, ctx)\n\t}\n\tfor i := range args.Named {\n\t\targ := &args.Named[i]\n\t\tp.Fodder(p, &arg.NameFodder, ctx)\n\t\tp.Fodder(p, &arg.EqFodder, ctx)\n\t\tp.Visit(p, &arg.Arg, ctx)\n\t\tp.Fodder(p, &arg.CommaFodder, ctx)\n\t}\n\tp.Fodder(p, r, ctx)\n}", "func genArgs(optionMap map[string]string) []string {\n\toptions := []string{}\n\tfor k, v := range optionMap {\n\t\tif v != \"\" {\n\t\t\tk = fmt.Sprintf(\"%s=%s\", k, v)\n\t\t}\n\t\toptions = append(options, k)\n\t}\n\treturn options\n}", "func collectArguments() Arguments {\n\tendpoint := 
config.Config.ChooseEndpoint(flags.APIEndpoint)\n\ttoken := config.Config.ChooseToken(endpoint, flags.Token)\n\tscheme := config.Config.ChooseScheme(endpoint, flags.Token)\n\treturn Arguments{\n\t\tapiEndpoint: endpoint,\n\t\ttoken: token,\n\t\tscheme: scheme,\n\t}\n}", "func genFields(fs []*ast.FieldDefinition) *jen.Statement {\n\t//\n\t// Generate config for fields\n\t//\n\t// == Example input SDL\n\t//\n\t// type Dog {\n\t// name(style: NameComponentsStyle = SHORT): String!\n\t// givenName: String @deprecated(reason: \"No longer supported; please use name field.\")\n\t// }\n\t//\n\t// == Example output\n\t//\n\t// graphql.Fields{\n\t// \"name\": graphql.Field{ ... },\n\t// \"givenName\": graphql.Field{ ... },\n\t// }\n\t//\n\treturn jen.Qual(defsPkg, \"Fields\").Values(jen.DictFunc(func(d jen.Dict) {\n\t\tfor _, f := range fs {\n\t\t\td[jen.Lit(f.Name.Value)] = genField(f)\n\t\t}\n\t}))\n}", "func collectArguments() Arguments {\n\tendpoint := config.Config.ChooseEndpoint(flags.APIEndpoint)\n\ttoken := config.Config.ChooseToken(endpoint, flags.Token)\n\tscheme := config.Config.ChooseScheme(endpoint, flags.Token)\n\n\treturn Arguments{\n\t\tapiEndpoint: endpoint,\n\t\tauthToken: token,\n\t\tscheme: scheme,\n\t\tclusterNameOrID: \"\",\n\t\tuserProvidedToken: flags.Token,\n\t\tverbose: flags.Verbose,\n\t}\n}", "func (p *Planner) configureFieldArgumentSource(upstreamFieldRef, downstreamFieldRef int, argumentName string, sourcePath []string) {\n\tfieldArgument, ok := p.visitor.Operation.FieldArgument(downstreamFieldRef, []byte(argumentName))\n\tif !ok {\n\t\treturn\n\t}\n\tvalue := p.visitor.Operation.ArgumentValue(fieldArgument)\n\tif value.Kind != ast.ValueKindVariable {\n\t\tp.applyInlineFieldArgument(upstreamFieldRef, downstreamFieldRef, argumentName, sourcePath)\n\t\treturn\n\t}\n\tvariableName := p.visitor.Operation.VariableValueNameBytes(value.Ref)\n\tvariableNameStr := p.visitor.Operation.VariableValueNameString(value.Ref)\n\n\tcontextVariable := 
&resolve.ContextVariable{\n\t\tPath: []string{variableNameStr},\n\t\tRenderAsGraphQLValue: true,\n\t}\n\tcontextVariable.SetJsonValueType(p.visitor.Definition, p.visitor.Definition, p.argTypeRef)\n\n\tcontextVariableName, exists := p.variables.AddVariable(contextVariable)\n\tvariableValueRef, argRef := p.upstreamOperation.AddVariableValueArgument([]byte(argumentName), variableName) // add the argument to the field, but don't redefine it\n\tp.upstreamOperation.AddArgumentToField(upstreamFieldRef, argRef)\n\n\tif exists { // if the variable exists we don't have to put it onto the variables declaration again, skip\n\t\treturn\n\t}\n\n\tfor _, i := range p.visitor.Operation.OperationDefinitions[p.visitor.Walker.Ancestors[0].Ref].VariableDefinitions.Refs {\n\t\tref := p.visitor.Operation.VariableDefinitions[i].VariableValue.Ref\n\t\tif !p.visitor.Operation.VariableValueNameBytes(ref).Equals(variableName) {\n\t\t\tcontinue\n\t\t}\n\t\timportedType := p.visitor.Importer.ImportType(p.visitor.Operation.VariableDefinitions[i].Type, p.visitor.Operation, p.upstreamOperation)\n\t\tp.upstreamOperation.AddVariableDefinitionToOperationDefinition(p.nodes[0].Ref, variableValueRef, importedType)\n\t}\n\n\tp.upstreamVariables, _ = sjson.SetRawBytes(p.upstreamVariables, variableNameStr, []byte(contextVariableName))\n}", "func (p *Parser) buildArg(argDef Value, argType reflect.Type, index int, args *[]reflect.Value) error {\n\tswitch argType.Name() {\n\tcase \"Setter\":\n\t\tfallthrough\n\tcase \"GetSetter\":\n\t\targ, err := p.pathParser(argDef.Path)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"invalid argument at position %v %w\", index, err)\n\t\t}\n\t\t*args = append(*args, reflect.ValueOf(arg))\n\tcase \"Getter\":\n\t\targ, err := p.newGetter(argDef)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"invalid argument at position %v %w\", index, err)\n\t\t}\n\t\t*args = append(*args, reflect.ValueOf(arg))\n\tcase \"Enum\":\n\t\targ, err := p.enumParser(argDef.Enum)\n\t\tif err != 
nil {\n\t\t\treturn fmt.Errorf(\"invalid argument at position %v must be an Enum\", index)\n\t\t}\n\t\t*args = append(*args, reflect.ValueOf(*arg))\n\tcase \"string\":\n\t\tif argDef.String == nil {\n\t\t\treturn fmt.Errorf(\"invalid argument at position %v, must be an string\", index)\n\t\t}\n\t\t*args = append(*args, reflect.ValueOf(*argDef.String))\n\tcase \"float64\":\n\t\tif argDef.Float == nil {\n\t\t\treturn fmt.Errorf(\"invalid argument at position %v, must be an float\", index)\n\t\t}\n\t\t*args = append(*args, reflect.ValueOf(*argDef.Float))\n\tcase \"int64\":\n\t\tif argDef.Int == nil {\n\t\t\treturn fmt.Errorf(\"invalid argument at position %v, must be an int\", index)\n\t\t}\n\t\t*args = append(*args, reflect.ValueOf(*argDef.Int))\n\tcase \"bool\":\n\t\tif argDef.Bool == nil {\n\t\t\treturn fmt.Errorf(\"invalid argument at position %v, must be a bool\", index)\n\t\t}\n\t\t*args = append(*args, reflect.ValueOf(bool(*argDef.Bool)))\n\t}\n\treturn nil\n}", "func genField(field *ast.FieldDefinition) *jen.Statement {\n\t//\n\t// Generate config for field\n\t//\n\t// == Example input SDL\n\t//\n\t// interface Pet {\n\t// \"name of the pet\"\n\t// name(style: NameComponentsStyle = SHORT): String!\n\t// \"\"\"\n\t// givenName of the pet ★\n\t// \"\"\"\n\t// givenName: String @deprecated(reason: \"No longer supported; please use name field.\")\n\t// }\n\t//\n\t// == Example output\n\t//\n\t// &graphql.Field{\n\t// Name: \"name\",\n\t// Type: graphql.NonNull(graphql.String),\n\t// Description: \"name of the pet\",\n\t// DeprecationReason: \"\",\n\t// Args: FieldConfigArgument{ ... },\n\t// }\n\t//\n\t// &graphql.Field{\n\t// Name: \"givenName\",\n\t// Type: graphql.String,\n\t// Description: \"givenName of the pet\",\n\t// DeprecationReason: \"No longer supported; please use name field.\",\n\t// Args: FieldConfigArgument{ ... 
},\n\t// }\n\t//\n\treturn jen.Op(\"&\").Qual(defsPkg, \"Field\").Values(jen.Dict{\n\t\tjen.Id(\"Args\"): genArguments(field.Arguments),\n\t\tjen.Id(\"DeprecationReason\"): genDeprecationReason(field.Directives),\n\t\tjen.Id(\"Description\"): genDescription(field),\n\t\tjen.Id(\"Name\"): jen.Lit(field.Name.Value),\n\t\tjen.Id(\"Type\"): genOutputTypeReference(field.Type),\n\t})\n}", "func structargs(tl *types.Type, mustname bool) []*Node {\n\tvar args []*Node\n\tgen := 0\n\tfor _, t := range tl.Fields().Slice() {\n\t\ts := t.Sym\n\t\tif mustname && (s == nil || s.Name == \"_\") {\n\t\t\t// invent a name so that we can refer to it in the trampoline\n\t\t\ts = lookupN(\".anon\", gen)\n\t\t\tgen++\n\t\t}\n\t\ta := symfield(s, t.Type)\n\t\ta.Pos = t.Pos\n\t\ta.SetIsDDD(t.IsDDD())\n\t\targs = append(args, a)\n\t}\n\n\treturn args\n}", "func (ec *executionContext) field_Mutation_createAgent_args(ctx context.Context, rawArgs map[string]interface{}) (map[string]interface{}, error) {\n\tvar err error\n\targs := map[string]interface{}{}\n\tvar arg0 models.CreateAgentInput\n\tif tmp, ok := rawArgs[\"input\"]; ok {\n\t\tctx := graphql.WithPathContext(ctx, graphql.NewPathWithField(\"input\"))\n\t\targ0, err = ec.unmarshalNCreateAgentInput2golangᚑmongoᚑgraphqlᚑ003ᚋinternalᚋmodelsᚐCreateAgentInput(ctx, tmp)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\targs[\"input\"] = arg0\n\treturn args, nil\n}", "func (c *compileContext) makeArgumentResolver(typ schema.InputableType) (argumentResolver, error) {\n\tswitch t := typ.(type) {\n\tcase *schema.InputObjectType:\n\t\treturn func(ctx context.Context, v schema.LiteralValue) (interface{}, error) {\n\t\t\treturn t.Decode(ctx, v)\n\t\t}, nil\n\tcase *schema.ListType:\n\t\telementResolver, err := c.makeArgumentResolver(t.Unwrap().(schema.InputableType))\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn func(ctx context.Context, v schema.LiteralValue) (interface{}, error) {\n\t\t\tif v == nil {\n\t\t\t\treturn nil, 
nil\n\t\t\t}\n\n\t\t\tlistCreator := t.Unwrap().(schema.InputableType).InputListCreator()\n\n\t\t\tif av, ok := v.(schema.LiteralArray); ok {\n\t\t\t\treturn listCreator.NewList(len(av), func(i int) (interface{}, error) {\n\t\t\t\t\treturn elementResolver(ctx, av[i])\n\t\t\t\t})\n\t\t\t}\n\n\t\t\t// if we get a non-list value we have to wrap into a single element\n\t\t\t// list.\n\t\t\t// See https://facebook.github.io/graphql/June2018/#sec-Type-System.List\n\t\t\tresultElement, err := elementResolver(ctx, v)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\treturn listCreator.NewList(1, func(i int) (interface{}, error) {\n\t\t\t\treturn resultElement, nil\n\t\t\t})\n\t\t}, nil\n\n\tcase *schema.NotNilType:\n\t\telementResolver, err := c.makeArgumentResolver(t.Unwrap().(schema.InputableType))\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn func(ctx context.Context, v schema.LiteralValue) (interface{}, error) {\n\t\t\tif v == nil {\n\t\t\t\treturn nil, fmt.Errorf(\"Required value was not supplied\")\n\t\t\t}\n\t\t\treturn elementResolver(ctx, v)\n\t\t}, nil\n\tcase *schema.ScalarType:\n\t\treturn func(ctx context.Context, v schema.LiteralValue) (interface{}, error) {\n\t\t\treturn t.Decode(ctx, v)\n\t\t}, nil\n\tcase *schema.EnumType:\n\t\treturn func(ctx context.Context, v schema.LiteralValue) (interface{}, error) {\n\t\t\tif v == nil {\n\t\t\t\treturn t.Decode(ctx, v)\n\t\t\t}\n\t\t\tval, ok := v.(schema.LiteralString)\n\t\t\tif !ok {\n\t\t\t\treturn nil, fmt.Errorf(\"Expected string, got %v\", v)\n\t\t\t}\n\t\t\treturn t.Decode(ctx, val)\n\t\t}, nil\n\tdefault:\n\t\treturn nil, fmt.Errorf(\"Invalid type for input argument: %v\", typ)\n\t}\n}", "func (params PostParams) Generate(args []string, argConfigs []Arg) PostParams {\n\tvar md5hash string\n\tfor index, arg := range args {\n\t\tDebugf(\"Index and args %d %s %v\", index, arg, argConfigs)\n\n\t\tDebugf(\"PostParams Setting %s to %s\", strings.Title(argConfigs[index].Name), 
arg)\n\t\tif argConfigs[index].Type == \"object\" {\n\t\t\tDebugln(\"Using object parser\")\n\t\t\tvar jsonArg map[string]interface{}\n\t\t\terr := json.Unmarshal([]byte(arg), &jsonArg)\n\t\t\tif err != nil {\n\t\t\t\tpanic(fmt.Sprintf(\"Error parsing json from %s - %s\", argConfigs[index].Name, err.Error()))\n\t\t\t}\n\t\t\treflect.ValueOf(&params).Elem().FieldByName(getFieldByArgumentName(argConfigs[index].Name)).Set(reflect.ValueOf(jsonArg))\n\t\t} else if argConfigs[index].Type == \"array\" {\n\t\t\tDebugln(\"Using array parser\")\n\t\t\tvar jsonArray []interface{}\n\t\t\terr := json.Unmarshal([]byte(arg), &jsonArray)\n\t\t\tif err != nil {\n\t\t\t\tpanic(fmt.Sprintf(\"Error parsing json from %s - %s\", argConfigs[index].Name, err.Error()))\n\t\t\t}\n\t\t\treflect.ValueOf(&params).Elem().FieldByName(getFieldByArgumentName(argConfigs[index].Name)).Set(reflect.ValueOf(jsonArray))\n\t\t} else if argConfigs[index].Type == \"bool\" {\n\t\t\tDebugf(\"Using bool parser for (%s) = (%s)\", argConfigs[index].Name, arg)\n\t\t\tif arg == \"\" {\n\t\t\t\tDebugf(\"Missing arg value (%s) using default (%s)\", argConfigs[index].Name, argConfigs[index].Value)\n\t\t\t\targ = argConfigs[index].Value\n\t\t\t}\n\t\t\tboolArg, _ := strconv.ParseBool(arg)\n\t\t\treflect.ValueOf(&params).Elem().FieldByName(getFieldByArgumentName(argConfigs[index].Name)).SetBool(boolArg)\n\t\t} else {\n\t\t\tif argConfigs[index].Type == \"url\" {\n\t\t\t\tDebugf(\"Handling url %s\", arg)\n\t\t\t\ta, err := ComputeMd5(arg)\n\t\t\t\tmd5hash = a\n\t\t\t\tif err != nil {\n\t\t\t\t\tfmt.Printf(\"Failed to generate MD5 from url %s. Make sure the file exists and permissions are correct. 
(%s)\", arg, err)\n\t\t\t\t\tExit(1)\n\t\t\t\t}\n\t\t\t\targ = ConvertFileToURL(arg)\n\t\t\t}\n\t\t\tDebugf(\"Using string parser for (%s) = (%s)\", argConfigs[index].Name, arg)\n\t\t\tif arg == \"\" {\n\t\t\t\tDebugf(\"Missing arg value (%s) using default (%s)\", argConfigs[index].Name, argConfigs[index].Value)\n\t\t\t\targ = argConfigs[index].Value\n\t\t\t}\n\t\t\treflect.ValueOf(&params).Elem().FieldByName(getFieldByArgumentName(argConfigs[index].Name)).SetString(arg)\n\t\t}\n\n\t\tDebugf(\"Finished %s\", arg)\n\t}\n\tif len(md5hash) > 0 {\n\t\tparams.Checksum = md5hash\n\t}\n\treturn params\n}", "func ASTArgsFromPlan(plan *plannercore.LoadData) *ASTArgs {\n\treturn &ASTArgs{\n\t\tFileLocRef: plan.FileLocRef,\n\t\tColumnsAndUserVars: plan.ColumnsAndUserVars,\n\t\tColumnAssignments: plan.ColumnAssignments,\n\t\tOnDuplicate: plan.OnDuplicate,\n\t\tFieldsInfo: plan.FieldsInfo,\n\t\tLinesInfo: plan.LinesInfo,\n\t}\n}", "func (g GoStruct) ArglistFunc() string {\n\tvar builder strings.Builder\n\tfor _, f := range g.Fields {\n\t\tif !f.Type.IsList {\n\t\t\tbuilder.WriteString(fmt.Sprintf(\"args = append(args, %s)\\n\", \"r.\"+f.Name))\n\t\t} else {\n\t\t\ttpl := `for _, v := range %s {\n\targs = append(args, v)\n}\n`\n\t\t\tbuilder.WriteString(fmt.Sprintf(tpl, \"r.\"+f.Name))\n\t\t\tbuilder.WriteString(fmt.Sprintf(\"inlens = append(inlens, len(%s))\\n\", \"r.\"+f.Name))\n\t\t}\n\t}\n\treturn fmt.Sprintf(\n\t\t\"func (r *%s) arglist() (args []interface{}, inlens []int) {\\n %s return\\n}\\n\",\n\t\tg.Name, builder.String())\n}", "func (ec *executionContext) field_Mutation_createAdmin_args(ctx context.Context, rawArgs map[string]interface{}) (map[string]interface{}, error) {\n\tvar err error\n\targs := map[string]interface{}{}\n\tvar arg0 map[string]interface{}\n\tif tmp, ok := rawArgs[\"input\"]; ok {\n\t\targ0, err = ec.unmarshalNAdminCreateInput2map(ctx, tmp)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\targs[\"input\"] = arg0\n\treturn args, nil\n}", 
"func (n *CommandNode) Args() []Expr { return n.args }", "func buildArg(mt *methodType, d json.RawMessage) (reflect.Value, error) {\n\tvar argv reflect.Value\n\targIsValue := false // if true, need to indirect before calling.\n\tif mt.ArgType.Kind() == reflect.Ptr {\n\t\targv = reflect.New(mt.ArgType.Elem())\n\t} else {\n\t\targv = reflect.New(mt.ArgType)\n\t\targIsValue = true\n\t}\n\terr := json.Unmarshal(d, argv.Interface())\n\tif err != nil {\n\t\treturn reflect.Value{}, err\n\t}\n\tif argIsValue {\n\t\targv = argv.Elem()\n\t}\n\treturn argv, nil\n}", "func Marshal(data *parser.Result, document *string) (err error) {\n\n\targuments := \"\"\n\ttmp := []string{}\n\n\tfor _, node := range data.AST.Children {\n\n\t\tinstruction := strings.ToUpper(node.Value)\n\t\ttab := strings.Repeat(\" \", len(node.Value)+1)\n\n\t\tswitch instruction {\n\t\tcase \"FROM\":\n\t\t\targuments = DefaultForm(node)\n\t\tcase \"LABEL\":\n\t\t\targuments = KeyValueForm(node, tab)\n\t\tcase \"MAINTAINER\":\n\t\t\targuments = DefaultForm(node)\n\t\tcase \"EXPOSE\":\n\t\t\targuments = DefaultForm(node)\n\t\tcase \"ADD\":\n\t\t\targuments = DefaultForm(node)\n\t\tcase \"ONBUILD\":\n\t\t\tfor _, n := range node.Next.Children {\n\t\t\t\targuments = strings.ToUpper(n.Value) + \" \" + DefaultForm(n)\n\t\t\t}\n\t\tcase \"STOPSIGNAL\":\n\t\t\targuments = DefaultForm(node)\n\t\tcase \"HEALTHCHECK\":\n\t\t\targuments = DefaultForm(node)\n\t\tcase \"ARG\":\n\t\t\targuments = KeyValueForm(node, tab)\n\t\tcase \"COPY\":\n\t\t\targuments = DefaultForm(node)\n\t\tcase \"ENV\":\n\t\t\targuments = KeyValueForm(node, tab)\n\t\tcase \"RUN\":\n\t\t\targuments = ShellForm(node)\n\t\t\t//arguments = ExecForm(node)\n\t\tcase \"CMD\":\n\t\t\targuments = ExecForm(node)\n\t\t\t//arguments = ShellForm(node)\n\t\tcase \"ENTRYPOINT\":\n\t\t\targuments = ExecForm(node)\n\t\t\t//arguments = ShellForm(node)\n\t\tcase \"SHELL\":\n\t\t\targuments = ExecForm(node)\n\t\t\t//arguments = ShellForm(node)\n\t\tcase 
\"VOLUME\":\n\t\t\t//arguments = ExecForm(node)\n\t\t\targuments = DefaultForm(node)\n\t\tcase \"USER\":\n\t\t\targuments = DefaultForm(node)\n\n\t\tcase \"WORKDIR\":\n\t\t\targuments = DefaultForm(node)\n\n\t\tdefault:\n\t\t\treturn fmt.Errorf(\"Instruction %s not supported\", instruction)\n\t\t}\n\n\t\tif len(arguments) > 0 {\n\t\t\ttmp = append(tmp, fmt.Sprintf(\"%s %s\", instruction, arguments))\n\t\t} else {\n\t\t\ttmp = append(tmp, instruction)\n\t\t}\n\n\t}\n\n\t*document = strings.Join(tmp, \"\\n\")\n\n\treturn err\n}", "func (params GetParams) Generate(args []string, argConfigs []Arg) GetParams {\n\tfor index, arg := range args {\n\t\tif argConfigs[index].Type != \"object\" && argConfigs[index].Type != \"array\" {\n\t\t\treflect.ValueOf(&params).Elem().FieldByName(getFieldByArgumentName(argConfigs[index].Name)).SetString(arg)\n\t\t} else if argConfigs[index].Type == \"bool\" {\n\t\t\tboolArg, _ := strconv.ParseBool(arg)\n\t\t\treflect.ValueOf(&params).Elem().FieldByName(getFieldByArgumentName(argConfigs[index].Name)).SetBool(boolArg)\n\t\t}\n\t}\n\treturn params\n}", "func (g *Generator) generate(typeName string) {\n\tfields := make([]Field, 0, 100)\n\tfor _, file := range g.pkg.files {\n\t\t// Set the state for this run of the walker.\n\t\tfile.typeName = typeName\n\t\tfile.fields = nil\n\t\tif file.file != nil {\n\t\t\tast.Inspect(file.file, file.genDecl)\n\t\t\tg.additionalImports = append(g.additionalImports, file.additionalImports...)\n\t\t\tfields = append(fields, file.fields...)\n\t\t}\n\t}\n\n\tif len(fields) == 0 {\n\t\tlog.Fatalf(\"no values defined for type %s\", typeName)\n\t}\n\n\tg.build(fields, typeName)\n}", "func getFieldList(p *program.Program, f *ast.FunctionDecl, fieldTypes []string) (\n\t_ *goast.FieldList, err error) {\n\tdefer func() {\n\t\tif err != nil {\n\t\t\terr = fmt.Errorf(\"error in function field list. 
err = %v\", err)\n\t\t}\n\t}()\n\tr := []*goast.Field{}\n\tfor i := range fieldTypes {\n\t\tif len(f.Children()) <= i {\n\t\t\terr = fmt.Errorf(\"not correct type/children: %d, %d\",\n\t\t\t\tlen(f.Children()), len(fieldTypes))\n\t\t\treturn\n\t\t}\n\t\tn := f.Children()[i]\n\t\tif v, ok := n.(*ast.ParmVarDecl); ok {\n\t\t\tt, err := types.ResolveType(p, fieldTypes[i])\n\t\t\tif err != nil {\n\t\t\t\terr = fmt.Errorf(\"FieldList type: %s. %v\", fieldTypes[i], err)\n\t\t\t\tp.AddMessage(p.GenerateWarningMessage(err, f))\n\t\t\t\terr = nil // ignore error\n\t\t\t\tt = \"C4GO_UNDEFINE_TYPE\"\n\t\t\t}\n\n\t\t\tif t == \"\" {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tr = append(r, &goast.Field{\n\t\t\t\tNames: []*goast.Ident{util.NewIdent(v.Name)},\n\t\t\t\tType: goast.NewIdent(t),\n\t\t\t})\n\t\t}\n\t}\n\n\t// for function argument: ...\n\tif strings.Contains(f.Type, \"...\") {\n\t\tr = append(r, &goast.Field{\n\t\t\tNames: []*goast.Ident{util.NewIdent(\"c4goArgs\")},\n\t\t\tType: &goast.Ellipsis{\n\t\t\t\tEllipsis: 1,\n\t\t\t\tElt: &goast.InterfaceType{\n\t\t\t\t\tInterface: 1,\n\t\t\t\t\tMethods: &goast.FieldList{\n\t\t\t\t\t\tOpening: 1,\n\t\t\t\t\t},\n\t\t\t\t\tIncomplete: false,\n\t\t\t\t},\n\t\t\t},\n\t\t})\n\t}\n\n\treturn &goast.FieldList{\n\t\tList: r,\n\t}, nil\n}", "func GenerationArgsFor(category, pathToExecutable, fuzzerName string, isMaster bool) GenerationArgs {\n\tf, found := fuzzers[category]\n\tif !found {\n\t\tsklog.Errorf(\"Unknown fuzz category %q\", category)\n\t\treturn nil\n\t}\n\tmasterFlag := \"-M\"\n\tif !isMaster {\n\t\tmasterFlag = \"-S\"\n\t}\n\tseedPath := filepath.Join(config.Generator.FuzzSamples, category)\n\toutputPath := filepath.Join(config.Generator.AflOutputPath, category)\n\n\tcmd := append([]string{\"-i\", seedPath, \"-o\", outputPath, \"-m\", \"5000\", masterFlag, fuzzerName, \"--\", pathToExecutable}, f.ArgsAfterExecutable...)\n\n\treturn append(cmd, \"@@\")\n}", "func transformArgs(n ir.InitNode) {\n\tvar list []ir.Node\n\tswitch n 
:= n.(type) {\n\tdefault:\n\t\tbase.Fatalf(\"transformArgs %+v\", n.Op())\n\tcase *ir.CallExpr:\n\t\tlist = n.Args\n\t\tif n.IsDDD {\n\t\t\treturn\n\t\t}\n\tcase *ir.ReturnStmt:\n\t\tlist = n.Results\n\t}\n\tif len(list) != 1 {\n\t\treturn\n\t}\n\n\tt := list[0].Type()\n\tif t == nil || !t.IsFuncArgStruct() {\n\t\treturn\n\t}\n\n\t// Save n as n.Orig for fmt.go.\n\tif ir.Orig(n) == n {\n\t\tn.(ir.OrigNode).SetOrig(ir.SepCopy(n))\n\t}\n\n\t// Rewrite f(g()) into t1, t2, ... = g(); f(t1, t2, ...).\n\ttypecheck.RewriteMultiValueCall(n, list[0])\n}", "func (n *FnInvNode) Args() []Expr { return n.args }", "func (g *Generator) generate(typeName string) {\n\tfields := make([]Field, 0, 100)\n\tfor _, file := range g.pkg.files {\n\t\t// Set the state for this run of the walker.\n\t\tfile.typeName = typeName\n\t\tif file.file != nil {\n\t\t\tast.Inspect(file.file, file.genDecl)\n\t\t\tfields = append(fields, file.fields...)\n\t\t}\n\t}\n\tif len(fields) == 0 {\n\t\tlog.Fatalf(\"no fields defined for type %s\", typeName)\n\t}\n\t// TODO: for now we remove Default from the start (maybe move that to an option)\n\tlogicalTypeName := \"\\\"\" + strings.TrimPrefix(typeName, \"Default\") + \"\\\"\"\n\n\t// Generate code that will fail if the constants change value.\n\tg.Printf(\"func (d *%s) Serialize() ([]byte, error) {\\n\", typeName)\n\tg.Printf(\"wb := utils.NewWriteBufferByteBased(utils.WithByteOrderForByteBasedBuffer(binary.BigEndian))\\n\")\n\tg.Printf(\"\\tif err := d.SerializeWithWriteBuffer(context.Background(), wb); err != nil {\\n\")\n\tg.Printf(\"\\t\\treturn nil, err\\n\")\n\tg.Printf(\"\\t}\\n\")\n\tg.Printf(\"\\treturn wb.GetBytes(), nil\\n\")\n\tg.Printf(\"}\\n\\n\")\n\tg.Printf(\"func (d *%s) SerializeWithWriteBuffer(ctx context.Context, writeBuffer utils.WriteBuffer) error {\\n\", typeName)\n\tg.Printf(\"\\tif err := writeBuffer.PushContext(%s); err != nil {\\n\", logicalTypeName)\n\tg.Printf(\"\\t\\treturn err\\n\")\n\tg.Printf(\"\\t}\\n\")\n\tfor _, field := 
range fields {\n\t\tfieldType := field.fieldType\n\t\tif field.isDelegate {\n\t\t\tg.Printf(\"\\t\\t\\tif err := d.%s.SerializeWithWriteBuffer(ctx, writeBuffer); err != nil {\\n\", fieldType.(*ast.Ident).Name)\n\t\t\tg.Printf(\"\\t\\t\\t\\treturn err\\n\")\n\t\t\tg.Printf(\"\\t\\t\\t}\\n\")\n\t\t\tcontinue\n\t\t}\n\t\tfieldName := field.name\n\t\tfieldNameUntitled := \"\\\"\" + unTitle(fieldName) + \"\\\"\"\n\t\tif field.hasLocker != \"\" {\n\t\t\tg.Printf(\"if err := func()error {\\n\")\n\t\t\tg.Printf(\"\\td.\" + field.hasLocker + \".Lock()\\n\")\n\t\t\tg.Printf(\"\\tdefer d.\" + field.hasLocker + \".Unlock()\\n\")\n\t\t}\n\t\tneedsDereference := false\n\t\tif starFieldType, ok := fieldType.(*ast.StarExpr); ok {\n\t\t\tfieldType = starFieldType.X\n\t\t\tneedsDereference = true\n\t\t}\n\t\tif field.isStringer {\n\t\t\tif needsDereference {\n\t\t\t\tg.Printf(\"if d.%s != nil {\", field.name)\n\t\t\t}\n\t\t\tg.Printf(stringFieldSerialize, \"d.\"+field.name+\".String()\", fieldNameUntitled)\n\t\t\tif field.hasLocker != \"\" {\n\t\t\t\tg.Printf(\"\\treturn nil\\n\")\n\t\t\t\tg.Printf(\"}(); err != nil {\\n\")\n\t\t\t\tg.Printf(\"\\treturn err\\n\")\n\t\t\t\tg.Printf(\"}\\n\")\n\t\t\t}\n\t\t\tif needsDereference {\n\t\t\t\tg.Printf(\"}\\n\")\n\t\t\t}\n\t\t\tcontinue\n\t\t}\n\t\tswitch fieldType := fieldType.(type) {\n\t\tcase *ast.SelectorExpr:\n\t\t\t{\n\t\t\t\t// TODO: bit hacky but not sure how else we catch those ones\n\t\t\t\tx := fieldType.X\n\t\t\t\tsel := fieldType.Sel\n\t\t\t\txIdent, xIsIdent := x.(*ast.Ident)\n\t\t\t\tif xIsIdent {\n\t\t\t\t\tif xIdent.Name == \"atomic\" {\n\t\t\t\t\t\tif sel.Name == \"Uint32\" {\n\t\t\t\t\t\t\tg.Printf(uint32FieldSerialize, \"d.\"+field.name+\".Load()\", fieldNameUntitled)\n\t\t\t\t\t\t\tif field.hasLocker != \"\" {\n\t\t\t\t\t\t\t\tg.Printf(\"\\treturn nil\\n\")\n\t\t\t\t\t\t\t\tg.Printf(\"}(); err != nil {\\n\")\n\t\t\t\t\t\t\t\tg.Printf(\"\\treturn 
err\\n\")\n\t\t\t\t\t\t\t\tg.Printf(\"}\\n\")\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\tcontinue\n\t\t\t\t\t\t}\n\t\t\t\t\t\tif sel.Name == \"Uint64\" {\n\t\t\t\t\t\t\tg.Printf(uint64FieldSerialize, \"d.\"+field.name+\".Load()\", fieldNameUntitled)\n\t\t\t\t\t\t\tif field.hasLocker != \"\" {\n\t\t\t\t\t\t\t\tg.Printf(\"\\treturn nil\\n\")\n\t\t\t\t\t\t\t\tg.Printf(\"}(); err != nil {\\n\")\n\t\t\t\t\t\t\t\tg.Printf(\"\\treturn err\\n\")\n\t\t\t\t\t\t\t\tg.Printf(\"}\\n\")\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\tcontinue\n\t\t\t\t\t\t}\n\t\t\t\t\t\tif sel.Name == \"Int32\" {\n\t\t\t\t\t\t\tg.Printf(int32FieldSerialize, \"d.\"+field.name+\".Load()\", fieldNameUntitled)\n\t\t\t\t\t\t\tif field.hasLocker != \"\" {\n\t\t\t\t\t\t\t\tg.Printf(\"\\treturn nil\\n\")\n\t\t\t\t\t\t\t\tg.Printf(\"}(); err != nil {\\n\")\n\t\t\t\t\t\t\t\tg.Printf(\"\\treturn err\\n\")\n\t\t\t\t\t\t\t\tg.Printf(\"}\\n\")\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\tcontinue\n\t\t\t\t\t\t}\n\t\t\t\t\t\tif sel.Name == \"Bool\" {\n\t\t\t\t\t\t\tg.Printf(boolFieldSerialize, \"d.\"+field.name+\".Load()\", fieldNameUntitled)\n\t\t\t\t\t\t\tif field.hasLocker != \"\" {\n\t\t\t\t\t\t\t\tg.Printf(\"\\treturn nil\\n\")\n\t\t\t\t\t\t\t\tg.Printf(\"}(); err != nil {\\n\")\n\t\t\t\t\t\t\t\tg.Printf(\"\\treturn err\\n\")\n\t\t\t\t\t\t\t\tg.Printf(\"}\\n\")\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\tcontinue\n\t\t\t\t\t\t}\n\t\t\t\t\t\tif sel.Name == \"Value\" {\n\t\t\t\t\t\t\tg.Printf(serializableFieldTemplate, \"d.\"+field.name+\".Load()\", fieldNameUntitled)\n\t\t\t\t\t\t\tif field.hasLocker != \"\" {\n\t\t\t\t\t\t\t\tg.Printf(\"\\treturn nil\\n\")\n\t\t\t\t\t\t\t\tg.Printf(\"}(); err != nil {\\n\")\n\t\t\t\t\t\t\t\tg.Printf(\"\\treturn err\\n\")\n\t\t\t\t\t\t\t\tg.Printf(\"}\\n\")\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\tcontinue\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t\tif xIdent.Name == \"sync\" {\n\t\t\t\t\t\tfmt.Printf(\"\\t skipping field %s because it is %v.%v\\n\", fieldName, x, sel)\n\t\t\t\t\t\tif field.hasLocker != \"\" 
{\n\t\t\t\t\t\t\tg.Printf(\"\\treturn nil\\n\")\n\t\t\t\t\t\t\tg.Printf(\"}(); err != nil {\\n\")\n\t\t\t\t\t\t\tg.Printf(\"\\treturn err\\n\")\n\t\t\t\t\t\t\tg.Printf(\"}\\n\")\n\t\t\t\t\t\t}\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t\tg.Printf(serializableFieldTemplate, \"d.\"+field.name, fieldNameUntitled)\n\t\tcase *ast.IndexExpr:\n\t\t\tx := fieldType.X\n\t\t\tif fieldType, isxFieldSelector := x.(*ast.SelectorExpr); isxFieldSelector { // TODO: we need to refactor this so we can reuse...\n\t\t\t\txIdent, xIsIdent := fieldType.X.(*ast.Ident)\n\t\t\t\tsel := fieldType.Sel\n\t\t\t\tif xIsIdent && xIdent.Name == \"atomic\" && sel.Name == \"Pointer\" {\n\t\t\t\t\tg.Printf(atomicPointerFieldTemplate, \"d.\"+field.name, field.name, fieldNameUntitled)\n\t\t\t\t\tif field.hasLocker != \"\" {\n\t\t\t\t\t\tg.Printf(\"\\treturn nil\\n\")\n\t\t\t\t\t\tg.Printf(\"}(); err != nil {\\n\")\n\t\t\t\t\t\tg.Printf(\"\\treturn err\\n\")\n\t\t\t\t\t\tg.Printf(\"}\\n\")\n\t\t\t\t\t}\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t}\n\t\t\tfmt.Printf(\"no support yet for %#q\\n\", fieldType)\n\t\t\tcontinue\n\t\tcase *ast.Ident:\n\t\t\tswitch fieldType.Name {\n\t\t\tcase \"byte\":\n\t\t\t\tg.Printf(byteFieldSerialize, \"d.\"+field.name, fieldNameUntitled)\n\t\t\tcase \"int\":\n\t\t\t\tg.Printf(int64FieldSerialize, \"int64(d.\"+field.name+\")\", fieldNameUntitled)\n\t\t\tcase \"int32\":\n\t\t\t\tg.Printf(int32FieldSerialize, \"int32(d.\"+field.name+\")\", fieldNameUntitled)\n\t\t\tcase \"uint32\":\n\t\t\t\tg.Printf(uint32FieldSerialize, \"d.\"+field.name, fieldNameUntitled)\n\t\t\tcase \"bool\":\n\t\t\t\tg.Printf(boolFieldSerialize, \"d.\"+field.name, fieldNameUntitled)\n\t\t\tcase \"string\":\n\t\t\t\tg.Printf(stringFieldSerialize, \"d.\"+field.name, fieldNameUntitled)\n\t\t\tcase \"error\":\n\t\t\t\tg.Printf(errorFieldSerialize, \"d.\"+field.name, fieldNameUntitled)\n\t\t\tdefault:\n\t\t\t\tfmt.Printf(\"\\t no support implemented for Ident with type %v\\n\", 
fieldType)\n\t\t\t\tg.Printf(\"{\\n\")\n\t\t\t\tg.Printf(\"_value := fmt.Sprintf(\\\"%%v\\\", d.%s)\\n\", fieldName)\n\t\t\t\tg.Printf(stringFieldSerialize, \"_value\", fieldNameUntitled)\n\t\t\t\tg.Printf(\"}\\n\")\n\t\t\t}\n\t\tcase *ast.ArrayType:\n\t\t\tif eltType, ok := fieldType.Elt.(*ast.Ident); ok && eltType.Name == \"byte\" {\n\t\t\t\tg.Printf(\"if err := writeBuffer.WriteByteArray(%s, d.%s); err != nil {\\n\", fieldNameUntitled, field.name)\n\t\t\t\tg.Printf(\"\\treturn err\\n\")\n\t\t\t\tg.Printf(\"}\\n\")\n\t\t\t} else {\n\t\t\t\tg.Printf(\"if err := writeBuffer.PushContext(%s, utils.WithRenderAsList(true)); err != nil {\\n\\t\\treturn err\\n\\t}\\n\", fieldNameUntitled)\n\t\t\t\tg.Printf(\"for _, elem := range d.%s {\", field.name)\n\t\t\t\tswitch eltType := fieldType.Elt.(type) {\n\t\t\t\tcase *ast.SelectorExpr, *ast.StarExpr:\n\t\t\t\t\tg.Printf(\"\\n\\t\\tvar elem any = elem\\n\")\n\t\t\t\t\tg.Printf(serializableFieldTemplate, \"elem\", \"\\\"value\\\"\")\n\t\t\t\tcase *ast.Ident:\n\t\t\t\t\tswitch eltType.Name {\n\t\t\t\t\tcase \"int\":\n\t\t\t\t\t\tg.Printf(int64FieldSerialize, \"int64(d.\"+field.name+\")\", fieldNameUntitled)\n\t\t\t\t\tcase \"uint32\":\n\t\t\t\t\t\tg.Printf(uint32FieldSerialize, \"d.\"+field.name, fieldNameUntitled)\n\t\t\t\t\tcase \"bool\":\n\t\t\t\t\t\tg.Printf(boolFieldSerialize, \"elem\", \"\\\"\\\"\")\n\t\t\t\t\tcase \"string\":\n\t\t\t\t\t\tg.Printf(stringFieldSerialize, \"elem\", \"\\\"\\\"\")\n\t\t\t\t\tcase \"error\":\n\t\t\t\t\t\tg.Printf(errorFieldSerialize, \"elem\", \"\\\"\\\"\")\n\t\t\t\t\tdefault:\n\t\t\t\t\t\tfmt.Printf(\"\\t no support implemented for Ident within ArrayType for %v\\n\", fieldType)\n\t\t\t\t\t\tg.Printf(\"_value := fmt.Sprintf(\\\"%%v\\\", elem)\\n\")\n\t\t\t\t\t\tg.Printf(stringFieldSerialize, \"_value\", fieldNameUntitled)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tg.Printf(\"}\\n\")\n\t\t\t\tg.Printf(\"if err := writeBuffer.PopContext(%s, utils.WithRenderAsList(true)); err != nil {\\n\\t\\treturn 
err\\n\\t}\\n\", fieldNameUntitled)\n\t\t\t}\n\t\tcase *ast.MapType:\n\t\t\tg.Printf(\"if err := writeBuffer.PushContext(%s, utils.WithRenderAsList(true)); err != nil {\\n\\t\\treturn err\\n\\t}\\n\", fieldNameUntitled)\n\t\t\t// TODO: we use serializable or strings as we don't want to over-complex this\n\t\t\tg.Printf(\"for _name, elem := range d.%s {\\n\", fieldName)\n\t\t\tswitch keyType := fieldType.Key.(type) {\n\t\t\tcase *ast.Ident:\n\t\t\t\tswitch keyType.Name {\n\t\t\t\tcase \"uint\", \"uint8\", \"uint16\", \"uint32\", \"uint64\", \"int\", \"int8\", \"int16\", \"int32\", \"int64\": // TODO: add other types\n\t\t\t\t\tg.Printf(\"\\t\\tname := fmt.Sprintf(\\\"%s\\\", _name)\\n\", \"%v\")\n\t\t\t\tcase \"string\":\n\t\t\t\t\tg.Printf(\"\\t\\tname := _name\\n\")\n\t\t\t\tdefault:\n\t\t\t\t\tg.Printf(\"\\t\\tname := fmt.Sprintf(\\\"%s\\\", &_name)\\n\", \"%v\")\n\t\t\t\t}\n\t\t\tdefault:\n\t\t\t\tg.Printf(\"\\t\\tname := fmt.Sprintf(\\\"%s\\\", &_name)\\n\", \"%v\")\n\t\t\t}\n\t\t\tswitch eltType := fieldType.Value.(type) {\n\t\t\tcase *ast.StarExpr, *ast.SelectorExpr:\n\t\t\t\tg.Printf(\"\\n\\t\\tvar elem any = elem\\n\")\n\t\t\t\tg.Printf(\"\\t\\tif serializable, ok := elem.(utils.Serializable); ok {\\n\")\n\t\t\t\tg.Printf(\"\\t\\t\\tif err := writeBuffer.PushContext(name); err != nil {\\n\")\n\t\t\t\tg.Printf(\"\\t\\t\\t\\treturn err\\n\")\n\t\t\t\tg.Printf(\"\\t\\t\\t}\\n\")\n\t\t\t\tg.Printf(\"\\t\\t\\tif err := serializable.SerializeWithWriteBuffer(ctx, writeBuffer); err != nil {\\n\")\n\t\t\t\tg.Printf(\"\\t\\t\\t\\treturn err\\n\")\n\t\t\t\tg.Printf(\"\\t\\t\\t}\\n\")\n\t\t\t\tg.Printf(\"\\t\\t\\tif err := writeBuffer.PopContext(name); err != nil {\\n\")\n\t\t\t\tg.Printf(\"\\t\\t\\t\\treturn err\\n\")\n\t\t\t\tg.Printf(\"\\t\\t\\t}\\n\")\n\t\t\t\tg.Printf(\"\\t\\t} else {\\n\")\n\t\t\t\tg.Printf(\"\\t\\t\\telemAsString := fmt.Sprintf(\\\"%%v\\\", elem)\\n\")\n\t\t\t\tg.Printf(\"\\t\\t\\tif err := writeBuffer.WriteString(name, 
uint32(len(elemAsString)*8), \\\"UTF-8\\\", elemAsString); err != nil {\\n\")\n\t\t\t\tg.Printf(\"\\t\\t\\t\\treturn err\\n\")\n\t\t\t\tg.Printf(\"\\t\\t\\t}\\n\")\n\t\t\t\tg.Printf(\"\\t\\t}\\n\")\n\t\t\tcase *ast.Ident:\n\t\t\t\tswitch eltType.Name {\n\t\t\t\tcase \"bool\":\n\t\t\t\t\tg.Printf(boolFieldSerialize, \"elem\", \"name\")\n\t\t\t\tcase \"string\":\n\t\t\t\t\tg.Printf(stringFieldSerialize, \"elem\", \"name\")\n\t\t\t\tcase \"error\":\n\t\t\t\t\tg.Printf(errorFieldSerialize, \"elem\", \"name\")\n\t\t\t\tdefault:\n\t\t\t\t\tfmt.Printf(\"\\t no support implemented for Ident within MapType for %v\\n\", fieldType)\n\t\t\t\t\tg.Printf(\"\\t\\t_value := fmt.Sprintf(\\\"%%v\\\", elem)\\n\")\n\t\t\t\t\tg.Printf(stringFieldSerialize, \"_value\", \"name\")\n\t\t\t\t}\n\t\t\tdefault:\n\t\t\t\tfmt.Printf(\"\\t no support implemented within MapType %v\\n\", fieldType.Value)\n\t\t\t\tg.Printf(\"\\t\\t_value := fmt.Sprintf(\\\"%%v\\\", elem)\\n\")\n\t\t\t\tg.Printf(stringFieldSerialize, \"_value\", \"name\")\n\t\t\t}\n\t\t\tg.Printf(\"\\t}\\n\")\n\t\t\tg.Printf(\"if err := writeBuffer.PopContext(%s, utils.WithRenderAsList(true)); err != nil {\\n\\t\\treturn err\\n\\t}\\n\", fieldNameUntitled)\n\t\tcase *ast.ChanType:\n\t\t\tg.Printf(chanFieldSerialize, \"d.\"+field.name, fieldNameUntitled, field.name)\n\t\tcase *ast.FuncType:\n\t\t\tg.Printf(funcFieldSerialize, \"d.\"+field.name, fieldNameUntitled)\n\t\tdefault:\n\t\t\tfmt.Printf(\"no support implemented %#v\\n\", fieldType)\n\t\t}\n\t\tif field.hasLocker != \"\" {\n\t\t\tg.Printf(\"\\treturn nil\\n\")\n\t\t\tg.Printf(\"}(); err != nil {\\n\")\n\t\t\tg.Printf(\"\\treturn err\\n\")\n\t\t\tg.Printf(\"}\\n\")\n\t\t}\n\t}\n\tg.Printf(\"\\tif err := writeBuffer.PopContext(%s); err != nil {\\n\", logicalTypeName)\n\tg.Printf(\"\\t\\treturn err\\n\")\n\tg.Printf(\"\\t}\\n\")\n\tg.Printf(\"\\treturn nil\\n\")\n\tg.Printf(\"}\\n\")\n\tg.Printf(\"\\n\")\n\tg.Printf(stringerTemplate, typeName)\n}", "func mmcArgGenerator() string 
{\r\n\tmmcArgs:= make([] string,1000)\t\r\n\tfor i:=0;i<len(mmcArgs);i++{\r\n\t\tmmcArgs[i] = strconv.Itoa(i+1)\r\n\t}\r\n\treturn joinMmcArgs(mmcArgs)\r\n}", "func argInit() args {\n\n\tvar a args\n\tflag.Float64Var(&a.x1, \"x1\", -2.0, \"left position of real axis\")\n\tflag.Float64Var(&a.x2, \"x2\", 1.0, \"right position of real axis\")\n\tflag.Float64Var(&a.y1, \"y1\", -1.5, \"down position of imaginary axis\")\n\tflag.Float64Var(&a.y2, \"y2\", 1.5, \"up position of imaginary axis\")\n\tflag.Float64Var(&a.threshold, \"th\", 4.0, \"squared threshold of the function\")\n\tflag.IntVar(&a.w, \"w\", 1000, \"width in pixels of the image\")\n\tflag.IntVar(&a.h, \"h\", 1000, \"height in pixels of the image\")\n\tflag.IntVar(&a.nIter, \"ni\", 100, \"maximum number of iterations for pixel\")\n\tflag.IntVar(&a.nRoutines, \"nr\", 4, \"number of go routines to be used\")\n\tflag.StringVar(&a.path, \"p\", \"./\", \"path to the generated png image\")\n\n\tflag.Parse()\n\treturn a\n}", "func (p *preprocessorImpl) getDirectiveArguments(info TokenInfo, emptyOk bool) []TokenInfo {\n\tdir := info.Token\n\tvar ret []TokenInfo\n\tfor info = p.lexer.Peek(); info.Token != nil && !info.Newline; info = p.lexer.Peek() {\n\t\tret = append(ret, p.lexer.Next())\n\t}\n\n\tif len(ret) == 0 && !emptyOk {\n\t\tp.err.Errorf(\"%s needs an argument.\", dir)\n\t}\n\n\treturn ret\n}", "func parse(parentField string, v interface{}) ([]argument, error) {\n\n\t// Reflect on the value to get started.\n\trawValue := reflect.ValueOf(v)\n\n\t// If a parent field is provided we are recursing. We are now\n\t// processing a struct within a struct. We need the parent struct\n\t// name for namespacing.\n\tif parentField != \"\" {\n\t\tparentField = strings.ToLower(parentField) + \"_\"\n\t}\n\n\t// We need to check we have a pointer else we can't modify anything\n\t// later. 
With the pointer, get the value that the pointer points to.\n\t// With a struct, that means we are recursing and we need to assert to\n\t// get the inner struct value to process it.\n\tvar val reflect.Value\n\tswitch rawValue.Kind() {\n\tcase reflect.Ptr:\n\t\tval = rawValue.Elem()\n\t\tif val.Kind() != reflect.Struct {\n\t\t\treturn nil, fmt.Errorf(\"incompatible type `%v` looking for a pointer\", val.Kind())\n\t\t}\n\tcase reflect.Struct:\n\t\tvar ok bool\n\t\tif val, ok = v.(reflect.Value); !ok {\n\t\t\treturn nil, fmt.Errorf(\"internal recurse error\")\n\t\t}\n\tdefault:\n\t\treturn nil, fmt.Errorf(\"incompatible type `%v`\", rawValue.Kind())\n\t}\n\n\tvar args []argument\n\n\t// We need to iterate over the fields of the struct value we are processing.\n\t// If the field is a struct then recurse to process its fields. If we have\n\t// a field that is not a struct, get pull the metadata. The `field` field\n\t// is important because it is how we update things later.\n\tfor i := 0; i < val.NumField(); i++ {\n\t\tfield := val.Type().Field(i)\n\t\tif field.Type.Kind() == reflect.Struct {\n\t\t\tnewArgs, err := parse(parentField+field.Name, val.Field(i))\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\targs = append(args, newArgs...)\n\t\t\tcontinue\n\t\t}\n\n\t\targ := argument{\n\t\t\tShort: field.Tag.Get(\"flag\"),\n\t\t\tLong: parentField + strings.ToLower(field.Name),\n\t\t\tType: field.Type.Name(),\n\t\t\tDefault: field.Tag.Get(\"default\"),\n\t\t\tDesc: field.Tag.Get(\"flagdesc\"),\n\t\t\tfield: val.Field(i),\n\t\t}\n\t\targs = append(args, arg)\n\t}\n\n\treturn args, nil\n}", "func (ec *executionContext) field_Mutation_createTag_args(ctx context.Context, rawArgs map[string]interface{}) (map[string]interface{}, error) {\n\tvar err error\n\targs := map[string]interface{}{}\n\tvar arg0 DayTag\n\tif tmp, ok := rawArgs[\"input\"]; ok {\n\t\tctx := graphql.WithPathContext(ctx, graphql.NewPathWithField(\"input\"))\n\t\targ0, err = 
ec.unmarshalNDayTag2githubᚗcomᚋArtemGretsovᚋgolangᚑgqlgenᚑgormᚑpsqlᚑexampleᚋgraphᚋgeneratedᚐDayTag(ctx, tmp)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\targs[\"input\"] = arg0\n\treturn args, nil\n}", "func castArg(prefix string, f field.Field, argIndex int) string {\n\tswitch f.DatatypeName {\n\tcase field.TypeString:\n\t\treturn fmt.Sprintf(\"%s%s := args[%d]\", prefix, f.Name.UpperCamel, argIndex)\n\tcase field.TypeUint, field.TypeInt, field.TypeBool:\n\t\treturn fmt.Sprintf(`%s%s, err := cast.To%sE(args[%d])\n if err != nil {\n return err\n }`,\n\t\t\tprefix, f.Name.UpperCamel, strings.Title(f.Datatype), argIndex)\n\tcase field.TypeCustom:\n\t\treturn fmt.Sprintf(`%[1]v%[2]v := new(types.%[3]v)\n\t\t\terr = json.Unmarshal([]byte(args[%[4]v]), %[1]v%[2]v)\n \t\tif err != nil {\n return err\n }`, prefix, f.Name.UpperCamel, f.Datatype, argIndex)\n\tdefault:\n\t\tpanic(fmt.Sprintf(\"unknown type %s\", f.DatatypeName))\n\t}\n}", "func (ec *executionContext) field_Mutation_accesstoken_args(ctx context.Context, rawArgs map[string]interface{}) (map[string]interface{}, error) {\n\tvar err error\n\targs := map[string]interface{}{}\n\tvar arg0 model.AccesstokenRequest\n\tif tmp, ok := rawArgs[\"input\"]; ok {\n\t\tctx := graphql.WithPathContext(ctx, graphql.NewPathWithField(\"input\"))\n\t\targ0, err = ec.unmarshalNAccesstokenRequest2gitlabᚗcomᚋsirinibinᚋgoᚑmysqlᚑgraphqlᚋgraphᚋmodelᚐAccesstokenRequest(ctx, tmp)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\targs[\"input\"] = arg0\n\treturn args, nil\n}", "func (o RegistryTaskDockerStepOutput) Arguments() pulumi.StringMapOutput {\n\treturn o.ApplyT(func(v RegistryTaskDockerStep) map[string]string { return v.Arguments }).(pulumi.StringMapOutput)\n}", "func getArguments() {\n\t// define pointers to the arguments which will be filled up when flag.Parse() is called\n\tlangFlag := flag.String(\"l\", string(auto), \"Which language to use. 
Args are: lua | wren | moon | auto\")\n\tdirFlag := flag.String(\"d\", \".\", \"The directory containing the main file and the subfiles\")\n\toutFlag := flag.String(\"o\", \"out\", \"The output file (sans extension)\")\n\twatchFlag := flag.Bool(\"w\", false, \"Whether to enable Watch mode, which automatically recompiles if a file has changed in the directory\")\n\tdefinesFlag := flag.String(\"D\", \"\", \"Used to pass in defines before compiling. Format is -D \\\"var1=value;var2=value;var3=value\\\"\")\n\n\t// begin parsing the flags\n\tflag.Parse()\n\n\t// these setup functions have to be performed in this particular order\n\t// because they depend on certain fields of Args to be set when they are called\n\t_setDir(*dirFlag)\n\t_setLanguage(*langFlag)\n\t_setOutputFile(*outFlag)\n\t_setDefines(*definesFlag)\n\n\tArgs.watchMode = *watchFlag\n\n\t// this gives all the non-flag command line args\n\tArgs.positional = flag.Args()\n}", "func (fi *funcInfo) emitVararg(line, a, n int) {\r\n\tfi.emitABC(line, OP_VARARG, a, n+1, 0)\r\n}", "func genConfig() ([]byte, error) {\n\t// Using genflags.getConfig() instead of config.New() because\n\t// it will include any defaults we have on the command line such\n\t// as default plugin selection. 
We didn't want to wire this into\n\t// the `config` package, but it will be a default value the CLI\n\t// users expect.\n\tc := genflags.resolveConfig()\n\tb, err := json.Marshal(c)\n\treturn b, errors.Wrap(err, \"unable to marshal configuration\")\n}", "func (o RegistryTaskDockerStepPtrOutput) Arguments() pulumi.StringMapOutput {\n\treturn o.ApplyT(func(v *RegistryTaskDockerStep) map[string]string {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn v.Arguments\n\t}).(pulumi.StringMapOutput)\n}", "func convertBuilderFunc(fn interface{}) ParamFuncBuilder {\n\ttypFn := reflect.TypeOf(fn)\n\tif !goodParamFunc(typFn) {\n\t\t// it's not a function which returns a function,\n\t\t// it's not a a func(compileArgs) func(requestDynamicParamValue) bool\n\t\t// but it's a func(requestDynamicParamValue) bool, such as regexp.Compile.MatchString\n\t\tif typFn.NumIn() == 1 && typFn.In(0).Kind() == reflect.String && typFn.NumOut() == 1 && typFn.Out(0).Kind() == reflect.Bool {\n\t\t\tfnV := reflect.ValueOf(fn)\n\t\t\t// let's convert it to a ParamFuncBuilder which its combile route arguments are empty and not used at all.\n\t\t\t// the below return function runs on each route that this param type function is used in order to validate the function,\n\t\t\t// if that param type function is used wrongly it will be panic like the rest,\n\t\t\t// indeed the only check is the len of arguments not > 0, no types of values or conversions,\n\t\t\t// so we return it as soon as possible.\n\t\t\treturn func(args []string) reflect.Value {\n\t\t\t\tif n := len(args); n > 0 {\n\t\t\t\t\tpanic(fmt.Sprintf(\"%T does not allow any input arguments from route but got [len=%d,values=%s]\", fn, n, strings.Join(args, \", \")))\n\t\t\t\t}\n\t\t\t\treturn fnV\n\t\t\t}\n\t\t}\n\n\t\treturn nil\n\t}\n\n\tnumFields := typFn.NumIn()\n\n\tpanicIfErr := func(i int, err error) {\n\t\tif err != nil {\n\t\t\tpanic(fmt.Sprintf(\"on field index: %d: %v\", i, err))\n\t\t}\n\t}\n\n\treturn func(args []string) 
reflect.Value {\n\t\tif len(args) != numFields {\n\t\t\t// no variadics support, for now.\n\t\t\tpanic(fmt.Sprintf(\"args(len=%d) should be the same len as numFields(%d) for: %s\", len(args), numFields, typFn))\n\t\t}\n\t\tvar argValues []reflect.Value\n\t\tfor i := 0; i < numFields; i++ {\n\t\t\tfield := typFn.In(i)\n\t\t\targ := args[i]\n\n\t\t\t// try to convert the string literal as we get it from the parser.\n\t\t\tvar (\n\t\t\t\tval interface{}\n\t\t\t)\n\n\t\t\t// try to get the value based on the expected type.\n\t\t\tswitch field.Kind() {\n\t\t\tcase reflect.Int:\n\t\t\t\tv, err := strconv.Atoi(arg)\n\t\t\t\tpanicIfErr(i, err)\n\t\t\t\tval = v\n\t\t\tcase reflect.Int8:\n\t\t\t\tv, err := strconv.ParseInt(arg, 10, 8)\n\t\t\t\tpanicIfErr(i, err)\n\t\t\t\tval = int8(v)\n\t\t\tcase reflect.Int16:\n\t\t\t\tv, err := strconv.ParseInt(arg, 10, 16)\n\t\t\t\tpanicIfErr(i, err)\n\t\t\t\tval = int16(v)\n\t\t\tcase reflect.Int32:\n\t\t\t\tv, err := strconv.ParseInt(arg, 10, 32)\n\t\t\t\tpanicIfErr(i, err)\n\t\t\t\tval = int32(v)\n\t\t\tcase reflect.Int64:\n\t\t\t\tv, err := strconv.ParseInt(arg, 10, 64)\n\t\t\t\tpanicIfErr(i, err)\n\t\t\t\tval = v\n\t\t\tcase reflect.Uint:\n\t\t\t\tv, err := strconv.ParseUint(arg, 10, strconv.IntSize)\n\t\t\t\tpanicIfErr(i, err)\n\t\t\t\tval = uint(v)\n\t\t\tcase reflect.Uint8:\n\t\t\t\tv, err := strconv.ParseUint(arg, 10, 8)\n\t\t\t\tpanicIfErr(i, err)\n\t\t\t\tval = uint8(v)\n\t\t\tcase reflect.Uint16:\n\t\t\t\tv, err := strconv.ParseUint(arg, 10, 16)\n\t\t\t\tpanicIfErr(i, err)\n\t\t\t\tval = uint16(v)\n\t\t\tcase reflect.Uint32:\n\t\t\t\tv, err := strconv.ParseUint(arg, 10, 32)\n\t\t\t\tpanicIfErr(i, err)\n\t\t\t\tval = uint32(v)\n\t\t\tcase reflect.Uint64:\n\t\t\t\tv, err := strconv.ParseUint(arg, 10, 64)\n\t\t\t\tpanicIfErr(i, err)\n\t\t\t\tval = v\n\t\t\tcase reflect.Float32:\n\t\t\t\tv, err := strconv.ParseFloat(arg, 32)\n\t\t\t\tpanicIfErr(i, err)\n\t\t\t\tval = float32(v)\n\t\t\tcase reflect.Float64:\n\t\t\t\tv, err := 
strconv.ParseFloat(arg, 64)\n\t\t\t\tpanicIfErr(i, err)\n\t\t\t\tval = v\n\t\t\tcase reflect.Bool:\n\t\t\t\tv, err := strconv.ParseBool(arg)\n\t\t\t\tpanicIfErr(i, err)\n\t\t\t\tval = v\n\t\t\tcase reflect.Slice:\n\t\t\t\tif len(arg) > 1 {\n\t\t\t\t\tif arg[0] == '[' && arg[len(arg)-1] == ']' {\n\t\t\t\t\t\t// it is a single argument but as slice.\n\t\t\t\t\t\tval = strings.Split(arg[1:len(arg)-1], \",\") // only string slices.\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\tdefault:\n\t\t\t\tval = arg\n\t\t\t}\n\n\t\t\targValue := reflect.ValueOf(val)\n\t\t\tif expected, got := field.Kind(), argValue.Kind(); expected != got {\n\t\t\t\tpanic(fmt.Sprintf(\"func's input arguments should have the same type: [%d] expected %s but got %s\", i, expected, got))\n\t\t\t}\n\n\t\t\targValues = append(argValues, argValue)\n\t\t}\n\n\t\tevalFn := reflect.ValueOf(fn).Call(argValues)[0]\n\n\t\t// var evaluator EvaluatorFunc\n\t\t// // check for typed and not typed\n\t\t// if _v, ok := evalFn.(EvaluatorFunc); ok {\n\t\t// \tevaluator = _v\n\t\t// } else if _v, ok = evalFn.(func(string) bool); ok {\n\t\t// \tevaluator = _v\n\t\t// }\n\t\t// return func(paramValue interface{}) bool {\n\t\t// \treturn evaluator(paramValue)\n\t\t// }\n\t\treturn evalFn\n\t}\n}", "func (ec *executionContext) field_Mutation_addPlantToNursery_args(ctx context.Context, rawArgs map[string]interface{}) (map[string]interface{}, error) {\n\tvar err error\n\targs := map[string]interface{}{}\n\tvar arg0 model.NewNurseryAddition\n\tif tmp, ok := rawArgs[\"input\"]; ok {\n\t\targ0, err = ec.unmarshalNNewNurseryAddition2githubᚗcomᚋwonesyᚋplantparenthoodᚋgraphᚋmodelᚐNewNurseryAddition(ctx, tmp)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\targs[\"input\"] = arg0\n\treturn args, nil\n}", "func NewArgument(meta ScriptMetaData, node *node32, value Value) Argument {\n\treturn &argument{astNode: astNode{meta: meta, node: node}, value: value}\n}", "func (c *Compiler) moduleArgStmts(ident string, value *ValueExpr) []dst.Stmt 
{\n\tstmts := []dst.Stmt{}\n\tvariable := util.KebabToCamel(ident)\n\tt := value.Type()\n\tswitch t {\n\tcase ArrayType:\n\t\tstmts = append(stmts, c.compileCollectionExpansion(expandArrayFunc, ident, variable, value)...)\n\tcase ObjectType:\n\t\tstmts = append(stmts, c.compileCollectionExpansion(expandObjectFunc, ident, variable, value)...)\n\tcase FunctionType:\n\t\tassignStmt := &dst.AssignStmt{\n\t\t\tTok: token.DEFINE,\n\t\t\tLhs: []dst.Expr{&dst.Ident{Name: variable}},\n\t\t\tRhs: []dst.Expr{value.ToGoAST()},\n\t\t}\n\t\tstmts = append(stmts, assignStmt)\n\t\terrField := fmt.Sprintf(\"%s.Error\", variable)\n\t\tifErrStmt := &dst.IfStmt{\n\t\t\tCond: &dst.BinaryExpr{\n\t\t\t\tOp: token.NEQ,\n\t\t\t\tX: &dst.Ident{Name: errField},\n\t\t\t\tY: &dst.Ident{Name: nilValue},\n\t\t\t},\n\t\t\tBody: &dst.BlockStmt{\n\t\t\t\tList: []dst.Stmt{\n\t\t\t\t\t&dst.ReturnStmt{\n\t\t\t\t\t\tResults: []dst.Expr{\n\t\t\t\t\t\t\t&dst.Ident{Name: modVar},\n\t\t\t\t\t\t\t&dst.Ident{Name: errField},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t}\n\t\tstmts = append(stmts, ifErrStmt)\n\t\tvalueField := fmt.Sprintf(\"%s.Value\", variable)\n\t\tassignFieldStmt := &dst.AssignStmt{\n\t\t\tTok: token.ASSIGN,\n\t\t\tLhs: []dst.Expr{\n\t\t\t\t&dst.Ident{Name: fmt.Sprintf(\"mod.%s\", util.KebabToPascal(ident))},\n\t\t\t},\n\t\t\tRhs: []dst.Expr{\n\t\t\t\t&dst.Ident{Name: valueField},\n\t\t\t},\n\t\t}\n\t\tstmts = append(stmts, assignFieldStmt)\n\tdefault:\n\t\tassignFieldStmt := &dst.AssignStmt{\n\t\t\tTok: token.ASSIGN,\n\t\t\tLhs: []dst.Expr{\n\t\t\t\t&dst.Ident{Name: fmt.Sprintf(\"mod.%s\", util.KebabToPascal(ident))},\n\t\t\t},\n\t\t\tRhs: []dst.Expr{\n\t\t\t\tvalue.ToGoAST(),\n\t\t\t},\n\t\t}\n\t\tstmts = append(stmts, assignFieldStmt)\n\t}\n\treturn stmts\n}", "func GenAST(program []Statement) AST {\n\tvar ast AST\n\tfor _, stmt := range program {\n\t\tv, err := ParseVerb(stmt)\n\t\tif err != nil { //TODO\n\t\t\t//panic(ParserError{stmtIndex: stmtIndex, tok: stmt[0], 
message: fmt.Sprintf(\"First token in statement must be a word, was %s\", stmt[0].tokType.toString())})\n\t\t\tpanic(err)\n\t\t}\n\t\tast = append(ast, v)\n\t}\n\treturn ast\n}", "func printInferredArguments(out *output.Output) {\n\tif out == nil {\n\t\treturn\n\t}\n\n\tblock := out.Block(output.Line(output.EmojiLightbulb, output.StyleItalic, \"Inferred arguments\"))\n\tblock.Writef(\"repo: %s\", codeintelUploadFlags.repo)\n\tblock.Writef(\"commit: %s\", codeintelUploadFlags.commit)\n\tblock.Writef(\"root: %s\", codeintelUploadFlags.root)\n\tblock.Writef(\"file: %s\", codeintelUploadFlags.file)\n\tblock.Writef(\"indexer: %s\", codeintelUploadFlags.indexer)\n\tblock.Writef(\"indexerVersion: %s\", codeintelUploadFlags.indexerVersion)\n\tblock.Close()\n}", "func generateParams(generator *Generator, params []parser.Node) (cParams []string) {\n\t// Translate each parameter\n\tfor _, param := range params {\n\t\t// Append the translated parameter in C\n\t\tcParams = append(\n\t\t\tcParams,\n\t\t\tgenerateInstruction(generator, param),\n\t\t)\n\t}\n\n\treturn\n}", "func expandArgs(s *State, rawArgs [][]argFragment, regexpArgs []int) []string {\n\targs := make([]string, 0, len(rawArgs))\n\tfor i, frags := range rawArgs {\n\t\tisRegexp := false\n\t\tfor _, j := range regexpArgs {\n\t\t\tif i == j {\n\t\t\t\tisRegexp = true\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\n\t\tvar b strings.Builder\n\t\tfor _, frag := range frags {\n\t\t\tif frag.quoted {\n\t\t\t\tb.WriteString(frag.s)\n\t\t\t} else {\n\t\t\t\tb.WriteString(s.ExpandEnv(frag.s, isRegexp))\n\t\t\t}\n\t\t}\n\t\targs = append(args, b.String())\n\t}\n\treturn args\n}", "func buildIPArgument(parameter string, environmentVariable string, imageType FDBImageType, sampleAddresses []fdbv1beta2.ProcessAddress) []monitorapi.Argument {\n\tvar leftIPWrap string\n\tvar rightIPWrap string\n\tif imageType == FDBImageTypeUnified {\n\t\tleftIPWrap = \"[\"\n\t\trightIPWrap = \"]\"\n\t} else {\n\t\tleftIPWrap = \"\"\n\t\trightIPWrap = 
\"\"\n\t}\n\targuments := []monitorapi.Argument{{Value: fmt.Sprintf(\"--%s=%s\", parameter, leftIPWrap)}}\n\n\tfor indexOfAddress, address := range sampleAddresses {\n\t\tif indexOfAddress != 0 {\n\t\t\targuments = append(arguments, monitorapi.Argument{Value: fmt.Sprintf(\",%s\", leftIPWrap)})\n\t\t}\n\n\t\targuments = append(arguments,\n\t\t\tmonitorapi.Argument{ArgumentType: monitorapi.EnvironmentArgumentType, Source: environmentVariable},\n\t\t\tmonitorapi.Argument{Value: fmt.Sprintf(\"%s:\", rightIPWrap)},\n\t\t\tmonitorapi.Argument{ArgumentType: monitorapi.ProcessNumberArgumentType, Offset: address.Port - 2, Multiplier: 2},\n\t\t)\n\n\t\tflags := address.SortedFlags()\n\n\t\tif len(flags) > 0 {\n\t\t\targuments = append(arguments, monitorapi.Argument{Value: fmt.Sprintf(\":%s\", strings.Join(flags, \":\"))})\n\t\t}\n\t}\n\treturn arguments\n}", "func (n ClassNode) Codegen(scope *Scope, c *Compiler) value.Value {\n\tstructDefn := scope.FindType(n.Name).Type.(*types.StructType)\n\n\tfieldnames := make([]string, 0, len(n.Variables))\n\tfields := make([]types.Type, 0, len(n.Variables))\n\n\tnames := map[string]bool{}\n\n\tfor _, f := range n.Variables {\n\t\tt := f.Type.Name\n\t\tname := f.Name.String()\n\t\tif _, found := names[name]; found {\n\t\t\tlog.Fatal(\"Class '%s' has two fields/methods named '%s'\\n\", n.Name, f.Name)\n\t\t}\n\t\tnames[name] = true\n\t\tty := scope.FindType(t).Type\n\t\tty = f.Type.BuildPointerType(ty)\n\t\tfields = append(fields, ty)\n\t\tfieldnames = append(fieldnames, name)\n\t}\n\n\tthisArg := VariableDefnNode{}\n\tthisArg.Name = NewNamedReference(\"this\")\n\tthisArg.Type = GeodeTypeRef{}\n\tthisArg.Type.Array = false\n\tthisArg.Type.Name = n.Name\n\tthisArg.Type.PointerLevel = 1\n\n\tstructDefn.Fields = fields\n\tstructDefn.Names = fieldnames\n\n\tmethodBaseArgs := []VariableDefnNode{thisArg}\n\tfor _, m := range n.Methods {\n\t\tm.Name.Value = fmt.Sprintf(\"class.%s.%s\", n.Name, m.Name)\n\t\tif _, found := names[m.Name.String()]; 
found {\n\t\t\tlog.Fatal(\"Class '%s' has two fields/methods named '%s'\\n\", n.Name, m.Name)\n\t\t}\n\t\tnames[m.Name.String()] = true\n\t\tm.Args = append(methodBaseArgs, m.Args...)\n\t\tm.Declare(scope, c)\n\t\tm.Codegen(scope, c)\n\t}\n\n\treturn nil\n}", "func genVariants(arg interface{}) gopter.Gen {\n\targs := arg.([]interface{})\n\ts := args[0].(string)\n\tt := args[1].(string)\n\treturn gen.OneConstOf(s, strings.ToUpper(s), strings.Title(s),\n\t\tfmt.Sprintf(\"%s %s\", s, t),\n\t\tfmt.Sprintf(\"%s %s\", strings.ToUpper(s), t),\n\t\tfmt.Sprintf(\"%s %s\", strings.Title(s), t),\n\t)\n}", "func (ArgumentFalse) argumentNode() {}", "func (ec *executionContext) field_Mutation_createAccount_args(ctx context.Context, rawArgs map[string]interface{}) (map[string]interface{}, error) {\n\tvar err error\n\targs := map[string]interface{}{}\n\tvar arg0 model.NewAccount\n\tif tmp, ok := rawArgs[\"input\"]; ok {\n\t\tctx := graphql.WithPathContext(ctx, graphql.NewPathWithField(\"input\"))\n\t\targ0, err = ec.unmarshalNNewAccount2githubᚗcomᚋannoyingᚑorangeᚋecpᚑapiᚋgraphᚋmodelᚐNewAccount(ctx, tmp)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\targs[\"input\"] = arg0\n\treturn args, nil\n}", "func (ec *executionContext) field_Mutation_createArticle_args(ctx context.Context, rawArgs map[string]interface{}) (map[string]interface{}, error) {\n\tvar err error\n\targs := map[string]interface{}{}\n\tvar arg0 models.NewArticle\n\tif tmp, ok := rawArgs[\"input\"]; ok {\n\t\tctx := graphql.WithPathContext(ctx, graphql.NewPathWithField(\"input\"))\n\t\targ0, err = ec.unmarshalNNewArticle2githubᚗcomᚋGlitchyGlitchᚋtypingerᚋmodelsᚐNewArticle(ctx, tmp)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\targs[\"input\"] = arg0\n\treturn args, nil\n}", "func (cfg *Config) genTokenListSpec(t xsd.Builtin) ([]spec, error) {\n\tcfg.debugf(\"generating Go source for token list %q\", xsd.XMLName(t).Local)\n\ts := spec{\n\t\tname: strings.ToLower(t.String()),\n\t\texpr: 
builtinExpr(t),\n\t\txsdType: t,\n\t}\n\tmarshal, err := gen.Func(\"MarshalText\").\n\t\tReceiver(\"x \"+s.name).\n\t\tReturns(\"[]byte\", \"error\").\n\t\tBody(`\n\t\t\treturn []byte(strings.Join(x, \" \")), nil\n\t\t`).Decl()\n\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"MarshalText %s: %v\", s.name, err)\n\t}\n\n\tunmarshal, err := gen.Func(\"UnmarshalText\").\n\t\tReceiver(\"x \" + s.name).\n\t\tArgs(\"text []byte\").\n\t\tReturns(\"error\").\n\t\tBody(`\n\t\t\t*x = bytes.Fields(text)\n\t\t\treturn nil\n\t\t`).Decl()\n\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"UnmarshalText %s: %v\", s.name, err)\n\t}\n\n\ts.methods = append(s.methods, marshal, unmarshal)\n\treturn []spec{s}, nil\n}", "func BuildArgs(s Servable, argsType reflect.Type, argsValue reflect.Value, req *http.Request, buildStructArg func(s Servable, typeName string, req *http.Request) (v reflect.Value, err error)) ([]reflect.Value, error) {\n\tfieldNum := argsType.NumField()\n\tparams := make([]reflect.Value, fieldNum)\n\tfor i := 0; i < fieldNum; i++ {\n\t\tfield := argsType.Field(i)\n\t\tfieldName := field.Name\n\t\tvalueType := argsValue.FieldByName(fieldName).Type()\n\t\tif field.Type.Kind() == reflect.Ptr && valueType.Elem().Kind() == reflect.Struct {\n\t\t\tconvertor := components(req).Convertor(valueType.Elem().Name())\n\t\t\tif convertor != nil {\n\t\t\t\tparams[i] = convertor(req)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tstructName := valueType.Elem().Name()\n\t\t\tv, err := buildStructArg(s, structName, req)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, errors.New(fmt.Sprintf(\"turbo: failed to BuildArgs, error:%s\", err))\n\t\t\t}\n\t\t\tparams[i] = v\n\t\t\tcontinue\n\t\t}\n\t\tv, _ := findValue(fieldName, req)\n\t\tvalue, err := reflectValue(field.Type, argsValue.FieldByName(fieldName), v)\n\t\tlogErrorIf(err)\n\t\tparams[i] = value\n\t}\n\treturn params, nil\n}", "func Agen(n *Node, res *Node)", "func GenerateValidArg(datatypeName string) string {\n\tswitch datatypeName {\n\tcase 
field.TypeString:\n\t\treturn \"xyz\"\n\tcase field.TypeUint, field.TypeInt:\n\t\treturn \"111\"\n\tcase field.TypeBool:\n\t\treturn valueFalse\n\tcase field.TypeCustom:\n\t\treturn valueNull\n\tdefault:\n\t\tpanic(fmt.Sprintf(\"unknown type %s\", datatypeName))\n\t}\n}", "func TraceFieldGenerator(ctx context.Context) []zapcore.Field {\n\tspanCtx := trace.FromContext(ctx).SpanContext()\n\n\treturn []zapcore.Field{\n\t\tzap.Uint64(\"dd.trace_id\", binary.BigEndian.Uint64(spanCtx.TraceID[8:])),\n\t\tzap.Uint64(\"dd.span_id\", binary.BigEndian.Uint64(spanCtx.SpanID[:])),\n\t}\n}", "func (o BuildStrategySpecBuildStepsOutput) Args() pulumi.StringArrayOutput {\n\treturn o.ApplyT(func(v BuildStrategySpecBuildSteps) []string { return v.Args }).(pulumi.StringArrayOutput)\n}", "func (ec *executionContext) field_Mutation_activateGame_args(ctx context.Context, rawArgs map[string]interface{}) (map[string]interface{}, error) {\n\tvar err error\n\targs := map[string]interface{}{}\n\tvar arg0 string\n\tif tmp, ok := rawArgs[\"testUUID\"]; ok {\n\t\targ0, err = ec.unmarshalNString2string(ctx, tmp)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\targs[\"testUUID\"] = arg0\n\treturn args, nil\n}", "func (c ResolverIndexConfigurationFuncCall) Args() []interface{} {\n\treturn []interface{}{c.Arg0, c.Arg1}\n}", "func (t *Terraform) initArgs(p types.ProviderType, cfg map[string]interface{}, clusterDir string) []string {\n\targs := make([]string, 0)\n\n\tvarsFile := filepath.Join(clusterDir, tfVarsFile)\n\n\targs = append(args, fmt.Sprintf(\"-var-file=%s\", varsFile), clusterDir)\n\n\treturn args\n}", "func AddIndependentPropertyGeneratorsForJsonField(gens map[string]gopter.Gen) {\n\tgens[\"SourceField\"] = gen.PtrOf(gen.AlphaString())\n}", "func NewDynamicArgument(value Value) Argument {\n\treturn &argument{value: value}\n}", "func (ctx *argComplContext) generate(env *complEnv, ch chan<- rawCandidate) error {\n\treturn completeArg(ctx.words, env.evaler, env.argCompleter, 
ch)\n}", "func (n DependencyNode) Codegen(prog *Program) (value.Value, error) { return nil, nil }", "func ASTArgsFromStmt(stmt string) (*ASTArgs, error) {\n\tstmtNode, err := parser.New().ParseOneStmt(stmt, \"\", \"\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tloadDataStmt, ok := stmtNode.(*ast.LoadDataStmt)\n\tif !ok {\n\t\treturn nil, errors.Errorf(\"stmt %s is not load data stmt\", stmt)\n\t}\n\treturn &ASTArgs{\n\t\tFileLocRef: loadDataStmt.FileLocRef,\n\t\tColumnsAndUserVars: loadDataStmt.ColumnsAndUserVars,\n\t\tColumnAssignments: loadDataStmt.ColumnAssignments,\n\t\tOnDuplicate: loadDataStmt.OnDuplicate,\n\t\tFieldsInfo: loadDataStmt.FieldsInfo,\n\t\tLinesInfo: loadDataStmt.LinesInfo,\n\t}, nil\n}", "func (g *Generator) generate(typeInfo typeInfo) {\n\t// <key, value>\n\tvalues := make([]Value, 0, 100)\n\tfor _, file := range g.pkg.files {\n\t\t// Set the state for this run of the walker.\n\t\tfile.typeInfo = typeInfo\n\t\tfile.values = nil\n\t\tif file.file != nil {\n\t\t\tast.Inspect(file.file, file.genDecl)\n\t\t\tvalues = append(values, file.values...)\n\t\t}\n\t}\n\n\tif len(values) == 0 {\n\t\tlog.Fatalf(\"no values defined for type %+v\", typeInfo)\n\t}\n\tg.transformValueNames(values, transformMethod)\n\t// Generate code that will fail if the constants change value.\n\tfor _, im := range checkImportPackages {\n\t\tg.Printf(stringImport, im)\n\t}\n\n\tif useNew {\n\t\tfor _, im := range newImportPackages {\n\t\t\tg.Printf(stringImport, im)\n\t\t}\n\t}\n\tif useBinary {\n\t\tfor _, im := range binaryImportPackages {\n\t\t\tg.Printf(stringImport, im)\n\t\t}\n\t}\n\tif useJson {\n\t\tfor _, im := range jsonImportPackages {\n\t\t\tg.Printf(stringImport, im)\n\t\t}\n\t}\n\tif useText {\n\t\tfor _, im := range textImportPackages {\n\t\t\tg.Printf(stringImport, im)\n\t\t}\n\t}\n\tif useYaml {\n\t\tfor _, im := range yamlImportPackages {\n\t\t\tg.Printf(stringImport, im)\n\t\t}\n\t}\n\tif useSql {\n\t\tfor _, im := range sqlImportPackages 
{\n\t\t\tg.Printf(stringImport, im)\n\t\t}\n\t}\n\n\tg.buildEnumRegenerateCheck(values)\n\n\truns := splitIntoRuns(values)\n\tthreshold := 10\n\n\tif useString {\n\t\t// The decision of which pattern to use depends on the number of\n\t\t// runs in the numbers. If there's only one, it's easy. For more than\n\t\t// one, there's a tradeoff between complexity and size of the data\n\t\t// and code vs. the simplicity of a map. A map takes more space,\n\t\t// but so does the code. The decision here (crossover at 10) is\n\t\t// arbitrary, but considers that for large numbers of runs the cost\n\t\t// of the linear scan in the switch might become important, and\n\t\t// rather than use yet another algorithm such as binary search,\n\t\t// we punt and use a map. In any case, the likelihood of a map\n\t\t// being necessary for any realistic example other than bitmasks\n\t\t// is very low. And bitmasks probably deserve their own analysis,\n\t\t// to be done some other day.\n\t\tswitch {\n\t\tcase len(runs) == 1:\n\t\t\tg.buildOneRun(runs, typeInfo)\n\t\tcase len(runs) <= threshold:\n\t\t\tg.buildMultipleRuns(runs, typeInfo)\n\t\tdefault:\n\t\t\tg.buildMap(runs, typeInfo)\n\t\t}\n\t}\n\n\tif useNew {\n\t\tg.Printf(newTemplate, typeInfo.Name)\n\t}\n\tif useBinary {\n\t\tg.buildCheck(runs, typeInfo.Name, threshold)\n\t\tg.Printf(binaryTemplate, typeInfo.Name)\n\t}\n\tif useJson {\n\t\tg.buildCheck(runs, typeInfo.Name, threshold)\n\t\tg.Printf(jsonTemplate, typeInfo.Name)\n\t}\n\tif useText {\n\t\tg.buildCheck(runs, typeInfo.Name, threshold)\n\t\tg.Printf(textTemplate, typeInfo.Name)\n\t}\n\tif useYaml {\n\t\tg.buildCheck(runs, typeInfo.Name, threshold)\n\t\tg.Printf(yamlTemplate, typeInfo.Name)\n\t}\n\tif useSql {\n\t\tg.buildCheck(runs, typeInfo.Name, threshold)\n\t\tg.Printf(sqpTemplate, typeInfo.Name)\n\t}\n\n\tif useContains {\n\t\tg.Printf(containsTemplate, typeInfo.Name)\n\t}\n}", "func printInferredArguments(out *output.Output) {\n\tif out == nil {\n\t\treturn\n\t}\n\n\tblock 
:= out.Block(output.Line(output.EmojiLightbulb, output.StyleItalic, \"Inferred arguments\"))\n\tblock.Writef(\"repo: %s\", lsifUploadFlags.repo)\n\tblock.Writef(\"commit: %s\", lsifUploadFlags.commit)\n\tblock.Writef(\"root: %s\", lsifUploadFlags.root)\n\tblock.Writef(\"file: %s\", lsifUploadFlags.file)\n\tblock.Writef(\"indexer: %s\", lsifUploadFlags.indexer)\n\tblock.Close()\n}", "func parseArgument(p *parser) (*ast.Argument, error) {\n\tvar label string\n\tvar labelStartPos, labelEndPos ast.Position\n\n\texpr, err := parseExpression(p, lowestBindingPower)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tp.skipSpaceAndComments()\n\n\t// If a colon follows the expression, the expression was our label.\n\tif p.current.Is(lexer.TokenColon) {\n\t\tlabelEndPos = p.current.EndPos\n\n\t\tidentifier, ok := expr.(*ast.IdentifierExpression)\n\t\tif !ok {\n\t\t\treturn nil, p.syntaxError(\n\t\t\t\t\"expected identifier for label, got %s\",\n\t\t\t\texpr,\n\t\t\t)\n\t\t}\n\t\tlabel = identifier.Identifier.Identifier\n\t\tlabelStartPos = expr.StartPosition()\n\n\t\t// Skip the identifier\n\t\tp.nextSemanticToken()\n\n\t\texpr, err = parseExpression(p, lowestBindingPower)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\tif len(label) > 0 {\n\t\treturn ast.NewArgument(\n\t\t\tp.memoryGauge,\n\t\t\tlabel,\n\t\t\t&labelStartPos,\n\t\t\t&labelEndPos,\n\t\t\texpr,\n\t\t), nil\n\t}\n\treturn ast.NewUnlabeledArgument(p.memoryGauge, expr), nil\n}", "func (t *Type) ChanArgs() *Type", "func GenerateGoTestRunArgs(goFlagsConfig GoFlagsConfig) ([]string, error) {\n\tflags := GoRunFlags.WithPrefix(\"test\")\n\tbindings := map[string]interface{}{\n\t\t\"Go\": &goFlagsConfig,\n\t}\n\n\targs, err := GenerateFlagArgs(flags, bindings)\n\tif err != nil {\n\t\treturn args, err\n\t}\n\targs = append(args, \"--test.v\")\n\treturn args, nil\n}", "func GenGoCodeFromParams(parameters []StructParameter) (string, error) {\n\tvar buf bytes.Buffer\n\n\tfor _, parameter := range parameters 
{\n\t\tif parameter.Usage == \"\" {\n\t\t\tparameter.Usage = \"-\"\n\t\t}\n\t\tfmt.Fprintf(&buf, \"// %s %s\\n\", DefaultNamer.FieldName(parameter.Name), parameter.Usage)\n\t\tgenField(parameter, &buf)\n\t}\n\tsource, err := format.Source(buf.Bytes())\n\tif err != nil {\n\t\tfmt.Println(\"Failed to format source:\", err)\n\t}\n\n\treturn string(source), nil\n}", "func (p *Planner) addVariableDefinitionsRecursively(value ast.Value, sourcePath []string, fieldName []byte) {\n\tswitch value.Kind {\n\tcase ast.ValueKindObject:\n\t\tprevArgTypeRef := p.argTypeRef\n\t\tp.argTypeRef = p.resolveNestedArgumentType(fieldName)\n\t\tfor _, objectFieldRef := range p.visitor.Operation.ObjectValues[value.Ref].Refs {\n\t\t\tp.addVariableDefinitionsRecursively(p.visitor.Operation.ObjectFields[objectFieldRef].Value, sourcePath, p.visitor.Operation.ObjectFieldNameBytes(objectFieldRef))\n\t\t}\n\t\tp.argTypeRef = prevArgTypeRef\n\t\treturn\n\tcase ast.ValueKindList:\n\t\tfor _, i := range p.visitor.Operation.ListValues[value.Ref].Refs {\n\t\t\tp.addVariableDefinitionsRecursively(p.visitor.Operation.Values[i], sourcePath, nil)\n\t\t}\n\t\treturn\n\tcase ast.ValueKindVariable:\n\t\t// continue after switch\n\tdefault:\n\t\treturn\n\t}\n\n\tvariableName := p.visitor.Operation.VariableValueNameBytes(value.Ref)\n\tvariableNameStr := p.visitor.Operation.VariableValueNameString(value.Ref)\n\tvariableDefinition, exists := p.visitor.Operation.VariableDefinitionByNameAndOperation(p.visitor.Walker.Ancestors[0].Ref, variableName)\n\tif !exists {\n\t\treturn\n\t}\n\timportedVariableDefinition := p.visitor.Importer.ImportVariableDefinition(variableDefinition, p.visitor.Operation, p.upstreamOperation)\n\tp.upstreamOperation.AddImportedVariableDefinitionToOperationDefinition(p.nodes[0].Ref, importedVariableDefinition)\n\n\tfieldType := p.resolveNestedArgumentType(fieldName)\n\tcontextVariable := &resolve.ContextVariable{\n\t\tPath: append(sourcePath, variableNameStr),\n\t\tRenderAsGraphQLValue: 
true,\n\t}\n\tcontextVariable.SetJsonValueType(p.visitor.Definition, p.visitor.Definition, fieldType)\n\n\tcontextVariableName, variableExists := p.variables.AddVariable(contextVariable)\n\tif variableExists {\n\t\treturn\n\t}\n\tp.upstreamVariables, _ = sjson.SetRawBytes(p.upstreamVariables, variableNameStr, []byte(contextVariableName))\n}", "func (node *Argument) formatFast(buf *TrackedBuffer) {\n\tbuf.WriteArg(\":\", node.Name)\n\tif node.Type >= 0 {\n\t\t// For bind variables that are statically typed, emit their type as an adjacent comment.\n\t\t// This comment will be ignored by older versions of Vitess (and by MySQL) but will provide\n\t\t// type safety when using the query as a cache key.\n\t\tbuf.WriteString(\" /* \")\n\t\tbuf.WriteString(node.Type.String())\n\t\tbuf.WriteString(\" */\")\n\t}\n}", "func buildRuleToGenerateAnnotationFlags(ctx android.ModuleContext, desc string, classesJars android.Paths, stubFlagsCSV android.Path, outputPath android.WritablePath) {\n\tctx.Build(pctx, android.BuildParams{\n\t\tRule: hiddenAPIGenerateCSVRule,\n\t\tDescription: desc,\n\t\tInputs: classesJars,\n\t\tOutput: outputPath,\n\t\tImplicit: stubFlagsCSV,\n\t\tArgs: map[string]string{\n\t\t\t\"outFlag\": \"--write-flags-csv\",\n\t\t\t\"stubAPIFlags\": stubFlagsCSV.String(),\n\t\t},\n\t})\n}", "func GenLiftParams(ringQ *ring.Ring, t uint64) (deltaMont []uint64) {\n\n\tdelta := new(big.Int).Quo(ringQ.ModulusBigint, ring.NewUint(t))\n\n\tdeltaMont = make([]uint64, len(ringQ.Modulus))\n\n\ttmp := new(big.Int)\n\tbredParams := ringQ.BredParams\n\tfor i, Qi := range ringQ.Modulus {\n\t\tdeltaMont[i] = tmp.Mod(delta, ring.NewUint(Qi)).Uint64()\n\t\tdeltaMont[i] = ring.MForm(deltaMont[i], Qi, bredParams[i])\n\t}\n\n\treturn\n}", "func newArguments(arguments []string) *Arguments {\n\treturn &Arguments{\n\t\targs: arguments,\n\t\tcount: len(arguments),\n\t\tindex: 0,\n\t\trawMode: false,\n\t}\n}", "func (s *BasePlSqlParserListener) EnterArgument(ctx *ArgumentContext) {}", "func 
newFormulaArgMatrix(numMtx [][]float64) (arg [][]formulaArg) {\n\tfor r, row := range numMtx {\n\t\targ = append(arg, make([]formulaArg, len(row)))\n\t\tfor c, cell := range row {\n\t\t\targ[r][c] = newNumberFormulaArg(cell)\n\t\t}\n\t}\n\treturn\n}", "func fieldArgNamesStruct(obj any, path string, nest bool, allArgs map[string]reflect.Value) {\n\tif kit.IfaceIsNil(obj) {\n\t\treturn\n\t}\n\tov := reflect.ValueOf(obj)\n\tif ov.Kind() == reflect.Pointer && ov.IsNil() {\n\t\treturn\n\t}\n\tval := kit.NonPtrValue(ov)\n\ttyp := val.Type()\n\tfor i := 0; i < typ.NumField(); i++ {\n\t\tf := typ.Field(i)\n\t\tfv := val.Field(i)\n\t\tif kit.NonPtrType(f.Type).Kind() == reflect.Struct {\n\t\t\tnwPath := f.Name\n\t\t\tif path != \"\" {\n\t\t\t\tnwPath = path + \".\" + nwPath\n\t\t\t}\n\t\t\tnwNest := nest\n\t\t\tif !nwNest {\n\t\t\t\tneststr, ok := f.Tag.Lookup(\"nest\")\n\t\t\t\tif ok && (neststr == \"+\" || neststr == \"true\") {\n\t\t\t\t\tnwNest = true\n\t\t\t\t}\n\t\t\t}\n\t\t\tfieldArgNamesStruct(kit.PtrValue(fv).Interface(), nwPath, nwNest, allArgs)\n\t\t\tcontinue\n\t\t}\n\t\tpval := kit.PtrValue(fv)\n\t\taddAllCases(f.Name, path, pval, allArgs)\n\t\tif f.Type.Kind() == reflect.Bool {\n\t\t\taddAllCases(\"No\"+f.Name, path, pval, allArgs)\n\t\t}\n\t\t// now process adding non-nested version of field\n\t\tif path == \"\" || nest {\n\t\t\tcontinue\n\t\t}\n\t\tneststr, ok := f.Tag.Lookup(\"nest\")\n\t\tif ok && (neststr == \"+\" || neststr == \"true\") {\n\t\t\tcontinue\n\t\t}\n\t\tif _, has := allArgs[f.Name]; has {\n\t\t\tmpi.Printf(\"econfig Field: %s.%s cannot be added as a non-nested %s arg because it has already been registered -- add 'nest:'+'' field tag to the one you want to keep only as a nested arg with path, to eliminate this message\\n\", path, f.Name, f.Name)\n\t\t\tcontinue\n\t\t}\n\t\taddAllCases(f.Name, \"\", pval, allArgs)\n\t\tif f.Type.Kind() == reflect.Bool {\n\t\t\taddAllCases(\"No\"+f.Name, \"\", pval, allArgs)\n\t\t}\n\t}\n}", "func (gen 
*jsGenerator) init(args *Arguments) error {\n\tif !args.GenClient && !args.GenModel && !args.GenServer {\n\t\treturn fmt.Errorf(\"nothing to do\")\n\t} else if len(args.Options) > 0 {\n\t\tfor k, v := range args.Options {\n\t\t\tswitch k {\n\t\t\tcase \"controller\":\n\t\t\t\tif v == \"\" {\n\t\t\t\t\treturn fmt.Errorf(\"controller cannot be empty\")\n\t\t\t\t}\n\t\t\t\tgen.baseControllerName = v\n\t\t\tcase \"output\":\n\t\t\t\tif v == \"\" {\n\t\t\t\t\treturn fmt.Errorf(\"output option cannot be empty. Valid options are 'ns-flat' and 'ns-nested'\")\n\t\t\t\t} else if v != \"ns-flat\" && v != \"ns-nested\" {\n\t\t\t\t\treturn fmt.Errorf(\"invalid output option: %s: valid options are 'ns-flat' and 'ns-nested'\", v)\n\t\t\t\t}\n\t\t\tdefault:\n\t\t\t\treturn fmt.Errorf(\"the %s option is not applicable to language js\", k)\n\t\t\t}\n\t\t}\n\t}\n\tgen.args = args\n\treturn gen.loadTempates(args.TemplateDir, \"js\", template.FuncMap{\n\t\t\"formatType\": func(t *idl.Type) string { return gen.formatType(t) },\n\t\t\"fullNameOf\": func(name string) string { return gen.fullNameOf(name) },\n\t\t\"formatValue\": func(p *idl.Pair) string { return gen.formatLiteral(p.Value, p.DataType) },\n\t\t\"filterAttrs\": func(attrs []*idl.Attribute) []*idl.Attribute { return gen.filterAttributes(attrs) },\n\t\t\"isVoid\": func(t *idl.Type) bool { return gen.isVoid(t) },\n\t\t\"isTrivialProperty\": func(t *idl.Type) bool { return gen.isTrivialProperty(t) },\n\t\t\"usings\": func() []string {\n\t\t\tpkg := gen.tplRootIdl.Namespaces[\"js\"]\n\t\t\timports := make([]string, 0)\n\t\t\tfor _, i := range gen.tplRootIdl.UniqueNamespaces(\"js\") {\n\t\t\t\tif i != pkg {\n\t\t\t\t\t//relPath := i[len(pkg)];\n\t\t\t\t\t//fmt.Println(relPath);\n\t\t\t\t\t//fmt.Println(\"pkg => \" + pkg);\n\t\t\t\t\t//fmt.Println(\"i => \" + i);\n\t\t\t\t\trelPath := \"\"\n\t\t\t\t\tfor x := 0; x < len(strings.Split(pkg, \".\")); x++ {\n\t\t\t\t\t\trelPath += \"../\"\n\t\t\t\t\t}\n\t\t\t\t\t//fmt.Println(\"relPath 
=> \" + relPath);\n\t\t\t\t\timports = append(imports, relPath+strings.Replace(i, \".\", \"/\", -1))\n\t\t\t\t}\n\t\t\t}\n\t\t\treturn imports\n\t\t},\n\t\t\"isNotPascalCase\": func(name string) bool {\n\t\t\tif len(name) > 1 {\n\t\t\t\treturn strings.ToUpper(name[0:1]) != name[0:1]\n\t\t\t}\n\t\t\treturn false\n\t\t},\n\t\t\"baseController\": func() string {\n\t\t\tif gen.baseControllerName != \"\" {\n\t\t\t\treturn gen.baseControllerName\n\t\t\t} else {\n\t\t\t\treturn \"Concur.Babel.Mvc.BabelController\"\n\t\t\t}\n\t\t},\n\t\t\"cast\": func(t *idl.Type) string {\n\t\t\tif t.Name == \"float32\" {\n\t\t\t\treturn \"(float)\"\n\t\t\t} else {\n\t\t\t\treturn \"\"\n\t\t\t}\n\t\t},\n\t\t\"constType\": func(s string) string {\n\t\t\tcs, ok := jsConstTypes[s]\n\t\t\tif ok {\n\t\t\t\treturn cs\n\t\t\t} else {\n\t\t\t\treturn \"string\"\n\t\t\t}\n\t\t},\n\t})\n}", "func ExecForm(node *parser.Node) (arguments string) {\n\ttmp := []string{}\n\n\tfor n := node.Next; n != nil; n = n.Next {\n\t\tvalue := n.Value\n\t\tif strings.HasPrefix(n.Value, `\"`) && strings.HasSuffix(n.Value, `\"`) {\n\t\t\tvalue = strings.TrimPrefix(value, `\"`)\n\t\t\tvalue = strings.TrimSuffix(value, `\"`)\n\t\t}\n\t\ttmp = append(tmp, `\"`+strings.ReplaceAll(value, `\"`, `\\\"`)+`\"`)\n\t}\n\n\targuments = `[ ` + strings.Join(tmp, `,`) + ` ]`\n\n\tif len(node.Flags) > 0 {\n\t\targuments = fmt.Sprintf(\"%s %s\", strings.Join(node.Flags, \" \"), arguments)\n\t}\n\n\treturn arguments + \"\\n\"\n}", "func (ec *executionContext) field_Mutation_createCar_args(ctx context.Context, rawArgs map[string]interface{}) (map[string]interface{}, error) {\n\tvar err error\n\targs := map[string]interface{}{}\n\tvar arg0 model.CreateCarInput\n\tif tmp, ok := rawArgs[\"input\"]; ok {\n\t\tctx := graphql.WithPathContext(ctx, graphql.NewPathWithField(\"input\"))\n\t\targ0, err = ec.unmarshalNCreateCarInput2githubᚗcomᚋuchᚑkudukᚋnaiveᚑgraphqlᚗgitᚋgraphᚋmodelᚐCreateCarInput(ctx, tmp)\n\t\tif err != nil {\n\t\t\treturn nil, 
err\n\t\t}\n\t}\n\targs[\"input\"] = arg0\n\treturn args, nil\n}", "func reflectArgs(fnType reflect.Type, args []Argument) []reflect.Value {\n\tin := make([]reflect.Value, len(args))\n\n\tfor k, arg := range args {\n\t\tif arg == nil {\n\t\t\t// Use the zero value of the function parameter type,\n\t\t\t// since \"reflect.Call\" doesn't accept \"nil\" parameters\n\t\t\tin[k] = reflect.New(fnType.In(k)).Elem()\n\t\t} else {\n\t\t\tin[k] = reflect.ValueOf(arg)\n\t\t}\n\t}\n\n\treturn in\n}", "func MapFieldsToTypExpr(args ...*ast.Field) []ast.Expr {\n\tr := []ast.Expr{}\n\tfor idx, f := range args {\n\t\tif len(f.Names) == 0 {\n\t\t\tf.Names = []*ast.Ident{ast.NewIdent(fmt.Sprintf(\"f%d\", idx))}\n\t\t}\n\n\t\tfor _ = range f.Names {\n\t\t\tr = append(r, f.Type)\n\t\t}\n\n\t}\n\treturn r\n}", "func ArgumentCustomType(name string, values ...Argument) Argument {\n\treturn Argument{name, argumentSlice(values)}\n}", "func generateCall(generator *Generator, node parser.Node) string {\n\tvar identifier string\n\n\t// Check if it is a built-in function or not\n\tif strings.Contains(node.Value, \"|\") {\n\t\t// Get the function identifier by spliting the value by the pipe\n\t\tidentifier = strings.Split(node.Value, \"|\")[1]\n\n\t\tcheckCall(generator, node)\n\n\t\t// Add import to the generator\n\t\taddCallImport(\n\t\t\tgenerator,\n\t\t\tnode.Value,\n\t\t)\n\t} else {\n\t\tidentifier = node.Value\n\t}\n\n\t// Translate the params\n\tparams := generateParams(generator, node.Params)\n\n\t// Link all the translations together\n\treturn fmt.Sprintf(\n\t\tcCall,\n\t\tidentifier,\n\t\tstrings.Join(params, \",\"),\n\t)\n}", "func (c ResolverCommitGraphFuncCall) Args() []interface{} {\n\treturn []interface{}{c.Arg0, c.Arg1}\n}", "func (s *BaselimboListener) EnterFormal_arg_list(ctx *Formal_arg_listContext) {}", "func argsFn(args ...OBJ) OBJ {\n\tl := len(os.Args[1:])\n\tresult := make([]OBJ, l)\n\tfor i, txt := range os.Args[1:] {\n\t\tresult[i] = &object.String{Value: 
txt}\n\t}\n\treturn &object.Array{Elements: result}\n}", "func decodeArg(b *hcl.Block) (*Arg, errors.Error) {\n\targ := new(Arg)\n\targ.name = b.Labels[0]\n\tbc, d := b.Body.Content(schemaArg)\n\tif err := errors.EvalDiagnostics(d); err != nil {\n\t\treturn nil, err\n\t}\n\tif err := arg.populateArgAttributes(bc.Attributes); err != nil {\n\t\treturn nil, err\n\t}\n\treturn arg, nil\n}", "func generateStruct(a *AnnotationDoc, packageName string, imports []string, indent string) (string, []string) {\n\tvar allAnnotationsPackages []string\n\tpossiblePackagesForA := combinePackages(imports, []string{packageName})\n\tts, foundPackageOfA, foundImportsOfA := getAnnotationStruct(a.Name, possiblePackagesForA)\n\tallAnnotationsPackages = combinePackages(allAnnotationsPackages, []string{foundPackageOfA})\n\tstr, _ := ts.Type.(*ast.StructType)\n\tvar b bytes.Buffer\n\tb.WriteString(indent)\n\tb.WriteString(foundPackageOfA)\n\tb.WriteString(\".\")\n\tb.WriteString(a.Name)\n\tb.WriteString(\"{\\n\")\n\tchildIndent := indent + \" \"\n\tfor _, f := range str.Fields.List {\n\t\tfieldName := getFieldName(f)\n\t\tdefValue := getDefaultValue(f)\n\t\tfieldKey := fieldName\n\t\t// consider special case when only default parameter is specified\n\t\tif len(str.Fields.List) == 1 && len(a.Content) == 1 {\n\t\t\tfor key := range a.Content {\n\t\t\t\tif key == DEFAULT_PARAM {\n\t\t\t\t\tfieldKey = DEFAULT_PARAM\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\tvalue, found := a.Content[fieldKey]\n\t\tif found {\n\t\t\tswitch t := value.(type) {\n\t\t\tcase string:\n\t\t\t\tb.WriteString(childIndent)\n\t\t\t\tb.WriteString(getLiteral(f.Type, t, false))\n\t\t\t\tb.WriteString(\",\\n\")\n\t\t\tcase []string:\n\t\t\t\tb.WriteString(childIndent)\n\t\t\t\tb.WriteString(getFieldConstructor(f.Type))\n\t\t\t\tb.WriteString(\"\\n\")\n\t\t\t\tfor _, elem := range t {\n\t\t\t\t\tb.WriteString(childIndent + \" 
\")\n\t\t\t\t\tb.WriteString(elem)\n\t\t\t\t\tb.WriteString(\",\\n\")\n\t\t\t\t}\n\t\t\t\tb.WriteString(childIndent)\n\t\t\t\tb.WriteString(\"}\")\n\t\t\tcase []AnnotationDoc:\n\t\t\t\t// calculate array's elements\n\t\t\t\tvar bb bytes.Buffer\n\t\t\t\tfor _, sa := range t {\n\t\t\t\t\tchildCode, foundImportsOfChild := generateStruct(&sa, foundPackageOfA, foundImportsOfA, childIndent+\" \")\n\t\t\t\t\tallAnnotationsPackages = combinePackages(allAnnotationsPackages, foundImportsOfChild)\n\t\t\t\t\tbb.WriteString(childCode)\n\t\t\t\t\tbb.WriteString(\",\\n\")\n\t\t\t\t}\n\t\t\t\tb.WriteString(childIndent)\n\t\t\t\t// insert array initialzer of child annotation type\n\t\t\t\ts := writeArrayInitializer(&b, bb.String())\n\t\t\t\t// append array of child annotations\n\t\t\t\tb.WriteString(\"{\\n\")\n\t\t\t\tb.WriteString(childIndent + \" \")\n\t\t\t\tb.WriteString(s)\n\t\t\t\tb.WriteString(childIndent)\n\t\t\t\tb.WriteString(\"},\\n\")\n\t\t\tcase AnnotationDoc:\n\t\t\t\tchildCode, foundImportsOfChild := generateStruct(&t, foundPackageOfA, foundImportsOfA, childIndent)\n\t\t\t\tallAnnotationsPackages = combinePackages(allAnnotationsPackages, foundImportsOfChild)\n\t\t\t\tb.WriteString(childIndent)\n\t\t\t\tif isOptional(f.Type) {\n\t\t\t\t\tb.WriteString(\"&\")\n\t\t\t\t}\n\t\t\t\tb.WriteString(strings.TrimLeft(childCode, \" \"))\n\t\t\t\tb.WriteString(\",\\n\")\n\t\t\tdefault:\n\t\t\t\tpanic(\"Unexpected annotation value type\")\n\t\t\t}\n\t\t} else {\n\t\t\tb.WriteString(childIndent)\n\t\t\tb.WriteString(defValue)\n\t\t\tb.WriteString(\",\\n\")\n\t\t}\n\t}\n\tb.WriteString(indent)\n\tb.WriteString(\"}\")\n\treturn b.String(), allAnnotationsPackages\n}", "func (mi TestModuleInfo) GenerateBuildActions(blueprint.ModuleContext) {}", "func (me *TxsdArguments) Walk() (err error) {\n\tif fn := WalkHandlers.TxsdArguments; me != nil {\n\t\tif fn != nil {\n\t\t\tif err = fn(me, true); xsdt.OnWalkError(&err, &WalkErrors, WalkContinueOnError, WalkOnError) 
{\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t\tif err = me.XsdGoPkgHasElems_ArgumentsequenceTxsdArgumentsArgumentsschema_Argument_TxsdArgumentsSequenceArgument_.Walk(); xsdt.OnWalkError(&err, &WalkErrors, WalkContinueOnError, WalkOnError) {\n\t\t\treturn\n\t\t}\n\t\tif fn != nil {\n\t\t\tif err = fn(me, false); xsdt.OnWalkError(&err, &WalkErrors, WalkContinueOnError, WalkOnError) {\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}\n\treturn\n}", "func Fields(fields ...string) Parameter {\n\treturn func(pa Parameterizable) {\n\t\tpa.SetParameter(\"fields\", fields)\n\t}\n}", "func getConfigArgs(action BuildAction, dir string, ctx Context, args []string) []string {\n\t// The next block of code verifies that the current directory is the root directory of the source\n\t// tree. It then finds the relative path of dir based on the root directory of the source tree\n\t// and verify that dir is inside of the source tree.\n\tcheckTopDir(ctx)\n\ttopDir, err := os.Getwd()\n\tif err != nil {\n\t\tctx.Fatalf(\"Error retrieving top directory: %v\", err)\n\t}\n\tdir, err = filepath.EvalSymlinks(dir)\n\tif err != nil {\n\t\tctx.Fatalf(\"Unable to evaluate symlink of %s: %v\", dir, err)\n\t}\n\tdir, err = filepath.Abs(dir)\n\tif err != nil {\n\t\tctx.Fatalf(\"Unable to find absolute path %s: %v\", dir, err)\n\t}\n\trelDir, err := filepath.Rel(topDir, dir)\n\tif err != nil {\n\t\tctx.Fatalf(\"Unable to find relative path %s of %s: %v\", relDir, topDir, err)\n\t}\n\t// If there are \"..\" in the path, it's not in the source tree.\n\tif strings.Contains(relDir, \"..\") {\n\t\tctx.Fatalf(\"Directory %s is not under the source tree %s\", dir, topDir)\n\t}\n\n\tconfigArgs := args[:]\n\n\t// If the arguments contains GET-INSTALL-PATH, change the target name prefix from MODULES-IN- to\n\t// GET-INSTALL-PATH-IN- to extract the installation path instead of building the modules.\n\ttargetNamePrefix := \"MODULES-IN-\"\n\tif inList(\"GET-INSTALL-PATH\", configArgs) {\n\t\ttargetNamePrefix = 
\"GET-INSTALL-PATH-IN-\"\n\t\tconfigArgs = removeFromList(\"GET-INSTALL-PATH\", configArgs)\n\t}\n\n\tvar targets []string\n\n\tswitch action {\n\tcase BUILD_MODULES:\n\t\t// No additional processing is required when building a list of specific modules or all modules.\n\tcase BUILD_MODULES_IN_A_DIRECTORY:\n\t\t// If dir is the root source tree, all the modules are built of the source tree are built so\n\t\t// no need to find the build file.\n\t\tif topDir == dir {\n\t\t\tbreak\n\t\t}\n\n\t\tbuildFile := findBuildFile(ctx, relDir)\n\t\tif buildFile == \"\" {\n\t\t\tctx.Fatalf(\"Build file not found for %s directory\", relDir)\n\t\t}\n\t\ttargets = []string{convertToTarget(filepath.Dir(buildFile), targetNamePrefix)}\n\tcase BUILD_MODULES_IN_DIRECTORIES:\n\t\tnewConfigArgs, dirs := splitArgs(configArgs)\n\t\tconfigArgs = newConfigArgs\n\t\ttargets = getTargetsFromDirs(ctx, relDir, dirs, targetNamePrefix)\n\t}\n\n\t// Tidy only override all other specified targets.\n\ttidyOnly := os.Getenv(\"WITH_TIDY_ONLY\")\n\tif tidyOnly == \"true\" || tidyOnly == \"1\" {\n\t\tconfigArgs = append(configArgs, \"tidy_only\")\n\t} else {\n\t\tconfigArgs = append(configArgs, targets...)\n\t}\n\n\treturn configArgs\n}", "func ProcessArgs(cfg *Config) (u []string) {\n\tflag.Usage = func() {\n\t\tfmt.Println(\"Usage:\\nkdevpije user1,user2,alias3... 
[default|week|sprint]\")\n\t\tflag.PrintDefaults()\n\t}\n\tvar debugFlag = flag.Bool(\"debug\", false, \"Print logs to stderr\")\n\tvar reloadData = flag.Bool(\"reloadData\", false, \"Download list of employees again\")\n\tflag.Parse() // Scan the arguments list\n\n\tif !*debugFlag {\n\t\tlog.SetOutput(ioutil.Discard)\n\t}\n\tlog.Println(\"Processing arguments\")\n\tcfg.ReloadData = *reloadData\n\temps := flag.Arg(0)\n\tif emps == \"\" {\n\t\tflag.PrintDefaults()\n\t\treturn\n\t}\n\tu = strings.Split(emps, \",\")\n\tu = employees.ExpandFiveTimes(u, cfg.Aliases)\n\n\ttimeframe := flag.Arg(1)\n\tif timeframe == \"\" {\n\t\ttimeframe = \"default\"\n\t}\n\ttf, ok := cfg.Intervals[timeframe]\n\tif !ok {\n\t\ttf = 1\n\t}\n\tcfg.TimeFrame = tf\n\tcfg.PDConfig.TimeFrame = cfg.TimeFrame\n\tlog.Println(\"Processed config:\", cfg)\n\treturn\n}", "func (ec *executionContext) field_Mutation_ChangeReceiver_args(ctx context.Context, rawArgs map[string]interface{}) (map[string]interface{}, error) {\n\tvar err error\n\targs := map[string]interface{}{}\n\tvar arg0 string\n\tif tmp, ok := rawArgs[\"txcode\"]; ok {\n\t\tctx := graphql.WithPathContext(ctx, graphql.NewPathWithField(\"txcode\"))\n\t\targ0, err = ec.unmarshalNString2string(ctx, tmp)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\targs[\"txcode\"] = arg0\n\tvar arg1 model.CustomerChanges\n\tif tmp, ok := rawArgs[\"input\"]; ok {\n\t\tctx := graphql.WithPathContext(ctx, graphql.NewPathWithField(\"input\"))\n\t\targ1, err = ec.unmarshalNCustomerChanges2githubᚗcomᚋbaadjisᚋtransferserviceᚋgraphᚋmodelᚐCustomerChanges(ctx, tmp)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\targs[\"input\"] = arg1\n\treturn args, nil\n}", "func (g *Generator) collectAndGenerate(typeName string, genFn GeneratorFunc) {\n\tfields := make([]Field, 0, 100)\n\timports := make([]Import, 0, 100)\n\tfor _, file := range g.pkg.files {\n\t\t// Set the state for this run of the walker.\n\t\tfile.typeName = typeName\n\t\tfile.fields = 
nil\n\t\tif file.file != nil {\n\t\t\tast.Inspect(file.file, file.genDecl)\n\t\t\tfields = append(fields, file.fields...)\n\t\t\timports = append(imports, file.imports...)\n\t\t}\n\t}\n\n\tgenFn(typeName, fields, imports)\n\n}" ]
[ "0.6473212", "0.5893512", "0.52816784", "0.5240693", "0.5222666", "0.51920587", "0.50385666", "0.5037734", "0.50329053", "0.5010324", "0.50005144", "0.49702114", "0.4968135", "0.47936794", "0.47646612", "0.47604808", "0.47278732", "0.47187448", "0.4692279", "0.4692153", "0.46814573", "0.46635288", "0.4658278", "0.46540728", "0.46322483", "0.46279842", "0.46115977", "0.45967904", "0.45956135", "0.4594253", "0.45753404", "0.452807", "0.45275617", "0.45247382", "0.45232978", "0.45211542", "0.45009685", "0.44962022", "0.4494873", "0.44909754", "0.44702196", "0.44440544", "0.44364893", "0.44225812", "0.43959165", "0.43953192", "0.43909836", "0.43868423", "0.43827596", "0.4379306", "0.43528584", "0.4345264", "0.43422312", "0.4338586", "0.43341056", "0.43313193", "0.43285048", "0.4312617", "0.43111342", "0.43001252", "0.4299968", "0.4292014", "0.42913413", "0.42822087", "0.42758998", "0.42689538", "0.42689186", "0.4262163", "0.42609718", "0.4259967", "0.4243299", "0.4239233", "0.42385328", "0.42328787", "0.42303622", "0.42078492", "0.4206437", "0.41984665", "0.41905227", "0.41859326", "0.41836825", "0.4179301", "0.41790164", "0.41750932", "0.4172565", "0.41660464", "0.41658062", "0.41639036", "0.41604862", "0.4149038", "0.4137513", "0.41307592", "0.41253623", "0.41241714", "0.41224602", "0.41215736", "0.41208646", "0.41187602", "0.41174406", "0.4117139" ]
0.7882007
0
genArgument generates argument config for given AST
genArgument генерирует конфиг аргумента для заданного AST
func genArgument(arg *ast.InputValueDefinition) *jen.Statement { // // Generate config for argument // // == Example input SDL // // type Dog { // name( // "style is stylish" // style: NameComponentsStyle = SHORT, // ): String! // } // // == Example output // // &ArgumentConfig{ // Type: graphql.NonNull(graphql.String), // DefaultValue: "SHORT", // TODO: ??? // Description: "style is stylish", // } // return jen.Op("&").Qual(defsPkg, "ArgumentConfig").Values(jen.Dict{ jen.Id("DefaultValue"): genValue(arg.DefaultValue), jen.Id("Description"): genDescription(arg), jen.Id("Type"): genInputTypeReference(arg.Type), }) }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func genArguments(args []*ast.InputValueDefinition) *jen.Statement {\n\t//\n\t// Generate config for arguments\n\t//\n\t// == Example input SDL\n\t//\n\t// type Dog {\n\t// name(\n\t// \"style is stylish\"\n\t// style: NameComponentsStyle = SHORT,\n\t// ): String!\n\t// }\n\t//\n\t// == Example output\n\t//\n\t// FieldConfigArgument{\n\t// \"style\": &ArgumentConfig{ ... }\n\t// },\n\t//\n\treturn jen.Qual(defsPkg, \"FieldConfigArgument\").Values(\n\t\tjen.DictFunc(func(d jen.Dict) {\n\t\t\tfor _, arg := range args {\n\t\t\t\td[jen.Lit(arg.Name.Value)] = genArgument(arg)\n\t\t\t}\n\t\t}),\n\t)\n}", "func BindArg(obj interface{}, tags ...string) FieldConfigArgument {\n\tv := reflect.Indirect(reflect.ValueOf(obj))\n\tvar config = make(FieldConfigArgument)\n\tfor i := 0; i < v.NumField(); i++ {\n\t\tfield := v.Type().Field(i)\n\n\t\tmytag := extractTag(field.Tag)\n\t\tif inArray(tags, mytag) {\n\t\t\tconfig[mytag] = &ArgumentConfig{\n\t\t\t\tType: getGraphType(field.Type),\n\t\t\t}\n\t\t}\n\t}\n\treturn config\n}", "func parseArgument(p *parser) (*ast.Argument, error) {\n\tvar label string\n\tvar labelStartPos, labelEndPos ast.Position\n\n\texpr, err := parseExpression(p, lowestBindingPower)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tp.skipSpaceAndComments()\n\n\t// If a colon follows the expression, the expression was our label.\n\tif p.current.Is(lexer.TokenColon) {\n\t\tlabelEndPos = p.current.EndPos\n\n\t\tidentifier, ok := expr.(*ast.IdentifierExpression)\n\t\tif !ok {\n\t\t\treturn nil, p.syntaxError(\n\t\t\t\t\"expected identifier for label, got %s\",\n\t\t\t\texpr,\n\t\t\t)\n\t\t}\n\t\tlabel = identifier.Identifier.Identifier\n\t\tlabelStartPos = expr.StartPosition()\n\n\t\t// Skip the identifier\n\t\tp.nextSemanticToken()\n\n\t\texpr, err = parseExpression(p, lowestBindingPower)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\tif len(label) > 0 {\n\t\treturn 
ast.NewArgument(\n\t\t\tp.memoryGauge,\n\t\t\tlabel,\n\t\t\t&labelStartPos,\n\t\t\t&labelEndPos,\n\t\t\texpr,\n\t\t), nil\n\t}\n\treturn ast.NewUnlabeledArgument(p.memoryGauge, expr), nil\n}", "func NewArgument(meta ScriptMetaData, node *node32, value Value) Argument {\n\treturn &argument{astNode: astNode{meta: meta, node: node}, value: value}\n}", "func (ArgumentFalse) argumentNode() {}", "func (p *Parser) buildArg(argDef Value, argType reflect.Type, index int, args *[]reflect.Value) error {\n\tswitch argType.Name() {\n\tcase \"Setter\":\n\t\tfallthrough\n\tcase \"GetSetter\":\n\t\targ, err := p.pathParser(argDef.Path)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"invalid argument at position %v %w\", index, err)\n\t\t}\n\t\t*args = append(*args, reflect.ValueOf(arg))\n\tcase \"Getter\":\n\t\targ, err := p.newGetter(argDef)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"invalid argument at position %v %w\", index, err)\n\t\t}\n\t\t*args = append(*args, reflect.ValueOf(arg))\n\tcase \"Enum\":\n\t\targ, err := p.enumParser(argDef.Enum)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"invalid argument at position %v must be an Enum\", index)\n\t\t}\n\t\t*args = append(*args, reflect.ValueOf(*arg))\n\tcase \"string\":\n\t\tif argDef.String == nil {\n\t\t\treturn fmt.Errorf(\"invalid argument at position %v, must be an string\", index)\n\t\t}\n\t\t*args = append(*args, reflect.ValueOf(*argDef.String))\n\tcase \"float64\":\n\t\tif argDef.Float == nil {\n\t\t\treturn fmt.Errorf(\"invalid argument at position %v, must be an float\", index)\n\t\t}\n\t\t*args = append(*args, reflect.ValueOf(*argDef.Float))\n\tcase \"int64\":\n\t\tif argDef.Int == nil {\n\t\t\treturn fmt.Errorf(\"invalid argument at position %v, must be an int\", index)\n\t\t}\n\t\t*args = append(*args, reflect.ValueOf(*argDef.Int))\n\tcase \"bool\":\n\t\tif argDef.Bool == nil {\n\t\t\treturn fmt.Errorf(\"invalid argument at position %v, must be a bool\", index)\n\t\t}\n\t\t*args = append(*args, 
reflect.ValueOf(bool(*argDef.Bool)))\n\t}\n\treturn nil\n}", "func (s *BasePlSqlParserListener) EnterArgument(ctx *ArgumentContext) {}", "func (app ApplicationArguments) Argument(name, description string, shorts ...rune) *kingpin.FlagClause {\n\treturn app.add(name, description, false, shorts...)\n}", "func argInit() args {\n\n\tvar a args\n\tflag.Float64Var(&a.x1, \"x1\", -2.0, \"left position of real axis\")\n\tflag.Float64Var(&a.x2, \"x2\", 1.0, \"right position of real axis\")\n\tflag.Float64Var(&a.y1, \"y1\", -1.5, \"down position of imaginary axis\")\n\tflag.Float64Var(&a.y2, \"y2\", 1.5, \"up position of imaginary axis\")\n\tflag.Float64Var(&a.threshold, \"th\", 4.0, \"squared threshold of the function\")\n\tflag.IntVar(&a.w, \"w\", 1000, \"width in pixels of the image\")\n\tflag.IntVar(&a.h, \"h\", 1000, \"height in pixels of the image\")\n\tflag.IntVar(&a.nIter, \"ni\", 100, \"maximum number of iterations for pixel\")\n\tflag.IntVar(&a.nRoutines, \"nr\", 4, \"number of go routines to be used\")\n\tflag.StringVar(&a.path, \"p\", \"./\", \"path to the generated png image\")\n\n\tflag.Parse()\n\treturn a\n}", "func GenAST(program []Statement) AST {\n\tvar ast AST\n\tfor _, stmt := range program {\n\t\tv, err := ParseVerb(stmt)\n\t\tif err != nil { //TODO\n\t\t\t//panic(ParserError{stmtIndex: stmtIndex, tok: stmt[0], message: fmt.Sprintf(\"First token in statement must be a word, was %s\", stmt[0].tokType.toString())})\n\t\t\tpanic(err)\n\t\t}\n\t\tast = append(ast, v)\n\t}\n\treturn ast\n}", "func GenerationArgsFor(category, pathToExecutable, fuzzerName string, isMaster bool) GenerationArgs {\n\tf, found := fuzzers[category]\n\tif !found {\n\t\tsklog.Errorf(\"Unknown fuzz category %q\", category)\n\t\treturn nil\n\t}\n\tmasterFlag := \"-M\"\n\tif !isMaster {\n\t\tmasterFlag = \"-S\"\n\t}\n\tseedPath := filepath.Join(config.Generator.FuzzSamples, category)\n\toutputPath := filepath.Join(config.Generator.AflOutputPath, category)\n\n\tcmd := 
append([]string{\"-i\", seedPath, \"-o\", outputPath, \"-m\", \"5000\", masterFlag, fuzzerName, \"--\", pathToExecutable}, f.ArgsAfterExecutable...)\n\n\treturn append(cmd, \"@@\")\n}", "func (ctx *argComplContext) generate(env *complEnv, ch chan<- rawCandidate) error {\n\treturn completeArg(ctx.words, env.evaler, env.argCompleter, ch)\n}", "func buildArg(mt *methodType, d json.RawMessage) (reflect.Value, error) {\n\tvar argv reflect.Value\n\targIsValue := false // if true, need to indirect before calling.\n\tif mt.ArgType.Kind() == reflect.Ptr {\n\t\targv = reflect.New(mt.ArgType.Elem())\n\t} else {\n\t\targv = reflect.New(mt.ArgType)\n\t\targIsValue = true\n\t}\n\terr := json.Unmarshal(d, argv.Interface())\n\tif err != nil {\n\t\treturn reflect.Value{}, err\n\t}\n\tif argIsValue {\n\t\targv = argv.Elem()\n\t}\n\treturn argv, nil\n}", "func GenerateValidArg(datatypeName string) string {\n\tswitch datatypeName {\n\tcase field.TypeString:\n\t\treturn \"xyz\"\n\tcase field.TypeUint, field.TypeInt:\n\t\treturn \"111\"\n\tcase field.TypeBool:\n\t\treturn valueFalse\n\tcase field.TypeCustom:\n\t\treturn valueNull\n\tdefault:\n\t\tpanic(fmt.Sprintf(\"unknown type %s\", datatypeName))\n\t}\n}", "func NewDynamicArgument(value Value) Argument {\n\treturn &argument{value: value}\n}", "func Agen(n *Node, res *Node)", "func genArgs(optionMap map[string]string) []string {\n\toptions := []string{}\n\tfor k, v := range optionMap {\n\t\tif v != \"\" {\n\t\t\tk = fmt.Sprintf(\"%s=%s\", k, v)\n\t\t}\n\t\toptions = append(options, k)\n\t}\n\treturn options\n}", "func decodeArg(b *hcl.Block) (*Arg, errors.Error) {\n\targ := new(Arg)\n\targ.name = b.Labels[0]\n\tbc, d := b.Body.Content(schemaArg)\n\tif err := errors.EvalDiagnostics(d); err != nil {\n\t\treturn nil, err\n\t}\n\tif err := arg.populateArgAttributes(bc.Attributes); err != nil {\n\t\treturn nil, err\n\t}\n\treturn arg, nil\n}", "func (s *BasePlSqlParserListener) EnterFunction_argument(ctx *Function_argumentContext) {}", 
"func (node Argument) Format(buf *TrackedBuffer) {\n\tbuf.WriteArg(string(node))\n}", "func (s *BasemumpsListener) EnterArg(ctx *ArgContext) {}", "func (c *compileContext) makeArgumentResolver(typ schema.InputableType) (argumentResolver, error) {\n\tswitch t := typ.(type) {\n\tcase *schema.InputObjectType:\n\t\treturn func(ctx context.Context, v schema.LiteralValue) (interface{}, error) {\n\t\t\treturn t.Decode(ctx, v)\n\t\t}, nil\n\tcase *schema.ListType:\n\t\telementResolver, err := c.makeArgumentResolver(t.Unwrap().(schema.InputableType))\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn func(ctx context.Context, v schema.LiteralValue) (interface{}, error) {\n\t\t\tif v == nil {\n\t\t\t\treturn nil, nil\n\t\t\t}\n\n\t\t\tlistCreator := t.Unwrap().(schema.InputableType).InputListCreator()\n\n\t\t\tif av, ok := v.(schema.LiteralArray); ok {\n\t\t\t\treturn listCreator.NewList(len(av), func(i int) (interface{}, error) {\n\t\t\t\t\treturn elementResolver(ctx, av[i])\n\t\t\t\t})\n\t\t\t}\n\n\t\t\t// if we get a non-list value we have to wrap into a single element\n\t\t\t// list.\n\t\t\t// See https://facebook.github.io/graphql/June2018/#sec-Type-System.List\n\t\t\tresultElement, err := elementResolver(ctx, v)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\treturn listCreator.NewList(1, func(i int) (interface{}, error) {\n\t\t\t\treturn resultElement, nil\n\t\t\t})\n\t\t}, nil\n\n\tcase *schema.NotNilType:\n\t\telementResolver, err := c.makeArgumentResolver(t.Unwrap().(schema.InputableType))\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn func(ctx context.Context, v schema.LiteralValue) (interface{}, error) {\n\t\t\tif v == nil {\n\t\t\t\treturn nil, fmt.Errorf(\"Required value was not supplied\")\n\t\t\t}\n\t\t\treturn elementResolver(ctx, v)\n\t\t}, nil\n\tcase *schema.ScalarType:\n\t\treturn func(ctx context.Context, v schema.LiteralValue) (interface{}, error) {\n\t\t\treturn t.Decode(ctx, v)\n\t\t}, nil\n\tcase 
*schema.EnumType:\n\t\treturn func(ctx context.Context, v schema.LiteralValue) (interface{}, error) {\n\t\t\tif v == nil {\n\t\t\t\treturn t.Decode(ctx, v)\n\t\t\t}\n\t\t\tval, ok := v.(schema.LiteralString)\n\t\t\tif !ok {\n\t\t\t\treturn nil, fmt.Errorf(\"Expected string, got %v\", v)\n\t\t\t}\n\t\t\treturn t.Decode(ctx, val)\n\t\t}, nil\n\tdefault:\n\t\treturn nil, fmt.Errorf(\"Invalid type for input argument: %v\", typ)\n\t}\n}", "func genConfig() ([]byte, error) {\n\t// Using genflags.getConfig() instead of config.New() because\n\t// it will include any defaults we have on the command line such\n\t// as default plugin selection. We didn't want to wire this into\n\t// the `config` package, but it will be a default value the CLI\n\t// users expect.\n\tc := genflags.resolveConfig()\n\tb, err := json.Marshal(c)\n\treturn b, errors.Wrap(err, \"unable to marshal configuration\")\n}", "func finishReadingArgument(ctx *parsingCtx) *ParseError {\n\tif ctx.scope == READING_WORD {\n\t\tctx.scope = READING_ARGUMENTS\n\t\tif statement := ctx.head.Last(); statement != nil {\n\t\t\tstatement.AddArgument(&WordArgument{ctx.word})\n\n\t\t\treturn nil\n\t\t}\n\t}\n\n\tif ctx.scope == READING_NUMBER {\n\t\tctx.scope = READING_ARGUMENTS\n\t\tif statement := ctx.head.Last(); statement != nil {\n\t\t\tnumber, _ := strconv.Atoi(ctx.number)\n\t\t\tstatement.AddArgument(&NumberArgument{number})\n\n\t\t\treturn nil\n\t\t}\n\t}\n\n\treturn nil\n}", "func tokenToFormulaArg(token efp.Token) formulaArg {\n\tswitch token.TSubType {\n\tcase efp.TokenSubTypeLogical:\n\t\treturn newBoolFormulaArg(strings.EqualFold(token.TValue, \"TRUE\"))\n\tcase efp.TokenSubTypeNumber:\n\t\tnum, _ := strconv.ParseFloat(token.TValue, 64)\n\t\treturn newNumberFormulaArg(num)\n\tdefault:\n\t\treturn newStringFormulaArg(token.TValue)\n\t}\n}", "func Argument(name string, argType ArgumentType) *RequiredArgumentBuilder {\n\treturn &RequiredArgumentBuilder{Name: name, Type: argType}\n}", "func castArg(prefix string, f 
field.Field, argIndex int) string {\n\tswitch f.DatatypeName {\n\tcase field.TypeString:\n\t\treturn fmt.Sprintf(\"%s%s := args[%d]\", prefix, f.Name.UpperCamel, argIndex)\n\tcase field.TypeUint, field.TypeInt, field.TypeBool:\n\t\treturn fmt.Sprintf(`%s%s, err := cast.To%sE(args[%d])\n if err != nil {\n return err\n }`,\n\t\t\tprefix, f.Name.UpperCamel, strings.Title(f.Datatype), argIndex)\n\tcase field.TypeCustom:\n\t\treturn fmt.Sprintf(`%[1]v%[2]v := new(types.%[3]v)\n\t\t\terr = json.Unmarshal([]byte(args[%[4]v]), %[1]v%[2]v)\n \t\tif err != nil {\n return err\n }`, prefix, f.Name.UpperCamel, f.Datatype, argIndex)\n\tdefault:\n\t\tpanic(fmt.Sprintf(\"unknown type %s\", f.DatatypeName))\n\t}\n}", "func (s *BaseSyslParserListener) EnterFunc_arg(ctx *Func_argContext) {}", "func collectArguments() Arguments {\n\tendpoint := config.Config.ChooseEndpoint(flags.APIEndpoint)\n\ttoken := config.Config.ChooseToken(endpoint, flags.Token)\n\tscheme := config.Config.ChooseScheme(endpoint, flags.Token)\n\treturn Arguments{\n\t\tapiEndpoint: endpoint,\n\t\ttoken: token,\n\t\tscheme: scheme,\n\t}\n}", "func (opts *HMACDeriveKeyOpts) Argument() []byte {\n\treturn opts.Arg\n}", "func (opts *HMACDeriveKeyOpts) Argument() []byte {\n\treturn opts.Arg\n}", "func argument(n int) string {\n\tflag.Parse()\n\treturn flag.Arg(n)\n}", "func (n *Argument) Walk(v walker.Visitor) {\n\tif !v.EnterNode(n) {\n\t\treturn\n\t}\n\n\tif n.Expr != nil {\n\t\tn.Expr.Walk(v)\n\t}\n\n\tv.LeaveNode(n)\n}", "func mmcArgGenerator() string {\r\n\tmmcArgs:= make([] string,1000)\t\r\n\tfor i:=0;i<len(mmcArgs);i++{\r\n\t\tmmcArgs[i] = strconv.Itoa(i+1)\r\n\t}\r\n\treturn joinMmcArgs(mmcArgs)\r\n}", "func (p *Parser) parseProcessArg() (AstProcessArg, error) {\n if (p.Scanner.Token == scanner.String) {\n sval, err := strconv.Unquote(p.Scanner.TokenText)\n if err != nil {\n return nil, err\n }\n\n p.Scanner.Scan()\n return &AstLiteralProcessArg{StringDatum(sval)}, nil\n } else {\n return nil, 
p.Error(\"Unreognised process argument type\")\n }\n}", "func (in *Argument) DeepCopy() *Argument {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(Argument)\n\tin.DeepCopyInto(out)\n\treturn out\n}", "func starlarkValueToArg(v starlark.Value) (Arg, error) {\n\tswitch x := v.(type) {\n\tcase Arg:\n\t\treturn x, nil\n\tcase starlark.String:\n\t\treturn String(x), nil\n\tdefault:\n\t\treturn nil, errors.Errorf(\n\t\t\t\"Cannot convert %s into a target argument\",\n\t\t\tv.Type(),\n\t\t)\n\t}\n}", "func (s *BaseMySqlParserListener) EnterFunctionArg(ctx *FunctionArgContext) {}", "func Marshal(data *parser.Result, document *string) (err error) {\n\n\targuments := \"\"\n\ttmp := []string{}\n\n\tfor _, node := range data.AST.Children {\n\n\t\tinstruction := strings.ToUpper(node.Value)\n\t\ttab := strings.Repeat(\" \", len(node.Value)+1)\n\n\t\tswitch instruction {\n\t\tcase \"FROM\":\n\t\t\targuments = DefaultForm(node)\n\t\tcase \"LABEL\":\n\t\t\targuments = KeyValueForm(node, tab)\n\t\tcase \"MAINTAINER\":\n\t\t\targuments = DefaultForm(node)\n\t\tcase \"EXPOSE\":\n\t\t\targuments = DefaultForm(node)\n\t\tcase \"ADD\":\n\t\t\targuments = DefaultForm(node)\n\t\tcase \"ONBUILD\":\n\t\t\tfor _, n := range node.Next.Children {\n\t\t\t\targuments = strings.ToUpper(n.Value) + \" \" + DefaultForm(n)\n\t\t\t}\n\t\tcase \"STOPSIGNAL\":\n\t\t\targuments = DefaultForm(node)\n\t\tcase \"HEALTHCHECK\":\n\t\t\targuments = DefaultForm(node)\n\t\tcase \"ARG\":\n\t\t\targuments = KeyValueForm(node, tab)\n\t\tcase \"COPY\":\n\t\t\targuments = DefaultForm(node)\n\t\tcase \"ENV\":\n\t\t\targuments = KeyValueForm(node, tab)\n\t\tcase \"RUN\":\n\t\t\targuments = ShellForm(node)\n\t\t\t//arguments = ExecForm(node)\n\t\tcase \"CMD\":\n\t\t\targuments = ExecForm(node)\n\t\t\t//arguments = ShellForm(node)\n\t\tcase \"ENTRYPOINT\":\n\t\t\targuments = ExecForm(node)\n\t\t\t//arguments = ShellForm(node)\n\t\tcase \"SHELL\":\n\t\t\targuments = ExecForm(node)\n\t\t\t//arguments = 
ShellForm(node)\n\t\tcase \"VOLUME\":\n\t\t\t//arguments = ExecForm(node)\n\t\t\targuments = DefaultForm(node)\n\t\tcase \"USER\":\n\t\t\targuments = DefaultForm(node)\n\n\t\tcase \"WORKDIR\":\n\t\t\targuments = DefaultForm(node)\n\n\t\tdefault:\n\t\t\treturn fmt.Errorf(\"Instruction %s not supported\", instruction)\n\t\t}\n\n\t\tif len(arguments) > 0 {\n\t\t\ttmp = append(tmp, fmt.Sprintf(\"%s %s\", instruction, arguments))\n\t\t} else {\n\t\t\ttmp = append(tmp, instruction)\n\t\t}\n\n\t}\n\n\t*document = strings.Join(tmp, \"\\n\")\n\n\treturn err\n}", "func ToArg(name, value string) string {\n\treturn name + \"=\" + value\n}", "func buildIPArgument(parameter string, environmentVariable string, imageType FDBImageType, sampleAddresses []fdbv1beta2.ProcessAddress) []monitorapi.Argument {\n\tvar leftIPWrap string\n\tvar rightIPWrap string\n\tif imageType == FDBImageTypeUnified {\n\t\tleftIPWrap = \"[\"\n\t\trightIPWrap = \"]\"\n\t} else {\n\t\tleftIPWrap = \"\"\n\t\trightIPWrap = \"\"\n\t}\n\targuments := []monitorapi.Argument{{Value: fmt.Sprintf(\"--%s=%s\", parameter, leftIPWrap)}}\n\n\tfor indexOfAddress, address := range sampleAddresses {\n\t\tif indexOfAddress != 0 {\n\t\t\targuments = append(arguments, monitorapi.Argument{Value: fmt.Sprintf(\",%s\", leftIPWrap)})\n\t\t}\n\n\t\targuments = append(arguments,\n\t\t\tmonitorapi.Argument{ArgumentType: monitorapi.EnvironmentArgumentType, Source: environmentVariable},\n\t\t\tmonitorapi.Argument{Value: fmt.Sprintf(\"%s:\", rightIPWrap)},\n\t\t\tmonitorapi.Argument{ArgumentType: monitorapi.ProcessNumberArgumentType, Offset: address.Port - 2, Multiplier: 2},\n\t\t)\n\n\t\tflags := address.SortedFlags()\n\n\t\tif len(flags) > 0 {\n\t\t\targuments = append(arguments, monitorapi.Argument{Value: fmt.Sprintf(\":%s\", strings.Join(flags, \":\"))})\n\t\t}\n\t}\n\treturn arguments\n}", "func (a *Arguments) PutArgument(expr Expression) {\n\ta.exprs = append(a.exprs, expr)\n}", "func (n DependencyNode) Codegen(prog *Program) 
(value.Value, error) { return nil, nil }", "func initArgs(){\n\t//master -config ./master.json\n\tflag.StringVar(&confFile, \"config\", \"./master.json\", \"specify master.json as config file\")\n\tflag.Parse()\n}", "func (s *BaselimboListener) EnterFormal_arg(ctx *Formal_argContext) {}", "func NewArgumentScanner(args []string, options ...ArgOption) Scanner {\n\t// Process the options\n\topts := &argOptions{\n\t\tjoiner: \" \",\n\t\topts: []FileOption{LineEndings(NoLineStyle)},\n\t}\n\tfor _, opt := range options {\n\t\topt.argApply(opts)\n\t}\n\n\t// Construct the joiner scanner\n\tloc := ArgLocation{\n\t\tB: ArgPos{I: 0, C: 1},\n\t\tE: ArgPos{I: 0, C: 1},\n\t}\n\tjoiner := NewMemoizingScanner(NewFileScanner(bytes.NewBufferString(opts.joiner), loc, opts.opts...))\n\n\t// Construct a list of scanners\n\tstreams := []Scanner{}\n\tfor i, arg := range args {\n\t\tif i != 0 {\n\t\t\tstreams = append(streams, joiner)\n\t\t}\n\n\t\tloc := ArgLocation{\n\t\t\tB: ArgPos{I: i + 1, C: 1},\n\t\t\tE: ArgPos{I: i + 1, C: 1},\n\t\t}\n\t\tstreams = append(streams, NewFileScanner(bytes.NewBufferString(arg), loc, opts.opts...))\n\t}\n\n\treturn NewChainingScanner(streams)\n}", "func (params PostParams) Generate(args []string, argConfigs []Arg) PostParams {\n\tvar md5hash string\n\tfor index, arg := range args {\n\t\tDebugf(\"Index and args %d %s %v\", index, arg, argConfigs)\n\n\t\tDebugf(\"PostParams Setting %s to %s\", strings.Title(argConfigs[index].Name), arg)\n\t\tif argConfigs[index].Type == \"object\" {\n\t\t\tDebugln(\"Using object parser\")\n\t\t\tvar jsonArg map[string]interface{}\n\t\t\terr := json.Unmarshal([]byte(arg), &jsonArg)\n\t\t\tif err != nil {\n\t\t\t\tpanic(fmt.Sprintf(\"Error parsing json from %s - %s\", argConfigs[index].Name, err.Error()))\n\t\t\t}\n\t\t\treflect.ValueOf(&params).Elem().FieldByName(getFieldByArgumentName(argConfigs[index].Name)).Set(reflect.ValueOf(jsonArg))\n\t\t} else if argConfigs[index].Type == \"array\" {\n\t\t\tDebugln(\"Using array 
parser\")\n\t\t\tvar jsonArray []interface{}\n\t\t\terr := json.Unmarshal([]byte(arg), &jsonArray)\n\t\t\tif err != nil {\n\t\t\t\tpanic(fmt.Sprintf(\"Error parsing json from %s - %s\", argConfigs[index].Name, err.Error()))\n\t\t\t}\n\t\t\treflect.ValueOf(&params).Elem().FieldByName(getFieldByArgumentName(argConfigs[index].Name)).Set(reflect.ValueOf(jsonArray))\n\t\t} else if argConfigs[index].Type == \"bool\" {\n\t\t\tDebugf(\"Using bool parser for (%s) = (%s)\", argConfigs[index].Name, arg)\n\t\t\tif arg == \"\" {\n\t\t\t\tDebugf(\"Missing arg value (%s) using default (%s)\", argConfigs[index].Name, argConfigs[index].Value)\n\t\t\t\targ = argConfigs[index].Value\n\t\t\t}\n\t\t\tboolArg, _ := strconv.ParseBool(arg)\n\t\t\treflect.ValueOf(&params).Elem().FieldByName(getFieldByArgumentName(argConfigs[index].Name)).SetBool(boolArg)\n\t\t} else {\n\t\t\tif argConfigs[index].Type == \"url\" {\n\t\t\t\tDebugf(\"Handling url %s\", arg)\n\t\t\t\ta, err := ComputeMd5(arg)\n\t\t\t\tmd5hash = a\n\t\t\t\tif err != nil {\n\t\t\t\t\tfmt.Printf(\"Failed to generate MD5 from url %s. Make sure the file exists and permissions are correct. 
(%s)\", arg, err)\n\t\t\t\t\tExit(1)\n\t\t\t\t}\n\t\t\t\targ = ConvertFileToURL(arg)\n\t\t\t}\n\t\t\tDebugf(\"Using string parser for (%s) = (%s)\", argConfigs[index].Name, arg)\n\t\t\tif arg == \"\" {\n\t\t\t\tDebugf(\"Missing arg value (%s) using default (%s)\", argConfigs[index].Name, argConfigs[index].Value)\n\t\t\t\targ = argConfigs[index].Value\n\t\t\t}\n\t\t\treflect.ValueOf(&params).Elem().FieldByName(getFieldByArgumentName(argConfigs[index].Name)).SetString(arg)\n\t\t}\n\n\t\tDebugf(\"Finished %s\", arg)\n\t}\n\tif len(md5hash) > 0 {\n\t\tparams.Checksum = md5hash\n\t}\n\treturn params\n}", "func Int16Arg(register Register, name string, options ...ArgOptionApplyer) *int16 {\n\tp := new(int16)\n\t_ = Int16ArgVar(register, p, name, options...)\n\treturn p\n}", "func (c *Command) addArgument(arg interface{}) {\n\tc.Arguments = append(c.Arguments, arg)\n}", "func (p *Planner) configureFieldArgumentSource(upstreamFieldRef, downstreamFieldRef int, argumentName string, sourcePath []string) {\n\tfieldArgument, ok := p.visitor.Operation.FieldArgument(downstreamFieldRef, []byte(argumentName))\n\tif !ok {\n\t\treturn\n\t}\n\tvalue := p.visitor.Operation.ArgumentValue(fieldArgument)\n\tif value.Kind != ast.ValueKindVariable {\n\t\tp.applyInlineFieldArgument(upstreamFieldRef, downstreamFieldRef, argumentName, sourcePath)\n\t\treturn\n\t}\n\tvariableName := p.visitor.Operation.VariableValueNameBytes(value.Ref)\n\tvariableNameStr := p.visitor.Operation.VariableValueNameString(value.Ref)\n\n\tcontextVariable := &resolve.ContextVariable{\n\t\tPath: []string{variableNameStr},\n\t\tRenderAsGraphQLValue: true,\n\t}\n\tcontextVariable.SetJsonValueType(p.visitor.Definition, p.visitor.Definition, p.argTypeRef)\n\n\tcontextVariableName, exists := p.variables.AddVariable(contextVariable)\n\tvariableValueRef, argRef := p.upstreamOperation.AddVariableValueArgument([]byte(argumentName), variableName) // add the argument to the field, but don't redefine 
it\n\tp.upstreamOperation.AddArgumentToField(upstreamFieldRef, argRef)\n\n\tif exists { // if the variable exists we don't have to put it onto the variables declaration again, skip\n\t\treturn\n\t}\n\n\tfor _, i := range p.visitor.Operation.OperationDefinitions[p.visitor.Walker.Ancestors[0].Ref].VariableDefinitions.Refs {\n\t\tref := p.visitor.Operation.VariableDefinitions[i].VariableValue.Ref\n\t\tif !p.visitor.Operation.VariableValueNameBytes(ref).Equals(variableName) {\n\t\t\tcontinue\n\t\t}\n\t\timportedType := p.visitor.Importer.ImportType(p.visitor.Operation.VariableDefinitions[i].Type, p.visitor.Operation, p.upstreamOperation)\n\t\tp.upstreamOperation.AddVariableDefinitionToOperationDefinition(p.nodes[0].Ref, variableValueRef, importedType)\n\t}\n\n\tp.upstreamVariables, _ = sjson.SetRawBytes(p.upstreamVariables, variableNameStr, []byte(contextVariableName))\n}", "func collectArguments() Arguments {\n\tendpoint := config.Config.ChooseEndpoint(flags.APIEndpoint)\n\ttoken := config.Config.ChooseToken(endpoint, flags.Token)\n\tscheme := config.Config.ChooseScheme(endpoint, flags.Token)\n\n\treturn Arguments{\n\t\tapiEndpoint: endpoint,\n\t\tauthToken: token,\n\t\tscheme: scheme,\n\t\tclusterNameOrID: \"\",\n\t\tuserProvidedToken: flags.Token,\n\t\tverbose: flags.Verbose,\n\t}\n}", "func (s *BaseConcertoListener) EnterFuncArg(ctx *FuncArgContext) {}", "func NewArgumentWithDots(meta ScriptMetaData, nodeBegin *node32, nodeEnd *node32, value Value) Argument {\n\treturn &argument{astNode: astNode{meta: meta, node: nodeBegin, endNode: nodeEnd}, value: value}\n}", "func (s *BaseGraffleParserListener) EnterBuilt_func_input(ctx *Built_func_inputContext) {}", "func GenerateUniqueArg(datatypeName string) string {\n\tswitch datatypeName {\n\tcase field.TypeString:\n\t\treturn \"strconv.Itoa(i)\"\n\tcase field.TypeUint:\n\t\treturn \"uint64(i)\"\n\tcase field.TypeInt:\n\t\treturn \"int32(i)\"\n\tcase field.TypeBool:\n\t\treturn valueFalse\n\tcase 
field.TypeCustom:\n\t\treturn valueNull\n\tdefault:\n\t\tpanic(fmt.Sprintf(\"unknown type %s\", datatypeName))\n\t}\n}", "func (e PackageExpr) Arg() rel.Expr {\n\treturn e.a\n}", "func (fi *funcInfo) emitVararg(line, a, n int) {\r\n\tfi.emitABC(line, OP_VARARG, a, n+1, 0)\r\n}", "func IntArg(register Register, name string, options ...ArgOptionApplyer) *int {\n\tp := new(int)\n\t_ = IntArgVar(register, p, name, options...)\n\treturn p\n}", "func (ec *executionContext) field_Mutation_addPlantToNursery_args(ctx context.Context, rawArgs map[string]interface{}) (map[string]interface{}, error) {\n\tvar err error\n\targs := map[string]interface{}{}\n\tvar arg0 model.NewNurseryAddition\n\tif tmp, ok := rawArgs[\"input\"]; ok {\n\t\targ0, err = ec.unmarshalNNewNurseryAddition2githubᚗcomᚋwonesyᚋplantparenthoodᚋgraphᚋmodelᚐNewNurseryAddition(ctx, tmp)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\targs[\"input\"] = arg0\n\treturn args, nil\n}", "func (ec *executionContext) field_Mutation_createAgent_args(ctx context.Context, rawArgs map[string]interface{}) (map[string]interface{}, error) {\n\tvar err error\n\targs := map[string]interface{}{}\n\tvar arg0 models.CreateAgentInput\n\tif tmp, ok := rawArgs[\"input\"]; ok {\n\t\tctx := graphql.WithPathContext(ctx, graphql.NewPathWithField(\"input\"))\n\t\targ0, err = ec.unmarshalNCreateAgentInput2golangᚑmongoᚑgraphqlᚑ003ᚋinternalᚋmodelsᚐCreateAgentInput(ctx, tmp)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\targs[\"input\"] = arg0\n\treturn args, nil\n}", "func printInferredArguments(out *output.Output) {\n\tif out == nil {\n\t\treturn\n\t}\n\n\tblock := out.Block(output.Line(output.EmojiLightbulb, output.StyleItalic, \"Inferred arguments\"))\n\tblock.Writef(\"repo: %s\", codeintelUploadFlags.repo)\n\tblock.Writef(\"commit: %s\", codeintelUploadFlags.commit)\n\tblock.Writef(\"root: %s\", codeintelUploadFlags.root)\n\tblock.Writef(\"file: %s\", codeintelUploadFlags.file)\n\tblock.Writef(\"indexer: %s\", 
codeintelUploadFlags.indexer)\n\tblock.Writef(\"indexerVersion: %s\", codeintelUploadFlags.indexerVersion)\n\tblock.Close()\n}", "func generateCall(generator *Generator, node parser.Node) string {\n\tvar identifier string\n\n\t// Check if it is a built-in function or not\n\tif strings.Contains(node.Value, \"|\") {\n\t\t// Get the function identifier by spliting the value by the pipe\n\t\tidentifier = strings.Split(node.Value, \"|\")[1]\n\n\t\tcheckCall(generator, node)\n\n\t\t// Add import to the generator\n\t\taddCallImport(\n\t\t\tgenerator,\n\t\t\tnode.Value,\n\t\t)\n\t} else {\n\t\tidentifier = node.Value\n\t}\n\n\t// Translate the params\n\tparams := generateParams(generator, node.Params)\n\n\t// Link all the translations together\n\treturn fmt.Sprintf(\n\t\tcCall,\n\t\tidentifier,\n\t\tstrings.Join(params, \",\"),\n\t)\n}", "func (opts *HMACTruncated256AESDeriveKeyOpts) Argument() []byte {\n\treturn opts.Arg\n}", "func (opts *HMACTruncated256AESDeriveKeyOpts) Argument() []byte {\n\treturn opts.Arg\n}", "func (n *CommandNode) Args() []Expr { return n.args }", "func ArgumentCustomType(name string, values ...Argument) Argument {\n\treturn Argument{name, argumentSlice(values)}\n}", "func genField(field *ast.FieldDefinition) *jen.Statement {\n\t//\n\t// Generate config for field\n\t//\n\t// == Example input SDL\n\t//\n\t// interface Pet {\n\t// \"name of the pet\"\n\t// name(style: NameComponentsStyle = SHORT): String!\n\t// \"\"\"\n\t// givenName of the pet ★\n\t// \"\"\"\n\t// givenName: String @deprecated(reason: \"No longer supported; please use name field.\")\n\t// }\n\t//\n\t// == Example output\n\t//\n\t// &graphql.Field{\n\t// Name: \"name\",\n\t// Type: graphql.NonNull(graphql.String),\n\t// Description: \"name of the pet\",\n\t// DeprecationReason: \"\",\n\t// Args: FieldConfigArgument{ ... 
},\n\t// }\n\t//\n\t// &graphql.Field{\n\t// Name: \"givenName\",\n\t// Type: graphql.String,\n\t// Description: \"givenName of the pet\",\n\t// DeprecationReason: \"No longer supported; please use name field.\",\n\t// Args: FieldConfigArgument{ ... },\n\t// }\n\t//\n\treturn jen.Op(\"&\").Qual(defsPkg, \"Field\").Values(jen.Dict{\n\t\tjen.Id(\"Args\"): genArguments(field.Arguments),\n\t\tjen.Id(\"DeprecationReason\"): genDeprecationReason(field.Directives),\n\t\tjen.Id(\"Description\"): genDescription(field),\n\t\tjen.Id(\"Name\"): jen.Lit(field.Name.Value),\n\t\tjen.Id(\"Type\"): genOutputTypeReference(field.Type),\n\t})\n}", "func (gen declCodeGen) Generate(module ModuleDefinition, writer io.Writer) error {\n\tif module.TagDefault == TAGS_AUTOMATIC {\n\t\t// See x.680, section 12.3. It implies certain transformations to component and alternative lists that are not implemented.\n\t\treturn errors.New(\"AUTOMATIC tagged modules are not supported\")\n\t}\n\tctx := moduleContext{\n\t\textensibilityImplied: module.ExtensibilityImplied,\n\t\ttagDefault: module.TagDefault,\n\t\tlookupContext: module.ModuleBody,\n\t\tparams: gen.Params,\n\t}\n\tmoduleName := goast.NewIdent(goifyName(module.ModuleIdentifier.Reference))\n\tif len(gen.Params.Package) > 0 {\n\t\tmoduleName = goast.NewIdent(gen.Params.Package)\n\t}\n\tast := &goast.File{\n\t\tName: moduleName,\n\t\tDecls: ctx.generateDeclarations(module),\n\t}\n\tif len(ctx.errors) != 0 {\n\t\tmsg := \"errors generating Go AST from module: \\n\"\n\t\tfor _, err := range ctx.errors {\n\t\t\tmsg += \" \" + err.Error() + \"\\n\"\n\t\t}\n\t\treturn errors.New(msg)\n\t}\n\timportDecls := make([]goast.Decl, 0)\n\tfor _, moduleName := range ctx.requiredModules {\n\t\tmodulePath := &goast.BasicLit{Kind: gotoken.STRING, Value: fmt.Sprintf(\"\\\"%v\\\"\", moduleName)}\n\t\tspecs := []goast.Spec{&goast.ImportSpec{Path: modulePath}}\n\t\timportDecls = append(importDecls, &goast.GenDecl{Tok: gotoken.IMPORT, Specs: 
specs})\n\t}\n\tast.Decls = append(importDecls, ast.Decls...)\n\treturn goprint.Fprint(writer, gotoken.NewFileSet(), ast)\n}", "func (n *CommandNode) AddArg(a Expr) {\n\tn.args = append(n.args, a)\n}", "func (node *Argument) formatFast(buf *TrackedBuffer) {\n\tbuf.WriteArg(\":\", node.Name)\n\tif node.Type >= 0 {\n\t\t// For bind variables that are statically typed, emit their type as an adjacent comment.\n\t\t// This comment will be ignored by older versions of Vitess (and by MySQL) but will provide\n\t\t// type safety when using the query as a cache key.\n\t\tbuf.WriteString(\" /* \")\n\t\tbuf.WriteString(node.Type.String())\n\t\tbuf.WriteString(\" */\")\n\t}\n}", "func Struct(pkgName, strctName, argName string) *CXArgument {\n\tpkg, err := PROGRAM.GetPackage(pkgName)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tstrct, err := pkg.GetStruct(strctName)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\targ := MakeArgument(argName, \"\", -1).AddType(TypeNames[TYPE_CUSTOM])\n\targ.DeclarationSpecifiers = append(arg.DeclarationSpecifiers, DECL_STRUCT)\n\targ.Size = strct.Size\n\targ.TotalSize = strct.Size\n\targ.CustomType = strct\n\n\treturn arg\n}", "func (s *BaseSyslParserListener) EnterTransform_arg(ctx *Transform_argContext) {}", "func (w *reqResWriter) writeArg1(arg Output) error {\n\treturn w.writeArg(arg, false, reqResWriterPreArg1, reqResWriterPreArg2)\n}", "func Literal(literal string) *LiteralArgumentBuilder {\n\treturn &LiteralArgumentBuilder{Literal: literal}\n}", "func (gen *jsGenerator) init(args *Arguments) error {\n\tif !args.GenClient && !args.GenModel && !args.GenServer {\n\t\treturn fmt.Errorf(\"nothing to do\")\n\t} else if len(args.Options) > 0 {\n\t\tfor k, v := range args.Options {\n\t\t\tswitch k {\n\t\t\tcase \"controller\":\n\t\t\t\tif v == \"\" {\n\t\t\t\t\treturn fmt.Errorf(\"controller cannot be empty\")\n\t\t\t\t}\n\t\t\t\tgen.baseControllerName = v\n\t\t\tcase \"output\":\n\t\t\t\tif v == \"\" {\n\t\t\t\t\treturn fmt.Errorf(\"output option 
cannot be empty. Valid options are 'ns-flat' and 'ns-nested'\")\n\t\t\t\t} else if v != \"ns-flat\" && v != \"ns-nested\" {\n\t\t\t\t\treturn fmt.Errorf(\"invalid output option: %s: valid options are 'ns-flat' and 'ns-nested'\", v)\n\t\t\t\t}\n\t\t\tdefault:\n\t\t\t\treturn fmt.Errorf(\"the %s option is not applicable to language js\", k)\n\t\t\t}\n\t\t}\n\t}\n\tgen.args = args\n\treturn gen.loadTempates(args.TemplateDir, \"js\", template.FuncMap{\n\t\t\"formatType\": func(t *idl.Type) string { return gen.formatType(t) },\n\t\t\"fullNameOf\": func(name string) string { return gen.fullNameOf(name) },\n\t\t\"formatValue\": func(p *idl.Pair) string { return gen.formatLiteral(p.Value, p.DataType) },\n\t\t\"filterAttrs\": func(attrs []*idl.Attribute) []*idl.Attribute { return gen.filterAttributes(attrs) },\n\t\t\"isVoid\": func(t *idl.Type) bool { return gen.isVoid(t) },\n\t\t\"isTrivialProperty\": func(t *idl.Type) bool { return gen.isTrivialProperty(t) },\n\t\t\"usings\": func() []string {\n\t\t\tpkg := gen.tplRootIdl.Namespaces[\"js\"]\n\t\t\timports := make([]string, 0)\n\t\t\tfor _, i := range gen.tplRootIdl.UniqueNamespaces(\"js\") {\n\t\t\t\tif i != pkg {\n\t\t\t\t\t//relPath := i[len(pkg)];\n\t\t\t\t\t//fmt.Println(relPath);\n\t\t\t\t\t//fmt.Println(\"pkg => \" + pkg);\n\t\t\t\t\t//fmt.Println(\"i => \" + i);\n\t\t\t\t\trelPath := \"\"\n\t\t\t\t\tfor x := 0; x < len(strings.Split(pkg, \".\")); x++ {\n\t\t\t\t\t\trelPath += \"../\"\n\t\t\t\t\t}\n\t\t\t\t\t//fmt.Println(\"relPath => \" + relPath);\n\t\t\t\t\timports = append(imports, relPath+strings.Replace(i, \".\", \"/\", -1))\n\t\t\t\t}\n\t\t\t}\n\t\t\treturn imports\n\t\t},\n\t\t\"isNotPascalCase\": func(name string) bool {\n\t\t\tif len(name) > 1 {\n\t\t\t\treturn strings.ToUpper(name[0:1]) != name[0:1]\n\t\t\t}\n\t\t\treturn false\n\t\t},\n\t\t\"baseController\": func() string {\n\t\t\tif gen.baseControllerName != \"\" {\n\t\t\t\treturn gen.baseControllerName\n\t\t\t} else {\n\t\t\t\treturn 
\"Concur.Babel.Mvc.BabelController\"\n\t\t\t}\n\t\t},\n\t\t\"cast\": func(t *idl.Type) string {\n\t\t\tif t.Name == \"float32\" {\n\t\t\t\treturn \"(float)\"\n\t\t\t} else {\n\t\t\t\treturn \"\"\n\t\t\t}\n\t\t},\n\t\t\"constType\": func(s string) string {\n\t\t\tcs, ok := jsConstTypes[s]\n\t\t\tif ok {\n\t\t\t\treturn cs\n\t\t\t} else {\n\t\t\t\treturn \"string\"\n\t\t\t}\n\t\t},\n\t})\n}", "func marshalArg(arg any) any {\n\tif buf, err := json.Marshal(arg); err == nil {\n\t\targ = string(buf)\n\t}\n\treturn arg\n}", "func formulaArgToToken(arg formulaArg) efp.Token {\n\tswitch arg.Type {\n\tcase ArgNumber:\n\t\tif arg.Boolean {\n\t\t\treturn efp.Token{TValue: arg.Value(), TType: efp.TokenTypeOperand, TSubType: efp.TokenSubTypeLogical}\n\t\t}\n\t\treturn efp.Token{TValue: arg.Value(), TType: efp.TokenTypeOperand, TSubType: efp.TokenSubTypeNumber}\n\tdefault:\n\t\treturn efp.Token{TValue: arg.Value(), TType: efp.TokenTypeOperand, TSubType: efp.TokenSubTypeText}\n\t}\n}", "func ASTArgsFromStmt(stmt string) (*ASTArgs, error) {\n\tstmtNode, err := parser.New().ParseOneStmt(stmt, \"\", \"\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tloadDataStmt, ok := stmtNode.(*ast.LoadDataStmt)\n\tif !ok {\n\t\treturn nil, errors.Errorf(\"stmt %s is not load data stmt\", stmt)\n\t}\n\treturn &ASTArgs{\n\t\tFileLocRef: loadDataStmt.FileLocRef,\n\t\tColumnsAndUserVars: loadDataStmt.ColumnsAndUserVars,\n\t\tColumnAssignments: loadDataStmt.ColumnAssignments,\n\t\tOnDuplicate: loadDataStmt.OnDuplicate,\n\t\tFieldsInfo: loadDataStmt.FieldsInfo,\n\t\tLinesInfo: loadDataStmt.LinesInfo,\n\t}, nil\n}", "func (s *BaseSyslParserListener) EnterCall_arg(ctx *Call_argContext) {}", "func (s *BaseGShellListener) EnterNamedArgument(ctx *NamedArgumentContext) {}", "func (n *Attribute) Arg(i int) *Argument { return n.Args[i].(*Argument) }", "func (params GetParams) Generate(args []string, argConfigs []Arg) GetParams {\n\tfor index, arg := range args {\n\t\tif argConfigs[index].Type != \"object\" 
&& argConfigs[index].Type != \"array\" {\n\t\t\treflect.ValueOf(&params).Elem().FieldByName(getFieldByArgumentName(argConfigs[index].Name)).SetString(arg)\n\t\t} else if argConfigs[index].Type == \"bool\" {\n\t\t\tboolArg, _ := strconv.ParseBool(arg)\n\t\t\treflect.ValueOf(&params).Elem().FieldByName(getFieldByArgumentName(argConfigs[index].Name)).SetBool(boolArg)\n\t\t}\n\t}\n\treturn params\n}", "func (s *BasePlSqlParserListener) ExitArgument(ctx *ArgumentContext) {}", "func BundleGeneratorConfig(config sheaf.BundleConfig) BundleGeneratorOption {\n\treturn func(generator BundleGenerator) BundleGenerator {\n\t\tgenerator.config = config\n\t\treturn generator\n\t}\n}", "func getConfigArgs(action BuildAction, dir string, ctx Context, args []string) []string {\n\t// The next block of code verifies that the current directory is the root directory of the source\n\t// tree. It then finds the relative path of dir based on the root directory of the source tree\n\t// and verify that dir is inside of the source tree.\n\tcheckTopDir(ctx)\n\ttopDir, err := os.Getwd()\n\tif err != nil {\n\t\tctx.Fatalf(\"Error retrieving top directory: %v\", err)\n\t}\n\tdir, err = filepath.EvalSymlinks(dir)\n\tif err != nil {\n\t\tctx.Fatalf(\"Unable to evaluate symlink of %s: %v\", dir, err)\n\t}\n\tdir, err = filepath.Abs(dir)\n\tif err != nil {\n\t\tctx.Fatalf(\"Unable to find absolute path %s: %v\", dir, err)\n\t}\n\trelDir, err := filepath.Rel(topDir, dir)\n\tif err != nil {\n\t\tctx.Fatalf(\"Unable to find relative path %s of %s: %v\", relDir, topDir, err)\n\t}\n\t// If there are \"..\" in the path, it's not in the source tree.\n\tif strings.Contains(relDir, \"..\") {\n\t\tctx.Fatalf(\"Directory %s is not under the source tree %s\", dir, topDir)\n\t}\n\n\tconfigArgs := args[:]\n\n\t// If the arguments contains GET-INSTALL-PATH, change the target name prefix from MODULES-IN- to\n\t// GET-INSTALL-PATH-IN- to extract the installation path instead of building the 
modules.\n\ttargetNamePrefix := \"MODULES-IN-\"\n\tif inList(\"GET-INSTALL-PATH\", configArgs) {\n\t\ttargetNamePrefix = \"GET-INSTALL-PATH-IN-\"\n\t\tconfigArgs = removeFromList(\"GET-INSTALL-PATH\", configArgs)\n\t}\n\n\tvar targets []string\n\n\tswitch action {\n\tcase BUILD_MODULES:\n\t\t// No additional processing is required when building a list of specific modules or all modules.\n\tcase BUILD_MODULES_IN_A_DIRECTORY:\n\t\t// If dir is the root source tree, all the modules are built of the source tree are built so\n\t\t// no need to find the build file.\n\t\tif topDir == dir {\n\t\t\tbreak\n\t\t}\n\n\t\tbuildFile := findBuildFile(ctx, relDir)\n\t\tif buildFile == \"\" {\n\t\t\tctx.Fatalf(\"Build file not found for %s directory\", relDir)\n\t\t}\n\t\ttargets = []string{convertToTarget(filepath.Dir(buildFile), targetNamePrefix)}\n\tcase BUILD_MODULES_IN_DIRECTORIES:\n\t\tnewConfigArgs, dirs := splitArgs(configArgs)\n\t\tconfigArgs = newConfigArgs\n\t\ttargets = getTargetsFromDirs(ctx, relDir, dirs, targetNamePrefix)\n\t}\n\n\t// Tidy only override all other specified targets.\n\ttidyOnly := os.Getenv(\"WITH_TIDY_ONLY\")\n\tif tidyOnly == \"true\" || tidyOnly == \"1\" {\n\t\tconfigArgs = append(configArgs, \"tidy_only\")\n\t} else {\n\t\tconfigArgs = append(configArgs, targets...)\n\t}\n\n\treturn configArgs\n}", "func (a *arguments) Argument() string {\n\treturn a.argument\n}", "func (eeo EncodingErrorOption) argApply(o *argOptions) {\n\to.opts = append(o.opts, eeo)\n}", "func genYaml(name string) error {\n\treturn nil\n}", "func (s *BasePlSqlParserListener) EnterFunction_argument_modeling(ctx *Function_argument_modelingContext) {\n}", "func genConfigCobra(cmd *cobra.Command, args []string) {\n\ts, err := genConfig()\n\tif err != nil {\n\t\terrlog.LogError(err)\n\t\tos.Exit(1)\n\t}\n\tfmt.Println(string(s))\n}", "func ASTArgsFromPlan(plan *plannercore.LoadData) *ASTArgs {\n\treturn &ASTArgs{\n\t\tFileLocRef: plan.FileLocRef,\n\t\tColumnsAndUserVars: 
plan.ColumnsAndUserVars,\n\t\tColumnAssignments: plan.ColumnAssignments,\n\t\tOnDuplicate: plan.OnDuplicate,\n\t\tFieldsInfo: plan.FieldsInfo,\n\t\tLinesInfo: plan.LinesInfo,\n\t}\n}", "func genVariants(arg interface{}) gopter.Gen {\n\targs := arg.([]interface{})\n\ts := args[0].(string)\n\tt := args[1].(string)\n\treturn gen.OneConstOf(s, strings.ToUpper(s), strings.Title(s),\n\t\tfmt.Sprintf(\"%s %s\", s, t),\n\t\tfmt.Sprintf(\"%s %s\", strings.ToUpper(s), t),\n\t\tfmt.Sprintf(\"%s %s\", strings.Title(s), t),\n\t)\n}", "func (c *context) ArgBytes(name string) []byte {\n\treturn c.ParamBytes(name)\n}", "func (s *BaseGShellListener) EnterScriptArgument(ctx *ScriptArgumentContext) {}", "func genConfigXML(data map[string]interface{}, section string) string {\n\tif len(data) == 0 {\n\t\treturn \"\"\n\t}\n\n\tb := &bytes.Buffer{}\n\n\t// <yandex>\n\t//\t\t<SECTION>\n\tfprintf(b, \"<%s>\\n\", xmlTagYandex)\n\tfprintf(b, \"%4s<%s>\\n\", \" \", section)\n\n\txmlbuilder.GenerateXML(b, data, 4, 4)\n\t//\t\t<SECTION>\n\t// <yandex>\n\tfprintf(b, \"%4s</%s>\\n\", \" \", section)\n\tfprintf(b, \"</%s>\\n\", xmlTagYandex)\n\n\treturn b.String()\n}", "func (v *Argument) Encode(sw stream.Writer) error {\n\tif err := sw.WriteStructBegin(); err != nil {\n\t\treturn err\n\t}\n\n\tif err := sw.WriteFieldBegin(stream.FieldHeader{ID: 1, Type: wire.TBinary}); err != nil {\n\t\treturn err\n\t}\n\tif err := sw.WriteString(v.Name); err != nil {\n\t\treturn err\n\t}\n\tif err := sw.WriteFieldEnd(); err != nil {\n\t\treturn err\n\t}\n\n\tif v.Type == nil {\n\t\treturn errors.New(\"field Type of Argument is required\")\n\t}\n\tif err := sw.WriteFieldBegin(stream.FieldHeader{ID: 2, Type: wire.TStruct}); err != nil {\n\t\treturn err\n\t}\n\tif err := v.Type.Encode(sw); err != nil {\n\t\treturn err\n\t}\n\tif err := sw.WriteFieldEnd(); err != nil {\n\t\treturn err\n\t}\n\n\tif v.Annotations != nil {\n\t\tif err := sw.WriteFieldBegin(stream.FieldHeader{ID: 3, Type: wire.TMap}); err != nil 
{\n\t\t\treturn err\n\t\t}\n\t\tif err := _Map_String_String_Encode(v.Annotations, sw); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif err := sw.WriteFieldEnd(); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn sw.WriteStructEnd()\n}", "func (n *AnonClassExpr) Arg(i int) *Argument { return n.Args[i].(*Argument) }", "func (*Base) Arguments(p ASTPass, l *ast.Fodder, args *ast.Arguments, r *ast.Fodder, ctx Context) {\n\tp.Fodder(p, l, ctx)\n\tfor i := range args.Positional {\n\t\targ := &args.Positional[i]\n\t\tp.Visit(p, &arg.Expr, ctx)\n\t\tp.Fodder(p, &arg.CommaFodder, ctx)\n\t}\n\tfor i := range args.Named {\n\t\targ := &args.Named[i]\n\t\tp.Fodder(p, &arg.NameFodder, ctx)\n\t\tp.Fodder(p, &arg.EqFodder, ctx)\n\t\tp.Visit(p, &arg.Arg, ctx)\n\t\tp.Fodder(p, &arg.CommaFodder, ctx)\n\t}\n\tp.Fodder(p, r, ctx)\n}", "func (rt *operatorRuntime) genOp(op func(interface{}, interface{}) interface{},\n\tvs parser.Scope, is map[string]interface{}, tid uint64) (interface{}, error) {\n\n\tvar ret interface{}\n\n\terrorutil.AssertTrue(len(rt.node.Children) == 2,\n\t\tfmt.Sprint(\"Operation requires 2 operands\", rt.node))\n\n\tres1, err := rt.node.Children[0].Runtime.Eval(vs, is, tid)\n\tif err == nil {\n\t\tvar res2 interface{}\n\n\t\tif res2, err = rt.node.Children[1].Runtime.Eval(vs, is, tid); err == nil {\n\t\t\tret = op(res1, res2)\n\t\t}\n\t}\n\n\treturn ret, err\n}", "func (self *ArgumentParser) statePassThrough(parser *parserState) stateFunc {\n for ; parser.pos < len(parser.args) ; parser.pos++ {\n arg := parser.args[parser.pos]\n parser.emitWithArgument(tokArgument, parser.stickyArg, parser.stickyArg.String)\n parser.emitWithValue(tokValue, arg)\n }\n return nil\n}" ]
[ "0.73820037", "0.57308537", "0.55290264", "0.5522804", "0.5506649", "0.5157852", "0.5125404", "0.5091101", "0.504007", "0.5006817", "0.49592155", "0.49067444", "0.49022472", "0.48643097", "0.48459315", "0.484178", "0.48397017", "0.48209807", "0.4767527", "0.47625056", "0.47602305", "0.47338164", "0.46945795", "0.46309894", "0.4624366", "0.46112818", "0.45811763", "0.4580689", "0.4573709", "0.45722592", "0.45722592", "0.45616448", "0.4558582", "0.45512962", "0.45456877", "0.4532968", "0.4532445", "0.45313627", "0.45305777", "0.4515654", "0.4459298", "0.4456183", "0.44486606", "0.44464204", "0.44449866", "0.44370413", "0.4434004", "0.44288403", "0.4418655", "0.4417687", "0.4396892", "0.4395933", "0.43903193", "0.43853173", "0.4381437", "0.4362715", "0.43624857", "0.43614763", "0.43610916", "0.43598354", "0.4358731", "0.43564078", "0.43513617", "0.43513617", "0.43476504", "0.4337128", "0.43327057", "0.43223402", "0.4314685", "0.43111363", "0.43079457", "0.4304672", "0.43041334", "0.43000758", "0.42928866", "0.428864", "0.42808256", "0.42805028", "0.42754608", "0.42721108", "0.42719278", "0.42713574", "0.42532334", "0.42484128", "0.42405307", "0.42388877", "0.42294285", "0.4226754", "0.4225793", "0.42224246", "0.42222396", "0.4218464", "0.4218087", "0.4214192", "0.42058864", "0.4203382", "0.41990677", "0.41987687", "0.41892397", "0.4172177" ]
0.793481
0
AddReceipt adds receipt for user.
AddReceipt добавляет чек для пользователя.
func (client Client) AddReceipt(userId string, text string) error { addReceiptUrl := client.backendUrl + "/internal/receipt" request := addReceiptRequest{ReceiptString: text, UserId: userId} reader, err := getReader(request) if err != nil { return err } response, err := http.Post(addReceiptUrl, "text/javascript", reader) if err != nil { return err } switch response.StatusCode { case http.StatusOK: return nil default: return errors.New(response.Status) } return nil }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (puo *ProductUpdateOne) AddReceipt(r ...*Receipt) *ProductUpdateOne {\n\tids := make([]int, len(r))\n\tfor i := range r {\n\t\tids[i] = r[i].ID\n\t}\n\treturn puo.AddReceiptIDs(ids...)\n}", "func (pu *ProductUpdate) AddReceipt(r ...*Receipt) *ProductUpdate {\n\tids := make([]int, len(r))\n\tfor i := range r {\n\t\tids[i] = r[i].ID\n\t}\n\treturn pu.AddReceiptIDs(ids...)\n}", "func (service *Service) AddUser(accountId types.ID) error {\n\t// you can be delegate of a user after the user designate you as a delegate.\n\tif isDelegate, err := service.accounts.IsDelegateOf(service.addr, accountId); err != nil {\n\t\treturn errors.Wrapf(err, \"failed to call Accounts.IsDelegateOf\")\n\t} else if !isDelegate {\n\t\treturn ErrDelegationNotAllowed\n\t}\n\tservice.accountIds = append(service.accountIds, accountId)\n\treturn nil\n}", "func (r *Receipt) AddItem(item *Item) {\n r.Items = append(r.Items, item)\n r.Taxes += float64(item.Quantity) * item.Taxes\n r.Total += float64(item.Quantity) * (item.Price + item.Taxes)\n}", "func (_UsersData *UsersDataTransactor) AddUser(opts *bind.TransactOpts, uuid [16]byte, userAddress common.Address, orgUuid [16]byte, publicKey [2][32]byte, idCartNoHash [32]byte, time *big.Int) (*types.Transaction, error) {\n\treturn _UsersData.contract.Transact(opts, \"addUser\", uuid, userAddress, orgUuid, publicKey, idCartNoHash, time)\n}", "func (_AnchorChain *AnchorChainTransactor) AddUser(opts *bind.TransactOpts, user common.Address) (*types.Transaction, error) {\n\treturn _AnchorChain.contract.Transact(opts, \"addUser\", user)\n}", "func (c *Client) AddUser(userID, phone, name, certNum string, userType string, certType string, autoSign bool) (*AddUserResponse, error) {\n\tcreateSign := \"0\"\n\tif autoSign {\n\t\tcreateSign = \"1\"\n\t}\n\tp := addUserParams{\n\t\tAppUserID: userID,\n\t\tCellNum: phone,\n\t\tUserType: userType,\n\t\tUserName: name,\n\t\tCertifyType: certType,\n\t\tCertifyNumber: certNum,\n\t\tCreateSignature: 
createSign,\n\t}\n\n\tparamMap, err := toMap(p, map[string]string{\n\t\tAppIDKey: c.config.AppID,\n\t\tPasswordKey: c.config.Password,\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tret, err := httpRequest(c, p.URI(), paramMap, nil, func() interface{} {\n\t\treturn &AddUserResponse{}\n\t})\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\trsp := ret.(*AddUserResponse)\n\n\tif err = checkErr(rsp.Code, rsp.SubCode, rsp.Message); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn rsp, nil\n}", "func (puo *ProductUpdateOne) AddReceiptIDs(ids ...int) *ProductUpdateOne {\n\tpuo.mutation.AddReceiptIDs(ids...)\n\treturn puo\n}", "func (_UsersData *UsersDataTransactorSession) AddUser(uuid [16]byte, userAddress common.Address, orgUuid [16]byte, publicKey [2][32]byte, idCartNoHash [32]byte, time *big.Int) (*types.Transaction, error) {\n\treturn _UsersData.Contract.AddUser(&_UsersData.TransactOpts, uuid, userAddress, orgUuid, publicKey, idCartNoHash, time)\n}", "func NewReceipt() *Receipt {\n\treturn &Receipt{}\n}", "func (_UsersData *UsersDataSession) AddUser(uuid [16]byte, userAddress common.Address, orgUuid [16]byte, publicKey [2][32]byte, idCartNoHash [32]byte, time *big.Int) (*types.Transaction, error) {\n\treturn _UsersData.Contract.AddUser(&_UsersData.TransactOpts, uuid, userAddress, orgUuid, publicKey, idCartNoHash, time)\n}", "func (rm *ReceiptMaker) NewReceipt() types.MessageReceipt {\n\tseq := rm.seq\n\trm.seq++\n\treturn types.MessageReceipt{\n\t\tReturn: []byte(fmt.Sprintf(\"%d\", seq)),\n\t}\n}", "func (a *Client) AddStockReceipts(params *AddStockReceiptsParams, authInfo runtime.ClientAuthInfoWriter) (*AddStockReceiptsOK, error) {\n\t// TODO: Validate the params before sending\n\tif params == nil {\n\t\tparams = NewAddStockReceiptsParams()\n\t}\n\n\tresult, err := a.transport.Submit(&runtime.ClientOperation{\n\t\tID: \"addStockReceipts\",\n\t\tMethod: \"POST\",\n\t\tPathPattern: \"/accounts/{koronaAccountId}/stockReceipts\",\n\t\tProducesMediaTypes: 
[]string{\"application/json\"},\n\t\tConsumesMediaTypes: []string{\"application/json\"},\n\t\tSchemes: []string{\"https\"},\n\t\tParams: params,\n\t\tReader: &AddStockReceiptsReader{formats: a.formats},\n\t\tAuthInfo: authInfo,\n\t\tContext: params.Context,\n\t\tClient: params.HTTPClient,\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tsuccess, ok := result.(*AddStockReceiptsOK)\n\tif ok {\n\t\treturn success, nil\n\t}\n\t// unexpected success response\n\t// safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue\n\tmsg := fmt.Sprintf(\"unexpected success response for addStockReceipts: API contract not enforced by server. Client expected to get an error, but got: %T\", result)\n\tpanic(msg)\n}", "func (_Pausable *PausableTransactor) AddPauser(opts *bind.TransactOpts, account common.Address) (*types.Transaction, error) {\n\treturn _Pausable.contract.Transact(opts, \"addPauser\", account)\n}", "func NewReceipt(values map[string]string, contentID uuid.UUID, userID uuid.UUID) *Receipt {\n\tif values == nil {\n\t\tvalues = make(map[string]string, 0)\n\t}\n\treturn &Receipt{\n\t\tID: uuid.NewUUID(),\n\t\tValues: values,\n\t\tSendState: READY,\n\t\tCreated: time.Now(),\n\t\tContentID: contentID,\n\t\tUserID: userID,\n\t}\n}", "func (vu *VaultUsers) AddUser(email string, publicKeyString string, masterPassphrase []byte) error {\n\tif vu.users[email] != nil {\n\t\treturn errors.New(\"User already exists in vault:\" + email)\n\t}\n\n\tuser, err := NewVaultUser(vu.path, email, publicKeyString)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif err := user.SetEncryptedMasterKey(masterPassphrase); err != nil {\n\t\treturn err\n\t}\n\n\tif err := user.Save(); err != nil {\n\t\treturn err\n\t}\n\tvu.users[email] = user\n\treturn nil\n}", "func (pu *ProductUpdate) AddReceiptIDs(ids ...int) *ProductUpdate {\n\tpu.mutation.AddReceiptIDs(ids...)\n\treturn pu\n}", "func (s *Service) AddUserRecord() http.HandlerFunc 
{\n\treturn http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\t// todo\n\t})\n}", "func (_ERC20Pausable *ERC20PausableTransactor) AddPauser(opts *bind.TransactOpts, account common.Address) (*types.Transaction, error) {\n\treturn _ERC20Pausable.contract.Transact(opts, \"addPauser\", account)\n}", "func (m *MemoryUserStorage) Add(user users.User) int {\n\tuser.ID = len(m.users) + 1\n\tuser.Cash = 1000.0\n\tm.users = append(m.users, user)\n\n\treturn user.ID\n}", "func (_AnchorChain *AnchorChainTransactorSession) AddUser(user common.Address) (*types.Transaction, error) {\n\treturn _AnchorChain.Contract.AddUser(&_AnchorChain.TransactOpts, user)\n}", "func (fs Fakes) AddUser(u types.User) int32 {\n\tid := fs.UserStore.lastUserID + 1\n\tfs.UserStore.lastUserID = id\n\tu.ID = id\n\tfs.UserStore.list = append(fs.UserStore.list, u)\n\treturn id\n}", "func NewReceipt() *Receipt {\n return &Receipt{}\n}", "func (session *AliceSession) OnReceipt(receiptFile, secretFile string) error {\n\tif err := utils.CheckRegularFileReadPerm(receiptFile); err != nil {\n\t\treturn err\n\t}\n\tif err := utils.CheckDirOfPathExistence(secretFile); err != nil {\n\t\treturn err\n\t}\n\n\thandle := C.handle_t(session.handle)\n\n\treceiptFileCStr := C.CString(receiptFile)\n\tdefer C.free(unsafe.Pointer(receiptFileCStr))\n\n\tsecretFileCStr := C.CString(secretFile)\n\tdefer C.free(unsafe.Pointer(secretFileCStr))\n\n\tret := bool(C.E_TableOtComplaintAliceOnReceipt(\n\t\thandle, receiptFileCStr, secretFileCStr))\n\tif !ret {\n\t\treturn fmt.Errorf(\n\t\t\t\"E_TableOtComplaintAliceOnReceipt(%v, %s, %s) failed\",\n\t\t\thandle, receiptFile, secretFile)\n\t}\n\n\treturn nil\n}", "func AddUserReview(db *sql.DB, revieweeEmail string, strengths []string, opportunities []string, cycle string) error {\n\tq := `\n INSERT INTO reviews\n (recipient_id,\n review_cycle_id,\n feedback,\n is_strength,\n is_growth_opportunity)\n VALUES ((SELECT id\n FROM users\n WHERE email =?\n LIMIT 1),\n (SELECT 
id\n FROM review_cycles\n WHERE name =? ),\n ?,\n ?,\n ?) ;\n `\n\t// could make some uber query, but it is just easier to iterate\n\tfor _, strength := range strengths {\n\t\tif _, err := db.Exec(q, revieweeEmail, cycle, strength, true, false); err != nil {\n\t\t\treturn errors.Wrap(err, \"unable to insert strengths in reviews\")\n\t\t}\n\t}\n\tfor _, opportunity := range opportunities {\n\t\tif _, err := db.Exec(q, revieweeEmail, cycle, opportunity, false, true); err != nil {\n\t\t\treturn errors.Wrap(err, \"unable to insert opportunity in reviews\")\n\t\t}\n\t}\n\treturn nil\n}", "func (r *PurchaseInvoicePurchaseInvoiceLinesCollectionRequest) Add(ctx context.Context, reqObj *PurchaseInvoiceLine) (resObj *PurchaseInvoiceLine, err error) {\n\terr = r.JSONRequest(ctx, \"POST\", \"\", reqObj, &resObj)\n\treturn\n}", "func (_AnchorChain *AnchorChainSession) AddUser(user common.Address) (*types.Transaction, error) {\n\treturn _AnchorChain.Contract.AddUser(&_AnchorChain.TransactOpts, user)\n}", "func (_ChpRegistry *ChpRegistryTransactor) AddPauser(opts *bind.TransactOpts, account common.Address) (*types.Transaction, error) {\n\treturn _ChpRegistry.contract.Transact(opts, \"addPauser\", account)\n}", "func AddNoteUsn(path, userId string, Usn int) {\n\tnote := model.NewNote(path)\n\tnote.UID = userId\n\tfiled := []string{\"Usn\"}\n\tnote.Usn = Usn\n\tbeego.Debug(\"inc Usn\")\n\tif err := note.Update(filed); err != nil {\n\t\tbeego.Error(err)\n\t}\n\treturn\n}", "func (s *Service) Add(userId, tan string) error {\n\thash, err := s.hasher.Hash(tan)\n\tif nil != err {\n\t\treturn err\n\t}\n\t_, err = s.repository.Create(userId, hash)\n\tif nil == err {\n\t\ts.subscriberRepo.AddSubscriber(userId)\n\t}\n\treturn err\n}", "func AddPayee(id bson.ObjectId, payeeID bson.ObjectId) User {\n\tsession, _ := mgo.Dial(\"127.0.0.1\")\n\tdefer session.Close()\n\tsession.SetMode(mgo.Monotonic, true)\n\tdb := session.DB(\"reimburse-me\").C(\"user\")\n\tuserID := bson.M{\"_id\": 
id}\n\tchange := bson.M{\"$addToSet\": bson.M{\n\t\t\"payees\": payeeID,\n\t}}\n\tdb.Update(userID, change)\n\tvar user User\n\tdb.Find(bson.M{\"_id\": id}).One(&user)\n\treturn user\n}", "func (r *CompanyCustomerPaymentJournalsCollectionRequest) Add(ctx context.Context, reqObj *CustomerPaymentJournal) (resObj *CustomerPaymentJournal, err error) {\n\terr = r.JSONRequest(ctx, \"POST\", \"\", reqObj, &resObj)\n\treturn\n}", "func (_PauserRole *PauserRoleTransactor) AddPauser(opts *bind.TransactOpts, account common.Address) (*types.Transaction, error) {\n\treturn _PauserRole.contract.Transact(opts, \"addPauser\", account)\n}", "func (r *CustomerPaymentJournalCustomerPaymentsCollectionRequest) Add(ctx context.Context, reqObj *CustomerPayment) (resObj *CustomerPayment, err error) {\n\terr = r.JSONRequest(ctx, \"POST\", \"\", reqObj, &resObj)\n\treturn\n}", "func (r *CustomerPaymentJournalCustomerPaymentsCollectionRequest) Add(ctx context.Context, reqObj *CustomerPayment) (resObj *CustomerPayment, err error) {\n\terr = r.JSONRequest(ctx, \"POST\", \"\", reqObj, &resObj)\n\treturn\n}", "func (a *Client) AddStockReceiptItems(params *AddStockReceiptItemsParams, authInfo runtime.ClientAuthInfoWriter) (*AddStockReceiptItemsOK, error) {\n\t// TODO: Validate the params before sending\n\tif params == nil {\n\t\tparams = NewAddStockReceiptItemsParams()\n\t}\n\n\tresult, err := a.transport.Submit(&runtime.ClientOperation{\n\t\tID: \"addStockReceiptItems\",\n\t\tMethod: \"POST\",\n\t\tPathPattern: \"/accounts/{koronaAccountId}/stockReceipts/{stockReceiptId}/items\",\n\t\tProducesMediaTypes: []string{\"application/json\"},\n\t\tConsumesMediaTypes: []string{\"application/json\"},\n\t\tSchemes: []string{\"https\"},\n\t\tParams: params,\n\t\tReader: &AddStockReceiptItemsReader{formats: a.formats},\n\t\tAuthInfo: authInfo,\n\t\tContext: params.Context,\n\t\tClient: params.HTTPClient,\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tsuccess, ok := 
result.(*AddStockReceiptItemsOK)\n\tif ok {\n\t\treturn success, nil\n\t}\n\t// unexpected success response\n\t// safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue\n\tmsg := fmt.Sprintf(\"unexpected success response for addStockReceiptItems: API contract not enforced by server. Client expected to get an error, but got: %T\", result)\n\tpanic(msg)\n}", "func (t *Thereum) TxReceipt(hash common.Hash) (*types.Receipt, error) {\n\tt.mu.Lock()\n\tdefer t.mu.Unlock()\n\n\treceipt, _, _, _ := rawdb.ReadReceipt(t.database, hash, t.chainConfig)\n\treturn receipt, nil\n}", "func (_ElvToken *ElvTokenTransactor) AddPauser(opts *bind.TransactOpts, account common.Address) (*types.Transaction, error) {\n\treturn _ElvToken.contract.Transact(opts, \"addPauser\", account)\n}", "func AddUser(u User) (User, error) {\n\tif u.ID != 0 {\n\t\treturn User{}, errors.New(\"new user must not include an id or it must be set to zero\")\n\t}\n\tu.ID = nextID\n\tnextID++\n\tusers = append(users, &u)\n\treturn u, nil\n}", "func (m *RepairinvoiceMutation) AddUserid(i int) {\n\tif m.adduserid != nil {\n\t\t*m.adduserid += i\n\t} else {\n\t\tm.adduserid = &i\n\t}\n}", "func ProcessNewReceipt(ctx context.Context, xbiz *XBusiness, d1, d2 *time.Time, r *Receipt) (Journal, error) {\n\tvar j Journal\n\tj.BID = xbiz.P.BID\n\tj.Amount = RoundToCent(r.Amount)\n\tj.Dt = r.Dt\n\tj.Type = JNLTYPERCPT\n\tj.ID = r.RCPTID\n\t// j.RAID = r.RAID\n\tjid, err := InsertJournal(ctx, &j)\n\tif err != nil {\n\t\tUlog(\"Error inserting Journal entry: %v\\n\", err)\n\t\treturn j, err\n\t}\n\tif jid > 0 {\n\t\t// now add the Journal allocation records...\n\t\tfor i := 0; i < len(r.RA); i++ {\n\t\t\t// // Console(\"r.RA[%d] id = %d\\n\", i, r.RA[i].RCPAID)\n\t\t\t// rntagr, _ := GetRentalAgreement(r.RA[i].RAID) // what Rental Agreements did this payment affect and the amounts for each\n\t\t\tvar ja JournalAllocation\n\t\t\tja.JID = jid\n\t\t\tja.TCID = 
r.TCID\n\t\t\tja.Amount = RoundToCent(r.RA[i].Amount)\n\t\t\tja.BID = j.BID\n\t\t\tja.ASMID = r.RA[i].ASMID\n\t\t\tja.AcctRule = r.RA[i].AcctRule\n\t\t\tif ja.ASMID > 0 { // there may not be an assessment associated, it could be unallocated funds\n\t\t\t\t// TODO(Steve): should we ignore error?\n\t\t\t\ta, _ := GetAssessment(ctx, ja.ASMID) // but if there is an associated assessment, then mark the RID and RAID\n\t\t\t\tja.RID = a.RID\n\t\t\t\tja.RAID = r.RA[i].RAID\n\t\t\t}\n\t\t\tja.TCID = r.TCID\n\t\t\tif _, err = InsertJournalAllocationEntry(ctx, &ja); err != nil {\n\t\t\t\tLogAndPrintError(\"ProcessNewReceipt\", err)\n\t\t\t\treturn j, err\n\t\t\t}\n\t\t\tj.JA = append(j.JA, ja)\n\t\t}\n\t}\n\treturn j, nil\n}", "func (_DappboxManager *DappboxManagerTransactor) AddUsers(opts *bind.TransactOpts, dAppBoxOrigin common.Address, _address common.Address, _userName string, _defaultURL string, _shortenURL string) (*types.Transaction, error) {\n\treturn _DappboxManager.contract.Transact(opts, \"addUsers\", dAppBoxOrigin, _address, _userName, _defaultURL, _shortenURL)\n}", "func AddUser(APIstub shim.ChaincodeStubInterface, args []string, txnID string, userID string) sc.Response {\n\n\texistingClaimAsBytes, _ := APIstub.GetState(args[0])\n\n\tclaim := Claim{}\n\tjson.Unmarshal(existingClaimAsBytes, &claim)\n\n\tif utils.StringInSlice(userID, claim.UserIDs) {\n\t\treturn shim.Error(\"User already in Claim\")\n\t}\n\n\tclaim.UserIDs = append(claim.UserIDs, userID)\n\n\tclaimAsBytes, _ := json.Marshal(claim)\n\n\tAPIstub.PutState(args[0], claimAsBytes)\n\n\ttimestamp, _ := APIstub.GetTxTimestamp()\n\ttimestampAsInt := timestamp.GetSeconds()\n\tisotimestamp := time.Unix(timestampAsInt, 0).Format(time.RFC3339)\n\ttxnDetails := []string{txnID, \"CEA - Claim User Addition\", isotimestamp, \"\", claim.ID}\n\ttxn.Add(APIstub, txnDetails)\n\n\treturn shim.Success(claimAsBytes)\n\n}", "func (*RegDBService) AddUser(reg *Registration) error {\n\terr := rdb.Create(&reg).Error\n\treturn 
err\n}", "func (_DappboxManager *DappboxManagerSession) AddUsers(dAppBoxOrigin common.Address, _address common.Address, _userName string, _defaultURL string, _shortenURL string) (*types.Transaction, error) {\n\treturn _DappboxManager.Contract.AddUsers(&_DappboxManager.TransactOpts, dAppBoxOrigin, _address, _userName, _defaultURL, _shortenURL)\n}", "func (u *CryptohomeClient) AddRecoveryAuthFactor(ctx context.Context, authSessionID, label, mediatorPubKeyHex, userGaiaID, deviceUserID string) error {\n\t_, err := u.binary.addRecoveryAuthFactor(ctx, authSessionID, label, mediatorPubKeyHex, userGaiaID, deviceUserID)\n\treturn err\n}", "func (_OwnerProxyRegistry *OwnerProxyRegistryTransactor) AddDelegate(opts *bind.TransactOpts, from common.Address) (*types.Transaction, error) {\n\treturn _OwnerProxyRegistry.contract.Transact(opts, \"addDelegate\", from)\n}", "func (r *Runner) AddUser(user *discordgo.User) {\n\tr.DiscordSession.Users[user.ID] = user\n}", "func TestEthTxAdapter_addReceiptToResult(t *testing.T) {\n\tt.Parallel()\n\n\tj := models.JSON{}\n\tinput := *models.NewRunInput(models.NewID(), j, models.RunStatusUnstarted)\n\n\toutput := addReceiptToResult(nil, input, j)\n\tassert.True(t, output.HasError())\n\tassert.EqualError(t, output.Error(), \"missing receipt for transaction\")\n}", "func (service *UserService) AddUser(u models.User) (models.User, error) {\n\tservice.MaxUserID = service.MaxUserID + 1\n\tu.ID = service.MaxUserID\n\tservice.UserList[service.MaxUserID] = u\n\treturn u, nil\n}", "func (ms *moviestoreImpl) AddUser(name string, age Age) UserID {\n\tuserID := ms.nextUserID\n\tms.nextUserID++\n\tuser := User{name, age, userID}\n\tms.users[userID] = user\n\treturn userID\n}", "func (_DappboxManager *DappboxManagerTransactorSession) AddUsers(dAppBoxOrigin common.Address, _address common.Address, _userName string, _defaultURL string, _shortenURL string) (*types.Transaction, error) {\n\treturn _DappboxManager.Contract.AddUsers(&_DappboxManager.TransactOpts, 
dAppBoxOrigin, _address, _userName, _defaultURL, _shortenURL)\n}", "func (_Userable *UserableTransactor) AddAuditor(opts *bind.TransactOpts, _newAuditor common.Address) (*types.Transaction, error) {\n\treturn _Userable.contract.Transact(opts, \"addAuditor\", _newAuditor)\n}", "func (e *LifecycleEvent) SetReceiptHandle(receipt string) { e.receiptHandle = receipt }", "func (r *Redis) AddUser(id, key string) (err error) {\n\terr = r.client.HMSet(id, \"timestamp\", strconv.FormatInt(time.Now().UTC().Unix(), 10), \"key\", key, \"files\", \"\").Err()\n\treturn\n}", "func (c *UsageController) Add(recipeID int64, userID int64) error {\n\tc.Usage = append(c.Usage, models.Usage{\n\t\tID: c.getNewID(),\n\t\tRecipeID: recipeID,\n\t\tDate: time.Now(),\n\t\tUserID: userID,\n\t})\n\n\treturn nil\n}", "func (pg *PGUser) Add(in *user.User) (err error) {\n\tfmt.Printf(\"\\nPGUser in: %+v\\n\", in)\n\tif err := pg.DB.Create(in).Scan(&in); err != nil {\n\t\treturn oops.Err(err.Error)\n\t}\n\treturn nil\n}", "func (s *Server) AddItemLine(ctx context.Context, in *api.ItemLine) (*api.MsgResponse, error) {\n\tlog.Printf(\"insert itemLine with %v\", *in)\n\tb, err := json.Marshal(in)\n\tif err != nil {\n\t\treturn &api.MsgResponse{\n\t\t\tResponseMsg: \"FAILED\",\n\t\t}, err\n\t}\n\tc := make(chan ConfirmationMessage)\n\tfn := func(uid string, err error) {\n\t\tif err != nil {\n\t\t\tresp := ConfirmationMessage{\n\t\t\t\tresponse: \"ERROR\",\n\t\t\t\terr: err,\n\t\t\t}\n\t\t\tc <- resp\n\t\t} else {\n\t\t\tresp := ConfirmationMessage{\n\t\t\t\tresponse: uid,\n\t\t\t\terr: nil,\n\t\t\t}\n\t\t\tc <- resp\n\t\t}\n\t}\n\ts.MsgPublisher.PublishEvent(kitemLineChannelID, string(b), fn)\n\n\tif ret := <-c; ret.err != nil {\n\t\treturn &api.MsgResponse{\n\t\t\tResponseMsg: \"Error\",\n\t\t}, ret.err\n\t}\n\treturn &api.MsgResponse{\n\t\tResponseMsg: \"Created\",\n\t}, err\n}", "func (room *Room) AddUser(user *User) error {\n\troom.rwMutex.Lock()\n\tdefer room.rwMutex.Unlock()\n\tif room.deleted 
{\n\t\treturn ErrMissingRoom\n\t}\n\troom.users[user] = nil\n\treturn nil\n}", "func (card *Card) AddUser(user string) {\n log.Printf(\"Adding user %s to card %s.\", user, card.Id)\n GenPOSTForm(card.trello, \"/cards/\" + card.Id + \"/idMembers\", nil, url.Values{ \"value\": { card.trello.userIdbyName[user] } })\n}", "func AddUser(req *router.Request) error {\n\tparams := req.Params.(*AddUserParams)\n\n\tencryptedPassword, err := auth.CryptPassword(params.Password)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tuser := &auth.User{\n\t\tName: params.Name,\n\t\tEmail: params.Email,\n\t\tPassword: encryptedPassword,\n\t}\n\n\tif err := user.Save(); err != nil {\n\t\treturn err\n\t}\n\n\treq.Created(payloads.NewFullUser(user))\n\treturn nil\n}", "func (list *UserNotifications) Add(notificationID string) error {\n\tif list.Contains(notificationID) {\n\t\treturn errors.New(\"Notification \" + notificationID + \" has already been added\")\n\t}\n\n\tlist.Items = append(list.Items, notificationID)\n\treturn nil\n}", "func (r *CompanyAgedAccountsReceivableCollectionRequest) Add(ctx context.Context, reqObj *AgedAccountsReceivable) (resObj *AgedAccountsReceivable, err error) {\n\terr = r.JSONRequest(ctx, \"POST\", \"\", reqObj, &resObj)\n\treturn\n}", "func (c Application) AddUser() revel.Result {\n\tif user := c.connected(); user != nil {\n\t\tc.ViewArgs[\"user\"] = user\n\t}\n\treturn nil\n}", "func PlusKudo(self *slack.UserDetails, ev *slack.ReactionAddedEvent, rtm *slack.RTM, db *SQLite) {\n\tlog.Debug(ev)\n\tdb.PlusKudo(ev.ItemUser)\n}", "func AddCartItem(service Service, userService users.Service) func(w http.ResponseWriter, r *http.Request) {\n\treturn func(w http.ResponseWriter, r *http.Request) {\n\t\tctx := r.Context()\n\t\tlogger := loglib.GetLogger(ctx)\n\t\tusername, err := auth.GetLoggedInUsername(r)\n\t\tif err != nil {\n\t\t\thttpresponse.ErrorResponseJSON(ctx, w, http.StatusForbidden, errorcode.ErrorsInRequestData, err.Error())\n\t\t\treturn\n\t\t}\n\n\t\tuser, 
err := userService.RetrieveUserByUsername(ctx, username)\n\t\tif err != nil || user == nil {\n\t\t\thttpresponse.ErrorResponseJSON(ctx, w, http.StatusUnauthorized, errorcode.UserNotFound, \"User not found\")\n\t\t\treturn\n\t\t}\n\n\t\tlogger.Infof(\"user is %v\", user.Username)\n\t\t// unmarshal request\n\t\treq := addCartItemRequest{}\n\t\tif err := json.NewDecoder(r.Body).Decode(&req); (err != nil || req == addCartItemRequest{}) {\n\t\t\thttpresponse.ErrorResponseJSON(ctx, w, http.StatusBadRequest, errorcode.ErrorsInRequestData, err.Error())\n\t\t\treturn\n\t\t}\n\n\t\t// validate request\n\t\tif err := req.Validate(); err != nil {\n\t\t\thttpresponse.ErrorResponseJSON(ctx, w, http.StatusBadRequest, errorcode.ErrorsInRequestData, err.Error())\n\t\t\treturn\n\t\t}\n\n\t\tcart, err := service.AddItemCart(ctx, user.ID, req.ProductID, req.Quantity)\n\t\tif err != nil {\n\t\t\thttpresponse.ErrorResponseJSON(ctx, w, http.StatusInternalServerError, \"internal_error\", err.Error())\n\t\t\treturn\n\t\t}\n\n\t\thttpresponse.RespondJSON(w, http.StatusOK, cart, nil)\n\t}\n}", "func (_ChpRegistry *ChpRegistryTransactorSession) AddPauser(account common.Address) (*types.Transaction, error) {\n\treturn _ChpRegistry.Contract.AddPauser(&_ChpRegistry.TransactOpts, account)\n}", "func (db *InMemoryDB) AddTransaction(userID string, transaction model.Transaction) {\n\tif transactions, ok := db.UserTransactions[userID]; ok {\n\t\tdb.UserTransactions[userID] = append(transactions, transaction)\n\t} else {\n\t\tdb.UserTransactions[userID] = []model.Transaction{transaction}\n\t}\n}", "func (t *tx) AddUser(user *model.User) error {\n\t// FIXME: handle sql constraint errors\n\terr := t.Create(user).Error\n\n\treturn errors.Wrap(err, \"create user failed\")\n}", "func (ec *ExpertiseCreate) AddExpertiseUser(u ...*User) *ExpertiseCreate {\n\tids := make([]int, len(u))\n\tfor i := range u {\n\t\tids[i] = u[i].ID\n\t}\n\treturn ec.AddExpertiseUserIDs(ids...)\n}", "func ReadReceipt(row *sql.Row, 
a *Receipt) error {\n\terr := row.Scan(&a.RCPTID, &a.PRCPTID, &a.BID, &a.TCID, &a.PMTID, &a.DEPID, &a.DID, &a.RAID, &a.Dt, &a.DocNo, &a.Amount, &a.AcctRuleReceive, &a.ARID, &a.AcctRuleApply, &a.FLAGS, &a.Comment,\n\t\t&a.OtherPayorName, &a.CreateTS, &a.CreateBy, &a.LastModTime, &a.LastModBy)\n\tSkipSQLNoRowsError(&err)\n\treturn err\n}", "func (q *Quickbooks) CreateSalesReceipt(invoice SalesReceipt) (*SalesReceiptObject, error) {\n\tendpoint := fmt.Sprintf(\"/company/%s/salesreceipt\", q.RealmID)\n\n\tres, err := q.makePostRequest(endpoint, invoice)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer res.Body.Close()\n\n\tnewSalesReceipt := SalesReceiptObject{}\n\terr = json.NewDecoder(res.Body).Decode(&newSalesReceipt)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &newSalesReceipt, nil\n}", "func AddUser(w http.ResponseWriter, r *http.Request) {\n\treqBody, err := ioutil.ReadAll(r.Body)\n\tif err != nil {\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\tw.Write([]byte(err.Error()))\n\t\treturn\n\t}\n\tvar user datastructures.UserLogin\n\terr = json.Unmarshal(reqBody, &user)\n\tif err != nil {\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\tw.Write([]byte(err.Error()))\n\t\treturn\n\t}\n\terr = database.AddUser(user)\n\tif err != nil {\n\t\tif err.Error() == \"User already exists\" {\n\t\t\tw.WriteHeader(http.StatusConflict)\n\t\t} else {\n\t\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\t}\n\t\tw.Write([]byte(err.Error()))\n\t\treturn\n\t}\n}", "func (u *UserProfile) AddAddress(a Address) {\n\t// TODO: consider sending a request to dominos to update the user with this address.\n\tu.Addresses = append(u.Addresses, UserAddressFromAddress(a))\n}", "func NewReceipt(blockRoot []byte, failed bool, cumulativeGasUsed *big.Int) *Receipt {\n\tr := &Receipt{PostState: bgmcommon.CopyBytes(blockRoot), CumulativeGasUsed: new(big.Int).Set(cumulativeGasUsed)}\n\tif failed {\n\t\tr.Status = ReceiptStatusFailed\n\t} else {\n\t\tr.Status = 
ReceiptStatusSuccessful\n\t}\n\treturn r\n}", "func (db userDatabase) AddPendingUser(name string, email string, template string, password string, summary string) error {\n\tcon, err := db.mysql.GetConnection()\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer con.Close()\n\n\thasher := md5.New()\n\thasher.Write([]byte(password))\n\tcreds := hex.EncodeToString(hasher.Sum(nil))\n\n\t_, err = con.Exec(\"INSERT INTO users (name, email, gender, password, summary) VALUES(?, ?, ?, ?, ?)\",\n\t\tname, email, template, creds, summary)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}", "func NewReceipt(barCodes []string) *Receipt {\n\tlineItems := map[string]*LineItem{}\n\tfor _, barCode := range barCodes {\n\t\tquantityCode := regexp.MustCompile(\"-[0-9]+$\").FindString(barCode)\n\t\tproductBarCode := barCode\n\t\tquantity := 1\n\t\tif quantityCode != \"\" {\n\t\t\tproductBarCode = strings.TrimSuffix(productBarCode, quantityCode)\n\t\t\tquantityCode = strings.TrimPrefix(quantityCode, \"-\")\n\t\t\tquantity, _ = strconv.Atoi(quantityCode)\n\t\t}\n\n\t\tif li, ok := lineItems[productBarCode]; !ok {\n\t\t\tlineItems[productBarCode] = NewLineItem(productBarCode, quantity)\n\t\t} else {\n\t\t\tli.Quantity += quantity\n\t\t}\n\t}\n\n\treturn &Receipt{LineItems: lineItems}\n}", "func (ec *Client) TransactionReceipt(ctx context.Context, txHash helper.Hash) (*types.Receipt, error) {\n\tvar r *types.Receipt\n\terr := ec.c.CallContext(ctx, &r, \"siot_getTransactionReceipt\", txHash)\n\tif err == nil && r != nil && len(r.PostState) == 0 {\n\t\treturn nil, fmt.Errorf(\"server returned receipt without post state\")\n\t}\n\treturn r, err\n}", "func (u *user) AddReputation(amount int) {\n\tu.reputation.Add(amount)\n}", "func (u UserController) AddUser(w http.ResponseWriter, r *http.Request, _ httprouter.Params) {\n\tvar user models.User\n\tif err := json.NewDecoder(r.Body).Decode(&user); err != nil || user.IsEmpty() {\n\t\tlog.Println(err)\n\t\thttp.Error(w, \"bad request\", 
http.StatusBadRequest)\n\t\treturn\n\t}\n\tid, err := u.userRepository.Create(user)\n\tif err != nil {\n\t\thttp.Error(w, \"service unavailable\", http.StatusServiceUnavailable)\n\t\treturn\n\t}\n\thttp.Redirect(w, r, fmt.Sprintf(\"/users/%v\", id), http.StatusSeeOther)\n}", "func (r *CompanyPurchaseInvoiceLinesCollectionRequest) Add(ctx context.Context, reqObj *PurchaseInvoiceLine) (resObj *PurchaseInvoiceLine, err error) {\n\terr = r.JSONRequest(ctx, \"POST\", \"\", reqObj, &resObj)\n\treturn\n}", "func (rt *RecoveryTracker) AddRecoveryRequest(partitionID int32, fromOffset int64, toOffset int64) error {\n\tlog.WithField(\"partition_id\", partitionID).WithField(\"from_offset\", fromOffset).WithField(\"to_offset\", toOffset).Warn(\"recoverytracker: requesting partition recovery\")\n\n\trt.requestLock.Lock()\n\tdefer rt.requestLock.Unlock()\n\n\trequests := rt.recoveryRequests[partitionID]\n\tif requests == nil {\n\t\trequests = &RecoveryRequests{}\n\t\trt.recoveryRequests[partitionID] = requests\n\t}\n\n\t// if this overlaps an existing request, merge them in-place\n\toverlapFound := false\n\tfor _, request := range requests.Requests {\n\t\t// test for overlaps\n\t\tif fromOffset <= request.ToOffset && request.FromOffset <= toOffset {\n\t\t\tlog.WithField(\"partition_id\", partitionID).WithField(\"from_offset\", request.FromOffset).\n\t\t\t\tWithField(\"to_offset\", request.ToOffset).Info(\"recoverytracker: merging with existing recovery request\")\n\n\t\t\trequest.FromOffset = min(fromOffset, request.FromOffset)\n\t\t\trequest.ToOffset = max(toOffset, request.ToOffset)\n\t\t\toverlapFound = true\n\t\t}\n\t}\n\n\t// otherwise create and add a new request\n\tif !overlapFound {\n\t\trequest := &RecoveryRequest{\n\t\t\tPartitionID: partitionID,\n\t\t\tFromOffset: fromOffset,\n\t\t\tToOffset: toOffset,\n\t\t\tCreated: time.Now(),\n\t\t}\n\t\trequests.Requests = append(requests.Requests, request)\n\t}\n\treturn rt.sendRecoveryRequests(partitionID, requests)\n}", "func 
(ec *Client) TransactionReceipt(ctx context.Context, txHash common.Hash) (*types.Receipt, error) {\n\tec.Send(generalCost)\n\treturn ec.c.TransactionReceipt(ctx, txHash)\n}", "func (r *CompanyJournalLinesCollectionRequest) Add(ctx context.Context, reqObj *JournalLine) (resObj *JournalLine, err error) {\n\terr = r.JSONRequest(ctx, \"POST\", \"\", reqObj, &resObj)\n\treturn\n}", "func (c Controller) AddUser(w http.ResponseWriter, r *http.Request) {\n\trequest := &AddUserRequest{}\n\tif err := render.Bind(r, request); err != nil {\n\t\thttp.Error(w, http.StatusText(http.StatusBadRequest), http.StatusBadRequest)\n\t\treturn\n\t}\n\n\tuser, err := c.userService.AddUser(r.Context(), user.AddUserRequest(*request))\n\tif err != nil {\n\t\thttp.Error(w, \"could not add user\", http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\trender.JSON(w, r, user)\n}", "func (buo *BookingUpdateOne) AddUSERNUMBER(i int) *BookingUpdateOne {\n\tbuo.mutation.AddUSERNUMBER(i)\n\treturn buo\n}", "func (ticket *Ticket) AddLines(lines TicketLines) (int, error) {\n\n\tif ticket.IsAmendable() {\n\t\tticket.Lines = append(ticket.Lines, lines...)\n\t\treturn len(ticket.Lines), nil\n\t} else {\n\t\treturn -1, errors.New(\"Ticket with id: \" + strconv.FormatInt(ticket.Id, 10) + \" is not ammendable\")\n\t}\n}", "func (_ChpRegistry *ChpRegistrySession) AddPauser(account common.Address) (*types.Transaction, error) {\n\treturn _ChpRegistry.Contract.AddPauser(&_ChpRegistry.TransactOpts, account)\n}", "func (tb *timerBuilder) AddUserTimer(ti *persistence.TimerInfo, msBuilder mutableState) {\n\tif !tb.isLoadedUserTimers {\n\t\ttb.loadUserTimers(msBuilder)\n\t}\n\tseqNum := tb.localSeqNumGen.NextSeq()\n\ttimer := &timerDetails{\n\t\tTimerSequenceID: TimerSequenceID{VisibilityTimestamp: ti.ExpiryTime, TaskID: seqNum},\n\t\tTimerID: ti.TimerID,\n\t\tTaskCreated: ti.TaskID == TimerTaskStatusCreated}\n\ttb.insertTimer(timer)\n}", "func (_m *ReceiptStore) ProcessReceipt(msgBytes []byte) 
{\n\t_m.Called(msgBytes)\n}", "func (userManager *UserManager) AddUser(user *User) {\n\t//userManager.users = append(userManager.users, user)\n\n\tub, err := json.Marshal(*user)\n\n\tlog.Println(\"ADDUSER: \" + string(ub))\n\n\tif err == nil {\n\t\tuserManager.DBHelper.Put(user.ID, ub)\n\t} else {\n\t\tlog.Println(\"Error marshalling user \" + err.Error())\n\t}\n}", "func (puo *ProductUpdateOne) RemoveReceipt(r ...*Receipt) *ProductUpdateOne {\n\tids := make([]int, len(r))\n\tfor i := range r {\n\t\tids[i] = r[i].ID\n\t}\n\treturn puo.RemoveReceiptIDs(ids...)\n}", "func (bu *BookingUpdate) AddUSERNUMBER(i int) *BookingUpdate {\n\tbu.mutation.AddUSERNUMBER(i)\n\treturn bu\n}", "func AddUser(u User) {\n\tuserData.Insert(u)\n}", "func (client ManagementClient) PostUserRequestaudittrailSender(req *http.Request) (*http.Response, error) {\n return autorest.SendWithSender(client, req)\n }", "func (c CvpRestAPI) AddUser(user *SingleUser) error {\n\tif user == nil {\n\t\treturn errors.New(\"AddUser: can not add nil user\")\n\t}\n\tresp, err := c.client.Post(\"/user/addUser.do\", nil, user)\n\tif err != nil {\n\t\treturn errors.Errorf(\"AddUser: %s\", err)\n\t}\n\tvar addedUser *SingleUser\n\tif err = json.Unmarshal(resp, &addedUser); err != nil {\n\t\treturn errors.Errorf(\"AddUser: JSON unmarshal error: \\n%v\", err)\n\t}\n\tif err = addedUser.Error(); err != nil {\n\t\tvar retErr error\n\t\tif addedUser.ErrorCode == USER_ALREADY_EXISTS ||\n\t\t\taddedUser.ErrorCode == DATA_ALREADY_EXISTS {\n\t\t\tretErr = errors.Errorf(\"AddUser: user '%s' already exists\", addedUser.UserData.UserID)\n\t\t} else {\n\t\t\tretErr = errors.Errorf(\"AddUser: %s\", addedUser.String())\n\t\t}\n\t\treturn retErr\n\t}\n\treturn nil\n}", "func AddUser(w http.ResponseWriter, r *http.Request) {\n\tuser := &data.User{}\n\tif r.Body == nil {\n\t\thttp.Error(w, \"You must send data\", http.StatusBadRequest)\n\t\treturn\n\t}\n\n\terr := user.FromJSON(r.Body)\n\n\tif err != nil {\n\t\thttp.Error(w, 
err.Error(), http.StatusBadRequest)\n\t\treturn\n\t}\n\n\t//validation\n\terr = user.Validate()\n\tif err != nil {\n\t\thttp.Error(\n\t\t\tw,\n\t\t\tfmt.Sprintf(\"Error validating user: %s\", err),\n\t\t\thttp.StatusBadRequest,\n\t\t)\n\t\treturn\n\t}\n\tuser = user.Clean()\n\tif err = user.SetPassword(); err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusBadRequest)\n\t\treturn\n\t}\n\n\terr = data.Create(user)\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusBadRequest)\n\t\treturn\n\t}\n\tuser.ToJSON(w)\n}", "func (m *MgoUserManager) AddUserDetail(u *auth.User) (*auth.User, error) {\n\tu.Id = bson.NewObjectId()\n\terr := m.insertUser(u)\n\treturn u, err\n}", "func AddUserToEvent(euid humus.UID, userUid humus.UID, premium int) bool {\n\tvar us User\n\tus.SetUID(userUid)\n\tus.Premium = premium\n\tvar ev = Event{\n\t\tAttending: []*User{&us},\n\t}\n\tev.SetUID(euid)\n\t_, err := db.Mutate(context.Background(), humus.CreateMutation(&ev, humus.MutateSet))\n\tif err != nil {\n\t\treturn false\n\t}\n\treturn true\n}", "func AddUser(c echo.Context) error {\n\n\tvar body User\n\n\terr := c.Bind(&body)\n\tif err != nil {\n\t\treturn c.JSON(http.StatusBadRequest, ResponseError{Status: http.StatusBadRequest, Message: err.Error()})\n\t}\n\n\tif body.ID == nil {\n\t\treturn c.JSON(http.StatusBadRequest, ResponseError{Status: http.StatusBadRequest, Message: \"id empty\"})\n\t}\n\n\tuser := User{\n\t\tID: body.ID,\n\t\tName: body.Name,\n\t\tUsername: body.Username,\n\t\tPassword: body.Password,\n\t}\n\n\tusers = append(users, user)\n\n\treturn c.JSON(http.StatusCreated, user)\n}" ]
[ "0.65677184", "0.6518654", "0.54841864", "0.54648083", "0.5241363", "0.5195453", "0.51696557", "0.51665854", "0.5155161", "0.514535", "0.5085661", "0.50395", "0.5033886", "0.50278527", "0.50236046", "0.49896088", "0.49473062", "0.49466264", "0.4934199", "0.4933858", "0.49277717", "0.49178162", "0.49110535", "0.48808488", "0.48524132", "0.4837199", "0.48346403", "0.4829178", "0.48195553", "0.48178372", "0.48104066", "0.48004246", "0.47804353", "0.4772494", "0.4772494", "0.47606868", "0.47415143", "0.47279352", "0.47157457", "0.47056267", "0.4658072", "0.46374932", "0.4637224", "0.46236265", "0.46196753", "0.46186438", "0.46179762", "0.4617325", "0.4608181", "0.46018898", "0.46013984", "0.45947266", "0.45932904", "0.45750543", "0.4563937", "0.45568192", "0.4556364", "0.4555953", "0.45509872", "0.4516811", "0.449668", "0.44963774", "0.44869447", "0.44837824", "0.4475507", "0.44704184", "0.4465477", "0.4463221", "0.44540873", "0.44451898", "0.44427302", "0.44425023", "0.4440662", "0.4438296", "0.4436944", "0.44321966", "0.44298542", "0.44273606", "0.44270787", "0.44234312", "0.44221914", "0.44212633", "0.4414147", "0.44128585", "0.440925", "0.439212", "0.43920794", "0.43888792", "0.43767023", "0.4376626", "0.43739158", "0.43658012", "0.43578494", "0.4357741", "0.43533626", "0.43532634", "0.43478537", "0.43447736", "0.43287632", "0.43223354" ]
0.7855252
0
MergeUnique Merges `source` string slice into `dest` and returns result. Inserts from `source` only when `dest` does not `Contain` given string.
MergeUnique объединяет строковый срез `source` в `dest` и возвращает результат. Вставляет из `source` только в том случае, если `dest` не содержит заданную строку.
func MergeUnique(dest, source []string) []string { for _, str := range source { if !Contain(dest, str) { dest = append(dest, str) } } return dest }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func MergeAndDeduplicateSlice(src []string, target []string) []string {\n\tm := make(map[string]bool)\n\tfor i := range src {\n\t\tm[src[i]] = true\n\t}\n\n\tfor i := range target {\n\t\tif _, ok := m[target[i]]; !ok {\n\t\t\tsrc = append(src, target[i])\n\t\t}\n\t}\n\n\treturn src\n}", "func StringUniqueAppend(slice []string, s string) []string {\n\treturn strings.UniqueAppend(slice, s)\n}", "func DedupStrings(src []string) []string {\n\tm := make(map[string]struct{}, len(src))\n\tdst := make([]string, 0, len(src))\n\n\tfor _, v := range src {\n\t\t// Skip empty items\n\t\tif len(v) == 0 {\n\t\t\tcontinue\n\t\t}\n\t\t// Skip duplicates\n\t\tif _, ok := m[v]; ok {\n\t\t\tcontinue\n\t\t}\n\t\tm[v] = struct{}{}\n\t\tdst = append(dst, v)\n\t}\n\n\treturn dst\n}", "func MergeUnique(left, right []string) []string {\n\treturn CollectVariety(left, right, GetUnique, GetUnique, GetUnique)\n}", "func MergeStringSlices(slice1 []string, slice2 []string) []string {\n\tfor _, item := range slice2 {\n\t\tif !IsStringPresent(slice1, item) {\n\t\t\tslice1 = append(slice1, item)\n\t\t}\n\t}\n\treturn slice1\n}", "func AppendUniqueSlices(a, b []string) []string {\n\tfor _, e := range a {\n\t\tif !SliceContainsString(e, b) {\n\t\t\tb = append(b, e)\n\t\t}\n\t}\n\treturn b\n}", "func appendUnique(s []string, e string) []string {\n\tif !contains(s, e) {\n\t\treturn append(s, e)\n\t}\n\treturn s\n}", "func (c StringArrayCollection) Merge(i interface{}) Collection {\n\tm := i.([]string)\n\tvar d = make([]string, len(c.value))\n\tcopy(d, c.value)\n\n\tfor i := 0; i < len(m); i++ {\n\t\texist := false\n\t\tfor j := 0; j < len(d); j++ {\n\t\t\tif d[j] == m[i] {\n\t\t\t\texist = true\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\tif !exist {\n\t\t\td = append(d, m[i])\n\t\t}\n\t}\n\n\treturn StringArrayCollection{\n\t\tvalue: d,\n\t}\n}", "func AddStringIfMissing(slice []string, s string) (bool, []string) {\n\tfor _, item := range slice {\n\t\tif item == s {\n\t\t\treturn false, 
slice\n\t\t}\n\t}\n\treturn true, append(slice, s)\n}", "func appendIfMissing(slice []string, s string) []string {\n\tfor _, e := range slice {\n\t\tif e == s {\n\t\t\treturn slice\n\t\t}\n\t}\n\treturn append(slice, s)\n}", "func mergeTags(existing string, tags []string) string {\n\tif existing == \"\" {\n\t\treturn strings.Join(tags, \",\")\n\t}\n\told := strings.Split(existing, \",\")\n\tvar merged []string\n\tfor _, o := range old {\n\t\tfound := false\n\t\tfor _, tag := range tags {\n\t\t\tif tag == o {\n\t\t\t\tfound = true\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\tif !found {\n\t\t\tmerged = append(merged, o)\n\t\t}\n\t}\n\tfor _, tag := range tags {\n\t\tfound := false\n\t\tfor _, o := range merged {\n\t\t\tif tag == o {\n\t\t\t\tfound = true\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\tif !found {\n\t\t\tmerged = append(merged, tag)\n\t\t}\n\t}\n\treturn strings.Join(merged, \",\")\n}", "func CompareSliceStrU(s1, s2 []string) bool {\n\tif len(s1) != len(s2) {\n\t\treturn false\n\t}\n\n\tfor i := range s1 {\n\t\tfor j := len(s2) - 1; j >= 0; j-- {\n\t\t\tif s1[i] == s2[j] {\n\t\t\t\ts2 = append(s2[:j], s2[j+1:]...)\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t}\n\tif len(s2) > 0 {\n\t\treturn false\n\t}\n\treturn true\n}", "func (dst *Hosts) Merge(src Hosts) {\n\tif dst == nil || len(src) == 0 {\n\t\treturn\n\t}\n\n\tcopied := *dst\n\tcopied = append(copied, src...)\n\n\tregistry := map[string]int{}\n\tfor i := len(copied); i > 0; i-- {\n\t\tregistry[copied[i-1].Name] = i - 1\n\t}\n\tunique := copied[:0]\n\tfor i, host := range copied {\n\t\torigin := registry[host.Name]\n\t\tif i == origin {\n\t\t\tunique = append(unique, host)\n\t\t\tcontinue\n\t\t}\n\t\tunique[origin].Merge(host)\n\t}\n\n\t*dst = unique\n}", "func mergeAlternately(word1 string, word2 string) string {\n\tvar buf bytes.Buffer\n\tfor i := range word1 {\n\t\tbuf.WriteByte(word1[i])\n\t\tif i < len(word2) {\n\t\t\tbuf.WriteByte(word2[i])\n\t\t}\n\t}\n\n\tif len(word1) < len(word2) 
{\n\t\tbuf.WriteString(word2[len(word1):])\n\t}\n\treturn buf.String()\n}", "func appendIfMissing(slice []string, s string) ([]string, bool) {\n\tfor _, ele := range slice {\n\t\tif ele == s {\n\t\t\treturn slice, false\n\t\t}\n\t}\n\treturn append(slice, s), true\n}", "func UniqueAppend(orig []string, add ...string) []string {\n\treturn append(orig, NewUniqueElements(orig, add...)...)\n}", "func MergeStringSlices(a []string, b []string) []string {\n\tset := sets.NewString(a...)\n\tset.Insert(b...)\n\treturn set.UnsortedList()\n}", "func mergeString(a, b string) string {\n\tif a != \"\" {\n\t\treturn a\n\t}\n\n\treturn b\n}", "func MergeSortedStrings(n ...[]string) []string {\n\tvar result []string\n\tif len(n) == 0 {\n\t\treturn nil\n\t} else if len(n) == 1 {\n\t\t// Special case. Merge single slice with a nil slice, to remove any\n\t\t// duplicates from the single slice.\n\t\treturn MergeSortedStrings(n[0], nil)\n\t}\n\n\tvar maxSize int\n\tfor _, a := range n {\n\t\tif len(a) > maxSize {\n\t\t\tmaxSize = len(a)\n\t\t}\n\t}\n\tresult = make([]string, 0, maxSize) // This will likely be too small but it's a start.\n\n\tidxs := make([]int, len(n)) // Indexes we've processed.\n\tvar j int // Index we currently think is minimum.\n\n\tfor {\n\t\tj = -1\n\n\t\t// Find the smallest minimum in all slices.\n\t\tfor i := 0; i < len(n); i++ {\n\t\t\tif idxs[i] >= len(n[i]) {\n\t\t\t\tcontinue // We have completely drained all values in this slice.\n\t\t\t} else if j == -1 {\n\t\t\t\t// We haven't picked the minimum value yet. Pick this one.\n\t\t\t\tj = i\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\t// It this value key is lower than the candidate.\n\n\t\t\tif n[i][idxs[i]] < n[j][idxs[j]] {\n\t\t\t\tj = i\n\t\t\t} else if n[i][idxs[i]] == n[j][idxs[j]] {\n\t\t\t\t// Duplicate value. 
Throw it away.\n\t\t\t\tidxs[i]++\n\t\t\t}\n\n\t\t}\n\n\t\t// We could have drained all of the values and be done...\n\t\tif j == -1 {\n\t\t\tbreak\n\t\t}\n\n\t\t// First value to just append it and move on.\n\t\tif len(result) == 0 {\n\t\t\tresult = append(result, n[j][idxs[j]])\n\t\t\tidxs[j]++\n\t\t\tcontinue\n\t\t}\n\n\t\t// Append the minimum value to results if it's not a duplicate of\n\t\t// the existing one.\n\n\t\tif result[len(result)-1] < n[j][idxs[j]] {\n\t\t\tresult = append(result, n[j][idxs[j]])\n\t\t} else if result[len(result)-1] == n[j][idxs[j]] {\n\t\t\t// Duplicate so drop it.\n\t\t} else {\n\t\t\tpanic(\"value being merged out of order.\")\n\t\t}\n\n\t\tidxs[j]++\n\t}\n\treturn result\n}", "func (s StringSet) Union(other StringSet) StringSet {\n\tresult := make(StringSet)\n\tfor v := range s {\n\t\tresult[v] = struct{}{}\n\t}\n\tfor v := range other {\n\t\tresult[v] = struct{}{}\n\t}\n\treturn result\n}", "func (a *StringArray) Merge(b *StringArray) {\n\tif a.Len() == 0 {\n\t\t*a = *b\n\t\treturn\n\t}\n\n\tif b.Len() == 0 {\n\t\treturn\n\t}\n\n\t// Normally, both a and b should not contain duplicates. Due to a bug in older versions, it's\n\t// possible stored blocks might contain duplicate values. 
Remove them if they exists before\n\t// merging.\n\t// a = a.Deduplicate()\n\t// b = b.Deduplicate()\n\n\tif a.MaxTime() < b.MinTime() {\n\t\ta.Timestamps = append(a.Timestamps, b.Timestamps...)\n\t\ta.Values = append(a.Values, b.Values...)\n\t\treturn\n\t}\n\n\tif b.MaxTime() < a.MinTime() {\n\t\tvar tmp StringArray\n\t\ttmp.Timestamps = append(b.Timestamps, a.Timestamps...)\n\t\ttmp.Values = append(b.Values, a.Values...)\n\t\t*a = tmp\n\t\treturn\n\t}\n\n\tout := NewStringArrayLen(a.Len() + b.Len())\n\ti, j, k := 0, 0, 0\n\tfor i < len(a.Timestamps) && j < len(b.Timestamps) {\n\t\tif a.Timestamps[i] < b.Timestamps[j] {\n\t\t\tout.Timestamps[k] = a.Timestamps[i]\n\t\t\tout.Values[k] = a.Values[i]\n\t\t\ti++\n\t\t} else if a.Timestamps[i] == b.Timestamps[j] {\n\t\t\tout.Timestamps[k] = b.Timestamps[j]\n\t\t\tout.Values[k] = b.Values[j]\n\t\t\ti++\n\t\t\tj++\n\t\t} else {\n\t\t\tout.Timestamps[k] = b.Timestamps[j]\n\t\t\tout.Values[k] = b.Values[j]\n\t\t\tj++\n\t\t}\n\t\tk++\n\t}\n\n\tif i < len(a.Timestamps) {\n\t\tn := copy(out.Timestamps[k:], a.Timestamps[i:])\n\t\tcopy(out.Values[k:], a.Values[i:])\n\t\tk += n\n\t} else if j < len(b.Timestamps) {\n\t\tn := copy(out.Timestamps[k:], b.Timestamps[j:])\n\t\tcopy(out.Values[k:], b.Values[j:])\n\t\tk += n\n\t}\n\n\ta.Timestamps = out.Timestamps[:k]\n\ta.Values = out.Values[:k]\n}", "func StringSliceExtractUnique(strSlice []string) (result []string) {\n\tif strSlice == nil {\n\t\treturn []string{}\n\t} else if len(strSlice) <= 1 {\n\t\treturn strSlice\n\t} else {\n\t\tfor _, v := range strSlice {\n\t\t\tif !StringSliceContains(&result, v) {\n\t\t\t\tresult = append(result, v)\n\t\t\t}\n\t\t}\n\n\t\treturn result\n\t}\n}", "func (us *UniqueStrings) Add(strings ...string) {\n\tfor _, s := range strings {\n\t\tif _, ok := us.values[s]; ok {\n\t\t\tcontinue\n\t\t}\n\t\tif us.values == nil {\n\t\t\tus.values = map[string]struct{}{}\n\t\t}\n\t\tus.values[s] = struct{}{}\n\t\tus.result = append(us.result, s)\n\t}\n}", "func 
(m *mergeQuerier) mergeDistinctStringSlice(f stringSliceFunc) ([]string, storage.Warnings, error) {\n\tvar jobs = make([]interface{}, len(m.tenantIDs))\n\n\tfor pos := range m.tenantIDs {\n\t\tjobs[pos] = &stringSliceFuncJob{\n\t\t\tquerier: m.queriers[pos],\n\t\t\ttenantID: m.tenantIDs[pos],\n\t\t}\n\t}\n\n\trun := func(ctx context.Context, jobIntf interface{}) error {\n\t\tjob, ok := jobIntf.(*stringSliceFuncJob)\n\t\tif !ok {\n\t\t\treturn fmt.Errorf(\"unexpected type %T\", jobIntf)\n\t\t}\n\n\t\tvar err error\n\t\tjob.result, job.warnings, err = f(ctx, job.querier)\n\t\tif err != nil {\n\t\t\treturn errors.Wrapf(err, \"error querying %s %s\", rewriteLabelName(defaultTenantLabel), job.tenantID)\n\t\t}\n\n\t\treturn nil\n\t}\n\n\terr := concurrency.ForEach(m.ctx, jobs, maxConcurrency, run)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\t// aggregate warnings and deduplicate string results\n\tvar warnings storage.Warnings\n\tresultMap := make(map[string]struct{})\n\tfor _, jobIntf := range jobs {\n\t\tjob, ok := jobIntf.(*stringSliceFuncJob)\n\t\tif !ok {\n\t\t\treturn nil, nil, fmt.Errorf(\"unexpected type %T\", jobIntf)\n\t\t}\n\n\t\tfor _, e := range job.result {\n\t\t\tresultMap[e] = struct{}{}\n\t\t}\n\n\t\tfor _, w := range job.warnings {\n\t\t\twarnings = append(warnings, errors.Wrapf(w, \"warning querying %s %s\", rewriteLabelName(defaultTenantLabel), job.tenantID))\n\t\t}\n\t}\n\n\tvar result = make([]string, 0, len(resultMap))\n\tfor e := range resultMap {\n\t\tresult = append(result, e)\n\t}\n\tsort.Strings(result)\n\treturn result, warnings, nil\n}", "func merge(source ...[]string) []string {\n\tm := make(map[string]struct{}, len(source)*10)\n\tfor _, list := range source {\n\t\tfor _, item := range list {\n\t\t\tm[item] = struct{}{}\n\t\t}\n\t}\n\tdst := make([]string, len(m))\n\tcnt := 0\n\tfor k := range m {\n\t\tdst[cnt] = k\n\t\tcnt += 1\n\t}\n\tsort.Strings(dst)\n\treturn dst\n}", "func AppendStringIfNotPresent(s string, ss []string) 
[]string {\n\tfor _, e := range ss {\n\t\tif e == s {\n\t\t\treturn ss\n\t\t}\n\t}\n\treturn append(ss, s)\n}", "func appendHostIfMissing(slice []string, s string) []string {\n\tfor _, ele := range slice {\n\t\tif ele == s {\n\t\t\treturn slice\n\t\t}\n\t}\n\treturn append(slice, s)\n}", "func StringSliceDelete(slice1 []string, slice2 []string) []string {\n\n\tvar diff []string\n\n\tfor _, s1 := range slice1 {\n\t\tfound := false\n\t\tfor _, s2 := range slice2 {\n\t\t\tif s1 == s2 {\n\t\t\t\tfound = true\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\t// String not found. We add it to return slice\n\t\tif !found {\n\t\t\tdiff = append(diff, s1)\n\t\t}\n\t}\n\n\treturn diff\n}", "func StringSlicesUnion(one, two []string) []string {\n\tvar union []string\n\tunion = append(union, one...)\n\tunion = append(union, two...)\n\treturn OnlyUnique(union)\n}", "func Merge(aa []string, bb ...string) []string {\n\tcheck := make(map[string]int)\n\tres := make([]string, 0)\n\tdd := append(aa, bb...)\n\tfor _, val := range dd {\n\t\tcheck[val] = 1\n\t}\n\n\tfor letter, _ := range check {\n\t\tif letter == \"\" {\n\t\t\tcontinue\n\t\t}\n\t\tres = append(res, letter)\n\t}\n\n\tsort.Strings(res)\n\n\treturn res\n}", "func AppendIfMissing(slice []string, val string) []string {\n\tfor _, ele := range slice {\n\t\tif ele == val {\n\t\t\treturn slice\n\t\t}\n\t}\n\treturn append(slice, val)\n}", "func Union(s1, s2 string) string {\n\tvar intersect strings.Builder\n\tset := make(map[rune]bool)\n\tfor _, char := range s1 {\n\t\tif _, ok := set[char]; !ok {\n\t\t\tset[char] = true\n\t\t\tintersect.WriteRune(char)\n\t\t}\n\t}\n\tfor _, char := range s2 {\n\t\tif _, ok := set[char]; !ok {\n\t\t\tset[char] = true\n\t\t\tintersect.WriteRune(char)\n\t\t}\n\t}\n\treturn intersect.String()\n}", "func dupe(src []byte) []byte {\n\td := make([]byte, len(src))\n\tcopy(d, src)\n\treturn d\n}", "func Distinct(s string) string {\n\tvar ascii [256]bool\n\tvar nonascii map[rune]bool\n\treturn strings.Map(func(r rune) 
rune {\n\t\tif r < 0x80 {\n\t\t\tb := byte(r)\n\t\t\tif ascii[b] {\n\t\t\t\treturn -1\n\t\t\t}\n\t\t\tascii[b] = true\n\t\t} else {\n\t\t\tif nonascii == nil {\n\t\t\t\tnonascii = make(map[rune]bool)\n\t\t\t}\n\t\t\tif nonascii[r] {\n\t\t\t\treturn -1\n\t\t\t}\n\t\t\tnonascii[r] = true\n\t\t}\n\t\treturn r\n\t}, s)\n}", "func stringSliceOverlaps(left []string, right []string) bool {\n\tfor _, s := range left {\n\t\tfor _, t := range right {\n\t\t\tif s == t {\n\t\t\t\treturn true\n\t\t\t}\n\t\t}\n\t}\n\treturn false\n}", "func Merge(target sgmt.MutableSegment, srcs ...sgmt.MutableSegment) error {\n\tsafeClosers := []io.Closer{}\n\tdefer func() {\n\t\tfor _, c := range safeClosers {\n\t\t\tc.Close()\n\t\t}\n\t}()\n\n\t// for each src\n\tfor _, src := range srcs {\n\t\t// get reader for `src`\n\t\treader, err := src.Reader()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\t// ensure readers are all closed.\n\t\treaderCloser := x.NewSafeCloser(reader)\n\t\tsafeClosers = append(safeClosers, readerCloser)\n\n\t\t// retrieve all docs known to the reader\n\t\tdIter, err := reader.AllDocs()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\t// iterate over all known docs\n\t\tfor dIter.Next() {\n\t\t\td := dIter.Current()\n\t\t\t_, err := target.Insert(d)\n\t\t\tif err == nil || err == index.ErrDuplicateID {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\treturn err\n\t\t}\n\n\t\t// ensure no errors while iterating\n\t\tif err := dIter.Err(); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\t// ensure no errors while closing reader\n\t\tif err := readerCloser.Close(); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\t// all good\n\treturn nil\n}", "func (s StringSet) AddSet(src StringSet) {\n\tfor str := range src {\n\t\ts[str] = struct{}{}\n\t}\n}", "func ConcatSlice(sliceToConcat []byte) string {\n\tstringRep := \"\"\n\n\tfor index := 0; index < len(sliceToConcat); index++ {\n\t\tstringRep = stringRep + string(sliceToConcat[index])\n\n\t\tif index+1 != len(sliceToConcat) 
{\n\t\t\tstringRep = stringRep + \"-\"\n\t\t}\n\t}\n\n\treturn stringRep\n}", "func Merge(dest string, input ...string) error {\n\tfor _, file := range input {\n\t\tstat, err := os.Stat(file)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif stat.Mode().IsRegular() {\n\t\t\ttargetFilePath := filepath.Join(dest, stat.Name())\n\t\t\tif err := EnsureFile(targetFilePath); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tbody, err := ioutil.ReadFile(file)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tif err := ioutil.WriteFile(targetFilePath, body, 0600); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t} else if stat.Mode().IsDir() {\n\t\t\tif err := filepath.Walk(file, func(path string, info os.FileInfo, err error) error {\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\n\t\t\t\tstat, err := os.Stat(path)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tif stat.Mode().IsRegular() {\n\t\t\t\t\tdestDir := filepath.Join(dest, filepath.Dir(path))\n\t\t\t\t\tif err := EnsureDir(destDir); err != nil {\n\t\t\t\t\t\treturn err\n\t\t\t\t\t}\n\n\t\t\t\t\tif err := copy.Copy(file, destDir); err != nil {\n\t\t\t\t\t\treturn err\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\treturn nil\n\t\t\t}); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\treturn nil\n}", "func RemoveStringDuplicates(slice []string) []string {\n\treturnSlice := make([]string, 0)\n\tseen := make(map[string]struct{})\n\n\tfor _, s := range slice {\n\t\tif _, wasThere := seen[s]; !wasThere {\n\t\t\treturnSlice = append(returnSlice, s)\n\t\t\tseen[s] = struct{}{}\n\t\t}\n\t}\n\n\treturn returnSlice\n}", "func dedupStrings(s []string) []string {\n\tp := len(s) - 1\n\tif p <= 0 {\n\t\treturn s\n\t}\n\n\tfor i := p - 1; i >= 0; i-- {\n\t\tif s[p] != s[i] {\n\t\t\tp--\n\t\t\ts[p] = s[i]\n\t\t}\n\t}\n\n\treturn s[p:]\n}", "func Unique(ss []string) []string {\n\tr := make([]string, 0)\n\tfor _, s := range ss {\n\t\tif Search(s, r) == -1 {\n\t\t\tr = append(r, 
s)\n\t\t}\n\t}\n\n\treturn r\n}", "func makeUnique(src []string, maxLength int) []string {\n\tresult := make([]string, 0, maxLength)\n\tuniqueMap := make(map[string]struct{}, maxLength)\n\tfor _, v := range src {\n\t\tif _, ok := uniqueMap[v]; ok {\n\t\t\tcontinue\n\t\t}\n\t\tuniqueMap[v] = struct{}{}\n\n\t\tresult = append(result, v)\n\t\tif len(result) >= maxLength {\n\t\t\tbreak\n\t\t}\n\t}\n\treturn result\n}", "func (dst *Workers) Merge(src Workers) {\n\tif dst == nil || len(src) == 0 {\n\t\treturn\n\t}\n\n\tcopied := *dst\n\tcopied = append(copied, src...)\n\n\tregistry := map[string]int{}\n\tfor i := len(copied); i > 0; i-- {\n\t\tregistry[copied[i-1].Name] = i - 1\n\t}\n\tunique := copied[:0]\n\tfor i, worker := range copied {\n\t\torigin := registry[worker.Name]\n\t\tif i == origin {\n\t\t\tunique = append(unique, worker)\n\t\t\tcontinue\n\t\t}\n\t\tunique[origin].Merge(worker)\n\t}\n\n\t*dst = unique\n}", "func ConcatSlice(sliceToConcat []byte) string {\n\tvar dummy string\n\tfor index := 0; index < len(sliceToConcat)-1; index++ {\n\t\tdummy = dummy + string(sliceToConcat[index]) + \"-\"\n\t}\n\tdummy = dummy + string(sliceToConcat[len(sliceToConcat)-1])\n\treturn dummy\n}", "func mergeLabel(ss []string, i, prefixLen int, prefixes prefixMap) {\n\ts := ss[i]\n\tsuffix := s[len(s)-prefixLen:]\n\tfor _, j := range prefixes[suffix] {\n\t\t// Empty strings mean \"already used.\" Also avoid merging with self.\n\t\tif ss[j] == \"\" || i == j {\n\t\t\tcontinue\n\t\t}\n\t\tif *v {\n\t\t\tfmt.Fprintf(os.Stderr, \"%d-length overlap at (%4d,%4d): %q and %q share %q\\n\",\n\t\t\t\tprefixLen, i, j, ss[i], ss[j], suffix)\n\t\t}\n\t\tss[i] += ss[j][prefixLen:]\n\t\tss[j] = \"\"\n\t\t// ss[i] has a new suffix, so merge again if possible.\n\t\t// Note: we only have to merge again at the same prefix length. 
Shorter\n\t\t// prefix lengths will be handled in the next iteration of crush's for loop.\n\t\t// Can there be matches for longer prefix lengths, introduced by the merge?\n\t\t// I believe that any such matches would by necessity have been eliminated\n\t\t// during substring removal or merged at a higher prefix length. For\n\t\t// instance, in crush(\"abc\", \"cde\", \"bcdef\"), combining \"abc\" and \"cde\"\n\t\t// would yield \"abcde\", which could be merged with \"bcdef.\" However, in\n\t\t// practice \"cde\" would already have been elimintated by removeSubstrings.\n\t\tmergeLabel(ss, i, prefixLen, prefixes)\n\t\treturn\n\t}\n}", "func (dst *Proxies) Merge(src Proxies) {\n\tif dst == nil || len(src) == 0 {\n\t\treturn\n\t}\n\n\tcopied := *dst\n\tcopied = append(copied, src...)\n\n\tregistry := map[string]int{}\n\tfor i := len(copied); i > 0; i-- {\n\t\tregistry[copied[i-1].Name] = i - 1\n\t}\n\tunique := copied[:0]\n\tfor i, proxy := range copied {\n\t\torigin := registry[proxy.Name]\n\t\tif i == origin {\n\t\t\tunique = append(unique, proxy)\n\t\t\tcontinue\n\t\t}\n\t\tunique[origin].Merge(proxy)\n\t}\n\n\t*dst = unique\n}", "func replaceUnique(list, from, to []string) []string {\n\tlist = slices.Clone(list)\n\tfor i, f := range from {\n\t\tj := slices.Index(list, f)\n\t\tif j == -1 {\n\t\t\tpanic(\"can't rename nonexistent column: \" + f)\n\t\t}\n\t\tif slices.Contains(list, to[i]) {\n\t\t\tpanic(\"can't rename to existing column: \" + to[i])\n\t\t}\n\t\tlist[j] = to[i]\n\t}\n\treturn list\n}", "func AppendIfMissingIgnoreCase(str string, suffix string, suffixes ...string) string {\n\treturn internalAppendIfMissing(str, suffix, true, suffixes...)\n}", "func (s String) Intersection(strings ...String) (intersection String) {\n\tintersection = s.Copy()\n\tfor key := range s {\n\t\tfor _, set := range append(strings, s) {\n\t\t\tif !set.Contains(key) {\n\t\t\t\tdelete(intersection, key)\n\t\t\t}\n\t\t}\n\t}\n\treturn intersection\n}", "func (il *IntList) 
JoinUnique(other *IntList) {\n // The algorithm here is stupid. Are there better ones?\n otherLast := other.Last()\n for otherIt := other.First(); otherIt != otherLast; otherIt = otherIt.Next() {\n contained := false\n value := otherIt.Value()\n last := il.Last()\n for it := il.First(); it != last; it = it.Next() {\n if it.Value() == value {\n contained = true\n break\n }\n }\n if !contained {\n il.Append(value)\n }\n }\n}", "func (s String) Union(strings ...String) (union String) {\n\tunion = s.Copy()\n\tfor _, set := range strings {\n\t\tfor key := range set {\n\t\t\tunion[key] = yes\n\t\t}\n\t}\n\treturn union\n}", "func AppendUniq(list []string, items ...string) []string {\n\tfor _, item := range items {\n\t\tshouldAdd := true\n\t\tfor _, v := range list {\n\t\t\tif v == item {\n\t\t\t\tshouldAdd = false\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\n\t\tif shouldAdd {\n\t\t\tlist = append(list, item)\n\t\t}\n\t}\n\n\treturn list\n}", "func DeduplicateSliceStably(items []string) []string {\n\tdata := make([]string, 0, len(items))\n\tdeduplicate := map[string]struct{}{}\n\tfor _, val := range items {\n\t\tif _, exists := deduplicate[val]; !exists {\n\t\t\tdeduplicate[val] = struct{}{}\n\t\t\tdata = append(data, val)\n\t\t}\n\t}\n\treturn data\n}", "func (s StringSet) Union(other StringSet) StringSet {\n\tresultSet := make(StringSet, len(s))\n\tfor val := range s {\n\t\tresultSet[val] = true\n\t}\n\n\tfor val := range other {\n\t\tresultSet[val] = true\n\t}\n\n\treturn resultSet\n}", "func union(left, right []string) []string {\n\tu := make([]string, len(left))\n\tcopy(u, left)\noutter:\n\tfor _, r := range right {\n\t\tfor _, l := range left {\n\t\t\tif l == r {\n\t\t\t\tcontinue outter\n\t\t\t}\n\t\t}\n\t\tu = append(u, r)\n\t}\n\treturn u\n}", "func (that *StrAnyMap) Merge(other *StrAnyMap) {\n\tthat.mu.Lock()\n\tdefer that.mu.Unlock()\n\tif that.data == nil {\n\t\tthat.data = other.MapCopy()\n\t\treturn\n\t}\n\tif other != that {\n\t\tother.mu.RLock()\n\t\tdefer 
other.mu.RUnlock()\n\t}\n\tfor k, v := range other.data {\n\t\tthat.data[k] = v\n\t}\n}", "func Deduplicate(input []string) []string {\n\tresult := []string{}\n\tseen := make(map[string]struct{})\n\tfor _, val := range input {\n\t\tif _, ok := seen[val]; !ok {\n\t\t\tresult = append(result, val)\n\t\t\tseen[val] = struct{}{}\n\t\t}\n\t}\n\treturn result\n}", "func RemoveDuplicatedStrings(slice []string) []string {\n\tresult := []string{}\n\n\tcheck := make(map[string]bool)\n\tfor _, element := range slice {\n\t\tcheck[element] = true\n\t}\n\n\tfor key := range check {\n\t\tresult = append(result, key)\n\t}\n\n\treturn result\n}", "func UniqueStrings(vs []string) (r []string) {\n\tm := map[string]struct{}{}\n\tvar ok bool\n\tfor _, v := range vs {\n\t\tif _, ok = m[v]; !ok {\n\t\t\tm[v] = struct{}{}\n\t\t\tr = append(r, v)\n\t\t}\n\t}\n\n\treturn\n}", "func appendUnique(slice []Term, item Term) []Term {\n\tfor _, c := range slice {\n\t\tif c == item {\n\t\t\treturn slice\n\t\t}\n\t}\n\n\treturn append(slice, item)\n}", "func MakeUnique(str string, pool []string) string {\n\tvar nb int\n\ttested := str\n\tfor tested == \"\" || IsIn(tested, pool...) 
{\n\t\tnb++\n\t\ttested = str + strconv.Itoa(nb)\n\t}\n\treturn tested\n}", "func Unique2(input string) bool {\n\ts := strings.Split(input, \"\")\n\tsort.Strings(s)\n\treturn uniqueAux(1, s)\n}", "func dedupStr(in []string) []string {\n\tsort.Strings(in)\n\n\tj := 0\n\tfor i := 1; i < len(in); i++ {\n\t\tif in[j] == in[i] {\n\t\t\tcontinue\n\t\t}\n\t\tj++\n\t\tin[j] = in[i]\n\t}\n\n\treturn in[:j+1]\n}", "func (c StringArrayCollection) Unique() Collection {\n\tvar d = make([]string, len(c.value))\n\tcopy(d, c.value)\n\tx := make([]string, 0)\n\tfor _, i := range d {\n\t\tif len(x) == 0 {\n\t\t\tx = append(x, i)\n\t\t} else {\n\t\t\tfor k, v := range x {\n\t\t\t\tif i == v {\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t\tif k == len(x)-1 {\n\t\t\t\t\tx = append(x, i)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\treturn StringArrayCollection{\n\t\tvalue: x,\n\t}\n}", "func StringsHas(target []string, src string) bool {\n\tfor _, t := range target {\n\t\tif strings.TrimSpace(t) == src {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func (u *Utils) UniqueStrings(input []string) []string {\n\tr := make([]string, 0, len(input))\n\tm := make(map[string]bool)\n\tfor _, val := range input {\n\t\tif _, ok := m[val]; !ok {\n\t\t\tm[val] = true\n\t\t\tr = append(r, val)\n\t\t}\n\t}\n\treturn r\n}", "func (v Values) Merge(src Values) Values {\n\tfor key, srcVal := range src {\n\t\tdestVal, found := v[key]\n\n\t\tsrcType := fmt.Sprintf(\"%T\", srcVal)\n\t\tdestType := fmt.Sprintf(\"%T\", destVal)\n\t\tmatch := srcType == destType\n\t\tvalidSrc := istable(srcVal)\n\t\tvalidDest := istable(destVal)\n\n\t\tif found && match && validSrc && validDest {\n\t\t\tdestMap := destVal.(Values)\n\t\t\tsrcMap := srcVal.(Values)\n\t\t\tdestMap.Merge(srcMap)\n\t\t} else {\n\t\t\tv[key] = srcVal\n\t\t}\n\t}\n\treturn v\n}", "func SanitizeDuplicates(b []string) []string {\n\tsz := len(b) - 1\n\tfor i := 0; i < sz; i++ {\n\t\tfor j := i + 1; j <= sz; j++ {\n\t\t\tif (b)[i] == ((b)[j]) {\n\t\t\t\t(b)[j] = 
(b)[sz]\n\t\t\t\t(b) = (b)[0:sz]\n\t\t\t\tsz--\n\t\t\t\tj--\n\t\t\t}\n\t\t}\n\t}\n\treturn b\n}", "func (t *Set) Merge(other *Set, prefix []byte) *Set {\n\tif other != nil {\n\t\tadder := func(key []byte) bool {\n\t\t\tt.Add(key)\n\t\t\treturn true\n\t\t}\n\t\tother.Iter(prefix, adder)\n\t}\n\treturn t\n}", "func MergeStrings(stringArray ...string) string {\n\n\tvar buffer bytes.Buffer\n\tfor _, v := range stringArray {\n\t\tbuffer.WriteString(v)\n\t}\n\treturn buffer.String()\n\n}", "func IncludeString(l []string, s string) []string {\n\ti := sort.Search(\n\t\tlen(l),\n\t\tfunc(i int) bool {\n\t\t\treturn l[i] >= s\n\t\t},\n\t)\n\tif i < len(l) && l[i] == s {\n\t\t// string is already in slice\n\t\treturn l\n\t}\n\tl = append(l, \"\")\n\tcopy(l[i+1:], l[i:])\n\tl[i] = s\n\treturn l\n}", "func uniqueStr2(in string) bool {\n\tfor i := 0; i < len(in); i++ {\n\t\tfor j := i + 1; j < len(in[i+1:]); j++ {\n\t\t\tif in[i] == in[j] {\n\t\t\t\treturn false\n\t\t\t}\n\t\t}\n\t}\n\treturn true\n}", "func combine(str1, str2 string) string {\n\tvar res string\n\tlen1 := len(str1)\n\tlen2 := len(str2)\n\t//mark the number of same chars\n\tvar sameNum int = 0\n\tfor len1 > 0 && sameNum < len2 {\n\t\tif str1[len1-1] == str2[sameNum] {\n\t\t\tlen1--\n\t\t\tsameNum++\n\t\t} else {\n\t\t\tbreak\n\t\t}\n\t}\n\t//combine str1 and str2\n\tres = str1[0:len1] + str2[sameNum:len2]\n\treturn res\n\n}", "func AppendIfMissing(slice []string, i string) []string {\n\tfor _, ele := range slice {\n\t\tif ele == i {\n\t\t\treturn slice\n\t\t}\n\t}\n\treturn append(slice, i)\n}", "func AppendIfMissing(slice []string, i string) []string {\n\tfor _, ele := range slice {\n\t\tif ele == i {\n\t\t\treturn slice\n\t\t}\n\t}\n\treturn append(slice, i)\n}", "func UniqueString(a []string) []string {\n\tr := make([]string, 0, len(a))\n\n\tsort.Strings(a)\n\n\tr = append(r, a[0])\n\ti := a[0]\n\n\tfor _, v := range a {\n\t\tif v != i {\n\t\t\tr = append(r, v)\n\t\t\ti = v\n\t\t}\n\t}\n\n\treturn r\n}", "func 
mapStringStringMergeFrom(dst, src *map[string]string) {\n\tif (src == nil) || (*src == nil) {\n\t\treturn\n\t}\n\n\tif *dst == nil {\n\t\t*dst = make(map[string]string)\n\t}\n\n\tfor key, value := range *src {\n\t\tif _, ok := (*dst)[key]; ok {\n\t\t\t// Such key already exists in dst\n\t\t\tcontinue\n\t\t}\n\n\t\t// No such a key in dst\n\t\t(*dst)[key] = value\n\t}\n}", "func merge(dst, src *unstructured.Unstructured) bool {\n\tdstNS := dst.GetLabels()[resourceLabelNamespace]\n\tsrcNS := src.GetLabels()[resourceLabelNamespace]\n\tif dstNS != srcNS {\n\t\treturn false\n\t}\n\n\tif dstResults, ok, _ := unstructured.NestedSlice(dst.UnstructuredContent(), \"results\"); ok {\n\t\tif srcResults, ok, _ := unstructured.NestedSlice(src.UnstructuredContent(), \"results\"); ok {\n\t\t\tdstResults = append(dstResults, srcResults...)\n\n\t\t\tif err := unstructured.SetNestedSlice(dst.UnstructuredContent(), dstResults, \"results\"); err == nil {\n\t\t\t\taddSummary(dst, src)\n\t\t\t\treturn true\n\t\t\t}\n\t\t}\n\t}\n\treturn false\n}", "func (l *StrLinked) RemoveDupes() {\n\ttmp := make(tmp)\n\n\tl.head = remove(tmp.contains, l.head)\n}", "func Merge(dest interface{}, source interface{}) error {\n\topts := make([]func(*mergo.Config), 0)\n\n\t// lists are always overridden - we don't append merged lists since it generally makes things more complicated\n\topts = append(opts, mergo.WithOverride)\n\n\terr := mergo.Merge(dest, source, opts...)\n\tif err != nil {\n\t\treturn errors.WithStack(err)\n\t}\n\treturn nil\n}", "func (s *UpdaterSet) Merge(set UpdaterSet) error {\n\texists := make([]string, 0, len(set.set))\n\tfor n, _ := range set.set {\n\t\tif _, ok := s.set[n]; ok {\n\t\t\texists = append(exists, n)\n\t\t}\n\t}\n\n\tif len(exists) > 0 {\n\t\treturn ErrExists{exists}\n\t}\n\n\tfor n, u := range set.set {\n\t\ts.set[n] = u\n\t}\n\treturn nil\n}", "func removeDuplicates(stringSlices ...[]string) []string {\n\tuniqueMap := map[string]bool{}\n\n\tfor _, stringSlice := range 
stringSlices {\n\t\tfor _, str := range stringSlice {\n\t\t\tuniqueMap[str] = true\n\t\t}\n\t}\n\n\t// Create a slice with the capacity of unique items\n\t// This capacity make appending flow much more efficient\n\tresult := make([]string, 0, len(uniqueMap))\n\n\tfor key := range uniqueMap {\n\t\tresult = append(result, key)\n\t}\n\n\treturn result\n}", "func (t Tags) Merge(key string, value ...string) {\n\tfor _, v := range value {\n\t\tcurrent := t.GetAll(key)\n\t\tfound := false\n\t\tfor _, cv := range current {\n\t\t\tif v == cv {\n\t\t\t\tfound = true\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\tif !found {\n\t\t\tt.Add(key, v)\n\t\t}\n\t}\n}", "func StringsUnique(s []string) bool {\n\tsort.Strings(s)\n\n\tfor i := 0; i < len(s)-1; i++ {\n\t\tif s[i] == s[i+1] {\n\t\t\treturn false\n\t\t}\n\t}\n\n\treturn true\n}", "func DedupInPlace(a *[]string) (dups int) {\n\tsz := len(*a)\n\tif 2 > sz {\n\t\treturn\n\t}\n\tlast := sz - 1\n\tfor i := 0; i < last; i++ {\n\t\ts := (*a)[i]\n\t\tfor j := last; j > i; j-- {\n\t\t\tif s == (*a)[j] { // found a dup - remove it\n\t\t\t\tdups++\n\t\t\t\tif j == last {\n\t\t\t\t\t(*a) = (*a)[:last]\n\t\t\t\t} else {\n\t\t\t\t\t(*a) = append((*a)[:j], (*a)[j+1:]...)\n\t\t\t\t}\n\t\t\t\tlast--\n\t\t\t}\n\t\t}\n\t}\n\treturn\n}", "func AppendStr(strs []string, str string) []string {\n\tfor _, s := range strs {\n\t\tif s == str {\n\t\t\treturn strs\n\t\t}\n\t}\n\treturn append(strs, str)\n}", "func (sm StringMap) Upsert(k, v string) {\n\tif av, existing := sm.Get(k); existing {\n\t\tav.SetValue(v)\n\t} else {\n\t\t*sm.orig = append(*sm.orig, NewStringKeyValue(k, v).orig)\n\t}\n}", "func concatUnique(collections ...[]string) []string {\n\tresultSet := make(map[string]struct{})\n\tfor _, c := range collections {\n\t\tfor _, i := range c {\n\t\t\tif _, ok := resultSet[i]; !ok {\n\t\t\t\tresultSet[i] = struct{}{}\n\t\t\t}\n\t\t}\n\t}\n\tresult := make([]string, 0, len(resultSet))\n\tfor k := range resultSet {\n\t\tresult = append(result, 
k)\n\t}\n\treturn result\n}", "func (s *SliceOfString) Concat(items []string) *SliceOfString {\n\ts.items = append(s.items, items...)\n\treturn s\n}", "func merge(new, dst *Range) bool {\n\tif new.End() < dst.Pos {\n\t\treturn false\n\t}\n\tif new.End() > dst.End() {\n\t\tdst.Size = new.Size\n\t} else {\n\t\tdst.Size += dst.Pos - new.Pos\n\t}\n\tdst.Pos = new.Pos\n\treturn true\n}", "func RemoveStringSliceCopy(slice []string, start, end int) []string {\n\tresult := make([]string, len(slice)-(end-start))\n\tat := copy(result, slice[:start])\n\tcopy(result[at:], slice[end:])\n\treturn result\n\n}", "func Unique(input string) bool {\n\tseen := make(map[rune]bool)\n\n\tfor _, r := range input {\n\t\t_, found := seen[r]\n\t\tif found {\n\t\t\treturn false\n\t\t}\n\t\tseen[r] = true\n\t}\n\n\treturn true\n}", "func (queryParametersBag) uniqueStringsSlice(in []string) []string {\n\tkeys := make(map[string]bool)\n\tout := make([]string, 0)\n\n\tfor _, entry := range in {\n\t\tif _, ok := keys[entry]; !ok {\n\t\t\tkeys[entry] = true\n\t\t\tout = append(out, entry)\n\t\t}\n\t}\n\n\treturn out\n}", "func common(s, o []rune) []rune {\n\tmax, min := s, o\n\tif len(max) < len(min) {\n\t\tmax, min = min, max\n\t}\n\tvar str []rune\n\tfor i, r := range min {\n\t\tif r != max[i] {\n\t\t\tbreak\n\t\t}\n\t\tif str == nil {\n\t\t\tstr = []rune{r}\n\t\t} else {\n\t\t\tstr = append(str, r)\n\t\t}\n\t}\n\treturn str\n}", "func PrependIfMissingIgnoreCase(str string, prefix string, prefixes ...string) string {\n\treturn prependIfMissing(str, prefix, true, prefixes...)\n}", "func appendIfMissing(inputSlice []rowStore, input rowStore) []rowStore {\n\tfor _, element := range inputSlice {\n\t\tif element == input {\n\t\t\treturn inputSlice\n\t\t}\n\t}\n\treturn append(inputSlice, input)\n}", "func (r StringsSet) AddAll(other StringsSet) {\n\tfor s := range other {\n\t\tr[s] = struct{}{}\n\t}\n}", "func depSliceDeduplicate(s []Dependency) []Dependency {\n\tl := len(s)\n\tif l < 2 {\n\t\treturn 
s\n\t}\n\tif l == 2 {\n\t\tif s[0] == s[1] {\n\t\t\treturn s[0:1]\n\t\t}\n\t\treturn s\n\t}\n\n\tfound := make(map[string]bool, l)\n\tj := 0\n\tfor i, x := range s {\n\t\th := x.Hash()\n\t\tif !found[h] {\n\t\t\tfound[h] = true\n\t\t\ts[j] = s[i]\n\t\t\tj++\n\t\t}\n\t}\n\n\treturn s[:j]\n}", "func SortUnique(a []string) (rv []string) {\n\n\tsort.Strings(a)\n\n\tpos := 1\n\tlast := a[0]\n\tfor i := 1; i < len(a); i++ {\n\t\ts := a[i]\n\t\tif s != last {\n\t\t\tif pos != i {\n\t\t\t\ta[pos] = a[i]\n\t\t\t}\n\t\t\tpos++\n\t\t}\n\t\tlast = s\n\t}\n\trv = a[:pos]\n\treturn\n}" ]
[ "0.64953166", "0.5654552", "0.5652756", "0.55148536", "0.53828245", "0.53584385", "0.5342289", "0.5150319", "0.513586", "0.5078584", "0.50460446", "0.50121546", "0.5000155", "0.49774796", "0.49146158", "0.49021077", "0.48866537", "0.48661357", "0.48562434", "0.47965133", "0.4780307", "0.47801292", "0.4774901", "0.47722873", "0.4759733", "0.4744842", "0.47354126", "0.4715488", "0.4708589", "0.46616873", "0.46606046", "0.46561733", "0.45682538", "0.45668474", "0.4550267", "0.452949", "0.45249027", "0.4513581", "0.45042053", "0.4497204", "0.44971558", "0.44886267", "0.44882458", "0.44841638", "0.44706774", "0.44642594", "0.4455903", "0.4449288", "0.44251135", "0.44072983", "0.44059145", "0.44048172", "0.4402026", "0.4391726", "0.4391068", "0.43840945", "0.43787333", "0.43781415", "0.4373554", "0.43593356", "0.43465927", "0.43330926", "0.43272105", "0.43208873", "0.43179226", "0.43153548", "0.43117827", "0.43036747", "0.43021172", "0.4301195", "0.42982307", "0.42979857", "0.42941293", "0.42897224", "0.42880508", "0.42880508", "0.4277076", "0.42723846", "0.4270383", "0.42680794", "0.42665404", "0.42658705", "0.4261972", "0.42616907", "0.42537907", "0.42451707", "0.4244592", "0.42297924", "0.42213133", "0.42211708", "0.42211473", "0.42167428", "0.42070878", "0.42015666", "0.41969824", "0.41946515", "0.41935065", "0.418656", "0.41849187", "0.41829473" ]
0.7760827
0
Match matches input kv with kv, value will be wildcard matched depending on the user input
Совпадение совпадает с входным kv с kv, значение будет совпадать по диктуемому пользователем шаблону
func (kv BatchKeyRotateKV) Match(ikv BatchKeyRotateKV) bool { if kv.Empty() { return true } if strings.EqualFold(kv.Key, ikv.Key) { return wildcard.Match(kv.Value, ikv.Value) } return false }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func Match(goos, kv, key string) (value string, ok bool) {\n\tif len(kv) <= len(key) || kv[len(key)] != '=' {\n\t\treturn \"\", false\n\t}\n\n\tif goos == \"windows\" {\n\t\t// Case insensitive.\n\t\tif !strings.EqualFold(kv[:len(key)], key) {\n\t\t\treturn \"\", false\n\t\t}\n\t} else {\n\t\t// Case sensitive.\n\t\tif kv[:len(key)] != key {\n\t\t\treturn \"\", false\n\t\t}\n\t}\n\n\treturn kv[len(key)+1:], true\n}", "func (kv BatchJobReplicateKV) Match(ikv BatchJobReplicateKV) bool {\n\tif kv.Empty() {\n\t\treturn true\n\t}\n\tif strings.EqualFold(kv.Key, ikv.Key) {\n\t\treturn wildcard.Match(kv.Value, ikv.Value)\n\t}\n\treturn false\n}", "func match(got string, pattern *regexp.Regexp, msg string, note func(key string, value interface{})) error {\n\tif pattern.MatchString(got) {\n\t\treturn nil\n\t}\n\n\treturn errors.New(msg)\n}", "func (f *CompiledFingerprints) matchKeyValueString(key, value string, part part) []string {\n\tvar matched bool\n\tvar technologies []string\n\n\tfor app, fingerprint := range f.Apps {\n\t\tswitch part {\n\t\tcase cookiesPart:\n\t\t\tfor data, pattern := range fingerprint.cookies {\n\t\t\t\tif data != key {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\n\t\t\t\tif pattern.MatchString(value) {\n\t\t\t\t\tmatched = true\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\t\tcase headersPart:\n\t\t\tfor data, pattern := range fingerprint.headers {\n\t\t\t\tif data != key {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\n\t\t\t\tif pattern.MatchString(value) {\n\t\t\t\t\tmatched = true\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\t\tcase metaPart:\n\t\t\tfor data, patterns := range fingerprint.meta {\n\t\t\t\tif data != key {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\n\t\t\t\tfor _, pattern := range patterns {\n\t\t\t\t\tif pattern.MatchString(value) {\n\t\t\t\t\t\tmatched = true\n\t\t\t\t\t\tbreak\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\t// If no match, continue with the next fingerprint\n\t\tif !matched {\n\t\t\tcontinue\n\t\t}\n\n\t\t// Append the technologies as well as implied 
ones\n\t\ttechnologies = append(technologies, app)\n\t\tif len(fingerprint.implies) > 0 {\n\t\t\ttechnologies = append(technologies, fingerprint.implies...)\n\t\t}\n\t\tmatched = false\n\t}\n\treturn technologies\n}", "func (m AllKeysMatcher) Match(key string, attributes map[string]interface{}, bucketingKey *string) bool {\n\treturn true\n}", "func (f filters) matchAny(k string, v []byte) bool {\n\tfor _, filter := range f {\n\t\tif filter(k, v) {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func (kt KeyToken) Match(okt KeyToken) bool {\n\tif kt.Tok.IsKeyword() && kt.Key != \"\" {\n\t\treturn kt.Tok.Match(okt.Tok) && kt.Key == okt.Key\n\t}\n\treturn kt.Tok.Match(okt.Tok)\n}", "func (kl KeyTokenList) Match(okt KeyToken) bool {\n\tfor _, kt := range kl {\n\t\tif kt.Match(okt) {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func KeyMatchFunc(args ...interface{}) (interface{}, error) {\n\tname1 := args[0].(string)\n\tname2 := args[1].(string)\n\n\treturn (bool)(KeyMatch(name1, name2)), nil\n}", "func ExtCaseInsensitiveMatch(mval interface{}, sval map[string]interface{}) bool {\n\tspecif, ok := sval[\"value\"]\n\tif !ok {\n\t\treturn false\n\t}\n\n\tspecval, ok := specif.(string)\n\tif !ok {\n\t\treturn false\n\t}\n\n\tswitch mcast := mval.(type) {\n\tcase string:\n\t\tif strings.ToLower(specval) == strings.ToLower(mcast) {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func (r *Key) matcher(c *Client) func([]byte) bool {\n\treturn func(b []byte) bool {\n\t\tcr, err := unmarshalKey(b, c, r)\n\t\tif err != nil {\n\t\t\tc.Config.Logger.Warning(\"failed to unmarshal provided resource in matcher.\")\n\t\t\treturn false\n\t\t}\n\t\tnr := r.urlNormalized()\n\t\tncr := cr.urlNormalized()\n\t\tc.Config.Logger.Infof(\"looking for %v\\nin %v\", nr, ncr)\n\n\t\tif nr.Project == nil && ncr.Project == nil {\n\t\t\tc.Config.Logger.Info(\"Both Project fields null - considering equal.\")\n\t\t} else if nr.Project == nil || ncr.Project == nil 
{\n\t\t\tc.Config.Logger.Info(\"Only one Project field is null - considering unequal.\")\n\t\t\treturn false\n\t\t} else if *nr.Project != *ncr.Project {\n\t\t\treturn false\n\t\t}\n\t\tif nr.Name == nil && ncr.Name == nil {\n\t\t\tc.Config.Logger.Info(\"Both Name fields null - considering equal.\")\n\t\t} else if nr.Name == nil || ncr.Name == nil {\n\t\t\tc.Config.Logger.Info(\"Only one Name field is null - considering unequal.\")\n\t\t\treturn false\n\t\t} else if *nr.Name != *ncr.Name {\n\t\t\treturn false\n\t\t}\n\t\treturn true\n\t}\n}", "func (ef *Filter) ExactMatch(key, source string) bool {\n\tfieldValues, ok := ef.filter[key]\n\t//do not filter if there is no filter set or cannot determine filter\n\tif !ok || len(fieldValues) == 0 {\n\t\treturn true\n\t}\n\t// try to match full name value to avoid O(N) regular expression matching\n\treturn fieldValues[source]\n}", "func (s *setting) Match(exists string) (bool, bool) {\n\tfor _, o := range s.Options {\n\t\tif o == exists {\n\t\t\treturn true, false\n\t\t} else if o == exists+\":\" {\n\t\t\treturn true, true\n\t\t}\n\t}\n\treturn false, false\n}", "func (f RabinKarp) MatchAll(p string, v []string) Matches {\n\tvar matches Matches\n\tfor _, value := range v {\n\t\tif ok, match := f(p, value); ok {\n\t\t\tmatches = append(matches, match)\n\t\t}\n\t}\n\treturn matches\n}", "func KeyMatch(key1 string, key2 string) bool {\n\ti := strings.Index(key2, \"*\")\n\tif i == -1 {\n\t\treturn key1 == key2\n\t}\n\n\tif len(key1) > i {\n\t\treturn key1[:i] == key2[:i]\n\t}\n\treturn key1 == key2[:i]\n}", "func (mux *Mux) match(key muxKey) Handler {\n\t// Check for exact match first.\n\tif r, ok := mux.m[key]; ok {\n\t\treturn r\n\t} else if r, ok := mux.m[muxKey{\"\", key.host, key.path}]; ok {\n\t\treturn r\n\t} else if r, ok := mux.m[muxKey{key.scheme, \"\", key.path}]; ok {\n\t\treturn r\n\t} else if r, ok := mux.m[muxKey{\"\", \"\", key.path}]; ok {\n\t\treturn r\n\t}\n\n\t// Check for longest valid match. 
mux.es contains all patterns\n\t// that end in / sorted from longest to shortest.\n\tfor _, e := range mux.es {\n\t\tif (e.key.scheme == \"\" || key.scheme == e.key.scheme) &&\n\t\t\t(e.key.host == \"\" || key.host == e.key.host) &&\n\t\t\tstrings.HasPrefix(key.path, e.key.path) {\n\t\t\treturn e.handler\n\t\t}\n\t}\n\treturn nil\n}", "func Match(path string, key string) bool {\n\tif path == key {\n\t\treturn true\n\t}\n\tif !strings.Contains(path, \"*\") {\n\t\treturn false\n\t}\n\tmatch, err := filepath.Match(path, key)\n\tif err != nil {\n\t\treturn false\n\t}\n\tcountPath := strings.Count(path, \"/\")\n\tcountKey := strings.Count(key, \"/\")\n\treturn match && countPath == countKey\n}", "func (c Provider) Match(query string) (params []string) {\n\tif sm := SourceRegex.FindStringSubmatch(query); len(sm) > 2 {\n\t\tparams = sm[1:]\n\t}\n\treturn\n}", "func (a *WhisperAggregation) Match(metric string) *WhisperAggregationItem {\n\tfor _, s := range a.Data {\n\t\tif s.pattern.MatchString(metric) {\n\t\t\treturn s\n\t\t}\n\t}\n\treturn a.Default\n}", "func (c Provider) Match(query string) (params []string) {\n\tif sm := SourceRegex.FindStringSubmatch(query); len(sm) > 1 {\n\t\tparams = sm[1:]\n\t}\n\treturn\n}", "func (i Info) Matches(value string) bool {\n\tif strings.Contains(i.Name, value) {\n\t\treturn true\n\t}\n\tif strings.Contains(i.Zone, value) {\n\t\treturn true\n\t}\n\tif strings.Contains(i.AliasTarget, value) {\n\t\treturn true\n\t}\n\tfor _, v := range i.Values {\n\t\tif strings.Contains(v, value) {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func (r *RegexpKeys) Match(key string) (string, error) {\n\tfor _, re := range r.regexp_keys {\n\t\tif re.CompiledRegexp.Match([]byte(key)) {\n\t\t\treturn re.Name, nil\n\t\t}\n\t}\n\treturn \"\", errors.New(\"Could not match key to regex.\")\n}", "func TestMockKv_Get(t *testing.T) {\n\tt.Run(\"exact match\", func(t *testing.T) {\n\t\tpair := mvccpb.KeyValue{\n\t\t\tKey: []byte(\"/foo\"),\n\t\t\tValue: 
[]byte(\"1\"),\n\t\t}\n\n\t\tkv := newMockKV()\n\t\tkv.values = map[string]mvccpb.KeyValue{string(pair.Key): pair}\n\t\tres, err := kv.Get(context.Background(), \"/foo\")\n\n\t\trequire.NoError(t, err)\n\t\trequire.Len(t, res.Kvs, 1)\n\t\tassert.Equal(t, []byte(\"/foo\"), res.Kvs[0].Key)\n\t})\n\n\tt.Run(\"not exact match\", func(t *testing.T) {\n\t\tpair := mvccpb.KeyValue{\n\t\t\tKey: []byte(\"/foo\"),\n\t\t\tValue: []byte(\"1\"),\n\t\t}\n\n\t\tkv := newMockKV()\n\t\tkv.values = map[string]mvccpb.KeyValue{string(pair.Key): pair}\n\t\tres, err := kv.Get(context.Background(), \"/bar\")\n\n\t\trequire.NoError(t, err)\n\t\tassert.Empty(t, res.Kvs)\n\t})\n\n\tt.Run(\"prefix match\", func(t *testing.T) {\n\t\tfooPair := mvccpb.KeyValue{\n\t\t\tKey: []byte(\"/foo\"),\n\t\t\tValue: []byte(\"1\"),\n\t\t}\n\t\tbazPair := mvccpb.KeyValue{\n\t\t\tKey: []byte(\"/baz\"),\n\t\t\tValue: []byte(\"2\"),\n\t\t}\n\t\tfirstPair := mvccpb.KeyValue{\n\t\t\tKey: []byte(\"/first\"),\n\t\t\tValue: []byte(\"3\"),\n\t\t}\n\n\t\tkv := newMockKV()\n\t\tkv.values = map[string]mvccpb.KeyValue{\n\t\t\tstring(fooPair.Key): fooPair,\n\t\t\tstring(bazPair.Key): bazPair,\n\t\t\tstring(firstPair.Key): firstPair,\n\t\t}\n\t\tres, err := kv.Get(context.Background(), \"/f\", clientv3.WithPrefix())\n\n\t\trequire.NoError(t, err)\n\t\tassert.ElementsMatch(t, []*mvccpb.KeyValue{&fooPair, &firstPair}, res.Kvs)\n\t})\n\n\tt.Run(\"empty prefix\", func(t *testing.T) {\n\t\tfooPair := mvccpb.KeyValue{\n\t\t\tKey: []byte(\"/foo\"),\n\t\t\tValue: []byte(\"1\"),\n\t\t}\n\t\tbazPair := mvccpb.KeyValue{\n\t\t\tKey: []byte(\"/baz\"),\n\t\t\tValue: []byte(\"2\"),\n\t\t}\n\t\tfirstPair := mvccpb.KeyValue{\n\t\t\tKey: []byte(\"/first\"),\n\t\t\tValue: []byte(\"3\"),\n\t\t}\n\n\t\tkv := newMockKV()\n\t\tkv.values = map[string]mvccpb.KeyValue{\n\t\t\tstring(fooPair.Key): fooPair,\n\t\t\tstring(bazPair.Key): bazPair,\n\t\t\tstring(firstPair.Key): firstPair,\n\t\t}\n\t\tres, err := kv.Get(context.Background(), \"\", 
clientv3.WithPrefix())\n\n\t\trequire.NoError(t, err)\n\t\tassert.ElementsMatch(t, []*mvccpb.KeyValue{&fooPair, &bazPair, &firstPair}, res.Kvs)\n\t})\n}", "func (m *Model) MatchesKey(msg tea.KeyMsg) bool {\n\tif !m.focused || len(m.valueLists) == 0 {\n\t\treturn false\n\t}\n\tcurList := m.valueLists[m.selectedList]\n\tswitch {\n\tcase key.Matches(msg,\n\t\tm.KeyMap.CursorUp,\n\t\tm.KeyMap.CursorDown,\n\t\tm.KeyMap.GoToStart,\n\t\tm.KeyMap.GoToEnd,\n\t\tm.KeyMap.Filter,\n\t\tm.KeyMap.ClearFilter,\n\t\tm.KeyMap.CancelWhileFiltering,\n\t\tm.KeyMap.AcceptWhileFiltering,\n\t\tm.KeyMap.PrevCompletions,\n\t\tm.KeyMap.NextCompletions,\n\t\tm.KeyMap.NextPage,\n\t\tm.KeyMap.PrevPage,\n\t\tm.KeyMap.Abort):\n\t\treturn true\n\tcase !curList.SettingFilter() &&\n\t\tkey.Matches(msg, m.KeyMap.AcceptCompletion):\n\t\treturn true\n\tcase curList.SettingFilter():\n\t\treturn true\n\t}\n\treturn false\n}", "func (c Provider) Match(query string) (params []string) {\n\tif sm := MirrorsRegex.FindStringSubmatch(query); len(sm) > 1 {\n\t\tparams = sm[1:]\n\t}\n\treturn\n}", "func metadataContainsValue(m interface{}, path []string, value string) bool {\n\tif len(path) == 0 {\n\t\treturn false\n\t}\n\n\tkey := strings.Title(strings.ToLower(path[0]))\n\n\tif mapData, isMap := m.(map[string]interface{}); isMap {\n\t\t// here we know its a map, but don't know the type of value, so we must check before accessing it\n\t\tv := mapData[key]\n\n\t\t// we will handle both strings and slice of strings here, so create a variable to use in both cases\n\t\ttempSlice := []string{}\n\n\t\tif sliceValue, isSliceString := v.([]string); isSliceString {\n\t\t\ttempSlice = sliceValue\n\t\t} else if stringValue, isString := v.(string); isString {\n\t\t\ttempSlice = []string{stringValue}\n\t\t}\n\n\t\tfor _, val := range tempSlice {\n\t\t\tmatch := strings.Contains(strings.ToLower(val), strings.ToLower(value))\n\n\t\t\tif match {\n\t\t\t\treturn true\n\t\t\t}\n\t\t}\n\n\t\t// if value is anything besides a 
string or slice of string, pass it to another function call with the next key in the path\n\t\treturn metadataContainsValue(v, path[1:], value)\n\t}\n\n\t// if m is not a map, it must be a slice; pass each value in it back to this function with the current key and check return values\n\tif sliceData, isSlice := m.([]interface{}); isSlice {\n\t\tfor _, elem := range sliceData {\n\t\t\tmatch := metadataContainsValue(elem, []string{key}, value)\n\n\t\t\tif match {\n\t\t\t\treturn true\n\t\t\t}\n\t\t}\n\t}\n\n\treturn false\n}", "func (f *CompiledFingerprints) matchMapString(keyValue map[string]string, part part) []string {\n\tvar matched bool\n\tvar technologies []string\n\n\tfor app, fingerprint := range f.Apps {\n\t\tswitch part {\n\t\tcase cookiesPart:\n\t\t\tfor data, pattern := range fingerprint.cookies {\n\t\t\t\tvalue, ok := keyValue[data]\n\t\t\t\tif !ok {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\n\t\t\t\tif pattern.MatchString(value) {\n\t\t\t\t\tmatched = true\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\t\tcase headersPart:\n\t\t\tfor data, pattern := range fingerprint.headers {\n\t\t\t\tvalue, ok := keyValue[data]\n\t\t\t\tif !ok {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\n\t\t\t\tif pattern.MatchString(value) {\n\t\t\t\t\tmatched = true\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\t\tcase metaPart:\n\t\t\tfor data, patterns := range fingerprint.meta {\n\t\t\t\tvalue, ok := keyValue[data]\n\t\t\t\tif !ok {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tfor _, pattern := range patterns {\n\t\t\t\t\tif pattern.MatchString(value) {\n\t\t\t\t\t\tmatched = true\n\t\t\t\t\t\tbreak\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\t// If no match, continue with the next fingerprint\n\t\tif !matched {\n\t\t\tcontinue\n\t\t}\n\n\t\t// Append the technologies as well as implied ones\n\t\ttechnologies = append(technologies, app)\n\t\tif len(fingerprint.implies) > 0 {\n\t\t\ttechnologies = append(technologies, fingerprint.implies...)\n\t\t}\n\t\tmatched = false\n\t}\n\treturn technologies\n}", "func (f 
*StringSetFilter) ItemMatch(pattern string) *StringSetFilter {\r\n\tf.AddValidator(func(paramName string, paramValue []string) *Error {\r\n\t\tre, err := regexp.Compile(pattern)\r\n\t\tif err != nil {\r\n\t\t\treturn NewError(ErrorInternalError, paramName, \"InvalidValidator\")\r\n\t\t}\r\n\t\tfor _, v := range paramValue {\r\n\t\t\tif !re.MatchString(v) {\r\n\t\t\t\treturn NewError(ErrorInvalidParam, paramName, \"ItemWrongFormat\")\r\n\t\t\t}\r\n\t\t}\r\n\t\treturn nil\r\n\t})\r\n\treturn f\r\n}", "func (vb *Builder) Match(fieldName string, str1, str2 interface{}) {\n\tif str1 != str2 {\n\t\tvb.Append(fieldName, doesNotMatch)\n\t}\n}", "func (a AnyArgument) Match(v driver.Value) bool {\n\treturn true\n}", "func (a AnyArgument) Match(v driver.Value) bool {\n\treturn true\n}", "func (t *TST) Match(p string) []string {\n\tif p == \"\" {\n\t\treturn nil\n\t}\n\t// if p has no matching, just find the string\n\tif !strings.ContainsAny(p, \"*_\") {\n\t\tif t.Find(p) {\n\t\t\treturn []string{p}\n\t\t}\n\t\treturn nil\n\t}\n\t// when matching for \"word*\" use prefix instead since it's cheaper\n\tif idx := strings.LastIndex(p, \"*\"); strings.Count(p, \"*\") == 1 && idx == len(p)-1 {\n\t\treturn t.Prefix(p[:idx])\n\t}\n\tmatches := []string{}\n\tt.root.rmatch(p, \"\", &matches)\n\tif len(matches) > 0 {\n\t\treturn matches\n\t}\n\treturn nil\n}", "func PrefixMatch(key string) (res []interface{}) {\n\tglobalStore.RLock()\n\tdefer globalStore.RUnlock()\n\n\tfor k, v := range globalStore.store {\n\t\tif strings.HasPrefix(k, key) {\n\t\t\tres = append(res, v)\n\t\t}\n\t}\n\n\treturn\n}", "func (s Selection) Match(msg Event) (bool, bool) {\n\tfor _, v := range s.N {\n\t\tval, ok := msg.Select(v.Key)\n\t\tif !ok {\n\t\t\treturn false, false\n\t\t}\n\t\tswitch vt := val.(type) {\n\t\tcase float64:\n\t\t\t// JSON numbers are all by spec float64 values\n\t\t\tif !v.Pattern.NumMatch(int(vt)) {\n\t\t\t\treturn false, true\n\t\t\t}\n\t\tcase int:\n\t\t\t// JSON numbers are all by spec 
float64 values\n\t\t\tif !v.Pattern.NumMatch(vt) {\n\t\t\t\treturn false, true\n\t\t\t}\n\t\tcase int64:\n\t\t\t// JSON numbers are all by spec float64 values\n\t\t\tif !v.Pattern.NumMatch(int(vt)) {\n\t\t\t\treturn false, true\n\t\t\t}\n\t\tcase int32:\n\t\t\t// JSON numbers are all by spec float64 values\n\t\t\tif !v.Pattern.NumMatch(int(vt)) {\n\t\t\t\treturn false, true\n\t\t\t}\n\t\tcase uint:\n\t\t\t// JSON numbers are all by spec float64 values\n\t\t\tif !v.Pattern.NumMatch(int(vt)) {\n\t\t\t\treturn false, true\n\t\t\t}\n\t\tcase uint32:\n\t\t\t// JSON numbers are all by spec float64 values\n\t\t\tif !v.Pattern.NumMatch(int(vt)) {\n\t\t\t\treturn false, true\n\t\t\t}\n\t\tcase uint64:\n\t\t\t// JSON numbers are all by spec float64 values\n\t\t\tif !v.Pattern.NumMatch(int(vt)) {\n\t\t\t\treturn false, true\n\t\t\t}\n\t\t}\n\t}\n\tfor _, v := range s.S {\n\t\tval, ok := msg.Select(v.Key)\n\t\tif !ok {\n\t\t\treturn false, false\n\t\t}\n\t\tswitch vt := val.(type) {\n\t\tcase string:\n\t\t\tif !v.Pattern.StringMatch(vt) {\n\t\t\t\treturn false, true\n\t\t\t}\n\t\tcase float64:\n\t\t\t// TODO - tmp hack that also loses floating point accuracy\n\t\t\tif !v.Pattern.StringMatch(strconv.Itoa(int(vt))) {\n\t\t\t\treturn false, true\n\t\t\t}\n\t\tdefault:\n\t\t\ts.incrementMismatchCount()\n\t\t\treturn false, true\n\t\t}\n\t}\n\treturn true, true\n}", "func (c *TimerCond) Match(t *TimerRecord) bool {\n\tif val, ok := c.ID.Get(); ok && t.ID != val {\n\t\treturn false\n\t}\n\n\tif val, ok := c.Namespace.Get(); ok && t.Namespace != val {\n\t\treturn false\n\t}\n\n\tif val, ok := c.Key.Get(); ok {\n\t\tif c.KeyPrefix && !strings.HasPrefix(t.Key, val) {\n\t\t\treturn false\n\t\t}\n\n\t\tif !c.KeyPrefix && t.Key != val {\n\t\t\treturn false\n\t\t}\n\t}\n\n\treturn true\n}", "func (f *Filter) Match(key [KeySize]byte, data []byte) bool {\n\t// Create a filter bitstream.\n\tb := newBitReader(f.filterNData[4:])\n\n\t// Hash our search term with the same parameters as the 
filter.\n\tk0 := binary.LittleEndian.Uint64(key[0:8])\n\tk1 := binary.LittleEndian.Uint64(key[8:16])\n\tterm := siphash.Hash(k0, k1, data) % f.modulusNP\n\n\t// Go through the search filter and look for the desired value.\n\tvar lastValue uint64\n\tfor lastValue < term {\n\t\t// Read the difference between previous and new value from\n\t\t// bitstream.\n\t\tvalue, err := f.readFullUint64(&b)\n\t\tif err != nil {\n\t\t\treturn false\n\t\t}\n\n\t\t// Add the previous value to it.\n\t\tvalue += lastValue\n\t\tif value == term {\n\t\t\treturn true\n\t\t}\n\n\t\tlastValue = value\n\t}\n\n\treturn false\n}", "func RegexMatch(key1 string, key2 string) bool {\n\tres, err := regexp.MatchString(key2, key1)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn res\n}", "func RegexMatch(key1 string, key2 string) bool {\n\tres, err := regexp.MatchString(key2, key1)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn res\n}", "func sinkMatch(r *types.SinkRequest, c eventbus.ConstraintMatcher) bool {\n\tswitch c.Name() {\n\tcase \"request.remoteAddress\":\n\t\treturn c.Match(r.RemoteAddr)\n\tcase \"request.method\":\n\t\treturn c.Match(r.Method)\n\tcase \"request.path\":\n\t\t// match path, right side of \"/sink\"\n\t\treturn c.Match(r.Path)\n\tcase \"request.username\":\n\t\treturn c.Match(r.Username)\n\tcase \"request.password\":\n\t\treturn c.Match(r.Password)\n\tcase \"request.content-type\":\n\t\treturn c.Match(r.Header.Get(\"content-type\"))\n\t}\n\n\t// Dynamically check matcher name if it contains request.(get|post|header).*\n\t// and use value for matcher:\n\t//\n\t// to match \"&foo=bar\" in URL string use .where('request.get.foo', 'bar')\n\t//\n\t// It only matches first value (get, post and header can have multiple values)\n\n\tif strings.HasPrefix(c.Name(), sinkMatchRequestGet) {\n\t\treturn c.Match(r.Query.Get(c.Name()[len(sinkMatchRequestGet):]))\n\t}\n\n\tif strings.HasPrefix(c.Name(), sinkMatchRequestPost) {\n\t\treturn 
c.Match(r.PostForm.Get(c.Name()[len(sinkMatchRequestPost):]))\n\t}\n\n\tif strings.HasPrefix(c.Name(), sinkMatchRequestHeader) {\n\t\treturn c.Match(r.Header.Get(c.Name()[len(sinkMatchRequestHeader):]))\n\t}\n\n\treturn true\n}", "func (a *Arg) Match(arg string) bool {\n\tswitch {\n\tcase a.ShortName != \"\" && a.ShortName == arg:\n\t\treturn true\n\tcase a.LongName != \"\" && a.LongName == arg:\n\t\treturn true\n\t}\n\treturn false\n}", "func (f *Flow) MatchString(key string, predicate getter.StringPredicate) bool {\n\tif s, err := f.GetFieldString(key); err == nil {\n\t\treturn predicate(s)\n\t}\n\treturn false\n}", "func (m EqualsMatcher) Match(s string) bool {\n\tfor _, term := range m.list {\n\t\tif term == s {\n\t\t\treturn true\n\t\t}\n\t}\n\n\treturn false\n}", "func (set Set) Match(caps [][]string) []string {\n\tif set == nil {\n\t\treturn nil\n\t}\nanyof:\n\tfor _, andList := range caps {\n\t\tfor _, cap := range andList {\n\t\t\tif _, ok := set[cap]; !ok {\n\t\t\t\tcontinue anyof\n\t\t\t}\n\t\t}\n\t\treturn andList\n\t}\n\t// match anything\n\treturn nil\n}", "func match(path, pattern string, vars ...interface{}) bool {\n\tregex := mustCompileCached(pattern)\n\tmatches := regex.FindStringSubmatch(path)\n\tif len(matches) <= 0 {\n\t\treturn false\n\t}\n\tfor i, match := range matches[1:] {\n\t\tswitch p := vars[i].(type) {\n\t\tcase *string:\n\t\t\t*p = match\n\t\tcase *int:\n\t\t\tn, err := strconv.Atoi(match)\n\t\t\tif err != nil {\n\t\t\t\treturn false\n\t\t\t}\n\t\t\t*p = n\n\t\tdefault:\n\t\t\tpanic(\"vars must be *string or *int\")\n\t\t}\n\t}\n\treturn true\n}", "func (m ValueMatcher) Match(got reflect.Value, d data.Data, _ Region) (data.Data, bool) {\n\tif m.Type != got.Type() {\n\t\treturn d, false\n\t}\n\treturn d, m.Value == got.Interface()\n}", "func (*Privilege) Match(toks sayori.Toks) (string, bool) {\n\talias, ok := toks.Get(0)\n\tif !ok {\n\t\treturn \"\", false\n\t}\n\talias = strings.ToLower(alias)\n\n\tfor _, validAlias := range 
[]string{\"p\", \"priv\", \"privileged\"} {\n\t\tif alias == validAlias {\n\t\t\treturn alias, true\n\t\t}\n\t}\n\treturn \"\", false\n}", "func (v *Value) Match(expr string) bool {\n\t// Compile the regular expression.\n\tre, err := v.script.compileRegexp(expr)\n\tif err != nil {\n\t\treturn false // Fail silently\n\t}\n\n\t// Return true if the expression matches the value, interpreted as a\n\t// string.\n\tloc := re.FindStringIndex(v.String())\n\tif loc == nil {\n\t\tv.script.RStart = 0\n\t\tv.script.RLength = -1\n\t\treturn false\n\t}\n\tv.script.RStart = loc[0] + 1\n\tv.script.RLength = loc[1] - loc[0]\n\treturn true\n}", "func Test_CheckParam(t *testing.T) {\n\n\t//Validate mode\n\n\tat := []string{\"Alphanumeric\", \"Alpha\"}\n\tbt := []string{\"Alphanumeric\", \"Alpha\"}\n\tct := []string{\"Alphanumeric\", \"Alpha\"}\n\tdt := []string{\"Numeric\"}\n\ttarget := map[string][]string{\n\t\t\"a\": at,\n\t\t\"b\": bt,\n\t\t\"c\": ct,\n\t\t\"d\": dt,\n\t}\n\n\t//Test set 1\n\tstandardOutput := make(map[string]string)\n\n\tstandardOutput[\"c\"] = \"[Check catal#yst123 with Alphanumeric failed][Check catal#yst123 with Alpha failed]\"\n\tstandardOutput[\"d\"] = \"[Check 81927l39824 with Numeric failed]\"\n\n\ta := []string{\"apple\", \"applause\"}\n\tb := []string{\"banana\", \"balista\"}\n\tc := []string{\"catherine\", \"catal#yst123\"}\n\td := []string{\"432\", \"301\", \"81927l39824\"}\n\n\tx := map[string][]string{\n\t\t\"a\": a,\n\t\t\"b\": b,\n\t\t\"c\": c,\n\t\t\"d\": d,\n\t}\n\n\tkeys := []string{\"a\", \"b\", \"c\", \"d\"}\n\n\t//Simulate input []interface with getQueryValue().\n\t//This is the param we get when parsing the GET\n\n\tinput := getQueryValue(x, keys)\n\n\t//fmt.Println(\"input: \", x)\n\t//fmt.Println(\"filter: \", target)\n\t_, detail := CheckParam(*input, target)\n\n\tfmt.Println(\"Result: \", detail)\n\n\tassert.Equal(t, detail, standardOutput, \"The two words should be the same.\")\n\n\t//Test set 2\n\n\tstandardOutput = 
make(map[string]string)\n\n\tstandardOutput[\"d\"] = \"[Check monopolosomeplace.com with Email failed]\"\n\n\ta = []string{\"op.gg\", \"www.yahoo.com.tw\"}\n\tb = []string{\"banana\", \"balista\"}\n\tc = []string{\"catherine\", \"catalyst\"}\n\td = []string{\"tig4605246@gmail.com\", \"monopolosomeplace.com\"}\n\n\tx = map[string][]string{\n\t\t\"a\": a,\n\t\t\"b\": b,\n\t\t\"c\": c,\n\t\t\"d\": d,\n\t}\n\n\tkeys = []string{\"a\", \"b\", \"c\", \"d\"}\n\n\t//Simulate input []interface with getQueryValue().\n\t//This is the param we get when parsing the GET\n\n\tinput = getQueryValue(x, keys)\n\n\tdt2 := []string{\"Email\"}\n\tat2 := []string{\"DNS\"}\n\ttarget[\"a\"] = at2\n\ttarget[\"d\"] = dt2\n\t//fmt.Println(\"input: \", x)\n\t//fmt.Println(\"filter: \", target)\n\t_, detail = CheckParam(*input, target)\n\t//fmt.Println(\"Result: \",detail,\"\\nexpected: \",standardOutput)\n\tassert.Equal(t, detail, standardOutput, \"The two words should be the same.\")\n\n}", "func (k *KeyHandler) ExactPathMatch(pathA string, pathB string) bool {\n\treturn pathA == pathB\n}", "func (r TargetRule) matches(target map[string]string, username, hostname string) bool {\n\tfor k, v := range r {\n\t\tv = strings.ReplaceAll(v, OwnUser, username)\n\t\tv = strings.ReplaceAll(v, OwnHost, hostname)\n\n\t\tif target[k] != v {\n\t\t\treturn false\n\t\t}\n\t}\n\n\treturn true\n}", "func (m *Matcher) Match(name string, attrs []string) bool {\n\tif _, ok := m.names[name]; ok {\n\t\treturn true\n\t}\n\tfor _, g := range m.globs {\n\t\tif g.MatchString(name) {\n\t\t\treturn true\n\t\t}\n\t}\n\tfor _, e := range m.exprs {\n\t\tif e.Matches(attrs) {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func (rule Rule) MatchRule(input string) (api.TaggedMetric, bool) {\n\ttagSet := extractTagValues(rule.graphitePatternRegex, rule.graphitePatternTags, input)\n\tif tagSet == nil {\n\t\treturn api.TaggedMetric{}, false\n\t}\n\tinterpolatedKey, err := interpolateTags(rule.raw.MetricKeyPattern, tagSet, 
false)\n\tif err != nil {\n\t\treturn api.TaggedMetric{}, false\n\t}\n\t// Do not output tags appearing in both graphite metric & metric key.\n\t// for exmaple, if graphite metric is\n\t// `foo.%a%.%b%`\n\t// and metric key is\n\t// `bar.%b%`\n\t// the resulting tag set should only contain {a} after the matching\n\t// because %b% is already encoded.\n\tfor _, metricKeyTag := range rule.metricKeyTags {\n\t\tif _, containsKey := tagSet[metricKeyTag]; containsKey {\n\t\t\tdelete(tagSet, metricKeyTag)\n\t\t}\n\t}\n\treturn api.TaggedMetric{\n\t\tapi.MetricKey(interpolatedKey),\n\t\ttagSet,\n\t}, true\n}", "func (k *Key) In(defaultVal string, candidates []string) string {\n\tval := k.String()\n\tfor _, cand := range candidates {\n\t\tif val == cand {\n\t\t\treturn val\n\t\t}\n\t}\n\treturn defaultVal\n}", "func (m ContainsMatcher) Match(s string) bool {\n\tfor _, term := range m.list {\n\t\tif strings.Contains(s, term) {\n\t\t\treturn true\n\t\t}\n\t}\n\n\treturn false\n}", "func mappingUrlKeystoValues(postback *Pbo) {\n\tmatchingIndexes := argumentPattern.FindStringIndex(postback.Url)\n\tfor matchingIndexes != nil {\n\t\tpatternMatch := argumentPattern.FindString(postback.Url)\n\t\tmatchString := patternMatch[1:(len(patternMatch) - 1)]\n\t\treplaceString, keyHasValue := postback.Data[matchString]\n\t\tif !keyHasValue {\n\t\t\treplaceString = MISMATCH_KEY_VALUE_URL\n\t\t\tpostback.Data[matchString] = MISMATCH_KEY_VALUE_URL\n\t\t}\n\t\tpostback.Url = postback.Url[:matchingIndexes[0]] + replaceString + postback.Url[matchingIndexes[1]:]\n\t\tmatchingIndexes = argumentPattern.FindStringIndex(postback.Url)\n\t}\n}", "func apiKeyMatcher(key string) (string, bool) {\n\tswitch key {\n\tcase \"Api_key\", \"api_key\":\n\t\treturn key, true\n\tdefault:\n\t\treturn key, false\n\t}\n}", "func TestMatchByPrefix(t *testing.T) {\n\tdefer leaktest.AfterTest(t)\n\tpcc := buildTestPrefixConfigMap()\n\ttestData := []struct {\n\t\tkey proto.Key\n\t\texpConfig 
interface{}\n\t}{\n\t\t{proto.KeyMin, config1},\n\t\t{proto.Key(\"\\x01\"), config1},\n\t\t{proto.Key(\"/db\"), config1},\n\t\t{proto.Key(\"/db1\"), config2},\n\t\t{proto.Key(\"/db1/a\"), config2},\n\t\t{proto.Key(\"/db1/table1\"), config3},\n\t\t{proto.Key(\"/db1/table\\xff\"), config3},\n\t\t{proto.Key(\"/db2\"), config1},\n\t\t{proto.Key(\"/db3\"), config4},\n\t\t{proto.Key(\"/db3\\xff\"), config4},\n\t\t{proto.Key(\"/db5\"), config1},\n\t\t{proto.Key(\"/xfe\"), config1},\n\t\t{proto.Key(\"/xff\"), config1},\n\t}\n\tfor i, test := range testData {\n\t\tpc := pcc.MatchByPrefix(test.key)\n\t\tif test.expConfig != pc.Config {\n\t\t\tt.Errorf(\"%d: expected config %v for %q; got %v\", i, test.expConfig, test.key, pc.Config)\n\t\t}\n\t}\n}", "func (c *condition) match(v string) bool {\n\tif c.excludes(v) {\n\t\treturn false\n\t}\n\tif c.includes(v) {\n\t\treturn true\n\t}\n\tif len(c.Include) == 0 {\n\t\treturn true\n\t}\n\treturn false\n}", "func Match(prefix string) string {\n\tfor _, enc := range defaultEncodings {\n\t\thint := enc.Match(prefix)\n\t\tif hint != \"\" {\n\t\t\treturn hint\n\t\t}\n\t}\n\treturn \"\"\n}", "func (ps *Segment) Match(s string) (name string, capture bool, wildcard bool, matches bool) {\n\tif ps.IsWildcard {\n\t\twildcard = true\n\t\tmatches = true\n\t\treturn\n\t}\n\tif ps.IsVariable {\n\t\tname = ps.Name\n\t\tcapture = true\n\t\tmatches = true\n\t\treturn\n\t}\n\tif strings.EqualFold(s, ps.Name) {\n\t\tmatches = true\n\t\treturn\n\t}\n\treturn\n}", "func (p *Path) Match(path string) *Match {\n\tvar match = &Match{\n\t\tValues: make(map[string]string),\n\t}\n\n\tfor _, part := range p.parts {\n\t\tif len(path) < 1 {\n\t\t\treturn nil\n\t\t}\n\n\t\tif path[0] != '/' {\n\t\t\treturn nil\n\t\t}\n\t\t// prefix /\n\t\tpath = path[1:]\n\n\t\tmatched, key, value, length := part.match(path)\n\n\t\t//log.Printf(\"%#v == %v (%d) %s\", part, matched, length, value)\n\n\t\tif !matched {\n\t\t\treturn nil\n\t\t}\n\n\t\tif key != \"\" 
{\n\t\t\tmatch.Values[key] = value\n\t\t}\n\t\tpath = path[length:]\n\t}\n\n\tif len(path) > 0 && path != \"/\" {\n\t\treturn nil\n\t}\n\n\treturn match\n}", "func (k *KeyHandler) WildcardPathMatch(pathA string, pathB string) bool {\n\tsegsA := strings.Split(pathA, \"/\")\n\tsegsB := strings.Split(pathB, \"/\")\n\n\tmatch := true\n\n\tfor i, seg := range segsA {\n\t\tif i > (len(segsB) - 1) {\n\t\t\treturn false\n\t\t}\n\n\t\tif segsB[i] != seg && seg != \"*\" {\n\t\t\tmatch = false\n\t\t}\n\t}\n\n\treturn match\n}", "func (s *Plugin) Match(ctx context.Context, request *interact.Request, condition *v1alpha1.MockAPI_Condition) (match bool, err error) {\n\tsimple := condition.GetSimple()\n\tif simple == nil {\n\t\treturn false, nil\n\t}\n\tc := core.NewContext(request)\n\tfor _, item := range simple.Items {\n\t\toperandX := core.Render(c, item.OperandX)\n\t\toperandY := core.Render(c, item.OperandY)\n\t\tmatched, err := core.Match(operandX, item.Operator, operandY)\n\t\tif err != nil {\n\t\t\treturn false, err\n\t\t}\n\t\tif item.Opposite {\n\t\t\tmatched = !matched\n\t\t}\n\t\tif matched {\n\t\t\tif simple.UseOrAmongItems {\n\t\t\t\treturn true, nil\n\t\t\t}\n\t\t\tcontinue\n\t\t} else {\n\t\t\tif simple.UseOrAmongItems {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\treturn false, nil\n\t\t}\n\t}\n\treturn true, nil\n}", "func (n *tnode) rmatch(pat, prefix string, m *[]string) {\n\tif n == nil {\n\t\treturn\n\t}\n\tc := pat[0]\n\tif c == '_' || c < n.c {\n\t\tn.lokid.rmatch(pat, prefix, m)\n\t}\n\tif c == '_' || c == n.c {\n\t\tif n.val != nil && len(pat)-1 == 0 {\n\t\t\t*m = append(*m, prefix+string(n.c))\n\t\t}\n\t\tif len(pat)-1 > 0 {\n\t\t\tn.eqkid.rmatch(pat[1:], prefix+string(n.c), m)\n\t\t}\n\t}\n\tif c == '_' || c > n.c {\n\t\tn.hikid.rmatch(pat, prefix, m)\n\t}\n}", "func searchExact(w http.ResponseWriter, r *http.Request, db *mgo.Database, argPos int) {\n\tkey := r.FormValue(\"key\")\n\tval := r.FormValue(\"val\")\n\n\tcontext := make([]appResult, 0, 10)\n\tvar res 
*appResult\n\n\tc := db.C(\"machines\")\n\tvar usePath bool\n\tif key == \"apps.path\" {\n\t\tusePath = true\n\t}\n\n\terr := c.Find(bson.M{key: val}).\n\t\tSelect(bson.M{\n\t\t\"hostname\": 1,\n\t\t\"apps\": 1,\n\t\t\"_id\": 1}).\n\t\tSort(\"hostname\").\n\t\tFor(&res, func() error {\n\t\tres.Apps = filter_apps(val, res.Apps, usePath)\n\t\tcontext = append(context, *res)\n\t\treturn nil\n\t})\n\n\tif err != nil {\n\t\tfmt.Println(err)\n\t\thttp.NotFound(w, r)\n\t\treturn\n\t}\n\tset.ExecuteTemplate(w, \"searchresults\", context)\n}", "func keyExistsInArray(key string, value interface{}, log logr.Logger) (invalidType bool, keyExists bool) {\n\tswitch valuesAvailable := value.(type) {\n\tcase []interface{}:\n\t\tfor _, val := range valuesAvailable {\n\t\t\tif wildcard.Match(fmt.Sprint(val), key) || wildcard.Match(key, fmt.Sprint(val)) {\n\t\t\t\treturn false, true\n\t\t\t}\n\t\t}\n\n\tcase string:\n\t\tif wildcard.Match(valuesAvailable, key) {\n\t\t\treturn false, true\n\t\t}\n\n\t\tvar arr []string\n\t\tif err := json.Unmarshal([]byte(valuesAvailable), &arr); err != nil {\n\t\t\tlog.Error(err, \"failed to unmarshal value to JSON string array\", \"key\", key, \"value\", value)\n\t\t\treturn true, false\n\t\t}\n\n\t\tfor _, val := range arr {\n\t\t\tif key == val {\n\t\t\t\treturn false, true\n\t\t\t}\n\t\t}\n\n\tdefault:\n\t\tinvalidType = true\n\t\treturn\n\t}\n\n\treturn false, false\n}", "func MatchPatterns(p Pattern, v string) error {\n\tswitch {\n\tcase p == \"\": // No pattern is specified.\n\t\treturn nil\n\n\tcase strings.HasPrefix(string(p), \"const:\"):\n\t\tw := string(p[len(\"const:\"):])\n\t\tif w != v {\n\t\t\treturn fmt.Errorf(\"const not matched: %q %q\", p, v)\n\t\t}\n\t\treturn nil\n\n\tcase strings.HasPrefix(string(p), \"pattern:\"):\n\t\tw := string(p[len(\"pattern:\"):])\n\t\tif matchSuffix(w, v) != nil {\n\t\t\treturn fmt.Errorf(\"pattern not matched: %q %q\", p, v)\n\t\t}\n\t\treturn nil\n\n\tcase strings.HasPrefix(string(p), 
\"split_pattern:\"):\n\t\tws := strings.Split(string(p[len(\"split_pattern:\"):]), \";\")\n\t\tfor _, w := range ws {\n\t\t\tif matchSuffix(w, v) == nil {\n\t\t\t\treturn nil\n\t\t\t}\n\t\t}\n\t\treturn fmt.Errorf(\"split_pattern not matched: %q %q\", p, v)\n\n\tdefault:\n\t\treturn fmt.Errorf(\"unkown pattern\")\n\t}\n}", "func processFilter(keys []string, filter []string) ([]string, bool) {\n\tvar vpps []string\n\tif len(filter) > 0 {\n\t\t// Ignore all parameters but first\n\t\tvpps = strings.Split(filter[0], \",\")\n\t} else {\n\t\t// Show all if there is no filter\n\t\tvpps = keys\n\t}\n\tvar isData bool\n\t// Find at leas one match\n\tfor _, key := range keys {\n\t\tfor _, vpp := range vpps {\n\t\t\tif key == vpp {\n\t\t\t\tisData = true\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t}\n\treturn vpps, isData\n}", "func (n *Node) Match(u string) (interface{}, map[string]string, bool) {\n\tu, err := url.QueryUnescape(u)\n\n\tif err != nil {\n\t\treturn nil, nil, false\n\t}\n\n\treturn n.match(map[string]string{}, strings.Split(checkURL(u), \"/\")[1:])\n}", "func (in InHandler) Evaluate(key, value interface{}) bool {\n\tswitch typedKey := key.(type) {\n\tcase string:\n\t\treturn in.validateValueWithStringPattern(typedKey, value)\n\tcase int, int32, int64, float32, float64, bool:\n\t\treturn in.validateValueWithStringPattern(fmt.Sprint(typedKey), value)\n\tcase []interface{}:\n\t\tvar stringSlice []string\n\t\tfor _, v := range typedKey {\n\t\t\tstringSlice = append(stringSlice, v.(string))\n\t\t}\n\t\treturn in.validateValueWithStringSetPattern(stringSlice, value)\n\tdefault:\n\t\tin.log.V(2).Info(\"Unsupported type\", \"value\", typedKey, \"type\", fmt.Sprintf(\"%T\", typedKey))\n\t\treturn false\n\t}\n}", "func whereLabelMatches(label, pattern string, in interface{}) ([]interface{}, error) {\n\trx, err := regexp.Compile(pattern)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn whereLabel(\"whereLabelMatches\", in, label, func(value string, ok bool) bool 
{\n\t\treturn ok && rx.MatchString(value)\n\t})\n}", "func matchAction(ivr string) (string, error) {\n\tinput_words := strings.Split(strings.ToLower(ivr), \" \")\n\tactions := map[string]string{\n\t\t\"on\": \"On\",\n\t\t\"off\": \"Off\",\n\t}\n\n\tfor key, value := range actions {\n\t\tif contains(input_words, key) {\n\t\t\treturn value, nil\n\t\t}\n\t}\n\treturn \"\", fmt.Errorf(\"no matching action\")\n}", "func MatchVarsRegexp(path string, url string) (bool, bool, []string, []string) {\n\tmatch := true\n\tnext := false\n\tkeys := []string{}\n\tvalues := []string{}\n\tu := 0\n\tp := 0\n\tfor {\n\t\tif url[u] == path[p] {\n\t\t} else {\n\t\t\tif path[p] != braceStart {\n\t\t\t\tmatch = false\n\t\t\t\tbreak\n\t\t\t} else {\n\t\t\t\tkey := \"\"\n\t\t\t\treg := \"\"\n\t\t\t\tfor {\n\t\t\t\t\tp++\n\t\t\t\t\tif path[p] == coron {\n\t\t\t\t\t\tkeys = append(keys, key)\n\t\t\t\t\t\treg, p = forwardPoint(path, p)\n\t\t\t\t\t\tbreak\n\t\t\t\t\t}\n\t\t\t\t\tif path[p] == braceEnd {\n\t\t\t\t\t\tkeys = append(keys, key)\n\t\t\t\t\t\tp++\n\t\t\t\t\t\tbreak\n\t\t\t\t\t}\n\t\t\t\t\tif !(p < len(path)) {\n\t\t\t\t\t\tkeys = append(keys, key)\n\t\t\t\t\t\tbreak\n\t\t\t\t\t}\n\t\t\t\t\tkey = key + string(path[p])\n\t\t\t\t}\n\t\t\t\tvalue := \"\"\n\t\t\t\tfor {\n\t\t\t\t\tif url[u] == slash {\n\t\t\t\t\t\tvalues = append(values, value)\n\t\t\t\t\t\tbreak\n\t\t\t\t\t}\n\t\t\t\t\tvalue = value + string(url[u])\n\t\t\t\t\tu++\n\t\t\t\t\tif !(u < len(url)) {\n\t\t\t\t\t\tvalues = append(values, value)\n\t\t\t\t\t\tbreak\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tif reg != \"\" {\n\t\t\t\t\tre := regexp.MustCompile(reg)\n\t\t\t\t\tif !re.MatchString(value) {\n\t\t\t\t\t\tmatch = false\n\t\t\t\t\t\tbreak\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\tu++\n\t\tp++\n\t\tif p < len(path) && u < len(url) {\n\t\t} else if !(p < len(path)) && !(u < len(url)) {\n\t\t\tbreak\n\t\t} else if !(p < len(path)) && u < len(url) {\n\t\t\tnext = true\n\t\t\tbreak\n\t\t} else {\n\t\t\tmatch = 
false\n\t\t\tbreak\n\t\t}\n\t}\n\treturn match, next, keys, values\n}", "func (k *VRFKey) Match(srcIP net.IP, cidr *net.IPNet) bool {\n\treturn k.SourceIP.String() == srcIP.String() && k.DestCIDR.String() == cidr.String()\n}", "func (p *Policy) Match(policy Policy) *Policy {\n\t// Exact or all op\n\t// *resource, resource*, *resource*\n\tif (p.Op == policy.Op || p.Op == OpAll) &&\n\t\t(p.Resource == policy.Resource || p.Resource == \"*\" ||\n\t\t\t(p.Resource[0] == '*' && strings.HasSuffix(policy.Resource, p.Resource[1:])) ||\n\t\t\t(p.Resource[len(p.Resource)-1] == '*' && strings.HasPrefix(policy.Resource, p.Resource[:len(p.Resource)-1])) ||\n\t\t\t(p.Resource[0] == '*' && p.Resource[len(p.Resource)-1] == '*' && strings.Contains(policy.Resource, p.Resource[1:len(p.Resource)-1]))) {\n\n\t\treturn p\n\t}\n\treturn nil\n}", "func (r *Request) MatchParam(key, value string) *Request {\n\tquery := r.URLStruct.Query()\n\tquery.Set(key, value)\n\tr.URLStruct.RawQuery = query.Encode()\n\treturn r\n}", "func (s Set) Match(value string) bool {\n\tfor i := range s {\n\t\tif s[i].Match(value) {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func matchPattern(pattern, name string) (matched bool) {\n\tif pattern == \"\" {\n\t\treturn name == pattern\n\t}\n\tif pattern == \"*\" {\n\t\treturn true\n\t}\n\trName, rPattern := make([]rune, 0, len(name)), make([]rune, 0, len(pattern))\n\tfor _, r := range name {\n\t\trName = append(rName, r)\n\t}\n\tfor _, r := range pattern {\n\t\trPattern = append(rPattern, r)\n\t}\n\treturn deepMatchRune(rName, rPattern, false)\n}", "func (s StreamID) Match(pattern string) bool {\n\treturn wildcard.MatchSimple(pattern, s.str)\n}", "func (mg *MultiGlob) Match(input string) bool {\n\t_, matched := match(mg.node, input, false)\n\treturn matched\n}", "func (p *PropertySet) Match(s string) []string {\n\tp.mux.RLock()\n\tdefer p.mux.RUnlock()\n\n\tproperties := make([]string, 0)\n\tfor property := range p.props {\n\t\tif strings.HasPrefix(property, 
s) {\n\t\t\tproperties = append(properties, property)\n\t\t}\n\t}\n\n\treturn properties\n}", "func (cont *Container) Match(query fl.Query) bool {\n\tfor k, q := range query {\n\t\tif !cont.MatchField(k, q...) {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}", "func (s *AppServerV3) MatchSearch(values []string) bool {\n\treturn MatchSearch(nil, values, nil)\n}", "func match(r io.ReaderAt, buf []byte, key []byte, pos uint32) (bool, error) {\n\tklen := len(key)\n\tfor n := 0; n < klen; n += len(buf) {\n\t\tnleft := klen - n\n\t\tif len(buf) > nleft {\n\t\t\tbuf = buf[:nleft]\n\t\t}\n\t\tif _, err := r.ReadAt(buf, int64(pos)); err != nil {\n\t\t\treturn false, err\n\t\t}\n\t\tif !bytes.Equal(buf, key[n:n+len(buf)]) {\n\t\t\treturn false, nil\n\t\t}\n\t\tpos += uint32(len(buf))\n\t}\n\treturn true, nil\n}", "func GlobMatch(patterns ...string) MatcherFunc { return GlobMatches(patterns) }", "func (c *Counter) Match(values []string) bool {\n\ta, b := c.Values, values\n\tif len(a) == len(b) {\n\t\tb = b[:len(a)]\n\t\tfor i := range a {\n\t\t\tif a[i] != b[i] {\n\t\t\t\treturn false\n\t\t\t}\n\t\t}\n\t\treturn true\n\t}\n\treturn false\n}", "func (r *Requirement) Matches(ls Labels) bool {\n\tswitch strings.ToLower(r.Operator) {\n\tcase strings.ToLower(Operator_equals.String()), strings.ToLower(Operator_in.String()):\n\t\tif !ls.Has(r.Key) {\n\t\t\treturn false\n\t\t}\n\t\treturn r.hasValue(ls.Get(r.Key))\n\tcase strings.ToLower(Operator_notEquals.String()), strings.ToLower(Operator_notIn.String()):\n\t\tif !ls.Has(r.Key) {\n\t\t\treturn false\n\t\t}\n\t\treturn !r.hasValue(ls.Get(r.Key))\n\tdefault:\n\t\treturn false\n\t}\n}", "func LooksLikeAKeyString(inputStr string) (matched bool) {\n\treturn keyStringRe.MatchString(inputStr)\n}", "func (m fieldMatcher) Matches(x interface{}) bool {\n\tval := reflect.ValueOf(x)\n\tfor i := 0; i < val.NumField(); i++ {\n\t\tfield := val.Type().Field(i)\n\t\tif field.Name == m.Key {\n\t\t\tif reflect.DeepEqual(getValue(val.Field(i)), 
m.Value) {\n\t\t\t\treturn true\n\t\t\t}\n\t\t}\n\t}\n\treturn false\n}", "func getEnvKeyValue(match string, partial bool) (string, string, error) {\n\tfor _, e := range os.Environ() {\n\t\tpair := strings.Split(e, \"=\")\n\t\tif len(pair) != 2 {\n\t\t\tcontinue\n\t\t}\n\n\t\tkey := pair[0]\n\t\tvalue := pair[1]\n\n\t\tif partial && strings.Contains(key, match) {\n\t\t\treturn key, value, nil\n\t\t}\n\n\t\tif strings.Compare(key, match) == 0 {\n\t\t\treturn key, value, nil\n\t\t}\n\t}\n\n\tmatchType := \"match\"\n\tif partial {\n\t\tmatchType = \"partial match\"\n\t}\n\n\treturn \"\", \"\", fmt.Errorf(\"Failed to find %s with %s\", matchType, match)\n}", "func Match(rule grpc.Rule, data map[string]string, pidRuntime string) bool {\n\t// Return early if we have nothing to filter on.\n\tif len(rule.ContainerRuntimes) < 1 && len(rule.FilterEvents) < 1 {\n\t\treturn true\n\t}\n\n\tmatchedRuntime := false\n\tfor _, runtime := range rule.ContainerRuntimes {\n\t\tif pidRuntime == runtime {\n\t\t\t// Return early if we know we have nothing else to filter on.\n\t\t\tif len(rule.FilterEvents) < 1 {\n\t\t\t\treturn true\n\t\t\t}\n\n\t\t\t// Continue to the next check.\n\t\t\tmatchedRuntime = true\n\t\t\tbreak\n\t\t}\n\t}\n\n\t// Return early here if we never matched a runtime.\n\tif len(rule.ContainerRuntimes) > 0 && !matchedRuntime {\n\t\treturn false\n\t}\n\n\t// Return early here if we have nothing else to filter on.\n\tif len(rule.FilterEvents) < 1 {\n\t\treturn true\n\t}\n\n\tfor key, ogValue := range data {\n\t\ts, ok := rule.FilterEvents[key]\n\t\tif !ok {\n\t\t\tcontinue\n\t\t}\n\t\tfor _, find := range s.Values {\n\t\t\tif strings.Contains(ogValue, find) {\n\t\t\t\t// Return early since we have nothing else to filter on.\n\t\t\t\treturn true\n\t\t\t}\n\t\t}\n\t}\n\n\t// We did not match any filters.\n\treturn false\n}", "func (a AnyPastTime) Match(v driver.Value) bool {\n\tstr, ok := v.(string)\n\tif !ok {\n\t\treturn false\n\t}\n\tt := types.Timestamp{}\n\tif err := 
t.Scan(str); err != nil {\n\t\treturn false\n\t}\n\treturn time.Since(t.Time()) < a.Range\n}", "func (action Action) Match(a Action) bool {\n\treturn wildcard.Match(string(action), string(a))\n}", "func customGrep(data string, pattern map[string]string) map[string][]string {\n\t// \"pattern name\":\"\"\n\tresult := make(map[string][]string)\n\tfor k, v := range pattern {\n\t\tresultArr := Grepping(data, v)\n\t\tif len(resultArr) > 0 {\n\t\t\tresult[k] = resultArr\n\t\t}\n\t}\n\treturn result\n}", "func equal(key, value string, params Parameter) bool {\n switch value {\n case \"nil\", \"empty\":\n return equalNilAndEmpty(key, value, params)\n default:\n return equalValue(key, value, params)\n }\n}", "func bestMatch(name string, t reflect.Type) string {\n\tkey := strings.ToLower(hyphens.ReplaceAllString(name, \"\"))\n\tfor i := 0; i < t.NumField(); i++ {\n\t\tfield := t.Field(i)\n\t\tif field.Name == name {\n\t\t\treturn field.Name\n\t\t}\n\t\tj := field.Tag.Get(\"json\")\n\t\tif j != \"\" {\n\t\t\tflags := strings.Split(j, \",\")\n\t\t\tfor _, flag := range flags {\n\t\t\t\tif name == flag {\n\t\t\t\t\treturn field.Name\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\tlname := strings.ToLower(hyphens.ReplaceAllString(field.Name, \"\"))\n\t\tif key == lname {\n\t\t\treturn field.Name\n\t\t}\n\t}\n\treturn \"\"\n}", "func (d metaphoneDict) matches(metaphone string) metaphoneDict {\n\tvar l, r int\n\ti := d.phoneticLocation(metaphone)\n\tfor r = i; r < len(d) && d[r].metaphone == metaphone; r++ {\n\t}\n\tfor l = i; l >= 0 && d[l].metaphone == metaphone; l-- {\n\t}\n\tl++\n\tif r-l < fudgeDistance*2 {\n\t\tl -= fudgeDistance\n\t\tr += fudgeDistance\n\t}\n\tif l < 0 {\n\t\tl = 0\n\t}\n\tif r > len(d) {\n\t\tr = len(d)\n\t}\n\treturn d[l:r]\n}", "func (card Card) fieldsMatchByType(name, value string) (fields []Field) {\n name = strings.ToLower(name)\n value = strings.ToLower(value)\n for _, f := range card.Fields {\n if strings.ToLower(f.Type) == name &&\n 
strings.Contains(strings.ToLower(f.Value), value) {\n fields = append(fields, f)\n }\n }\n\n return\n}", "func (t attrSelector) Match(n *html.Node) bool {\n\tswitch t.operation {\n\tcase \"\":\n\t\treturn matchAttribute(n, t.key, func(string) bool { return true })\n\tcase \"=\":\n\t\treturn matchAttribute(n, t.key, func(s string) bool { return s == t.val })\n\tcase \"!=\":\n\t\treturn attributeNotEqualMatch(t.key, t.val, n)\n\tcase \"~=\":\n\t\t// matches elements where the attribute named key is a whitespace-separated list that includes val.\n\t\treturn matchAttribute(n, t.key, func(s string) bool { return matchInclude(t.val, s) })\n\tcase \"|=\":\n\t\treturn attributeDashMatch(t.key, t.val, n)\n\tcase \"^=\":\n\t\treturn attributePrefixMatch(t.key, t.val, n)\n\tcase \"$=\":\n\t\treturn attributeSuffixMatch(t.key, t.val, n)\n\tcase \"*=\":\n\t\treturn attributeSubstringMatch(t.key, t.val, n)\n\tcase \"#=\":\n\t\treturn attributeRegexMatch(t.key, t.regexp, n)\n\tdefault:\n\t\tpanic(fmt.Sprintf(\"unsuported operation : %s\", t.operation))\n\t}\n}", "func whereAny(entries interface{}, key, sep string, cmp []string) (interface{}, error) {\n\treturn generalizedWhere(\"whereAny\", entries, key, func(value interface{}) bool {\n\t\tif value == nil {\n\t\t\treturn false\n\t\t} else {\n\t\t\titems := strings.Split(value.(string), sep)\n\t\t\treturn len(intersect(cmp, items)) > 0\n\t\t}\n\t})\n}" ]
[ "0.7097848", "0.69746256", "0.6162469", "0.6156034", "0.60447186", "0.599638", "0.5879797", "0.5872499", "0.5860298", "0.58108395", "0.57897544", "0.57737875", "0.5747656", "0.5741802", "0.56776214", "0.5676985", "0.5634095", "0.56085414", "0.5597004", "0.5589954", "0.557177", "0.5567571", "0.5556967", "0.5525543", "0.5478485", "0.5461796", "0.54478884", "0.54216236", "0.5407237", "0.5377896", "0.5377896", "0.53481555", "0.53255564", "0.52947456", "0.5289103", "0.52722126", "0.5267257", "0.5267257", "0.5257442", "0.5232316", "0.5210086", "0.52078223", "0.5205293", "0.518691", "0.5178901", "0.5173906", "0.51736754", "0.516359", "0.5158604", "0.5150494", "0.5148744", "0.5138188", "0.5128183", "0.51218", "0.51207054", "0.51013595", "0.5098328", "0.5090163", "0.50792694", "0.50711775", "0.50552505", "0.50517535", "0.5051306", "0.50501394", "0.5047032", "0.5041911", "0.5033525", "0.5024925", "0.50241643", "0.50232524", "0.5006551", "0.49865666", "0.49860978", "0.49847582", "0.49816144", "0.4979884", "0.497763", "0.49593437", "0.49398288", "0.49331662", "0.49311292", "0.49137333", "0.4902631", "0.49009874", "0.48973328", "0.48787296", "0.48780355", "0.4869388", "0.48691073", "0.48640612", "0.4858991", "0.48544428", "0.48533815", "0.48524925", "0.48512816", "0.48487774", "0.4848216", "0.48462984", "0.48416254", "0.4836475" ]
0.71222144
0
KeyRotate rotates encryption key of an object
KeyRotate поворачивает шифровальный ключ объекта
func (r *BatchJobKeyRotateV1) KeyRotate(ctx context.Context, api ObjectLayer, objInfo ObjectInfo) error { srcBucket := r.Bucket srcObject := objInfo.Name if objInfo.DeleteMarker || !objInfo.VersionPurgeStatus.Empty() { return nil } sseKMS := crypto.S3KMS.IsEncrypted(objInfo.UserDefined) sseS3 := crypto.S3.IsEncrypted(objInfo.UserDefined) if !sseKMS && !sseS3 { // neither sse-s3 nor sse-kms disallowed return errInvalidEncryptionParameters } if sseKMS && r.Encryption.Type == sses3 { // previously encrypted with sse-kms, now sse-s3 disallowed return errInvalidEncryptionParameters } versioned := globalBucketVersioningSys.PrefixEnabled(srcBucket, srcObject) versionSuspended := globalBucketVersioningSys.PrefixSuspended(srcBucket, srcObject) lock := api.NewNSLock(r.Bucket, objInfo.Name) lkctx, err := lock.GetLock(ctx, globalOperationTimeout) if err != nil { return err } ctx = lkctx.Context() defer lock.Unlock(lkctx) opts := ObjectOptions{ VersionID: objInfo.VersionID, Versioned: versioned, VersionSuspended: versionSuspended, NoLock: true, } obj, err := api.GetObjectInfo(ctx, r.Bucket, objInfo.Name, opts) if err != nil { return err } oi := obj.Clone() var ( newKeyID string newKeyContext kms.Context ) encMetadata := make(map[string]string) for k, v := range oi.UserDefined { if strings.HasPrefix(strings.ToLower(k), ReservedMetadataPrefixLower) { encMetadata[k] = v } } if (sseKMS || sseS3) && r.Encryption.Type == ssekms { if err = r.Encryption.Validate(); err != nil { return err } newKeyID = strings.TrimPrefix(r.Encryption.Key, crypto.ARNPrefix) newKeyContext = r.Encryption.kmsContext } if err = rotateKey(ctx, []byte{}, newKeyID, []byte{}, r.Bucket, oi.Name, encMetadata, newKeyContext); err != nil { return err } // Since we are rotating the keys, make sure to update the metadata. 
oi.metadataOnly = true oi.keyRotation = true for k, v := range encMetadata { oi.UserDefined[k] = v } if _, err := api.CopyObject(ctx, r.Bucket, oi.Name, r.Bucket, oi.Name, oi, ObjectOptions{ VersionID: oi.VersionID, }, ObjectOptions{ VersionID: oi.VersionID, NoLock: true, }); err != nil { return err } return nil }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func rotateKey(oldKey []byte, newKey []byte, metadata map[string]string) error {\n\tdelete(metadata, SSECustomerKey) // make sure we do not save the key by accident\n\n\tif metadata[ServerSideEncryptionSealAlgorithm] != SSESealAlgorithmDareSha256 { // currently DARE-SHA256 is the only option\n\t\treturn errObjectTampered\n\t}\n\tiv, err := base64.StdEncoding.DecodeString(metadata[ServerSideEncryptionIV])\n\tif err != nil || len(iv) != SSEIVSize {\n\t\treturn errObjectTampered\n\t}\n\tsealedKey, err := base64.StdEncoding.DecodeString(metadata[ServerSideEncryptionSealedKey])\n\tif err != nil || len(sealedKey) != 64 {\n\t\treturn errObjectTampered\n\t}\n\n\tsha := sha256.New() // derive key encryption key\n\tsha.Write(oldKey)\n\tsha.Write(iv)\n\tkeyEncryptionKey := sha.Sum(nil)\n\n\tobjectEncryptionKey := bytes.NewBuffer(nil) // decrypt object encryption key\n\tn, err := sio.Decrypt(objectEncryptionKey, bytes.NewReader(sealedKey), sio.Config{\n\t\tKey: keyEncryptionKey,\n\t})\n\tif n != 32 || err != nil { // Either the provided key does not match or the object was tampered.\n\t\tif subtle.ConstantTimeCompare(oldKey, newKey) == 1 {\n\t\t\treturn errInvalidSSEParameters // AWS returns special error for equal but invalid keys.\n\t\t}\n\t\treturn errSSEKeyMismatch // To provide strict AWS S3 compatibility we return: access denied.\n\t}\n\tif subtle.ConstantTimeCompare(oldKey, newKey) == 1 {\n\t\treturn nil // we don't need to rotate keys if newKey == oldKey\n\t}\n\n\tnonce := make([]byte, 32) // generate random values for key derivation\n\tif _, err = io.ReadFull(rand.Reader, nonce); err != nil {\n\t\treturn err\n\t}\n\n\tniv := sha256.Sum256(nonce[:]) // derive key encryption key\n\tsha = sha256.New()\n\tsha.Write(newKey)\n\tsha.Write(niv[:])\n\tkeyEncryptionKey = sha.Sum(nil)\n\n\tsealedKeyW := bytes.NewBuffer(nil) // sealedKey := 16 byte header + 32 byte payload + 16 byte tag\n\tn, err = sio.Encrypt(sealedKeyW, bytes.NewReader(objectEncryptionKey.Bytes()), 
sio.Config{\n\t\tKey: keyEncryptionKey,\n\t})\n\tif n != 64 || err != nil {\n\t\treturn errors.New(\"failed to seal object encryption key\") // if this happens there's a bug in the code (may panic ?)\n\t}\n\n\tmetadata[ServerSideEncryptionIV] = base64.StdEncoding.EncodeToString(niv[:])\n\tmetadata[ServerSideEncryptionSealAlgorithm] = SSESealAlgorithmDareSha256\n\tmetadata[ServerSideEncryptionSealedKey] = base64.StdEncoding.EncodeToString(sealedKeyW.Bytes())\n\treturn nil\n}", "func (o OfflineNotaryRepository) RotateKey(data.RoleName, bool, []string) error {\n\treturn storage.ErrOffline{}\n}", "func (e EmptyTargetsNotaryRepository) RotateKey(data.RoleName, bool, []string) error {\n\treturn nil\n}", "func (s HTTPStore) RotateKey(role data.RoleName) ([]byte, error) {\n\turl, err := s.buildKeyURL(role)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treq, err := http.NewRequest(\"POST\", url.String(), nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tresp, err := s.roundTrip.RoundTrip(req)\n\tif err != nil {\n\t\treturn nil, NetworkError{Wrapped: err}\n\t}\n\tdefer resp.Body.Close()\n\tif err := translateStatusToError(resp, role.String()+\" key\"); err != nil {\n\t\treturn nil, err\n\t}\n\tb := io.LimitReader(resp.Body, MaxKeySize)\n\tbody, err := ioutil.ReadAll(b)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn body, nil\n}", "func (k keyCredential) Rotate(tx transaction.Transaction) (*msgraph.KeyCredential, *crypto.Jwk, error) {\n\tkeysInUse, err := k.filterRevokedKeys(tx)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tkeyCredential, jwk, err := k.new(tx)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tkeysInUse = append(keysInUse, *keyCredential)\n\n\tapp := util.EmptyApplication().Keys(keysInUse).Build()\n\tif err := k.Application().Patch(tx.Ctx, tx.Instance.GetObjectId(), app); err != nil {\n\t\treturn nil, nil, fmt.Errorf(\"updating application with keycredential: %w\", err)\n\t}\n\n\treturn keyCredential, jwk, nil\n}", "func (u 
UninitializedNotaryRepository) RotateKey(data.RoleName, bool, []string) error {\n\treturn client.ErrRepositoryNotExist{}\n}", "func (e aesGCMEncodedEncryptor) RotateEncryption(ciphertext string) (string, error) {\n\tif !e.ConfiguredToRotate() {\n\t\treturn \"\", &EncryptionError{errors.New(\"key rotation not configured\")}\n\t}\n\n\tplaintext, err := e.Decrypt(ciphertext)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\treturn e.Encrypt(plaintext)\n}", "func TestEncryptionRotation(t *testing.T, scenario RotationScenario) {\n\t// test data\n\tns := scenario.Namespace\n\tlabelSelector := scenario.LabelSelector\n\n\t// step 1: create the desired resource\n\te := NewE(t)\n\tclientSet := GetClients(e)\n\tscenario.CreateResourceFunc(e, GetClients(e), ns)\n\n\t// step 2: run provided encryption scenario\n\tTestEncryptionType(t, scenario.BasicScenario, scenario.EncryptionProvider)\n\n\t// step 3: take samples\n\trawEncryptedResourceWithKey1 := scenario.GetRawResourceFunc(e, clientSet, ns)\n\n\t// step 4: force key rotation and wait for migration to complete\n\tlastMigratedKeyMeta, err := GetLastKeyMeta(t, clientSet.Kube, ns, labelSelector)\n\trequire.NoError(e, err)\n\trequire.NoError(e, ForceKeyRotation(e, scenario.UnsupportedConfigFunc, fmt.Sprintf(\"test-key-rotation-%s\", rand.String(4))))\n\tWaitForNextMigratedKey(e, clientSet.Kube, lastMigratedKeyMeta, scenario.TargetGRs, ns, labelSelector)\n\tscenario.AssertFunc(e, clientSet, scenario.EncryptionProvider, ns, labelSelector)\n\n\t// step 5: verify if the provided resource was encrypted with a different key (step 2 vs step 4)\n\trawEncryptedResourceWithKey2 := scenario.GetRawResourceFunc(e, clientSet, ns)\n\tif rawEncryptedResourceWithKey1 == rawEncryptedResourceWithKey2 {\n\t\tt.Errorf(\"expected the resource to has a different content after a key rotation,\\ncontentBeforeRotation %s\\ncontentAfterRotation %s\", rawEncryptedResourceWithKey1, rawEncryptedResourceWithKey2)\n\t}\n\n\t// TODO: assert conditions - 
operator and encryption migration controller must report status as active not progressing, and not failing for all scenarios\n}", "func (g *Generator) rekey() error {\n\tfor i := keySize / g.cipher.BlockSize(); i > 0; i-- {\n\t\tg.readBlock(g.key[g.cipher.BlockSize()*i:])\n\t}\n\n\treturn g.updateCipher()\n}", "func RotateAccessKeys() string {\n\tsess := session.Must(session.NewSessionWithOptions(session.Options{\n\t\tSharedConfigState: session.SharedConfigEnable,\n\t\tProfile: \"default\",\n\t}))\n\tclient := iam.New(sess)\n\tdeleteCurrentIamKey(client)\n\tnewKeyOutput, err := client.CreateAccessKey(&iam.CreateAccessKeyInput{})\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tcfg := readCredentialsFile()\n\tfmt.Println(\"new IAM key is \", *newKeyOutput.AccessKey.AccessKeyId)\n\tcfg.Section(\"default_original\").Key(\"aws_access_key_id\").SetValue(*newKeyOutput.AccessKey.AccessKeyId)\n\tcfg.Section(\"default_original\").Key(\"aws_secret_access_key\").SetValue(*newKeyOutput.AccessKey.SecretAccessKey)\n\tlocation := writeCredentialsFile(cfg)\n\n\treturn location\n}", "func k8sRotate(t *testing.T, dir string) {\n\tk8sUpdate(t, dir, rotatedHubbleServerCertificate, rotatedHubbleServerPrivkey, rotatedHubbleServerCA)\n}", "func RotateEncryptionKeys(dbp zesty.DBProvider) (err error) {\n\tdefer errors.DeferredAnnotatef(&err, \"Failed to rotate encrypted callbacks to new key\")\n\n\tvar last string\n\tfor {\n\t\tvar lastID *string\n\t\tif last != \"\" {\n\t\t\tlastID = &last\n\t\t}\n\t\t// load all callbacks\n\t\tcallbacks, err := listCallbacks(dbp, utask.MaxPageSize, lastID, false)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif len(callbacks) == 0 {\n\t\t\tbreak\n\t\t}\n\t\tlast = callbacks[len(callbacks)-1].PublicID\n\n\t\tfor _, c := range callbacks {\n\t\t\tsp, err := dbp.TxSavepoint()\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\t// load callback locked\n\t\t\tcb, err := loadFromPublicID(dbp, c.PublicID, true)\n\t\t\tif err != nil 
{\n\t\t\t\tdbp.RollbackTo(sp)\n\t\t\t\treturn err\n\t\t\t}\n\t\t\t// update callback (encrypt)\n\t\t\tif err := cb.update(dbp); err != nil {\n\t\t\t\tdbp.RollbackTo(sp)\n\t\t\t\treturn err\n\t\t\t}\n\t\t\t// commit\n\t\t\tif err := dbp.Commit(); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\n\treturn nil\n}", "func RotateEncryption(ciphertext string) (string, error) {\n\treturn defaultEncryptor.RotateEncryption(ciphertext)\n}", "func (b *backend) pathConfigRotate() *framework.Path {\n return &framework.Path{\n\tPattern: fmt.Sprintf(\"config/rotate/?$\"),\n\tHelpSynopsis: \"Use the existing key to generate a set a new key\",\n\tHelpDescription: \"Use this endpoint to use the current key to generate a new key, and use that\",\n\n\tFields: map[string]*framework.FieldSchema{\n\t \"key_name\": &framework.FieldSchema{\n\t\tType: framework.TypeString,\n\t\tDescription: \"The name for the newly generated key.\",\n\t },\n\t},\n\n\tCallbacks: map[logical.Operation]framework.OperationFunc{\n\t logical.UpdateOperation: b.pathRotateKey,\n\t},\n }\n}", "func (rtg *RTGProtocol) GenRotationKey(share *RTGShare, crp []*ring.Poly, rotKey *rlwe.SwitchingKey) {\n\tfor i := 0; i < rtg.beta; i++ {\n\t\trtg.ringQP.Copy(share.Value[i], rotKey.Value[i][0])\n\t\trtg.ringQP.Copy(crp[i], rotKey.Value[i][1])\n\t}\n}", "func testKeyRotation(t *testing.T, dbStore keyRotator, newValidAlias string) data.PrivateKey {\n\ttestKey, err := utils.GenerateECDSAKey(rand.Reader)\n\trequire.NoError(t, err)\n\n\t// Test writing new key in database/cache\n\terr = dbStore.AddKey(trustmanager.KeyInfo{Role: data.CanonicalTimestampRole, Gun: \"gun/ignored\"}, testKey)\n\trequire.NoError(t, err)\n\n\t// Try rotating the key to a valid alias\n\terr = dbStore.RotateKeyPassphrase(testKey.ID(), newValidAlias)\n\trequire.NoError(t, err)\n\n\t// Try rotating the key to an invalid alias\n\terr = dbStore.RotateKeyPassphrase(testKey.ID(), \"invalidAlias\")\n\trequire.Error(t, err, \"there should be no password for 
invalidAlias so rotation should fail\")\n\n\treturn testKey\n}", "func (c *Clac) Rot() error {\n\treturn c.rot(true)\n}", "func TestSwizzlerRotateKeyBaseRole(t *testing.T) {\n\tf, origMeta := createNewSwizzler(t)\n\n\ttheRole := data.CanonicalSnapshotRole\n\tcs := signed.NewEd25519()\n\tpubKey, err := cs.Create(theRole, f.Gun, data.ED25519Key)\n\trequire.NoError(t, err)\n\n\trequire.NoError(t, f.RotateKey(theRole, pubKey))\n\n\tfor role, metaBytes := range origMeta {\n\t\tnewMeta, err := f.MetadataCache.GetSized(role.String(), store.NoSizeLimit)\n\t\trequire.NoError(t, err)\n\n\t\tif role != data.CanonicalRootRole {\n\t\t\trequire.True(t, bytes.Equal(metaBytes, newMeta), \"bytes have changed for role %s\", role)\n\t\t} else {\n\t\t\trequire.False(t, bytes.Equal(metaBytes, newMeta))\n\t\t\torigSigned, newSigned := &data.SignedRoot{}, &data.SignedRoot{}\n\t\t\trequire.NoError(t, json.Unmarshal(metaBytes, origSigned))\n\t\t\trequire.NoError(t, json.Unmarshal(newMeta, newSigned))\n\t\t\trequire.NotEqual(t, []string{pubKey.ID()}, origSigned.Signed.Roles[theRole].KeyIDs)\n\t\t\trequire.Equal(t, []string{pubKey.ID()}, newSigned.Signed.Roles[theRole].KeyIDs)\n\t\t\t_, ok := origSigned.Signed.Keys[pubKey.ID()]\n\t\t\trequire.False(t, ok)\n\t\t\t_, ok = newSigned.Signed.Keys[pubKey.ID()]\n\t\t\trequire.True(t, ok)\n\t\t}\n\t}\n}", "func rotate(s string, rot int) string {\n rot %= 26\n b := []byte(s)\n for i, c := range b {\n c |= 0x20\n if 'a' <= c && c <= 'z' {\n b[i] = alphabet[(int(('z'-'a'+1)+(c-'a'))+rot)%26]\n }\n }\n return string(b)\n}", "func (r *Rover) rotate(com string) *Rover {\n\tr.rotators[r.Dir](com)\n\treturn r\n}", "func (mt Mytoken) Rotate() *Mytoken {\n\trotated := mt\n\trotated.SeqNo++\n\tif rotated.Rotation.Lifetime > 0 {\n\t\trotated.ExpiresAt = unixtime.InSeconds(int64(rotated.Rotation.Lifetime))\n\t}\n\trotated.IssuedAt = unixtime.Now()\n\trotated.NotBefore = rotated.IssuedAt\n\trotated.jwt = \"\"\n\treturn &rotated\n}", "func 
TestRotatePlainTextToEncrypted(t *testing.T) {\n\tdir, err := os.MkdirTemp(\"\", \"badger-test\")\n\trequire.NoError(t, err)\n\tdefer os.RemoveAll(dir)\n\n\t// Open DB without encryption.\n\topts := badger.DefaultOptions(dir)\n\tdb, err := badger.Open(opts)\n\trequire.NoError(t, err)\n\n\trequire.NoError(t, db.Update(func(txn *badger.Txn) error {\n\t\treturn txn.Set([]byte(\"foo\"), []byte(\"bar\"))\n\t}))\n\n\trequire.NoError(t, db.Close())\n\n\t// Create an encryption key.\n\tkey := make([]byte, 32)\n\ty.Check2(rand.Read(key))\n\tfp, err := os.CreateTemp(\"\", \"*.key\")\n\trequire.NoError(t, err)\n\t_, err = fp.Write(key)\n\trequire.NoError(t, err)\n\tdefer fp.Close()\n\n\toldKeyPath = \"\"\n\tnewKeyPath = fp.Name()\n\tsstDir = dir\n\n\t// Enable encryption. newKeyPath is encrypted.\n\trequire.Nil(t, doRotate(nil, []string{}))\n\n\t// Try opening DB without the key.\n\topts.BlockCacheSize = 1 << 20\n\t_, err = badger.Open(opts)\n\trequire.EqualError(t, err, badger.ErrEncryptionKeyMismatch.Error())\n\n\t// Check whether db opens with the new key.\n\topts.EncryptionKey = key\n\tdb, err = badger.Open(opts)\n\trequire.NoError(t, err)\n\n\trequire.NoError(t, db.View(func(txn *badger.Txn) error {\n\t\tiopt := badger.DefaultIteratorOptions\n\t\tit := txn.NewIterator(iopt)\n\t\tdefer it.Close()\n\t\tcount := 0\n\t\tfor it.Rewind(); it.Valid(); it.Next() {\n\t\t\tcount++\n\t\t}\n\t\trequire.Equal(t, 1, count)\n\t\treturn nil\n\t}))\n\trequire.NoError(t, db.Close())\n}", "func rotate(arr []byte, k int) []byte {\n\tn := len(arr)\n\tdst := make([]byte, n)\n\tfor i := 0; i < n; i++ {\n\t\tdst[i] = arr[(i+k)%n]\n\t}\n\treturn dst\n}", "func (mt Mytoken) Rotate() *Mytoken { // skipcq: CRT-P0003\n\trotated := mt\n\trotated.SeqNo++\n\tif rotated.Rotation.Lifetime > 0 {\n\t\trotated.ExpiresAt = unixtime.InSeconds(int64(rotated.Rotation.Lifetime))\n\t}\n\trotated.IssuedAt = unixtime.Now()\n\trotated.NotBefore = rotated.IssuedAt\n\trotated.jwt = \"\"\n\treturn &rotated\n}", "func 
(c Chords) Rotate(k int) Chords {\n\tlength := len(c)\n\tif k < 0 || length == 0 {\n\t\treturn c\n\t}\n\n\tr := k % length\n\treturn append(c[k:], c[:r]...)\n}", "func (e RotationValidationError) Key() bool { return e.key }", "func (w *Writer) Rotate() {\n\tw.rot <- true\n}", "func (ag *Agent) Rotate(v float32) {\n\tag.R += v\n}", "func (tx *Tx) RotateSessionKey() (*Configuration, error) {\n\tconfig := &Configuration{\n\t\tSessionAuthKey: securecookie.GenerateRandomKey(32),\n\t\tSessionCryptKey: securecookie.GenerateRandomKey(32),\n\t}\n\tconfig, _, err := tx.UpdateConfiguration(config)\n\treturn config, err\n}", "func TestEncryptionRotation(t *testing.T) {\n\tlibrary.TestEncryptionRotation(t, library.RotationScenario{\n\t\tBasicScenario: library.BasicScenario{\n\t\t\tNamespace: operatorclient.GlobalMachineSpecifiedConfigNamespace,\n\t\t\tLabelSelector: \"encryption.apiserver.operator.openshift.io/component\" + \"=\" + operatorclient.TargetNamespace,\n\t\t\tEncryptionConfigSecretName: fmt.Sprintf(\"encryption-config-%s\", operatorclient.TargetNamespace),\n\t\t\tEncryptionConfigSecretNamespace: operatorclient.GlobalMachineSpecifiedConfigNamespace,\n\t\t\tOperatorNamespace: operatorclient.OperatorNamespace,\n\t\t\tTargetGRs: operatorencryption.DefaultTargetGRs,\n\t\t\tAssertFunc: operatorencryption.AssertSecretsAndConfigMaps,\n\t\t},\n\t\tCreateResourceFunc: operatorencryption.CreateAndStoreSecretOfLife,\n\t\tGetRawResourceFunc: operatorencryption.GetRawSecretOfLife,\n\t\tUnsupportedConfigFunc: func(raw []byte) error {\n\t\t\toperatorClient := operatorencryption.GetOperator(t)\n\t\t\tapiServerOperator, err := operatorClient.Get(context.TODO(), \"cluster\", metav1.GetOptions{})\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tapiServerOperator.Spec.UnsupportedConfigOverrides.Raw = raw\n\t\t\t_, err = operatorClient.Update(context.TODO(), apiServerOperator, metav1.UpdateOptions{})\n\t\t\treturn err\n\t\t},\n\t\tEncryptionProvider: 
configv1.EncryptionType(*provider),\n\t})\n}", "func Rotate(v1, v2 Vect) Vect {\n\treturn Vect{v1.X*v2.X - v1.Y*v2.Y, v1.X*v2.Y + v1.Y*v2.X}\n}", "func RotationalCipher(s string, key int) string {\n\tc := make([]byte, len(s))\n\tvar a int\n\tfor k, v := range s {\n\t\t// lower case letters\n\t\tif v <= 'z' && v >= 'a' {\n\t\t\ta = 'a'\n\t\t\t// upper case letters\n\t\t} else if v <= 'Z' && v >= 'A' {\n\t\t\ta = 'A'\n\t\t\t//punctuations\n\t\t} else {\n\t\t\tc[k] = s[k]\n\t\t\tcontinue\n\t\t}\n\t\tc[k] = byte(a + ((int(v)-a)+key)%26)\n\n\t}\n\treturn string(c)\n}", "func (n Notes) Rotate(k int) Notes {\n\tif k < 0 {\n\t\tpanic(\"invalid rotation\")\n\t}\n\n\tr := k % len(n)\n\treturn append(n[k:], n[:r]...)\n}", "func (journal *txJournal) rotate(all map[common.Address]common.Transactions) error {\n\treturn nil\n}", "func (self *rsaKeyHolder) initEncryptionKey() {\n\tlog.Print(\"Generating JWE encryption key\")\n\tself.mux.Lock()\n\tdefer self.mux.Unlock()\n\n\tprivateKey, err := rsa.GenerateKey(rand.Reader, 2048)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tself.key = privateKey\n}", "func (text *TEXT) Rotate(r ...float64) *TEXT {\n\ttext.rotate = r\n\treturn text\n}", "func (c *Clac) RotR() error {\n\treturn c.rotR(true)\n}", "func RepeatingKey(plaintext []byte, key []byte) []byte {\n\tciphertext := make([]byte, len(plaintext))\n\tkeyLength := len(key)\n\tfor i, thisByte := range plaintext {\n\t\tciphertext[i] = thisByte ^ key[i%keyLength]\n\t}\n\treturn ciphertext\n}", "func (item *Item) cipherKey(skey []byte) []byte {\n\tif item.Password == \"\" {\n\t\treturn skey\n\t}\n\tn := len(skey)\n\tk := make([]byte, n)\n\tp := []byte(item.Password)\n\t// key = (byte of password) + (bytes of default key)\n\tfor i := range k {\n\t\tif i < len(p) {\n\t\t\tk[i] = p[i]\n\t\t} else {\n\t\t\tk[i] = skey[i]\n\t\t}\n\t}\n\treturn k\n}", "func (c2d *C2DMatrix) Rotate(rot float64) {\n\tvar mat Matrix\n\n\tvar Sin float64 = math.Sin(rot)\n\tvar Cos float64 = 
math.Cos(rot)\n\n\tmat.m11 = Cos\n\tmat.m12 = Sin\n\tmat.m13 = 0\n\n\tmat.m21 = -Sin\n\tmat.m22 = Cos\n\tmat.m23 = 0\n\n\tmat.m31 = 0\n\tmat.m32 = 0\n\tmat.m33 = 1\n\n\t//and multiply\n\tc2d.MatrixMultiply(mat)\n}", "func (s *CreateJobOutput) SetRotate(v string) *CreateJobOutput {\n\ts.Rotate = &v\n\treturn s\n}", "func (o BucketObjectCustomerEncryptionOutput) EncryptionKey() pulumi.StringOutput {\n\treturn o.ApplyT(func(v BucketObjectCustomerEncryption) string { return v.EncryptionKey }).(pulumi.StringOutput)\n}", "func (k *Filesystem) Encrypt(ctx context.Context, keyID string, plaintext []byte, aad []byte) ([]byte, error) {\n\tk.mu.RLock()\n\tdefer k.mu.RUnlock()\n\n\t// Find the most recent DEK - that's what we'll use for encryption\n\tpth := filepath.Join(k.root, keyID)\n\tinfos, err := os.ReadDir(pth)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to list keys: %w\", err)\n\t}\n\tif len(infos) < 1 {\n\t\treturn nil, fmt.Errorf(\"there are no key versions\")\n\t}\n\tvar latest fs.DirEntry\n\tfor _, info := range infos {\n\t\tif info.Name() == \"metadata\" {\n\t\t\tcontinue\n\t\t}\n\t\tif latest == nil {\n\t\t\tlatest = info\n\t\t\tcontinue\n\t\t}\n\t\tif info.Name() > latest.Name() {\n\t\t\tlatest = info\n\t\t}\n\t}\n\tif latest == nil {\n\t\treturn nil, fmt.Errorf(\"key %q does not exist\", keyID)\n\t}\n\n\tlatestPath := filepath.Join(pth, latest.Name())\n\tdek, err := os.ReadFile(latestPath)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to read encryption key: %w\", err)\n\t}\n\n\tblock, err := aes.NewCipher(dek)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"bad cipher block: %w\", err)\n\t}\n\taesgcm, err := cipher.NewGCM(block)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to wrap cipher block: %w\", err)\n\t}\n\tnonce := make([]byte, aesgcm.NonceSize())\n\tif _, err := io.ReadFull(rand.Reader, nonce); err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to generate nonce: %w\", err)\n\t}\n\tciphertext := aesgcm.Seal(nonce, 
nonce, plaintext, aad)\n\n\t// Append the keyID to the ciphertext so we know which key to use to decrypt.\n\tid := []byte(latest.Name() + \":\")\n\tciphertext = append(id, ciphertext...)\n\n\treturn ciphertext, nil\n}", "func (e aesGCMEncodedEncryptor) ConfiguredToRotate() bool {\n\treturn len(e.primaryKey) == requiredKeyLength && len(e.secondaryKey) == requiredKeyLength\n}", "func rotateText(inputText string, rot int) string {\n rot %= 26\n rotatedText := []byte(inputText)\n\n for index, byteValue := range rotatedText {\n if byteValue >= 'a' && byteValue <= 'z' {\n rotatedText[index] = lowerCaseAlphabet[(int((26+(byteValue-'a')))+rot)%26]\n } else if byteValue >= 'A' && byteValue <= 'Z' {\n rotatedText[index] = upperCaseAlphabet[(int((26+(byteValue-'A')))+rot)%26]\n }\n }\n return string(rotatedText)\n}", "func (lf *File) Rotate() {\n\tlf.ReOpen()\n}", "func (ce *ColumnEncryptionProperties) Key() string { return ce.key }", "func (self *rsaKeyHolder) recreate(obj runtime.Object) {\n\tsecret := obj.(*v1.Secret)\n\tlog.Printf(\"Synchronized secret %s has been deleted. 
Recreating.\", secret.Name)\n\tif err := self.synchronizer.Create(self.getEncryptionKeyHolder()); err != nil {\n\t\tpanic(err)\n\t}\n}", "func (l *FileWriter) rotate() error {\n\tif err := l.close(); err != nil {\n\t\treturn err\n\t}\n\tif err := l.openNew(); err != nil {\n\t\treturn err\n\t}\n\tl.mill()\n\treturn nil\n}", "func (backend *JwtBackend) rotateSecret(storage logical.Storage, roleID string, secretID string, TTL int) (*secretStorageEntry, error) {\n\tif roleID == \"\" {\n\t\treturn nil, fmt.Errorf(\"Secrets Role ID is not specified\")\n\t}\n\n\tif secretID == \"\" {\n\t\treturn nil, fmt.Errorf(\"Secrets ID is not specified\")\n\t}\n\n\tsecretKey, _ := uuid.NewUUID()\n\tsalt, _ := backend.Salt()\n\tkey := salt.GetHMAC(secretKey.String())\n\n\tsecretEntry, err := backend.getSecretEntry(storage, roleID, secretID)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tsecretEntry.Key = key\n\tsecretEntry.CreationTime = time.Now().UTC()\n\tsecretEntry.Expiration = time.Now().Add(time.Duration(TTL) * time.Second).UTC()\n\n\tif err := backend.setSecretEntry(storage, secretEntry); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn secretEntry, nil\n}", "func rotorsIncr(_key [3]int, _rotors [3]int) [3]int {\n\tvar notch = [5]int{16, 4, 21, 9, 25}\n\tif _key[1] == notch[_rotors[1]] {\n\t\t_key[0] = (_key[0] + 1) % 26\n\t\t_key[1] = (_key[1] + 1) % 26\n\t}\n\tif _key[2] == notch[_rotors[2]] {\n\t\t_key[1] = (_key[1] + 1) % 26\n\t}\n\t_key[2] = (_key[2] + 1) % 26\n\treturn _key\n}", "func (cd *ColumnDecryptionProperties) Key() string { return cd.key }", "func (s *JobOutput) SetRotate(v string) *JobOutput {\n\ts.Rotate = &v\n\treturn s\n}", "func (q Quat) Rotate(other Quat) Quat {\n\treturn Quat{\n\t\t(other.W * q.W) - (other.X * q.X) - (other.Y * q.Y) - (other.Z * q.Z),\n\t\t(other.X * q.W) + (other.W * q.X) - (other.Z * q.Y) + (other.Y * q.Z),\n\t\t(other.Y * q.W) + (other.Z * q.X) + (other.W * q.Y) - (other.X * q.Z),\n\t\t(other.Z * q.W) - (other.Y * q.X) + (other.X * 
q.Y) + (other.W * q.Z),\n\t}\n}", "func (ce *ColumnEncryptionProperties) WipeOutEncryptionKey() { ce.key = \"\" }", "func (obj *key) Key() rsa.PublicKey {\n\treturn obj.ky\n}", "func ConfiguredToRotate() bool {\n\treturn defaultEncryptor.ConfiguredToRotate()\n}", "func (obj *key) Encrypt(msg []byte) ([]byte, error) {\n\th := sha256.New()\n\treturn rsa.EncryptOAEP(h, rand.Reader, &obj.ky, msg, []byte(\"\"))\n}", "func (c *MysqlConn) Rotate(operation Operation) OpOutput {\n\tsp, err := GetMysqlOpQuery(operation)\n\tif err != nil {\n\t\treturn OpOutput{\n\t\t\tResult: nil,\n\t\t\tErr: err,\n\t\t}\n\t}\n\n\trows, err := c.c.Query(sp)\n\tif err != nil {\n\t\treturn OpOutput{Result: nil, Err: err}\n\t}\n\n\tvar key string\n\tvar value string\n\tresult := make(map[string]string)\n\tfor rows.Next() {\n\t\terr = rows.Scan(&key, &value)\n\t\tif err != nil {\n\t\t\treturn OpOutput{nil, err}\n\t\t}\n\t\tresult[key] = value\n\t}\n\n\treturn OpOutput{result, nil}\n}", "func (p *siprng) rekey() {\n\tvar k [16]byte\n\tif _, err := io.ReadFull(rand.Reader, k[:]); err != nil {\n\t\tpanic(err.Error())\n\t}\n\tp.k0 = binary.LittleEndian.Uint64(k[0:8])\n\tp.k1 = binary.LittleEndian.Uint64(k[8:16])\n\tp.ctr = 1\n}", "func Vrotate(v1, v2 Vect) Vect {\n\treturn goVect(C.cpvrotate(v1.c(), v2.c()))\n}", "func (decryptor *PgDecryptor) ReadSymmetricKeyRotated(privateKeys []*keys.PrivateKey, reader io.Reader) ([]byte, []byte, error) {\n\tsymmetricKey, rawData, err := decryptor.binaryDecryptor.ReadSymmetricKeyRotated(privateKeys, reader)\n\tif err != nil {\n\t\treturn symmetricKey, rawData, err\n\t}\n\treturn symmetricKey, rawData, nil\n}", "func (client JobClient) RenewKey(ctx context.Context, resourceGroupName string, accountName string, jobName string) (result JobSasTokenDescription, err error) {\n if tracing.IsEnabled() {\n ctx = tracing.StartSpan(ctx, fqdn + \"/JobClient.RenewKey\")\n defer func() {\n sc := -1\n if result.Response.Response != nil {\n sc = 
result.Response.Response.StatusCode\n }\n tracing.EndSpan(ctx, sc, err)\n }()\n }\n req, err := client.RenewKeyPreparer(ctx, resourceGroupName, accountName, jobName)\n if err != nil {\n err = autorest.NewErrorWithError(err, \"microsoftazuremanagementaisupercomputer.JobClient\", \"RenewKey\", nil , \"Failure preparing request\")\n return\n }\n\n resp, err := client.RenewKeySender(req)\n if err != nil {\n result.Response = autorest.Response{Response: resp}\n err = autorest.NewErrorWithError(err, \"microsoftazuremanagementaisupercomputer.JobClient\", \"RenewKey\", resp, \"Failure sending request\")\n return\n }\n\n result, err = client.RenewKeyResponder(resp)\n if err != nil {\n err = autorest.NewErrorWithError(err, \"microsoftazuremanagementaisupercomputer.JobClient\", \"RenewKey\", resp, \"Failure responding to request\")\n }\n\n return\n}", "func (e *Entity) encryptionKey(now time.Time) (Key, bool) {\n\tcandidateSubkey := -1\n\n\t// Iterate the keys to find the newest key\n\tvar maxTime time.Time\n\tfor i, subkey := range e.Subkeys {\n\t\tif subkey.Sig.FlagsValid &&\n\t\t\tsubkey.Sig.FlagEncryptCommunications &&\n\t\t\tsubkey.PublicKey.PubKeyAlgo.CanEncrypt() &&\n\t\t\t!subkey.Sig.KeyExpired(now) &&\n\t\t\t(maxTime.IsZero() || subkey.Sig.CreationTime.After(maxTime)) {\n\t\t\tcandidateSubkey = i\n\t\t\tmaxTime = subkey.Sig.CreationTime\n\t\t}\n\t}\n\n\tif candidateSubkey != -1 {\n\t\tsubkey := e.Subkeys[candidateSubkey]\n\t\treturn Key{e, subkey.PublicKey, subkey.PrivateKey, subkey.Sig}, true\n\t}\n\n\t// If we don't have any candidate subkeys for encryption and\n\t// the primary key doesn't have any usage metadata then we\n\t// assume that the primary key is ok. 
Or, if the primary key is\n\t// marked as ok to encrypt to, then we can obviously use it.\n\ti := e.primaryIdentity()\n\tif !i.SelfSignature.FlagsValid || i.SelfSignature.FlagEncryptCommunications &&\n\t\te.PrimaryKey.PubKeyAlgo.CanEncrypt() &&\n\t\t!i.SelfSignature.KeyExpired(now) {\n\t\treturn Key{e, e.PrimaryKey, e.PrivateKey, i.SelfSignature}, true\n\t}\n\n\t// This Entity appears to be signing only.\n\treturn Key{}, false\n}", "func (transform *Transform) Rotate(angle float32, axis mgl32.Vec3) {\n\ttransform.objMatrix = mgl32.HomogRotate3D(angle, axis)\n}", "func (canvas *Canvas) Rotate(theta float32) {\n\ts, c := math.Sin(float64(theta)), math.Cos(float64(theta))\n\twriteCommand(canvas.contents, \"cm\", c, s, -s, c, 0, 0)\n}", "func (b *Bucket) RotateFileEncryptionKeysForPrefix(pre string) error {\n\tif b.Version == 0 {\n\t\treturn nil\n\t}\n\n\tfor p, md := range b.Metadata {\n\t\tif strings.HasPrefix(p, pre) {\n\t\t\tif md.Key != \"\" {\n\t\t\t\tkey, err := dcrypto.NewKey()\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tmd.SetFileEncryptionKey(key)\n\t\t\t}\n\t\t}\n\t}\n\treturn nil\n}", "func (key *Key) MarshalJSON(passphrase string) ([]byte, error) {\n\tsalt, err := crypto.RandomEntropy(32)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdk, err := scrypt.Key([]byte(passphrase), salt, scryptN, scryptR, scryptP, scryptKeyLen)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tiv, err := crypto.RandomEntropy(aes.BlockSize)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tenckey := dk[:16]\n\n\tprivateKeyBytes, err := key.KeyPair.Private.Bytes()\n\tprivateKeyBytes = privateKeyBytes[4:]\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\taesBlock, err := aes.NewCipher(enckey)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tstream := cipher.NewCTR(aesBlock, iv)\n\tcipherText := make([]byte, len(privateKeyBytes))\n\tstream.XORKeyStream(cipherText, privateKeyBytes)\n\n\tmac := crypto.Keccak256(dk[16:32], cipherText)\n\tcipherParamsJSON := 
cipherparamsJSON{\n\t\tIV: hex.EncodeToString(iv),\n\t}\n\n\tsp := ScryptParams{\n\t\tN: scryptN,\n\t\tR: scryptR,\n\t\tP: scryptP,\n\t\tDKeyLength: scryptKeyLen,\n\t\tSalt: hex.EncodeToString(salt),\n\t}\n\n\tkeyjson := cryptoJSON{\n\t\tCipher: ksCipher,\n\t\tCipherText: hex.EncodeToString(cipherText),\n\t\tCipherParams: cipherParamsJSON,\n\t\tKDF: nameKDF,\n\t\tKDFParams: sp,\n\t\tMAC: hex.EncodeToString(mac),\n\t}\n\n\tencjson := encryptedKeyJSON{\n\t\tAddress: key.KeyPair.Address,\n\t\tCrypto: keyjson,\n\t\tID: key.ID.String(),\n\t\tVersion: ksVersion,\n\t}\n\tdata, err := json.MarshalIndent(&encjson, \"\", \" \")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn data, nil\n}", "func Rekey(dbname string, oldPassphrase, newPassphrase []byte, newIter int) error {\n\treturn encdb.Rekey(dbname, oldPassphrase, newPassphrase, newIter)\n}", "func (w *RotateWriter) rotate() (err error) {\n\t// Close existing file if open.\n\tif w.fp != nil {\n\t\terr = w.fp.Close()\n\t\tw.fp = nil\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\t}\n\n\t// Rename dest file if it already exists.\n\t_, err = os.Stat(w.filename)\n\tif err == nil {\n\t\trot := w.filename + \".\" + time.Now().Format(TimeFmt)\n\t\terr = os.Rename(w.filename, rot)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif w.Compress {\n\t\t\terr = w.compress(rot) // TODO: async\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\n\t// Clean up old.\n\tw.drain()\n\n\t// Create new.\n\treturn w.open()\n}", "func (s *Surface) Rotate(radians float64) {\n\ts.Ctx.Call(\"rotate\", 2*math.Pi-radians)\n}", "func (t *transform) Rotate(rotate mgl32.Vec3) {\n\tt.dataLock.Lock()\n\tdefer t.dataLock.Unlock()\n\n\tt.rotation = t.rotation.Add(rotate)\n\ttotal := t.rotation\n\trotX := mgl32.HomogRotate3DX(total.X())\n\trotY := mgl32.HomogRotate3DY(total.Y())\n\trotZ := mgl32.HomogRotate3DZ(total.Z())\n\trotMatrix := rotZ.Mul4(rotY).Mul4(rotX)\n\ttrans := t.translation\n\tt.modelView = 
mgl32.Ident4().Mul4(mgl32.Translate3D(trans.X(), trans.Y(), trans.Z())).Mul4(rotMatrix)\n}", "func (c Cardinal) RotateCW() Cardinal {\r\n\treturn (c + 1) % 4\r\n}", "func Cipher(msg string, key string) string {\n\tvar ciphered string \n\tvar keylen int = len(key)\n\tif (keylen == 0) { return msg } // No key provided\n\tfor i := 0; i < len(msg); i++ {\n\t\tvar keyIndex int = i % keylen // Calculate the key index e.g. (i=10, keylen=4, keyIndex 2), (i=11, keylen=4, keyIndex=3)\n\t\tciphered += string(msg[i] ^ key[keyIndex])\n\t}\n\treturn ciphered\n}", "func EncryptionKey() []byte {\n\treturn store.EncryptionKey\n}", "func (this *Transformable) Rotate(angle float32) {\n\tC.sfTransformable_rotate(this.cptr, C.float(angle))\n}", "func (w *FileLogWriter) Rotate() {\n\tw.rot <- true\n}", "func AsJWK(key interface{}) (*jose.JsonWebKey, error) {\n\tJWK := jose.JsonWebKey{\n\t\tKey: key,\n\t\tAlgorithm: string(jose.RSA1_5),\n\t}\n\tthumbprint, err := JWK.Thumbprint(crypto.SHA256)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tJWK.KeyID = base64.URLEncoding.EncodeToString(thumbprint)\n\treturn &JWK, nil\n}", "func (a *AuthTime) keyString() string {\n\treturn fmt.Sprintf(\"%d;%d\", a.KeyStartTime.Unix(), a.KeyEndTime.Unix())\n}", "func (self *TileSprite) SetPreviousRotationA(member int) {\n self.Object.Set(\"previousRotation\", member)\n}", "func (self *TileSprite) PreviousRotation() int{\n return self.Object.Get(\"previousRotation\").Int()\n}", "func GenReencryptHash(runtime *config.ControlRuntime, keyName string) (string, error) {\n\n\tkeys, err := GetEncryptionKeys(runtime)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tnewKey := apiserverconfigv1.Key{\n\t\tName: keyName,\n\t\tSecret: \"12345\",\n\t}\n\tkeys = append(keys, newKey)\n\tb, err := json.Marshal(keys)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\thash := sha256.Sum256(b)\n\treturn hex.EncodeToString(hash[:]), nil\n}", "func (c *canvasRenderer) Rotate(angle sprec.Angle) {\n\tc.currentLayer.Transform = 
sprec.Mat4Prod(\n\t\tc.currentLayer.Transform,\n\t\tsprec.RotationMat4(angle, 0.0, 0.0, 1.0),\n\t)\n}", "func (key twofishKey) Key() []byte {\n\treturn key[:]\n}", "func (m *Manager) RotateSSH(name string, gOpt operator.Options, skipConfirm bool) error {\n\tmetadata, err := m.meta(name)\n\tif err != nil && !errors.Is(perrs.Cause(err), meta.ErrValidate) &&\n\t\t!errors.Is(perrs.Cause(err), spec.ErrNoTiSparkMaster) {\n\t\treturn err\n\t}\n\n\ttopo := metadata.GetTopology()\n\tbase := metadata.GetBaseMeta()\n\tif !skipConfirm {\n\t\tif err := tui.PromptForConfirmOrAbortError(\n\t\t\t\"This operation will rotate ssh keys for user '%s' .\\nDo you want to continue? [y/N]:\",\n\t\t\tbase.User); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tvar rotateSSHTasks []*task.StepDisplay // tasks which are used to initialize environment\n\tuniqueHosts, _ := getMonitorHosts(topo)\n\tfor host, hostInfo := range uniqueHosts {\n\t\tt, err := m.sshTaskBuilder(name, topo, base.User, gOpt)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tt = t.RotateSSH(host, base.User, m.specManager.Path(name, \"ssh\", \"new.pub\"))\n\n\t\trotateSSHTasks = append(rotateSSHTasks, t.BuildAsStep(fmt.Sprintf(\" - Rotate ssh key on %s:%d\", host, hostInfo.ssh)))\n\t}\n\n\tbuilder := task.NewBuilder(m.logger).\n\t\tStep(\"+ Generate new SSH keys\",\n\t\t\ttask.NewBuilder(m.logger).\n\t\t\t\tSSHKeyGen(m.specManager.Path(name, \"ssh\", \"new\")).\n\t\t\t\tBuild(),\n\t\t\tm.logger).\n\t\tParallelStep(\"+ rotate ssh keys of target host environments\", false, rotateSSHTasks...).\n\t\tStep(\"+ overwrite old SSH keys\",\n\t\t\ttask.NewBuilder(m.logger).\n\t\t\t\tFunc(\"rename\", func(ctx context.Context) error {\n\t\t\t\t\terr := os.Rename(m.specManager.Path(name, \"ssh\", \"new.pub\"), m.specManager.Path(name, \"ssh\", \"id_rsa.pub\"))\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\treturn err\n\t\t\t\t\t}\n\t\t\t\t\terr = os.Rename(m.specManager.Path(name, \"ssh\", \"new\"), m.specManager.Path(name, \"ssh\", 
\"id_rsa\"))\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\treturn err\n\t\t\t\t\t}\n\t\t\t\t\treturn nil\n\t\t\t\t}).\n\t\t\t\tBuild(),\n\t\t\tm.logger)\n\n\tctx := ctxt.New(\n\t\tcontext.Background(),\n\t\tgOpt.Concurrency,\n\t\tm.logger,\n\t)\n\tif err := builder.Build().Execute(ctx); err != nil {\n\t\tif errorx.Cast(err) != nil {\n\t\t\t// FIXME: Map possible task errors and give suggestions.\n\t\t\treturn err\n\t\t}\n\t\treturn err\n\t}\n\tm.logger.Infof(\"ssh keys are successfully updated\")\n\treturn nil\n}", "func (eval *evaluator) RotateHoisted(ctIn *Ciphertext, rotations []int, ctOut map[int]*Ciphertext) {\n\tlevelQ := ctIn.Level()\n\teval.DecomposeNTT(levelQ, eval.params.PCount()-1, eval.params.PCount(), ctIn.Value[1], eval.PoolDecompQP)\n\tfor _, i := range rotations {\n\t\tif i == 0 {\n\t\t\tctOut[i].Copy(ctIn)\n\t\t} else {\n\t\t\teval.PermuteNTTHoisted(levelQ, ctIn.Value[0], ctIn.Value[1], eval.PoolDecompQP, i, ctOut[i].Value[0], ctOut[i].Value[1])\n\t\t}\n\t}\n}", "func (self *Graphics) SetPreviousRotationA(member int) {\n self.Object.Set(\"previousRotation\", member)\n}", "func (c *Cipher) ReKey(key, nonce []byte) error {\n\tc.Reset()\n\treturn c.doReKey(key, nonce)\n}", "func (c Cardinal) RotateCCW() Cardinal {\r\n\treturn (c + 3) % 4\r\n}", "func rc4K(key []byte, ciphertext []byte) ([]byte, error) {\n\tcipher, err := rc4P.NewCipher(key)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tresult := make([]byte, len(ciphertext))\n\tcipher.XORKeyStream(result, ciphertext)\n\treturn result, nil\n}", "func encryptionKey(e *openpgp.Entity, now time.Time) (openpgp.Key, bool) {\n\tcandidateSubkey := -1\n\n\t// Iterate the keys to find the newest key\n\tvar maxTime time.Time\n\tfor i, subkey := range e.Subkeys {\n\t\tif subkey.Sig.FlagsValid &&\n\t\t\tsubkey.Sig.FlagEncryptCommunications &&\n\t\t\tsubkey.PublicKey.PubKeyAlgo.CanEncrypt() &&\n\t\t\t!subkey.PublicKey.KeyExpired(subkey.Sig, now) &&\n\t\t\t(maxTime.IsZero() || subkey.Sig.CreationTime.After(maxTime)) 
{\n\t\t\tcandidateSubkey = i\n\t\t\tmaxTime = subkey.Sig.CreationTime\n\t\t}\n\t}\n\n\tif candidateSubkey != -1 {\n\t\tsubkey := e.Subkeys[candidateSubkey]\n\t\treturn *entitySubkeyToKey(e, &subkey), true\n\t}\n\n\t// If we don't have any candidate subkeys for encryption and\n\t// the primary key doesn't have any usage metadata then we\n\t// assume that the primary key is ok. Or, if the primary key is\n\t// marked as ok to encrypt to, then we can obviously use it.\n\ti := primaryIdentity(e)\n\tif !i.SelfSignature.FlagsValid || i.SelfSignature.FlagEncryptCommunications && e.PrimaryKey.PubKeyAlgo.CanEncrypt() && !i.SelfSignature.SigExpired(now) {\n\t\treturn *entityToKey(e, i), true\n\t}\n\n\t// This Entity appears to be signing only.\n\treturn openpgp.Key{}, false\n}", "func (j *JWT) KeyFunc(token *gojwt.Token) (interface{}, error) {\n\tif _, ok := token.Method.(*gojwt.SigningMethodHMAC); ok {\n\t\treturn []byte(j.Secret), nil\n\t} else if _, ok := token.Method.(*gojwt.SigningMethodRSA); ok {\n\t\treturn j.KeyFuncRS256(token)\n\t}\n\treturn nil, fmt.Errorf(\"unexpected signing method: %v\", token.Header[\"alg\"])\n}", "func (self *Graphics) PreviousRotation() int{\n return self.Object.Get(\"previousRotation\").Int()\n}", "func (self *botStats) rotate(date flap.EpochTime, rdd flap.Days, t db.Table) {\n\ti:= len(self.Rows) -1\n\tself.Rows[i].Date = date\n\tself.Rows[i].Entries +=1\n\tif self.Rows[i].Entries == int(rdd) {\n\t\tself.newRow()\n\t}\n\tself.save(t,i)\n}", "func (r *ImageRef) Rotate(angle Angle) error {\n\twidth := r.Width()\n\n\tif r.Pages() > 1 && (angle == Angle90 || angle == Angle270) {\n\t\tif angle == Angle270 {\n\t\t\tif err := r.Flip(DirectionHorizontal); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t\tif err := r.Grid(r.GetPageHeight(), r.Pages(), 1); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tif angle == Angle270 {\n\t\t\tif err := r.Flip(DirectionHorizontal); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t}\n\n\tout, err := 
vipsRotate(r.image, angle)\n\tif err != nil {\n\t\treturn err\n\t}\n\tr.setImage(out)\n\n\tif r.Pages() > 1 && (angle == Angle90 || angle == Angle270) {\n\t\tif err := r.SetPageHeight(width); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}", "func (k *DecryptionKey) Marshal() ([]byte, error) {\n\treturn ic.MarshalPrivateKey(k.sk)\n}", "func (r *RGBARotator) Rotate180() {\n\tsrc := r.Img\n\tsrcB := src.Bounds()\n\tsrcWidth := srcB.Dx()\n\tsrcHeight := srcB.Dy()\n\n\tdst := image.NewRGBA(image.Rect(0, 0, srcWidth, srcHeight))\n\n\tvar x, y, srcIdx, dstIdx int64\n\tmaxX, maxY := int64(srcWidth), int64(srcHeight)\n\tsrcStride, dstStride := int64(src.Stride), int64(dst.Stride)\n\tsrcPix := src.Pix\n\tdstPix := dst.Pix\n\tfor y = 0; y < maxY; y++ {\n\t\tfor x = 0; x < maxX; x++ {\n\t\t\tsrcIdx = y*srcStride + (x << 2)\n\t\t\tdstIdx = (maxY-1-y)*dstStride + ((maxX - 1 - x) << 2)\n\t\t\tcopy(dstPix[dstIdx:dstIdx+4], srcPix[srcIdx:srcIdx+4])\n\t\t}\n\t}\n\n\tr.Img = dst\n}", "func EncryptKey(key *Key, auth string, scryptN, scryptP int) ([]byte, error) {\n\tauthArray := []byte(auth)\n\tsalt := GetEntropyCSPRNG(32)\n\tderivedKey, err := scrypt.Key(authArray, salt, scryptN, scryptR, scryptP, scryptDKLen)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tencryptKey := derivedKey[:16]\n\tkeyBytes := math.PaddedBigBytes(key.PrivateKey.D, 32)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tiv := GetEntropyCSPRNG(aes.BlockSize) // 16\n\tcipherText, err := aesCTRXOR(encryptKey, keyBytes, iv)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tmac := NewHash(derivedKey[16:32], cipherText)\n\n\tscryptParamsJSON := make(map[string]interface{}, 5)\n\tscryptParamsJSON[\"n\"] = scryptN\n\tscryptParamsJSON[\"r\"] = scryptR\n\tscryptParamsJSON[\"p\"] = scryptP\n\tscryptParamsJSON[\"dklen\"] = scryptDKLen\n\tscryptParamsJSON[\"salt\"] = hex.EncodeToString(salt)\n\n\tcipherParamsJSON := cipherparamsJSON{\n\t\tIV: hex.EncodeToString(iv),\n\t}\n\n\tcryptoStruct := cryptoJSON{\n\t\tCipher: 
\"aes-128-ctr\",\n\t\tCipherText: hex.EncodeToString(cipherText),\n\t\tCipherParams: cipherParamsJSON,\n\t\tKDF: keyHeaderKDF,\n\t\tKDFParams: scryptParamsJSON,\n\t\tMAC: hex.EncodeToString(mac.Bytes()),\n\t}\n\n\tencryptedKeyJSONV3 := encryptedKeyJSONV3{\n\t\tkey.Address,\n\t\tcryptoStruct,\n\t\tkey.Id.String(),\n\t\tversion,\n\t}\n\treturn json.Marshal(encryptedKeyJSONV3)\n}", "func decrypt(_message string, _rotors [3]int, _ref int, _key [3]int) string {\n\tvar builder strings.Builder\n\n\tfor _, char := range _message {\n\t\t_key = rotorsIncr(_key, _rotors)\n\t\tvar rd = (byte(rotors[_rotors[2]][(byte(char)-65+byte(_key[2])+26)%26]) - 65 + 26 - byte(_key[2])) % 26\n\t\tvar rm = (byte(rotors[_rotors[1]][(rd+byte(_key[1])+26)%26]) - 65 + 26 - byte(_key[1])) % 26\n\t\tvar rg = (byte(rotors[_rotors[0]][(rm+byte(_key[0])+26)%26]) - 65 + 26 - byte(_key[0])) % 26\n\t\tvar r = byte(rotors[_ref][rg] - 65)\n\n\t\tvar rg2 = (byte(rotorsInv[_rotors[0]][(r+byte(_key[0])+26)%26]) - 65 + 26 - byte(_key[0])) % 26\n\t\tvar rm2 = (byte(rotorsInv[_rotors[1]][(rg2+byte(_key[1])+26)%26]) - 65 + 26 - byte(_key[1])) % 26\n\t\tvar rd2 = (byte(rotorsInv[_rotors[2]][(rm2+byte(_key[2])+26)%26]) - 65 + 26 - byte(_key[2])) % 26\n\t\tbuilder.WriteRune(rune(rd2 + 65))\n\t}\n\n\treturn builder.String()\n}", "func (t *Tree) leftRotate(x *Node) {\n\ty := x.right\n\tx.right = y.left\n\tif y.left != nil {\n\t\ty.left.p = x\n\t}\n\tt.transplant(x, y)\n\ty.left = x\n\tx.p = y\n}" ]
[ "0.68230635", "0.66989833", "0.66093475", "0.647618", "0.6370345", "0.6170176", "0.60946894", "0.6072444", "0.5827766", "0.5811087", "0.5599854", "0.5597232", "0.55714077", "0.55631316", "0.55575067", "0.55345094", "0.55277187", "0.549797", "0.5486713", "0.545797", "0.5429036", "0.5378346", "0.53783274", "0.5357436", "0.5314408", "0.5300428", "0.52864677", "0.5284503", "0.52474976", "0.5246306", "0.52344793", "0.5229894", "0.5223779", "0.52181166", "0.51930183", "0.5153825", "0.5145179", "0.513208", "0.5121423", "0.5106245", "0.50927675", "0.50799125", "0.50498104", "0.5041327", "0.5033124", "0.5025203", "0.5017004", "0.5011344", "0.49986583", "0.4994486", "0.4992916", "0.49871346", "0.49832487", "0.4982121", "0.49747705", "0.496962", "0.49681312", "0.4954282", "0.49541104", "0.49461126", "0.49408337", "0.49377286", "0.4936801", "0.49272928", "0.49159196", "0.4915233", "0.4912421", "0.4906594", "0.49009353", "0.48956466", "0.48855308", "0.48813957", "0.48791447", "0.48776087", "0.4877485", "0.4873049", "0.48706427", "0.48697004", "0.48628548", "0.48496026", "0.48395374", "0.48369423", "0.48344013", "0.4831111", "0.48297057", "0.482435", "0.48176935", "0.48158333", "0.4797376", "0.4797112", "0.4790722", "0.4784462", "0.4779278", "0.47781667", "0.47714648", "0.47611937", "0.47581863", "0.475749", "0.47543073", "0.47535002" ]
0.7712946
0
toGA is an utility method to return the baseInstance data as a GA Instance object
toGA — это вспомогательный метод, возвращающий данные baseInstance в виде объекта GA Instance
func (bi *baseInstance) toGA() *ga.Instance { inst := &ga.Instance{Name: bi.name, Zone: bi.zone, NetworkInterfaces: []*ga.NetworkInterface{{}}} if bi.aliasRange != "" { inst.NetworkInterfaces[0].AliasIpRanges = []*ga.AliasIpRange{ {IpCidrRange: bi.aliasRange, SubnetworkRangeName: util.TestSecondaryRangeName}, } } return inst }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (bi *baseInstance) toBeta() *beta.Instance {\n\tinst := &beta.Instance{Name: bi.name, Zone: bi.zone, NetworkInterfaces: []*beta.NetworkInterface{{}}}\n\tif bi.aliasRange != \"\" {\n\t\tinst.NetworkInterfaces[0].AliasIpRanges = []*beta.AliasIpRange{\n\t\t\t{IpCidrRange: bi.aliasRange, SubnetworkRangeName: util.TestSecondaryRangeName},\n\t\t}\n\t}\n\treturn inst\n}", "func (e *GT) Base() *GT {\n\tif e.p == nil {\n\t\te.p = &gfP12{}\n\t}\n\te.p.Set(gfP12Gen)\n\treturn e\n}", "func (bil *baseInstanceList) newGAGetHook() func(ctx context.Context, key *meta.Key, m *cloud.MockInstances) (bool, *ga.Instance, error) {\n\treturn func(ctx context.Context, key *meta.Key, m *cloud.MockInstances) (bool, *ga.Instance, error) {\n\t\tm.Lock.Lock()\n\t\tdefer m.Lock.Unlock()\n\n\t\tif _, found := m.Objects[*key]; !found {\n\t\t\tm.Objects[*key] = &cloud.MockInstancesObj{Obj: bil.getOrCreateBaseInstance(key).toGA()}\n\t\t}\n\t\treturn false, nil, nil\n\t}\n}", "func (a *abaImpl) AsGPA() gpa.GPA {\n\treturn a.asGPA\n}", "func CreateGqlDefFromInstance(name string, data interface{}) *graphql.Object {\n\tdataType := reflect.TypeOf(data)\n\treturn CreateGqlDefFromType(name, dataType)\n}", "func ToGObject(p unsafe.Pointer) *C.GObject {\n\treturn (*C.GObject)(p)\n}", "func SomeGraphToJSONable(\n\tinstance *SomeGraph) (\n\ttarget map[string]interface{}, err error) {\n\n\tif instance == nil {\n\t\tpanic(\"unexpected nil instance\")\n\t}\n\n\ttarget = make(map[string]interface{})\n\tdefer func() {\n\t\tif err != nil {\n\t\t\ttarget = nil\n\t\t}\n\t}()\n\t////\n\t// Serialize instance registry of SomeClass\n\t////\n\n\tif len(instance.SomeClasses) > 0 {\n\t\ttargetSomeClasses := make(map[string]interface{})\n\t\tfor id := range instance.SomeClasses {\n\t\t\tsomeClassInstance := instance.SomeClasses[id]\n\n\t\t\tif id != someClassInstance.ID {\n\t\t\t\terr = fmt.Errorf(\n\t\t\t\t\t\"expected the instance of SomeClass to have the ID %s according to the registry, but got: 
%s\",\n\t\t\t\t\tid, someClassInstance.ID)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\ttargetSomeClasses[id] = SomeClassToJSONable(\n\t\t\t\tsomeClassInstance)\n\t\t}\n\n\t\ttarget[\"some_classes\"] = targetSomeClasses\n\t}\n\n\t////\n\t// Serialize instance registry of OtherClass\n\t////\n\n\tif len(instance.OtherClasses) > 0 {\n\t\ttargetOtherClasses := make(map[string]interface{})\n\t\tfor id := range instance.OtherClasses {\n\t\t\totherClassInstance := instance.OtherClasses[id]\n\n\t\t\tif id != otherClassInstance.ID {\n\t\t\t\terr = fmt.Errorf(\n\t\t\t\t\t\"expected the instance of OtherClass to have the ID %s according to the registry, but got: %s\",\n\t\t\t\t\tid, otherClassInstance.ID)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\ttargetOtherClasses[id] = OtherClassToJSONable(\n\t\t\t\totherClassInstance)\n\t\t}\n\n\t\ttarget[\"other_classes\"] = targetOtherClasses\n\t}\n\n\treturn\n}", "func (v *Variant) ToGVariant() *C.GVariant {\n\tif v == nil {\n\t\treturn nil\n\t}\n\treturn v.native()\n}", "func NewGraph(base Base) {\n\n}", "func (s StatsGraph) construct() StatsGraphClass { return &s }", "func (bi *BridgerInfo) pushInstance(ins *zstypes.InsCacheInfo) (uint64, uint64, error) {\n\n\tmeta := dtypes.StFileMeta{\n\t\tAccount: ins.AName,\n\t\tFiletype: \"prometheus\",\n\t\tIid: ins.IName,\n\t\tIsCompressed: true,\n\t\tDTs: ins.DTs,\n\t\tOLabels: ins.OLabels,\n\t}\n\n\tif config.GlCfg.BrdigerNotifyStIngest {\n\t\tfor atomic.LoadInt32(&bi.stingestWorkerReady) == 0 {\n\t\t\tzlog.Info(\"Waiting for stingest req_q worker to be ready\")\n\t\t\ttime.Sleep(5 * time.Second)\n\t\t}\n\t}\n\n\tdmepoch := time.Now().UnixNano() / 1000\n\tdts := strconv.FormatInt(dmepoch, 10)\n\ttmpDir := path.Join(config.GlCfg.StatsReqsDir, ins.AName, strings.Replace(ins.IName, \":\", \"_\", 1), dts)\n\terr := os.MkdirAll(tmpDir, 0755)\n\tif err != nil {\n\t\tzlog.Error(\"Failed to create directory %s: %s\", tmpDir, err)\n\t\treturn uint64(0), uint64(0), err\n\t}\n\ttmpMFpath := filepath.Join(tmpDir, 
dts+\".json\")\n\ttmpDFpath := filepath.Join(tmpDir, dts+\".data.gz\")\n\n\tclblsMap, ntstamps, nbytes, err := bi.writeStatsFile(tmpDFpath, ins)\n\tif err != nil {\n\t\tzlog.Error(\"Failed to write to file %s, err %s\\n\", tmpDFpath, err)\n\t\treturn ntstamps, nbytes, err\n\t}\n\n\tif len(clblsMap) > 0 {\n\t\tmeta.Kvs = make([]dtypes.KvPair, 0, len(clblsMap))\n\t\tfor k, v := range clblsMap {\n\t\t\tmeta.Kvs = append(meta.Kvs, dtypes.KvPair{N: k, V: v})\n\t\t}\n\t}\n\tmjson, err := json.Marshal(meta)\n\tif err != nil {\n\t\tzlog.Error(\"Failed to marshal file metadata %v: %s\", meta, err)\n\t\treturn ntstamps, nbytes, err\n\t}\n\terr = ioutil.WriteFile(tmpMFpath, mjson, 0644)\n\tif err != nil {\n\t\tzlog.Error(\"Failed to write to file %s, err %s\\n\", tmpMFpath, err)\n\t\treturn ntstamps, nbytes, err\n\t}\n\n\t// If configured, send to the next stage for further processing.\n\tif config.GlCfg.BrdigerNotifyStIngest {\n\t\treq := stingestpb.StatsIngestRequest{\n\t\t\tAccount: ins.AName,\n\t\t\tDFpath: tmpDFpath,\n\t\t\tMFpath: tmpMFpath,\n\t\t\tInstanceId: ins.IName,\n\t\t\tType: meta.Filetype,\n\t\t\tDTs: ins.DTs,\n\t\t}\n\n\t\tsReq := &stiReq{tmpDir: tmpDir, req: &req}\n\t\tbi.stiReqCh <- sReq\n\t}\n\n\treturn ntstamps, nbytes, nil\n}", "func newDataInstance(repo datastore.Repo, t *testing.T, name dvid.DataString) *Data {\n\tconfig := dvid.NewConfig()\n\tconfig.SetVersioned(true)\n\tdataservice, err := repo.NewData(labelsT, name, config)\n\tif err != nil {\n\t\tt.Errorf(\"Unable to create labels64 instance %q: %s\\n\", name, err.Error())\n\t}\n\tlabels, ok := dataservice.(*Data)\n\tif !ok {\n\t\tt.Errorf(\"Can't cast labels data service into Data\\n\")\n\t}\n\treturn labels\n}", "func (track *Track) ToDb() interface{} {\n\treturn track.Id\n}", "func OtherClassToJSONable(\n\tinstance *OtherClass) (\n\ttarget map[string]interface{}) {\n\n\tif instance == nil {\n\t\tpanic(\"unexpected nil instance\")\n\t}\n\n\ttarget = make(map[string]interface{})\n\n\t////\n\t// 
Serialize ReferenceSome\n\t////\n\n\ttarget[\"reference_some\"] = instance.ReferenceSome.ID\n\n\t////\n\t// Serialize ArrayOfSomes\n\t////\n\n\tcount0 := len(instance.ArrayOfSomes)\n\tslice0 := instance.ArrayOfSomes\n\ttarget0 := make([]interface{}, count0)\n\tfor i0 := 0; i0 < count0; i0++ {\n\t\ttarget0[i0] = slice0[i0].ID\n\t}\n\ttarget[\"array_of_somes\"] = target0\n\n\t////\n\t// Serialize MapOfSomes\n\t////\n\n\ttarget1 := make(map[string]interface{})\n\tmap1 := instance.MapOfSomes\n\tfor k1, v1 := range map1 {\n\t\ttarget1[k1] = v1.ID\n\t}\n\ttarget[\"map_of_somes\"] = target1\n\n\treturn\n}", "func newdbBasePostgres() dbBaser {\n\tb := new(dbBasePostgres)\n\tb.ins = b\n\treturn b\n}", "func toRecord(cache airtabledb.DB, src Feature, dst interface{}) {\n\tdV := reflect.ValueOf(dst).Elem().FieldByName(\"Fields\")\n\tsV := reflect.ValueOf(src)\n\tcopyFields(cache, sV, dV)\n}", "func GAEResource(ctx context.Context) (*MonitoredResource, error) {\n\t// appengine.IsAppEngine is confusingly false as we're using a custom\n\t// container and building without the appenginevm build constraint.\n\t// Check metadata.OnGCE instead.\n\tif !metadata.OnGCE() {\n\t\treturn nil, fmt.Errorf(\"not running on appengine\")\n\t}\n\tprojID, err := metadata.ProjectID()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn (*MonitoredResource)(&mrpb.MonitoredResource{\n\t\tType: \"gae_instance\",\n\t\tLabels: map[string]string{\n\t\t\t\"project_id\": projID,\n\t\t\t\"module_id\": appengine.ModuleName(ctx),\n\t\t\t\"version_id\": appengine.VersionID(ctx),\n\t\t\t\"instance_id\": appengine.InstanceID(),\n\t\t\t\"location\": appengine.Datacenter(ctx),\n\t\t},\n\t}), nil\n}", "func (e *EncryptedChatRequested) GetGA() (value []byte) {\n\treturn e.GA\n}", "func (self *Graphics) Data() interface{}{\n return self.Object.Get(\"data\")\n}", "func Generational(MakeGenome GenomeMaker) GA {\n\tvar ga = GA{\n\t\tMakeGenome: MakeGenome,\n\t\tTopology: Topology{\n\t\t\tNPopulations: 
2,\n\t\t\tNIndividuals: 50,\n\t\t},\n\t\tModel: ModGenerational{\n\t\t\tSelector: SelTournament{\n\t\t\t\tNParticipants: 3,\n\t\t\t},\n\t\t\tMutRate: 0.5,\n\t\t},\n\t}\n\tga.Initialize()\n\treturn ga\n}", "func (c *converter) ToEntity(in model.AutomaticScenarioAssignment) Entity {\n\treturn Entity{\n\t\tTenantID: in.Tenant,\n\t\tScenario: in.ScenarioName,\n\t\tTargetTenantID: in.TargetTenantID,\n\t}\n}", "func (m *AzureManager) GetAsgForInstance(instance *azureRef) (cloudprovider.NodeGroup, error) {\n\treturn m.asgCache.FindForInstance(instance, m.config.VMType)\n}", "func dataToSg(name string, d *schema.ResourceData) go_thunder.ServiceGroup {\n\t//\tlogger := util.GetLoggerInstance()\n\tvar s go_thunder.ServiceGroup\n\n\tvar sInstance go_thunder.ServiceGroupInstance\n\n\tsInstance.ConnRate = d.Get(\"conn_rate\").(int)\n\tsInstance.ResetOnServerSelectionFail = d.Get(\"reset_on_server_selection_fail\").(int)\n\tsInstance.HealthCheckDisable = d.Get(\"health_check_disable\").(int)\n\tsInstance.Protocol = d.Get(\"protocol\").(string)\n\tsInstance.TrafficReplicationMirrorIPRepl = d.Get(\"traffic_replication_mirror_ip_repl\").(int)\n\tsInstance.ResetPriorityAffinity = d.Get(\"reset_priority_affinity\").(int)\n\tsInstance.MinActiveMember = d.Get(\"min_active_member\").(int)\n\tsInstance.StatsDataAction = d.Get(\"stats_data_action\").(string)\n\tsInstance.TrafficReplicationMirrorDaRepl = d.Get(\"traffic_replication_mirror_da_repl\").(int)\n\tsInstance.TemplatePolicyShared = d.Get(\"template_policy_shared\").(string)\n\tsInstance.RptExtServer = d.Get(\"rpt_ext_server\").(int)\n\tsInstance.TemplatePort = d.Get(\"template_port\").(string)\n\tsInstance.ConnRateGracePeriod = d.Get(\"conn_rate_grace_period\").(int)\n\tsInstance.L4SessionUsageDuration = d.Get(\"l4_session_usage\").(int)\n\tsInstance.UUID = d.Get(\"uuid\").(string)\n\tsInstance.BackupServerEventLog = d.Get(\"backup_server_event_log\").(int)\n\tsInstance.LcMethod = 
d.Get(\"lc_method\").(string)\n\tsInstance.PseudoRoundRobin = d.Get(\"pseudo_round_robin\").(int)\n\tsInstance.SharedPartitionPolicyTemplate = d.Get(\"shared_partition_policy_template\").(int)\n\tsInstance.L4SessionUsageRevertRate = d.Get(\"l4_session_usage_revert_rate\").(int)\n\tsInstance.SharedPartitionSvcgrpHealthCheck = d.Get(\"shared_partition_svcgrp_health_check\").(int)\n\tsInstance.TemplateServer = d.Get(\"template_server\").(string)\n\tsInstance.SvcgrpHealthCheckShared = d.Get(\"svcgrp_health_check_shared\").(string)\n\tsInstance.TrafficReplicationMirror = d.Get(\"traffic_replication_mirror\").(int)\n\tsInstance.L4SessionRevertDuration = d.Get(\"l4_session_revert_duration\").(int)\n\tsInstance.TrafficReplicationMirrorSaDaRepl = d.Get(\"traffic_replication_mirror_sa_da_repl\").(int)\n\tsInstance.LbMethod = d.Get(\"lb_method\").(string)\n\tsInstance.StatelessAutoSwitch = d.Get(\"stateless_auto_switch\").(int)\n\tsInstance.MinActiveMemberAction = d.Get(\"min_active_member_action\").(string)\n\tsInstance.L4SessionUsage = d.Get(\"l4_session_usage\").(int)\n\tsInstance.ExtendedStats = d.Get(\"extended_stats\").(int)\n\tsInstance.ConnRateRevertDuration = d.Get(\"conn_rate_revert_duration\").(int)\n\tsInstance.StrictSelect = d.Get(\"strict_select\").(int)\n\tsInstance.Name = d.Get(\"name\").(string)\n\tsInstance.TrafficReplicationMirrorSaRepl = d.Get(\"traffic_replication_mirror_sa_repl\").(int)\n\tsInstance.ReportDelay = d.Get(\"report_delay\").(int)\n\tsInstance.ConnRateLog = d.Get(\"conn_rate_log\").(int)\n\tsInstance.L4SessionUsageLog = d.Get(\"l4_session_usage_log\").(int)\n\tsInstance.ConnRateDuration = d.Get(\"conn_rate_duration\").(int)\n\tsInstance.StatelessLbMethod = d.Get(\"stateless_lb_method\").(string)\n\tsInstance.TemplatePolicy = d.Get(\"template_policy\").(string)\n\tsInstance.StatelessLbMethod2 = d.Get(\"stateless_lb_method2\").(string)\n\tsInstance.UserTag = d.Get(\"user_tag\").(string)\n\tsInstance.SampleRspTime = 
d.Get(\"sample_rsp_time\").(int)\n\tsInstance.TopFastest = d.Get(\"top_fastest\").(int)\n\tsInstance.ConnRevertRate = d.Get(\"conn_revert_rate\").(int)\n\tsInstance.L4SessionUsageGracePeriod = d.Get(\"l4_session_usage_grace_period\").(int)\n\tsInstance.PriorityAffinity = d.Get(\"priority_affinity\").(int)\n\tsInstance.TopSlowest = d.Get(\"top_slowest\").(int)\n\tsInstance.HealthCheck = d.Get(\"health_check\").(string)\n\n\tpriorityCount := d.Get(\"priorities.#\").(int)\n\tsInstance.Priority = make([]go_thunder.Priorities, 0, priorityCount)\n\tfor i := 0; i < priorityCount; i++ {\n\t\tvar pr go_thunder.Priorities\n\t\tprefix := fmt.Sprintf(\"priorities.%d\", i)\n\t\tpr.Priority = d.Get(prefix + \".priority\").(int)\n\t\tpr.PriorityAction = d.Get(prefix + \".priority_action\").(string)\n\n\t\tsInstance.Priority = append(sInstance.Priority, pr)\n\t}\n\n\tsamplingCount := d.Get(\"sampling_enable.#\").(int)\n\tsInstance.Counters1 = make([]go_thunder.SamplingEnable, 0, samplingCount)\n\tfor i := 0; i < samplingCount; i++ {\n\t\tvar sm go_thunder.SamplingEnable\n\t\tprefix := fmt.Sprintf(\"sampling_enable.%d\", i)\n\t\tsm.Counters1 = d.Get(prefix + \".counters1\").(string)\n\n\t\tsInstance.Counters1 = append(sInstance.Counters1, sm)\n\t}\n\n\t//NEED TO FIGURE OUT IF VALUE IS PROVIDED IN TF FILE OR DEFAULT IS BEING USED\n\t//\tvar as Reset\n\t//\tas.AutoSwitch = d.Get(\"reset.0.auto_switch\").(int)\n\t//\tlogger.Println(\"[INFO] Auto switch is- \", d.Get(\"reset.0.auto_switch\").(int))\n\t//\tsInstance.AutoSwitch = as\n\n\tmemberCount := d.Get(\"member_list.#\").(int)\n\tsInstance.Host = make([]go_thunder.MemberList, 0, memberCount)\n\tfor i := 0; i < memberCount; i++ {\n\t\tvar ml go_thunder.MemberList\n\t\tprefix := fmt.Sprintf(\"member_list.%d\", i)\n\t\tml.FqdnName = d.Get(prefix + \".fqdn_name\").(string)\n\t\tml.Host = d.Get(prefix + \".host\").(string)\n\t\tml.MemberPriority = d.Get(prefix + \".member_priority\").(int)\n\t\tml.MemberState = d.Get(prefix + 
\".member_state\").(string)\n\t\tml.MemberStatsDataDisable = d.Get(prefix + \".member_stats_data_disable\").(int)\n\t\tml.MemberTemplate = d.Get(prefix + \".member_template\").(string)\n\t\tml.Name = d.Get(prefix + \".name\").(string)\n\t\tml.Port = d.Get(prefix + \".port\").(int)\n\t\tml.ResolveAs = d.Get(prefix + \".resolve_as\").(string)\n\t\tml.ServerIpv6Addr = d.Get(prefix + \".server_ipv6_addr\").(string)\n\t\tml.UUID = d.Get(prefix + \".uuid\").(string)\n\t\tml.UserTag = d.Get(prefix + \".user_tag\").(string)\n\n\t\tsampleCount := d.Get(prefix + \".sampling_enable.#\").(int)\n\t\tml.Counters1 = make([]go_thunder.SamplingEnable, sampleCount, sampleCount)\n\n\t\tfor x := 0; x < sampleCount; x++ {\n\t\t\tvar s go_thunder.SamplingEnable\n\t\t\tmapEntity(d.Get(fmt.Sprintf(\"%s.sampling_enable.%d\", prefix, x)).(map[string]interface{}), &s)\n\t\t\tml.Counters1[x] = s\n\t\t}\n\n\t\tsInstance.Host = append(sInstance.Host, ml)\n\t}\n\n\ts.Name = sInstance\n\n\treturn s\n}", "func (ga *GenesisAccount) ToAccount() auth.Account {\n\tbacc := &auth.BaseAccount{\n\t\tAddress: ga.Address,\n\t\tCoins: ga.Coins.Sort(),\n\t\tAccountNumber: ga.AccountNumber,\n\t\tSequence: ga.Sequence,\n\t}\n\n\tif !ga.OriginalVesting.IsZero() {\n\t\tbaseVestingAcc := &auth.BaseVestingAccount{\n\t\t\tBaseAccount: bacc,\n\t\t\tOriginalVesting: ga.OriginalVesting,\n\t\t\tDelegatedFree: ga.DelegatedFree,\n\t\t\tDelegatedVesting: ga.DelegatedVesting,\n\t\t\tEndTime: ga.EndTime,\n\t\t}\n\n\t\tif ga.StartTime != 0 && ga.EndTime != 0 {\n\t\t\treturn &auth.ContinuousVestingAccount{\n\t\t\t\tBaseVestingAccount: baseVestingAcc,\n\t\t\t\tStartTime: ga.StartTime,\n\t\t\t}\n\t\t} else if ga.EndTime != 0 {\n\t\t\treturn &auth.DelayedVestingAccount{\n\t\t\t\tBaseVestingAccount: baseVestingAcc,\n\t\t\t}\n\t\t} else {\n\t\t\tpanic(fmt.Sprintf(\"invalid genesis vesting account: %+v\", ga))\n\t\t}\n\t}\n\n\treturn bacc\n}", "func (ga *GA) Init(pool_size uint, data_size uint, fit_func FitnessFunc, p GA_Params) 
{\n\n rand.Seed(time.Now().UTC().UnixNano())\n data_bytes := (data_size + 7) / 8\n ga.Population = make(GenePool, pool_size)\n // for _, ind := range ga.Population {\n for i := range ga.Population {\n\t// var ind *Individual\n\tind := new(Individual)\n\tind.Data = make([]byte, data_bytes)\n\trandom_word := rand.Uint32()\n\tfor j := range ind.Data {\n\t if (j % 4) == 0 {\n\t\trandom_word = rand.Uint32()\n\t }\n\t ind.Data[j] = byte(random_word & 0xff)\n\t random_word >>= 8\n\t}\n\tga.Population[i] = ind\n }\n ga.Params = p\n ga.data_size = data_size\n ga.Generation = 0\n ga.fit_func = fit_func\n ga.MeasureAndSort()\n ga.Stats_best = make([]float64, 0, 1024)\n ga.Stats_avg = make([]float64, 0, 1024)\n ga.Stats_best = append(ga.Stats_best, ga.Population[0].Fitness)\n ga.Stats_avg = append(ga.Stats_avg, ga.AvgFitness())\n}", "func GetDataBase() *gorm.DB {\n\treturn db\n}", "func (g UGaugeSnapshot) Snapshot() UGauge { return g }", "func (c *converter) ToGraphQL(in model.AutomaticScenarioAssignment, targetTenantExternalID string) graphql.AutomaticScenarioAssignment {\n\treturn graphql.AutomaticScenarioAssignment{\n\t\tScenarioName: in.ScenarioName,\n\t\tSelector: &graphql.Label{\n\t\t\tKey: SubaccountIDKey,\n\t\t\tValue: targetTenantExternalID,\n\t\t},\n\t}\n}", "func (self *Tween) Game() *Game{\n return &Game{self.Object.Get(\"game\")}\n}", "func GetInstance() *CoinBase {\n\treturn ins\n}", "func ToGob(src interface{}) ([]byte, error) {\n\treturn NewGobber().To(src)\n}", "func (self *GameObjectCreator) Game() *Game{\n return &Game{self.Object.Get(\"game\")}\n}", "func (profile *Profile) ToDb() interface{} {\n\treturn profile.Id\n}", "func instancesToProto(insts registry.Instances) []*instances.Instance {\n\tret := make([]*instances.Instance, 0)\n\tfor _, inst := range insts {\n\t\tprotoInst := &instances.Instance{\n\t\t\tInstanceId: proto.String(inst.Id),\n\t\t\tHostname: proto.String(inst.Hostname),\n\t\t\tMachineClass: 
proto.String(inst.MachineClass),\n\t\t\tServiceName: proto.String(inst.Name),\n\t\t\tServiceDescription: proto.String(inst.Description),\n\t\t\tServiceVersion: proto.Uint64(inst.Version),\n\t\t\tAzName: proto.String(inst.AzName),\n\t\t\tSubTopic: make([]string, 0),\n\t\t}\n\t\tfor _, ep := range inst.Endpoints {\n\t\t\tif ep.Subscribe != \"\" {\n\t\t\t\tprotoInst.SubTopic = append(protoInst.SubTopic, ep.Subscribe)\n\t\t\t}\n\t\t}\n\t\tret = append(ret, protoInst)\n\t}\n\treturn ret\n}", "func VFromDB(gid GoogleID) (*VAgent, time.Time, error) {\n\ta := VAgent{\n\t\tGid: gid,\n\t}\n\tvar fetched string\n\tvar t time.Time\n\tvar vlevel, vpoints, distance sql.NullInt64\n\tvar telegram, cellid sql.NullString\n\tvar startlat, startlon sql.NullFloat64\n\n\terr := db.QueryRow(\"SELECT enlid, vlevel, vpoints, agent, level, quarantine, active, blacklisted, verified, flagged, banned, cellid, telegram, startlat, startlon, distance, fetched FROM v WHERE gid = ?\", gid).Scan(&a.EnlID, &vlevel, &vpoints, &a.Agent, &a.Level, &a.Quarantine, &a.Active, &a.Blacklisted, &a.Verified, &a.Flagged, &a.Banned, &cellid, &telegram, &startlat, &startlon, &distance, &fetched)\n\tif err != nil && err != sql.ErrNoRows {\n\t\tlog.Error(err)\n\t\treturn &a, t, err\n\t}\n\n\tif err == sql.ErrNoRows {\n\t\treturn &a, t, nil\n\t}\n\n\tif fetched == \"\" {\n\t\treturn &a, t, nil\n\t}\n\n\tif vlevel.Valid {\n\t\ta.Vlevel = vlevel.Int64\n\t}\n\tif vpoints.Valid {\n\t\ta.Vpoints = vpoints.Int64\n\t}\n\tif telegram.Valid {\n\t\ta.Telegram = telegram.String\n\t}\n\tif cellid.Valid {\n\t\ta.CellID = cellid.String\n\t}\n\tif startlat.Valid {\n\t\ta.StartLat = startlat.Float64\n\t}\n\tif startlon.Valid {\n\t\ta.StartLon = startlon.Float64\n\t}\n\tif distance.Valid {\n\t\ta.Distance = distance.Int64\n\t}\n\n\tt, err = time.ParseInLocation(\"2006-01-02 15:04:05\", fetched, time.UTC)\n\tif err != nil {\n\t\tlog.Error(err)\n\t\t// return &a, t, err\n\t}\n\t// log.Debugw(\"VFromDB\", \"gid\", gid, \"fetched\", 
fetched, \"data\", a)\n\treturn &a, t, nil\n}", "func toGame(data interface{}, isResponse bool) *Game {\n\tif data == nil {\n\t\treturn nil\n\t}\n\n\tif isResponse {\n\t\tdest := gameResponse{}\n\n\t\tif recast(data, &dest) == nil {\n\t\t\treturn &dest.Data\n\t\t}\n\t} else {\n\t\tdest := Game{}\n\n\t\tif recast(data, &dest) == nil {\n\t\t\treturn &dest\n\t\t}\n\t}\n\n\treturn nil\n}", "func gceInfo(inst *instance) error {\n\tvar err error\n\tinst.zone, err = metadata.Zone()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tinst.name, err = metadata.InstanceName()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tinst.hostname, err = metadata.Hostname()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tinst.project, err = metadata.ProjectID()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}", "func SomeClassToJSONable(\n\tinstance *SomeClass) (\n\ttarget map[string]interface{}) {\n\n\tif instance == nil {\n\t\tpanic(\"unexpected nil instance\")\n\t}\n\n\ttarget = make(map[string]interface{})\n\n\t////\n\t// Serialize ReferenceOther\n\t////\n\n\ttarget[\"reference_other\"] = instance.ReferenceOther.ID\n\n\t////\n\t// Serialize ArrayOfOthers\n\t////\n\n\tcount0 := len(instance.ArrayOfOthers)\n\tslice0 := instance.ArrayOfOthers\n\ttarget0 := make([]interface{}, count0)\n\tfor i0 := 0; i0 < count0; i0++ {\n\t\ttarget0[i0] = slice0[i0].ID\n\t}\n\ttarget[\"array_of_others\"] = target0\n\n\t////\n\t// Serialize MapOfOthers\n\t////\n\n\ttarget1 := make(map[string]interface{})\n\tmap1 := instance.MapOfOthers\n\tfor k1, v1 := range map1 {\n\t\ttarget1[k1] = v1.ID\n\t}\n\ttarget[\"map_of_others\"] = target1\n\n\treturn\n}", "func newBase() *base {\n\treturn &base{shared.NewUUID(), time.Now().UTC(), time.Now().UTC(), false/*, shared.NewUUID()*/}\n}", "func ProtoToInstance(p *sqlpb.SqlInstance) *sql.Instance {\n\tobj := &sql.Instance{\n\t\tBackendType: ProtoToSqlInstanceBackendTypeEnum(p.GetBackendType()),\n\t\tConnectionName: dcl.StringOrNil(p.ConnectionName),\n\t\tDatabaseVersion: 
ProtoToSqlInstanceDatabaseVersionEnum(p.GetDatabaseVersion()),\n\t\tEtag: dcl.StringOrNil(p.Etag),\n\t\tGceZone: dcl.StringOrNil(p.GceZone),\n\t\tInstanceType: ProtoToSqlInstanceInstanceTypeEnum(p.GetInstanceType()),\n\t\tMasterInstanceName: dcl.StringOrNil(p.MasterInstanceName),\n\t\tMaxDiskSize: ProtoToSqlInstanceMaxDiskSize(p.GetMaxDiskSize()),\n\t\tName: dcl.StringOrNil(p.Name),\n\t\tProject: dcl.StringOrNil(p.Project),\n\t\tRegion: dcl.StringOrNil(p.Region),\n\t\tRootPassword: dcl.StringOrNil(p.RootPassword),\n\t\tCurrentDiskSize: ProtoToSqlInstanceCurrentDiskSize(p.GetCurrentDiskSize()),\n\t\tDiskEncryptionConfiguration: ProtoToSqlInstanceDiskEncryptionConfiguration(p.GetDiskEncryptionConfiguration()),\n\t\tFailoverReplica: ProtoToSqlInstanceFailoverReplica(p.GetFailoverReplica()),\n\t\tMasterInstance: ProtoToSqlInstanceMasterInstance(p.GetMasterInstance()),\n\t\tReplicaConfiguration: ProtoToSqlInstanceReplicaConfiguration(p.GetReplicaConfiguration()),\n\t\tScheduledMaintenance: ProtoToSqlInstanceScheduledMaintenance(p.GetScheduledMaintenance()),\n\t\tSettings: ProtoToSqlInstanceSettings(p.GetSettings()),\n\t}\n\tfor _, r := range p.GetIpAddresses() {\n\t\tobj.IPAddresses = append(obj.IPAddresses, *ProtoToSqlInstanceIPAddresses(r))\n\t}\n\treturn obj\n}", "func (self *Graphics) Game() *Game{\n return &Game{self.Object.Get(\"game\")}\n}", "func newdbBaseClickHouse() dbBaser {\n\tb := new(dbBaseClickHouse)\n\tb.ins = b\n\treturn b\n}", "func (obj *transaction) Graphbase() graphbases.Transaction {\n\treturn obj.graphbase\n}", "func (s StatsGraphAsync) construct() StatsGraphClass { return &s }", "func ToDomain(gymData *Gym) gyms.Domain {\n\treturn gyms.Domain{\n\t\tID: gymData.ID,\n\t\tName: gymData.Name,\n\t\tAddress: gymData.Address,\n\t\tCreatedAt: gymData.CreatedAt,\n\t\tUpdatedAt: gymData.UpdatedAt,\n\t\tDeletedAt: gymData.DeletedAt,\n\t}\n}", "func Instance() *gorm.DB {\n return database\n}", "func (g *Group) TOML() interface{} {\n\tgtoml := 
&GroupTOML{\n\t\tThreshold: g.Threshold,\n\t}\n\tgtoml.Nodes = make([]*NodeTOML, g.Len())\n\tfor i, n := range g.Nodes {\n\t\tgtoml.Nodes[i] = n.TOML().(*NodeTOML)\n\t}\n\n\tif g.PublicKey != nil {\n\t\tgtoml.PublicKey = g.PublicKey.TOML().(*DistPublicTOML)\n\t}\n\n\tgtoml.ID = g.ID\n\tgtoml.SchemeID = g.Scheme.Name\n\tgtoml.Period = g.Period.String()\n\tgtoml.CatchupPeriod = g.CatchupPeriod.String()\n\tgtoml.GenesisTime = g.GenesisTime\n\tif g.TransitionTime != 0 {\n\t\tgtoml.TransitionTime = g.TransitionTime\n\t}\n\tgtoml.GenesisSeed = hex.EncodeToString(g.GetGenesisSeed())\n\treturn gtoml\n}", "func toGoBGPParameters(obj Protection, protectionID int64) []db_models.GoBgpParameter {\n\tresult := make([]db_models.GoBgpParameter, 0)\n t, _ := obj.(*RTBH)\n\tfor _, target := range t.RtbhTargets() {\n\t\tresult = append(result, db_models.GoBgpParameter{\n\t\t\tProtectionId: protectionID,\n\t\t\tTargetAddress: target})\n\t}\n\n\treturn result\n}", "func populateInstance(instance *spotcluster.Instance,\n\tdroplet provider.InstanceConfig) {\n\tinstance.Spec.InstanceName = droplet.Name\n\tinstance.Spec.RemoteAddress = func() string {\n\t\tif droplet.ExteralIP != \"\" {\n\t\t\treturn droplet.ExteralIP + \":22\"\n\t\t}\n\t\treturn \"\"\n\t}()\n\tinstance.Spec.ExternalIP = droplet.ExteralIP\n\tinstance.Spec.InternalIP = droplet.InternalIP\n\tinstance.Spec.InstanceAvailable = true\n\tinstance.Spec.InstanceReady = droplet.IsRunning\n\tinstance.Spec.NodeAvailable = false\n\tinstance.Finalizers = func() []string {\n\t\treturn []string{controller.InstanceProtectionFinalizer}\n\t}()\n\tinstance.Labels[controller.LabelInstanceID] = droplet.ID\n}", "func (c Config) toInterface() interface{} {\n\treturn c.RgwStorage\n}", "func InstanceToProto(resource *sql.Instance) *sqlpb.SqlInstance {\n\tp := &sqlpb.SqlInstance{\n\t\tBackendType: SqlInstanceBackendTypeEnumToProto(resource.BackendType),\n\t\tConnectionName: dcl.ValueOrEmptyString(resource.ConnectionName),\n\t\tDatabaseVersion: 
SqlInstanceDatabaseVersionEnumToProto(resource.DatabaseVersion),\n\t\tEtag: dcl.ValueOrEmptyString(resource.Etag),\n\t\tGceZone: dcl.ValueOrEmptyString(resource.GceZone),\n\t\tInstanceType: SqlInstanceInstanceTypeEnumToProto(resource.InstanceType),\n\t\tMasterInstanceName: dcl.ValueOrEmptyString(resource.MasterInstanceName),\n\t\tMaxDiskSize: SqlInstanceMaxDiskSizeToProto(resource.MaxDiskSize),\n\t\tName: dcl.ValueOrEmptyString(resource.Name),\n\t\tProject: dcl.ValueOrEmptyString(resource.Project),\n\t\tRegion: dcl.ValueOrEmptyString(resource.Region),\n\t\tRootPassword: dcl.ValueOrEmptyString(resource.RootPassword),\n\t\tCurrentDiskSize: SqlInstanceCurrentDiskSizeToProto(resource.CurrentDiskSize),\n\t\tDiskEncryptionConfiguration: SqlInstanceDiskEncryptionConfigurationToProto(resource.DiskEncryptionConfiguration),\n\t\tFailoverReplica: SqlInstanceFailoverReplicaToProto(resource.FailoverReplica),\n\t\tMasterInstance: SqlInstanceMasterInstanceToProto(resource.MasterInstance),\n\t\tReplicaConfiguration: SqlInstanceReplicaConfigurationToProto(resource.ReplicaConfiguration),\n\t\tScheduledMaintenance: SqlInstanceScheduledMaintenanceToProto(resource.ScheduledMaintenance),\n\t\tSettings: SqlInstanceSettingsToProto(resource.Settings),\n\t}\n\tfor _, r := range resource.IPAddresses {\n\t\tp.IpAddresses = append(p.IpAddresses, SqlInstanceIPAddressesToProto(&r))\n\t}\n\n\treturn p\n}", "func dataToLogging(name string, d *schema.ResourceData) go_thunder.Logging {\n\tvar s go_thunder.Logging\n\n\tvar sInstance go_thunder.LoggingInstance\n\n\tsInstance.PoolShared = d.Get(\"pool_shared\").(string)\n\tsInstance.Name = d.Get(\"name\").(string)\n\tsInstance.Format = d.Get(\"format\").(string)\n\tsInstance.Auto = d.Get(\"auto\").(string)\n\tsInstance.KeepEnd = d.Get(\"keep_end\").(int)\n\tsInstance.LocalLogging = d.Get(\"local_logging\").(int)\n\tsInstance.Mask = d.Get(\"mask\").(string)\n\tsInstance.TemplateTCPProxyShared = 
d.Get(\"template_tcp_proxy_shared\").(string)\n\tsInstance.SharedPartitionTCPProxyTemplate = d.Get(\"shared_partition_tcp_proxy_template\").(int)\n\tsInstance.KeepStart = d.Get(\"keep_start\").(int)\n\tsInstance.ServiceGroup = d.Get(\"service_group\").(string)\n\tsInstance.PcreMask = d.Get(\"pcre_mask\").(string)\n\tsInstance.UserTag = d.Get(\"user_tag\").(string)\n\tsInstance.TCPProxy = d.Get(\"tcp_proxy\").(string)\n\tsInstance.SharedPartitionPool = d.Get(\"shared_partition_pool\").(int)\n\tsInstance.Pool = d.Get(\"pool\").(string)\n\n\ts.Name = sInstance\n\n\treturn s\n}", "func (self *SinglePad) Game() *Game{\n return &Game{self.Object.Get(\"game\")}\n}", "func DrawElementsInstancedBaseVertexBaseInstance(mode uint32, count int32, xtype uint32, indices unsafe.Pointer, instancecount int32, basevertex int32, baseinstance uint32) {\n C.glowDrawElementsInstancedBaseVertexBaseInstance(gpDrawElementsInstancedBaseVertexBaseInstance, (C.GLenum)(mode), (C.GLsizei)(count), (C.GLenum)(xtype), indices, (C.GLsizei)(instancecount), (C.GLint)(basevertex), (C.GLuint)(baseinstance))\n}", "func (reg *Registry) ToReal(logger logr.Logger) (globalregistry.Registry, error) {\n\treturn globalregistry.New(logger, reg)\n}", "func (g FunctionalUGauge) Snapshot() UGauge { return UGaugeSnapshot(g.Value()) }", "func (self *PhysicsP2) Game() *Game{\n return &Game{self.Object.Get(\"game\")}\n}", "func VToDB(a *VAgent) error {\n\tif a.Agent == \"\" {\n\t\treturn nil\n\t}\n\n\tif len(a.Agent) > 15 {\n\t\tlog.Infow(\"bad agent name from V\", \"gid\", a.Gid, \"name\", a.Agent)\n\t}\n\n\t// telegram, startlat, startlon, distance, fetched are not set on the \"trust\" API call.\n\t// use ON DUPLICATE so as to not overwrite apikey or telegram\n\t// TODO: prune fields we will never use or that V never sends\n\t_, err := db.Exec(\"INSERT INTO v (enlid, gid, vlevel, vpoints, agent, level, quarantine, active, blacklisted, verified, flagged, banned, cellid, startlat, startlon, distance, fetched) VALUES 
(?,?,?,?,LEFT(?,15),?,?,?,?,?,?,?,?,?,?,?,UTC_TIMESTAMP()) ON DUPLICATE KEY UPDATE agent=LEFT(?, 15), quarantine=?, blacklisted=?, verified=?, flagged=?, banned=?, fetched=UTC_TIMESTAMP()\",\n\t\ta.EnlID, a.Gid, a.Vlevel, a.Vpoints, a.Agent, a.Level, a.Quarantine, a.Active, a.Blacklisted, a.Verified, a.Flagged, a.Banned, a.CellID, a.StartLat, a.StartLon, a.Distance,\n\t\ta.Agent, a.Quarantine, a.Blacklisted, a.Verified, a.Flagged, a.Banned)\n\tif err != nil {\n\t\tlog.Error(err)\n\t\treturn err\n\t}\n\n\tif a.TelegramID != 0 {\n\t\texisting, err := a.Gid.TelegramID()\n\t\tif err != nil {\n\t\t\tlog.Error(err)\n\t\t\treturn err\n\t\t}\n\t\tif existing == 0 {\n\t\t\terr := a.Gid.SetTelegramID(TelegramID(a.TelegramID), a.Telegram)\n\t\t\tif err != nil {\n\t\t\t\tlog.Error(err)\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\n\treturn nil\n}", "func (i *Interface) Instance() interface{} {\n\treturn i.base.instance\n}", "func (app *adapter) ToGenesis(js []byte) (Genesis, error) {\n\tins := new(genesis)\n\terr := json.Unmarshal(js, ins)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn ins, nil\n}", "func GetInstance() *gorm.DB {\n\treturn DB\n}", "func (v *Vehicle) AsProto() *gtfsrt.FeedEntity {\n\tlat32 := float32(v.Lat)\n\tlon32 := float32(v.Lon)\n\tbearing32 := float32(v.Bearing)\n\ttstamp := uint64(v.TimeObj.Unix())\n\n\treturn &gtfsrt.FeedEntity{\n\t\tId: &v.ID,\n\t\tVehicle: &gtfsrt.VehiclePosition{\n\t\t\tTrip: &gtfsrt.TripDescriptor{TripId: &v.Trip},\n\t\t\tVehicle: &gtfsrt.VehicleDescriptor{Id: &v.ID, Label: &v.SideNumber},\n\t\t\tPosition: &gtfsrt.Position{\n\t\t\t\tLatitude: &lat32,\n\t\t\t\tLongitude: &lon32,\n\t\t\t\tBearing: &bearing32,\n\t\t\t},\n\t\t\tTimestamp: &tstamp,\n\t\t},\n\t}\n}", "func (info PGInfo) ToPGOption() *pg.Options {\n\treturn &pg.Options{\n\t\tApplicationName: \"unit testing\",\n\t\tDatabase: info.Database,\n\t\tUser: info.User,\n\t\tPassword: info.Password,\n\t\tAddr: fmt.Sprintf(\"127.0.0.1:%d\", info.Port),\n\t}\n}", "func (gs 
*GameSpec) toParams() (gsp GameSpecParams) {\n\tgsp = GameSpecParams{\n\t\tTeaser: gs.Description,\n\t\tPace: gs.Pace,\n\t\tNbTurn: gs.Turns,\n\t\tNbAntPerPlayer: gs.AntsPerPlayer,\n\t\tNbPlayer: gs.MaxPlayers,\n\t\tMinimalNbPlayer: gs.MinPlayers,\n\t\tInitialEnergy: gs.InitialEnergy,\n\t\tInitialAcid: gs.InitialAcid,\n\t}\n\n\t// the API requires that the `users` field contain either \"all\" for a\n\t// public game or a comma-separated list of usernames if it's private.\n\tif gs.Public {\n\t\tgsp.Users = \"all\"\n\t} else {\n\t\tgsp.Users = strings.Join(gs.Players, \",\")\n\t}\n\n\treturn\n}", "func GenerateGBfromproto(record *bioproto.Genbank) string {\n\tvar stringbuffer bytes.Buffer\n\n\tstringbuffer.WriteString(generateHeaderString(record))\n\tstringbuffer.WriteString(\"FEATURES Location/Qualifiers\\n\")\n\tstringbuffer.WriteString(generateQualifierString(record))\n\tif record.FEATURES != nil {\n\n\t}\n\tif record.CONTIG != \"\" {\n\t\tstringbuffer.WriteString(\"CONTIG \" + record.CONTIG + \"\\n\")\n\t}\n\tstringbuffer.WriteString(\"//\\n\")\n\treturn stringbuffer.String()\n}", "func (g *Generation) RunGenerationStatistics() (result GenerationResult) {\n\n\tcorrelation := stat.Correlation(g.AntagonistAvgFitnessValuesOfEveryIndividual,\n\t\tg.ProtagonistAvgFitnessOfEveryIndividual, nil)\n\tcovariance := stat.Covariance(g.AntagonistAvgFitnessValuesOfEveryIndividual,\n\t\tg.ProtagonistAvgFitnessOfEveryIndividual, nil)\n\n\tantMean, antStd := stat.MeanStdDev(g.AntagonistAvgFitnessValuesOfEveryIndividual, nil)\n\tproMean, proStd := stat.MeanStdDev(g.ProtagonistAvgFitnessOfEveryIndividual, nil)\n\n\tantVar := stat.Variance(g.AntagonistAvgFitnessValuesOfEveryIndividual, nil)\n\tproVar := stat.Variance(g.ProtagonistAvgFitnessOfEveryIndividual, nil)\n\n\tresult.AllAntagonistAverageFitness = antMean\n\tresult.AntagonistStdDev = antStd\n\tresult.AntagonistVariance = antVar\n\tresult.AllProtagonistAverageFitness = proMean\n\tresult.ProtagonistStdDev = 
proStd\n\tresult.ProtagonistVariance = proVar\n\tresult.Correlation = correlation\n\tresult.Covariance = covariance\n\tresult.AntagonistAvgAge = g.AntagonistsAvgAge\n\tresult.AntagonistAvgBirthGen = g.AntagonistsAvgBirthGen\n\tresult.ProtagonistAvgAge = g.ProtagonistsAvgAge\n\tresult.ProtagonistAvgBirthGen = g.ProtagonistsAvgBirthGen\n\n\tresult.BestAntagonist = g.BestAntagonist()\n\tresult.BestProtagonist = g.BestProtagonist()\n\n\t//statsString := result.ToString()\n\n\t//g.Parameters.LoggingChan <- evolog.Logger{Timestamp: time.Now(), Type: evolog.LoggerGeneration, Message: statsString}\n\n\treturn result\n}", "func GetG() *G {\n\treturn (*G)(getg())\n}", "func NewBgpConfiguration()(*BgpConfiguration) {\n m := &BgpConfiguration{\n }\n m.backingStore = ie8677ce2c7e1b4c22e9c3827ecd078d41185424dd9eeb92b7d971ed2d49a392e.BackingStoreFactoryInstance();\n m.SetAdditionalData(make(map[string]any))\n return m\n}", "func (self *PhysicsP2) ToJSON() interface{}{\n return self.Object.Call(\"toJSON\")\n}", "func ToGeo(h H3Index) GeoCoord {\n\tg := C.GeoCoord{}\n\tC.h3ToGeo(h, &g)\n\treturn geoCoordFromC(g)\n}", "func ToGeo(h H3Index) GeoCoord {\n\tg := C.GeoCoord{}\n\tC.h3ToGeo(h, &g)\n\treturn geoCoordFromC(g)\n}", "func SomeGraphToJSONable(\n\tinstance *SomeGraph) (\n\ttarget map[string]interface{}, err error) {\n\n\tif instance == nil {\n\t\tpanic(\"unexpected nil instance\")\n\t}\n\n\ttarget = make(map[string]interface{})\n\tdefer func() {\n\t\tif err != nil {\n\t\t\ttarget = nil\n\t\t}\n\t}()\n\t////\n\t// Serialize SomeProperty\n\t////\n\n\ttarget[\"some_property\"] = WithOptionalToJSONable(\n\t\t&instance.SomeProperty)\n\n\treturn\n}", "func TestGetData(t *testing.T) {\n\n\tgaTemp := new(GAData)\n\n\t// initialise GAData object\n\tgaTemp.Init()\n\n\ttestRequest := GaRequest{\"ga:23949588\",\n\t\t\"2014-01-01\",\n\t\t\"2014-01-02\",\n\t\t\"ga:visits\",\n\t\t\"ga:day\",\n\t\t\"\",\n\t\t\"\",\n\t\t\"\",\n\t\t100,\n\t\t5}\n\n\tresult := gaTemp.GetData(1, 
&testRequest)\n\tlog.Println(result)\n}", "func toPgOptions(config DbConfig) *pg.Options {\r\n\treturn &pg.Options{\r\n\t\tAddr: fmt.Sprintf(\"%s:%s\", config.Host, config.Port),\r\n\t\tUser: config.UserName,\r\n\t\tPassword: config.UserPassword,\r\n\t\tDatabase: config.DbName,\r\n\t\tApplicationName: AppName,\r\n\t\tReadTimeout: ReadTimeout,\r\n\t\tWriteTimeout: WriteTimeout,\r\n\t\tPoolSize: PoolSize,\r\n\t\tMinIdleConns: MinIdleConns,\r\n\t}\r\n}", "func (_m *gqlAssetConverter) ToGQL(in *v1beta1.Asset) (*gqlschema.Asset, error) {\n\tvar r0 *gqlschema.Asset\n\tvar r1 error\n\tr1 = _m.err\n\n\treturn r0, r1\n}", "func (client AccessGovernanceCPClient) getGovernanceInstance(ctx context.Context, request common.OCIRequest, binaryReqBody *common.OCIReadSeekCloser, extraHeaders map[string]string) (common.OCIResponse, error) {\n\n\thttpRequest, err := request.HTTPRequest(http.MethodGet, \"/governanceInstances/{governanceInstanceId}\", binaryReqBody, extraHeaders)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar response GetGovernanceInstanceResponse\n\tvar httpResponse *http.Response\n\thttpResponse, err = client.Call(ctx, &httpRequest)\n\tdefer common.CloseBodyIfValid(httpResponse)\n\tresponse.RawResponse = httpResponse\n\tif err != nil {\n\t\tapiReferenceLink := \"https://docs.oracle.com/iaas/api/#/en/access-governance-cp/20220518/GovernanceInstance/GetGovernanceInstance\"\n\t\terr = common.PostProcessServiceError(err, \"AccessGovernanceCP\", \"GetGovernanceInstance\", apiReferenceLink)\n\t\treturn response, err\n\t}\n\n\terr = common.UnmarshalResponse(httpResponse, &response)\n\treturn response, err\n}", "func logInstance(authRequest *authorization.HandleAuthorizationRequest) {\n\tinstance := authRequest.Instance\n\n\tif false {\n\t\tlog.Println(\"sourceAddress:\", instance.Subject.Properties[\"sourceAddress\"].GetStringValue())\n\t\tlog.Println(\"sourceName:\", instance.Subject.Properties[\"sourceName\"].GetStringValue())\n\t\tlog.Println(\"sourceUid:\", 
instance.Subject.Properties[\"sourceUid\"].GetStringValue())\n\t\tlog.Println(\"sourceNamespace:\", instance.Subject.Properties[\"sourceNamespace\"].GetStringValue())\n\t\tlog.Println(\"sourceVersion:\", instance.Subject.Properties[\"sourceVersion\"].GetStringValue())\n\t\tlog.Println(\"sourcePrincipal:\", instance.Subject.Properties[\"sourcePrincipal\"].GetStringValue())\n\t\tlog.Println(\"sourceOwner:\", instance.Subject.Properties[\"sourceOwnern\"].GetStringValue())\n\t\tlog.Println(\"sourceWorkloadUid:\", instance.Subject.Properties[\"sourceWorkloadUid\"].GetStringValue())\n\t\tlog.Println(\"sourceWorkloadName:\", instance.Subject.Properties[\"sourceWorkloadName\"].GetStringValue())\n\t\tlog.Println(\"sourceWorkloadNamespace:\", instance.Subject.Properties[\"sourceWorkloadNamespace\"].GetStringValue())\n\n\t\tlog.Println(\"instance.Action.Namespace:\", instance.Action.Namespace)\n\t\tlog.Println(\"instance.Action.Service:\", instance.Action.Service)\n\t\tlog.Println(\"instance.Action.Method:\", instance.Action.Method)\n\t\tlog.Println(\"instance.Action.Path:\", instance.Action.Path)\n\n\t\tlog.Println(\"protocol:\", instance.Action.Properties[\"protocol\"].GetStringValue())\n\t\tlog.Println(\"destinationAddress:\", instance.Action.Properties[\"destinationAddress\"].GetStringValue())\n\t\tlog.Println(\"destinationName:\", instance.Action.Properties[\"destinationName\"].GetStringValue())\n\t\tlog.Println(\"destinationUid:\", instance.Action.Properties[\"destinationUid\"].GetStringValue())\n\t\tlog.Println(\"destinationNamespace:\", instance.Action.Properties[\"destinationNamespace\"].GetStringValue())\n\t\tlog.Println(\"destinationVersion:\", instance.Action.Properties[\"destinationVersion\"].GetStringValue())\n\n\t\tlog.Println(\"destinationWorkloadUid:\", instance.Action.Properties[\"destinationWorkloadUid\"].GetStringValue())\n\t\tlog.Println(\"destinationWorkloadName:\", 
instance.Action.Properties[\"destinationWorkloadName\"].GetStringValue())\n\t\tlog.Println(\"destinationWorkloadNamespace:\", instance.Action.Properties[\"destinationWorkloadNamespace\"].GetStringValue())\n\t}\n\tlog.Println(\"-------------------------------------------\")\n\tlog.Println(instance)\n\tlog.Println(\"-------------------------------------------\")\n}", "func (pn *paxosNode) getInstance(key string) *paxosKeyData {\n\tpxi, ok := pn.instances[key]\n\tif !ok {\n\t\tpxi = &paxosKeyData{\n\t\t\tMyn: 0,\n\t\t\tNa: -1,\n\t\t\tNh: 0,\n\t\t\tVa: nil,\n\t\t\tmu: &sync.RWMutex{},\n\t\t\tCommittedVal: nil,\n\t\t\tstoreLock: &sync.RWMutex{},\n\t\t\tproposeLock: &sync.RWMutex{},\n\t\t}\n\t\tpn.instances[key] = pxi\n\t}\n\treturn pxi\n}", "func GetAPIGeneralInformation() *GeneralInformation {\n timeNow := time.Now()\n timeZone, _ := timeNow.Zone()\n timeFormat := \"2006-01-02 15:04:05\"\n return &GeneralInformation{\n ServerTime: ServerTime{\n Exact: timeNow.Unix(),\n Nice: timeNow.Format(timeFormat),\n Timezone: timeZone,\n },\n }\n}", "func (app *adapter) ToJSON(genesis Genesis) ([]byte, error) {\n\treturn json.Marshal(genesis)\n}", "func (gene *Gene) Copy() *Gene {\n\treturn &Gene{\n\t\tgene.A,\n\t\tgene.B,\n\t\tgene.C,\n\t\tgene.F,\n\t\tgene.Format,\n\t}\n}", "func GetInstance() *gorm.DB {\n\tonce.Do(func() {\n\t\t// refer https://github.com/go-sql-driver/mysql#dsn-data-source-name for details\n\t\tuser := viper.GetString(\"database.user\")\n\t\tpassword := viper.GetString(\"database.password\")\n\t\thost := viper.GetString(\"database.host\")\n\t\tport := viper.GetString(\"database.port\")\n\t\tdbname := viper.GetString(\"database.dbname\")\n\n\t\tdsn := fmt.Sprintf(\"%s:%s@tcp(%s:%s)/%s\", user, password, host, port, dbname)\n\t\tdb, err := gorm.Open(mysql.Open(dsn), &gorm.Config{})\n\t\tdba = db\n\t\tif err != nil {\n\t\t\tlog.Panic().Msgf(\"Error connecting to the database at %s:%s/%s\", host, port, dbname)\n\t\t}\n\t\tsqlDB, err := dba.DB()\n\t\tif err != nil 
{\n\t\t\tlog.Panic().Msgf(\"Error getting GORM DB definition\")\n\t\t}\n\t\tsqlDB.SetMaxIdleConns(10)\n\t\tsqlDB.SetMaxOpenConns(100)\n\n\t\tlog.Info().Msgf(\"Successfully established connection to %s:%s/%s\", host, port, dbname)\n\t})\n\treturn dba\n}", "func DrawArraysInstancedBaseInstance(mode uint32, first int32, count int32, instancecount int32, baseinstance uint32) {\n C.glowDrawArraysInstancedBaseInstance(gpDrawArraysInstancedBaseInstance, (C.GLenum)(mode), (C.GLint)(first), (C.GLsizei)(count), (C.GLsizei)(instancecount), (C.GLuint)(baseinstance))\n}", "func (self *TileSprite) Game() *Game{\n return &Game{self.Object.Get(\"game\")}\n}", "func (bil *baseInstanceList) getOrCreateBaseInstance(key *meta.Key) *baseInstance {\n\tbil.lock.Lock()\n\tdefer bil.lock.Unlock()\n\n\tinst, found := bil.instances[*key]\n\tif !found {\n\t\tinst = &baseInstance{name: key.Name, zone: key.Zone}\n\t\tif bil.allocateCIDR {\n\t\t\tnextRange, _ := bil.cidrSet.AllocateNext()\n\t\t\tinst.aliasRange = nextRange.String()\n\t\t}\n\t\tbil.instances[*key] = inst\n\t}\n\treturn inst\n}", "func getInstance() *KeyGen {\n\tonce.Do(\n\t\tfunc() {\n\t\t\tkeygen = new(KeyGen)\n\t\t\tkeygen.random = rand.New(rand.NewSource(time.Now().Unix()))\n\n\t\t})\n\treturn keygen\n}", "func (s *Superhero) ToMap() map[string]interface{} {\n\tdata := map[string]interface{}{\n\t\t\"id\": s.ID,\n\t\t\"affiliation_id\": s.AffiliationID,\n\t\t\"name\": s.Name,\n\t\t\"life\": s.Life,\n\t\t\"energy\": s.Energy,\n\t\t\"powers\": make([]interface{}, len(s.Powers)),\n\t}\n\n\tfor i, p := range s.Powers {\n\t\tdata[\"powers\"].([]interface{})[i] = p.ToMap()\n\t}\n\n\treturn data\n}", "func GetInstance() Proxy {\n\tonce.Do(func() {\n\t\tinstance = &proxy{\n\t\t\tproxy: &apiconfigv1.Proxy{},\n\t\t\tlock: sync.Mutex{},\n\t\t}\n\t})\n\treturn instance\n}", "func (p *Plugin) As(entity string) *PluginDB {\n\n\tmethod, host, err := run.GetEndpoint(p.Meta.DataDir, p.Meta.Config.GetAPI())\n\tif err != nil 
{\n\t\tlogrus.Panicf(\"Got an error parsing config API %v at %s\", err, dbutil.MiniStack(0))\n\t}\n\n\tc := http.Client{\n\t\tTimeout: time.Duration(5 * time.Second),\n\t}\n\tif method == \"unix\" {\n\t\tc.Transport = &http.Transport{\n\t\t\tDialContext: func(_ context.Context, _, _ string) (net.Conn, error) {\n\t\t\t\treturn net.Dial(\"unix\", host)\n\t\t\t},\n\t\t}\n\t}\n\n\treturn &PluginDB{\n\t\tP: p,\n\t\thost: host,\n\t\tclient: c,\n\t\tEntity: entity,\n\t}\n}", "func (v *Variant) ToVariant() *Variant {\n\treturn v\n}", "func (client AccessGovernanceCPClient) createGovernanceInstance(ctx context.Context, request common.OCIRequest, binaryReqBody *common.OCIReadSeekCloser, extraHeaders map[string]string) (common.OCIResponse, error) {\n\n\thttpRequest, err := request.HTTPRequest(http.MethodPost, \"/governanceInstances\", binaryReqBody, extraHeaders)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar response CreateGovernanceInstanceResponse\n\tvar httpResponse *http.Response\n\thttpResponse, err = client.Call(ctx, &httpRequest)\n\tdefer common.CloseBodyIfValid(httpResponse)\n\tresponse.RawResponse = httpResponse\n\tif err != nil {\n\t\tapiReferenceLink := \"https://docs.oracle.com/iaas/api/#/en/access-governance-cp/20220518/GovernanceInstance/CreateGovernanceInstance\"\n\t\terr = common.PostProcessServiceError(err, \"AccessGovernanceCP\", \"CreateGovernanceInstance\", apiReferenceLink)\n\t\treturn response, err\n\t}\n\n\terr = common.UnmarshalResponse(httpResponse, &response)\n\treturn response, err\n}", "func GetPromoType() *graphql.Object{\n if promoType == nil{\n promoType = graphql.NewObject(graphql.ObjectConfig{\n Name: \"promoType\",\n Fields: graphql.Fields{\n \"id\" : &graphql.Field{\n Type:graphql.Int,\n },\n \"name\": &graphql.Field{\n Type:graphql.String,\n },\n \"promoPrice\": &graphql.Field{\n Type:graphql.Int,\n },\n \"promoCode\": &graphql.Field{\n Type: graphql.String,\n },\n \"availableUntil\": &graphql.Field{\n Type:graphql.DateTime,\n },\n 
\"platform\": &graphql.Field{\n Type:graphql.String,\n },\n \"promoFor\": &graphql.Field{\n Type:graphql.String,\n },\n \"description\": &graphql.Field{\n Type:graphql.String,\n },\n \"image\": &graphql.Field{\n Type:graphql.String,\n },\n\n },\n })\n }\n return promoType\n}", "func GetGaID() string {\n\t//Change This to Your Google Analytics ID\n\tconst gaID = \"UA-51746203-1\"\n\treturn gaID\n}", "func (a *API) GetDataFor7Days() (GA, error) {\n\tclient, err := a.GetOAuthClient()\n\tif err != nil {\n\t\treturn GA{}, err\n\t}\n\n\tanalyticsService, err := analytics.New(client)\n\tif err != nil {\n\t\treturn GA{}, err\n\t}\n\n\tgaData := analyticsService.Data.Ga.Get(a.ViewID, \"7daysAgo\", \"yesterday\",\n\t\t\"ga:users, ga:impressions, ga:adClicks, ga:organicSearches\")\n\tgaData.Dimensions(\"ga:day\")\n\tgaData.SamplingLevel(\"HIGHER_PRECISION\")\n\tgaData.Output(\"json\")\n\n\td, err := gaData.Do()\n\tif err != nil {\n\t\treturn GA{}, err\n\t}\n\n\tbytes, err := d.MarshalJSON()\n\tif err != nil {\n\t\treturn GA{}, err\n\t}\n\n\tgaDatas := []GAData{}\n\n\tfor _, value := range d.Rows {\n\t\tgaData := GAData{}\n\t\tgaData.Date = value[0]\n\t\tgaData.Users = value[1]\n\t\tgaData.AdImpressions = value[2]\n\t\tgaData.AdClicks = value[3]\n\t\tgaData.OrganicSearches = value[4]\n\t\tgaDatas = append(gaDatas, gaData)\n\t}\n\n\tga := GA{}\n\tga.GADatas = gaDatas\n\tga.JSON = string(bytes)\n\tga.URL = a.URL\n\n\treturn ga, nil\n}", "func (rt *resourceTracking) GetAppInstance(un *unstructured.Unstructured, key string, trackingMethod v1alpha1.TrackingMethod) *AppInstanceValue {\n\tswitch trackingMethod {\n\tcase TrackingMethodAnnotation, TrackingMethodAnnotationAndLabel:\n\t\treturn rt.getAppInstanceValue(un, key, trackingMethod)\n\tdefault:\n\t\treturn nil\n\t}\n}", "func (self *TileSprite) Data() interface{}{\n return self.Object.Get(\"data\")\n}", "func (l *LogEntry) addInstance(i *guardduty.InstanceDetails) {\n\tl.InstanceAz = 
aws.StringValue(i.AvailabilityZone)\n\tl.InstanceDesc = aws.StringValue(i.ImageDescription)\n\tl.InstanceImageId = aws.StringValue(i.ImageId)\n\tl.InstanceId = aws.StringValue(i.InstanceId)\n\tl.InstanceState = aws.StringValue(i.InstanceState)\n\tl.InstanceType = aws.StringValue(i.InstanceType)\n\tl.InstanceLaunchTime = aws.StringValue(i.LaunchTime)\n\tl.InstanceTags = make(map[string]string)\n\tfor _, t := range i.Tags {\n\t\tl.InstanceTags[aws.StringValue(t.Key)] = aws.StringValue(t.Value)\n\t}\n\t// build list of public, private IP's and subnets:\n\tl.InstanceSg = make(map[string]string)\n\tprv := make(map[string]bool)\n\tpub := make(map[string]bool)\n\tsub := make(map[string]bool)\n\tfor _, in := range i.NetworkInterfaces {\n\t\tprv[aws.StringValue(in.PrivateIpAddress)] = true\n\t\tfor _, ip := range in.PrivateIpAddresses {\n\t\t\tprv[aws.StringValue(ip.PrivateIpAddress)] = true\n\t\t}\n\t\tpub[aws.StringValue(in.PublicIp)] = true\n\t\tsub[aws.StringValue(in.SubnetId)] = true\n\t\tl.InstanceVpc = aws.StringValue(in.VpcId)\n\t\tfor _, sg := range in.SecurityGroups {\n\t\t\tl.InstanceSg[aws.StringValue(sg.GroupId)] = aws.StringValue(sg.GroupName)\n\t\t}\n\t}\n\tfor k := range prv {\n\t\tl.InstancePrivateIp = append(l.InstancePrivateIp, k)\n\t}\n\tfor k := range pub {\n\t\tl.InstancePublicIp = append(l.InstancePublicIp, k)\n\t}\n\tfor k := range sub {\n\t\tl.InstanceSubnet = append(l.InstanceSubnet, k)\n\t}\n}", "func (r GetInstanceMetricDataRequest) Send(ctx context.Context) (*GetInstanceMetricDataResponse, error) {\n\tr.Request.SetContext(ctx)\n\terr := r.Request.Send()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tresp := &GetInstanceMetricDataResponse{\n\t\tGetInstanceMetricDataOutput: r.Request.Data.(*GetInstanceMetricDataOutput),\n\t\tresponse: &aws.Response{Request: r.Request},\n\t}\n\n\treturn resp, nil\n}", "func (e *Engine) GT() *GT {\n\treturn NewGT()\n}", "func QuotaInstanceGT(v int) predicate.User {\n\treturn predicate.User(func(s *sql.Selector) 
{\n\t\ts.Where(sql.GT(s.C(FieldQuotaInstance), v))\n\t})\n}" ]
[ "0.58498", "0.55195946", "0.5480924", "0.5227962", "0.50450355", "0.4820873", "0.48025364", "0.47862035", "0.4779465", "0.47661734", "0.47625732", "0.4697985", "0.46975428", "0.46894577", "0.46771544", "0.46434072", "0.46340284", "0.46209815", "0.46189263", "0.46185815", "0.46124786", "0.4596707", "0.4586061", "0.45847437", "0.45761085", "0.45670113", "0.45549384", "0.45388564", "0.45150253", "0.44999447", "0.4492385", "0.44681507", "0.44614404", "0.44425994", "0.44345447", "0.44322133", "0.4427989", "0.442248", "0.44191808", "0.44168016", "0.44137347", "0.44083557", "0.4404175", "0.44040695", "0.43985415", "0.4381498", "0.43733117", "0.4373088", "0.43536532", "0.43507817", "0.4347556", "0.43418857", "0.43347573", "0.43254834", "0.43233737", "0.43210745", "0.43190953", "0.4318587", "0.43184066", "0.431807", "0.43176547", "0.43174812", "0.43155617", "0.4315423", "0.43029207", "0.42935088", "0.42932442", "0.42913967", "0.4284365", "0.42817423", "0.42817423", "0.42795593", "0.4279244", "0.4270787", "0.4267292", "0.42612037", "0.42603227", "0.42529383", "0.42514777", "0.42492622", "0.42407325", "0.42388916", "0.42315164", "0.4230544", "0.42213994", "0.42186654", "0.4209816", "0.4200521", "0.41979453", "0.4197203", "0.41915935", "0.4185646", "0.41816333", "0.41811565", "0.41781718", "0.41745213", "0.41724864", "0.41668177", "0.41650268", "0.4163206" ]
0.8056762
0
newBaseInstanceList is the baseInstanceList constructor
newBaseInstanceList — это конструктор baseInstanceList
func newBaseInstanceList(allocateCIDR bool, clusterCIDR *net.IPNet, subnetMaskSize int) *baseInstanceList { cidrSet, _ := cidrset.NewCIDRSet(clusterCIDR, subnetMaskSize) return &baseInstanceList{ allocateCIDR: allocateCIDR, clusterCIDR: clusterCIDR, subnetMaskSize: subnetMaskSize, cidrSet: cidrSet, instances: make(map[meta.Key]*baseInstance), } }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func newBase() *base {\n\treturn &base{shared.NewUUID(), time.Now().UTC(), time.Now().UTC(), false/*, shared.NewUUID()*/}\n}", "func newSubInstance(parent *BaseInstance, name string) *BaseInstance {\n\tsi := parent.module.subinstance\n\tif si == nil {\n\t\tsi = make(map[string][]*BaseInstance)\n\t\tparent.module.subinstance = si\n\t}\n\n\tbi := &BaseInstance{\n\t\tname: name,\n\t\tinput: parent.input,\n\t\tinput2: parent.input2,\n\t\trules: newRules(),\n\t\tenabled: false,\n\t\tsubinstance: true,\n\t\tinstance: parent.instance,\n\t\tmodule: parent.module,\n\t}\n\n\tsi[parent.name] = append(si[parent.name], bi)\n\treturn bi\n}", "func newList(ctx TransactionContextInterface) *list {\n\t stateList := new(ledgerapi.StateList)\n\t stateList.Ctx = ctx\n\t stateList.Class = \"Asset\"\n\t stateList.Deserialize = func(bytes []byte, state ledgerapi.StateInterface) error {\n\t\t return Deserialize(bytes, state.(*Asset))\n\t }\n \n\t list := new(list)\n\t list.stateList = stateList\n \n\t return list\n }", "func newBaseRuntime(erp *ECALRuntimeProvider, node *parser.ASTNode) *baseRuntime {\n\tinstanceCounter++\n\treturn &baseRuntime{fmt.Sprint(instanceCounter), erp, node, false}\n}", "func New() *List { return new(List).Init() }", "func newSubInstance(parent *BaseInstance, name string) *BaseInstance {\n\tbi := &BaseInstance{\n\t\tname: name,\n\t\tinput: parent.input,\n\t\tinput2: parent.input2,\n\t\trules: newRules(),\n\t\tenabled: false,\n\t\tinstance: parent.instance,\n\t}\n\treturn bi\n}", "func newIDList(p *idElementPool) *idList {\n\tl := &idList{Pool: p}\n\treturn l.Init()\n}", "func newList(data interface{}) *List {\n\tnewL := new(List)\n\tnewL.Insert(data)\n\treturn newL\n}", "func ListBase(base uint32) {\n\tsyscall.Syscall(gpListBase, 1, uintptr(base), 0, 0)\n}", "func DrawElementsInstancedBaseVertexBaseInstance(mode uint32, count int32, xtype uint32, indices unsafe.Pointer, instancecount int32, basevertex int32, baseinstance uint32) {\n 
C.glowDrawElementsInstancedBaseVertexBaseInstance(gpDrawElementsInstancedBaseVertexBaseInstance, (C.GLenum)(mode), (C.GLsizei)(count), (C.GLenum)(xtype), indices, (C.GLsizei)(instancecount), (C.GLint)(basevertex), (C.GLuint)(baseinstance))\n}", "func newList(vert bool, width, height float32) *List {\n\n\tli := new(List)\n\tli.initialize(vert, width, height)\n\treturn li\n}", "func newList(rowType reflect.Type) []*Info {\n\tvar list columnList\n\tvar state = stateT{}\n\tlist.addFields(rowType, state)\n\treturn list\n}", "func ListBase(base uint32) {\n\tC.glowListBase(gpListBase, (C.GLuint)(base))\n}", "func newList() *List {\n\tl := &List{\n\t\tch: make(chan sh.QData),\n\t}\n\treturn l\n}", "func NewList() *List {\n newObj := &List {\n counters : make(map[string]Counter),\n }\n\n return newObj\n}", "func ListBase(base uint32) {\n C.glowListBase(gpListBase, (C.GLuint)(base))\n}", "func DrawElementsInstancedBaseVertexBaseInstance(mode uint32, count int32, xtype uint32, indices unsafe.Pointer, instancecount int32, basevertex int32, baseinstance uint32) {\n\tC.glowDrawElementsInstancedBaseVertexBaseInstance(gpDrawElementsInstancedBaseVertexBaseInstance, (C.GLenum)(mode), (C.GLsizei)(count), (C.GLenum)(xtype), indices, (C.GLsizei)(instancecount), (C.GLint)(basevertex), (C.GLuint)(baseinstance))\n}", "func DrawElementsInstancedBaseVertexBaseInstance(mode uint32, count int32, xtype uint32, indices unsafe.Pointer, instancecount int32, basevertex int32, baseinstance uint32) {\n\tC.glowDrawElementsInstancedBaseVertexBaseInstance(gpDrawElementsInstancedBaseVertexBaseInstance, (C.GLenum)(mode), (C.GLsizei)(count), (C.GLenum)(xtype), indices, (C.GLsizei)(instancecount), (C.GLint)(basevertex), (C.GLuint)(baseinstance))\n}", "func (s *BasevhdlListener) EnterInstantiation_list(ctx *Instantiation_listContext) {}", "func New() *List {\n return &List{size:0}\n}", "func DrawElementsInstancedBaseInstance(mode uint32, count int32, xtype uint32, indices unsafe.Pointer, instancecount 
int32, baseinstance uint32) {\n C.glowDrawElementsInstancedBaseInstance(gpDrawElementsInstancedBaseInstance, (C.GLenum)(mode), (C.GLsizei)(count), (C.GLenum)(xtype), indices, (C.GLsizei)(instancecount), (C.GLuint)(baseinstance))\n}", "func NewBase() Base {\r\n\treturn Base{\r\n\t\tActive: \"\",\r\n\t\tTitle: \"Lemonade Stand Supply\",\r\n\t}\r\n}", "func newListProcessor(ctx context.Context, dynamicClient dynamic.Interface, workerFn workerFunc) *listProcessor {\n\treturn &listProcessor{\n\t\tconcurrency: defaultConcurrency,\n\t\tworkerFn: workerFn,\n\t\tdynamicClient: dynamicClient,\n\t\tctx: ctx,\n\t}\n}", "func DrawElementsInstancedBaseVertexBaseInstance(mode uint32, count int32, xtype uint32, indices unsafe.Pointer, instancecount int32, basevertex int32, baseinstance uint32) {\n\tsyscall.Syscall9(gpDrawElementsInstancedBaseVertexBaseInstance, 7, uintptr(mode), uintptr(count), uintptr(xtype), uintptr(indices), uintptr(instancecount), uintptr(basevertex), uintptr(baseinstance), 0, 0)\n}", "func newListFormulaArg(l []formulaArg) formulaArg {\n\treturn formulaArg{Type: ArgList, List: l}\n}", "func (bil *baseInstanceList) newMockCloud() cloud.Cloud {\n\tc := cloud.NewMockGCE(nil)\n\n\t// insert hooks to lazy create a instance when needed\n\tc.MockInstances.GetHook = bil.newGAGetHook()\n\tc.MockBetaInstances.GetHook = bil.newBetaGetHook()\n\n\treturn c\n}", "func newProcBase(name, bin, serviceAddr string, loggers []Logger) *procBase {\n\tlog.Infof(\"%s has addr %s\", name, serviceAddr)\n\treturn &procBase{\n\t\tname: name,\n\t\tbin: bin,\n\t\tserviceAddr: serviceAddr,\n\t\tloggers: loggers,\n\t}\n}", "func New(values ...uint16) (l *List) {\n\tl = &List{} // init the ptr\n\tfor _, value := range values {\n\t\tl.Insert(value)\n\t}\n\treturn l\n}", "func NewList(list uint32, mode uint32) {\n\tsyscall.Syscall(gpNewList, 2, uintptr(list), uintptr(mode), 0)\n}", "func baseListConvert(list baseIList) baseIList { return baseList(list.AsArray()) }", "func NewList()(*List) {\n 
m := &List{\n BaseItem: *NewBaseItem(),\n }\n odataTypeValue := \"#microsoft.graph.list\";\n m.SetOdataType(&odataTypeValue);\n return m\n}", "func newPodList(podsNumber int) *corev1.PodList {\n\tpods := []corev1.Pod{}\n\tfor i := 0; i < podsNumber; i++ {\n\t\tpods = append(pods, *newPod(fmt.Sprintf(\"test-pod%d\", i)))\n\t}\n\treturn &corev1.PodList{Items: pods}\n}", "func DrawElementsInstancedBaseInstance(mode uint32, count int32, xtype uint32, indices unsafe.Pointer, instancecount int32, baseinstance uint32) {\n\tsyscall.Syscall6(gpDrawElementsInstancedBaseInstance, 6, uintptr(mode), uintptr(count), uintptr(xtype), uintptr(indices), uintptr(instancecount), uintptr(baseinstance))\n}", "func newInstances(pod *Pod, prov provider.DataCenter, cfg *config.Instances) (*instances, error) {\n\tlog.Debug(\"Initializing Instances\")\n\n\ti := &instances{\n\t\tResources: resource.NewResources(),\n\t\tpod: pod,\n\t\tinstances: map[string]resource.Instance{},\n\t}\n\n\t// The reference to the network resource.\n\tnet := pod.Cluster().Compute().DataCenter().Network()\n\n\t// The availability zones available to these instances.\n\tavailabilityZones := net.AvailabilityZones()\n\n\t// The subnet group associated with these instances.\n\tsubnetGroup := net.SubnetGroups().Find(pod.SubnetGroup())\n\tif subnetGroup == nil {\n\t\treturn nil, fmt.Errorf(\"Cannot find subnet group %s configured for pod %s\", pod.SubnetGroup(), pod.Name())\n\t}\n\n\t// The keypair to be used with these instances.\n\tkeypair := pod.Cluster().Compute().KeyPair()\n\n\tn := 0\n\tfor _, conf := range *cfg {\n\t\t// Ensure the instance is uniquely named.\n\t\tif i.Find(conf.Name()) != nil {\n\t\t\treturn nil, fmt.Errorf(\"Instance name %q must be unique but is used multiple times\", conf.Name())\n\t\t}\n\n\t\t// The availability zone for this instance. Chosing via round robin. 
Always starting at 0.\n\t\taz := availabilityZones[n%len(availabilityZones)]\n\n\t\t// Get the subnet associated with the AZ.\n\t\tsubnetName := pod.SubnetGroup() + \"-\" + az\n\t\tsubnet := subnetGroup.Find(subnetName)\n\t\tif subnet == nil {\n\t\t\treturn nil, fmt.Errorf(\"Cannot find subnet %s configured for instance %s\", subnetName, conf.Name())\n\t\t}\n\n\t\tinstance, err := newInstance(pod, subnet, keypair, prov, conf)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\ti.instances[instance.Name()] = instance\n\t\ti.Append(instance)\n\n\t\tn++\n\t}\n\treturn i, nil\n}", "func NewCustom_List(s *capnp.Segment, sz int32) (Custom_List, error) {\n\tl, err := capnp.NewCompositeList(s, capnp.ObjectSize{DataSize: 24, PointerCount: 1}, sz)\n\treturn Custom_List{l}, err\n}", "func DrawArraysInstancedBaseInstance(mode uint32, first int32, count int32, instancecount int32, baseinstance uint32) {\n C.glowDrawArraysInstancedBaseInstance(gpDrawArraysInstancedBaseInstance, (C.GLenum)(mode), (C.GLint)(first), (C.GLsizei)(count), (C.GLsizei)(instancecount), (C.GLuint)(baseinstance))\n}", "func DrawElementsInstancedBaseInstance(mode uint32, count int32, xtype uint32, indices unsafe.Pointer, instancecount int32, baseinstance uint32) {\n\tC.glowDrawElementsInstancedBaseInstance(gpDrawElementsInstancedBaseInstance, (C.GLenum)(mode), (C.GLsizei)(count), (C.GLenum)(xtype), indices, (C.GLsizei)(instancecount), (C.GLuint)(baseinstance))\n}", "func DrawElementsInstancedBaseInstance(mode uint32, count int32, xtype uint32, indices unsafe.Pointer, instancecount int32, baseinstance uint32) {\n\tC.glowDrawElementsInstancedBaseInstance(gpDrawElementsInstancedBaseInstance, (C.GLenum)(mode), (C.GLsizei)(count), (C.GLenum)(xtype), indices, (C.GLsizei)(instancecount), (C.GLuint)(baseinstance))\n}", "func newMemberlist(conf *Config) (*Memberlist, error) {\n\tif conf.ProtocolVersion < ProtocolVersionMin {\n\t\treturn nil, fmt.Errorf(\"Protocol version '%d' too low. 
Must be in range: [%d, %d]\",\n\t\t\tconf.ProtocolVersion, ProtocolVersionMin, ProtocolVersionMax)\n\t} else if conf.ProtocolVersion > ProtocolVersionMax {\n\t\treturn nil, fmt.Errorf(\"Protocol version '%d' too high. Must be in range: [%d, %d]\",\n\t\t\tconf.ProtocolVersion, ProtocolVersionMin, ProtocolVersionMax)\n\t}\n\n\tif len(conf.SecretKey) > 0 {\n\t\tif conf.Keyring == nil {\n\t\t\tkeyring, err := NewKeyring(nil, conf.SecretKey)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tconf.Keyring = keyring\n\t\t} else {\n\t\t\tif err := conf.Keyring.AddKey(conf.SecretKey); err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tif err := conf.Keyring.UseKey(conf.SecretKey); err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t}\n\t}\n\n\tif conf.LogOutput != nil && conf.Logger != nil {\n\t\treturn nil, fmt.Errorf(\"Cannot specify both LogOutput and Logger. Please choose a single log configuration setting.\")\n\t}\n\n\tlogDest := conf.LogOutput\n\tif logDest == nil {\n\t\tlogDest = os.Stderr\n\t}\n\n\tlogger := conf.Logger\n\tif logger == nil {\n\t\tlogger = log.New(logDest, \"\", log.LstdFlags)\n\t}\n\n\t// Set up a network transport by default if a custom one wasn't given\n\t// by the config.\n\ttransport := conf.Transport\n\tif transport == nil {\n\t\tnc := &NetTransportConfig{\n\t\t\tBindAddrs: []string{conf.BindAddr},\n\t\t\tBindPort: conf.BindPort,\n\t\t\tLogger: logger,\n\t\t\tMetricLabels: conf.MetricLabels,\n\t\t}\n\n\t\t// See comment below for details about the retry in here.\n\t\tmakeNetRetry := func(limit int) (*NetTransport, error) {\n\t\t\tvar err error\n\t\t\tfor try := 0; try < limit; try++ {\n\t\t\t\tvar nt *NetTransport\n\t\t\t\tif nt, err = NewNetTransport(nc); err == nil {\n\t\t\t\t\treturn nt, nil\n\t\t\t\t}\n\t\t\t\tif strings.Contains(err.Error(), \"address already in use\") {\n\t\t\t\t\tlogger.Printf(\"[DEBUG] memberlist: Got bind error: %v\", err)\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t}\n\n\t\t\treturn nil, fmt.Errorf(\"failed 
to obtain an address: %v\", err)\n\t\t}\n\n\t\t// The dynamic bind port operation is inherently racy because\n\t\t// even though we are using the kernel to find a port for us, we\n\t\t// are attempting to bind multiple protocols (and potentially\n\t\t// multiple addresses) with the same port number. We build in a\n\t\t// few retries here since this often gets transient errors in\n\t\t// busy unit tests.\n\t\tlimit := 1\n\t\tif conf.BindPort == 0 {\n\t\t\tlimit = 10\n\t\t}\n\n\t\tnt, err := makeNetRetry(limit)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"Could not set up network transport: %v\", err)\n\t\t}\n\t\tif conf.BindPort == 0 {\n\t\t\tport := nt.GetAutoBindPort()\n\t\t\tconf.BindPort = port\n\t\t\tconf.AdvertisePort = port\n\t\t\tlogger.Printf(\"[DEBUG] memberlist: Using dynamic bind port %d\", port)\n\t\t}\n\t\ttransport = nt\n\t}\n\n\tnodeAwareTransport, ok := transport.(NodeAwareTransport)\n\tif !ok {\n\t\tlogger.Printf(\"[DEBUG] memberlist: configured Transport is not a NodeAwareTransport and some features may not work as desired\")\n\t\tnodeAwareTransport = &shimNodeAwareTransport{transport}\n\t}\n\n\tif len(conf.Label) > LabelMaxSize {\n\t\treturn nil, fmt.Errorf(\"could not use %q as a label: too long\", conf.Label)\n\t}\n\n\tif conf.Label != \"\" {\n\t\tnodeAwareTransport = &labelWrappedTransport{\n\t\t\tlabel: conf.Label,\n\t\t\tNodeAwareTransport: nodeAwareTransport,\n\t\t}\n\t}\n\n\tm := &Memberlist{\n\t\tconfig: conf,\n\t\tshutdownCh: make(chan struct{}),\n\t\tleaveBroadcast: make(chan struct{}, 1),\n\t\ttransport: nodeAwareTransport,\n\t\thandoffCh: make(chan struct{}, 1),\n\t\thighPriorityMsgQueue: list.New(),\n\t\tlowPriorityMsgQueue: list.New(),\n\t\tnodeMap: make(map[string]*nodeState),\n\t\tnodeTimers: make(map[string]*suspicion),\n\t\tawareness: newAwareness(conf.AwarenessMaxMultiplier, conf.MetricLabels),\n\t\tackHandlers: make(map[uint32]*ackHandler),\n\t\tbroadcasts: &TransmitLimitedQueue{RetransmitMult: 
conf.RetransmitMult},\n\t\tlogger: logger,\n\t\tmetricLabels: conf.MetricLabels,\n\t}\n\tm.broadcasts.NumNodes = func() int {\n\t\treturn m.estNumNodes()\n\t}\n\n\t// Get the final advertise address from the transport, which may need\n\t// to see which address we bound to. We'll refresh this each time we\n\t// send out an alive message.\n\tif _, _, err := m.refreshAdvertise(); err != nil {\n\t\treturn nil, err\n\t}\n\n\tgo m.streamListen()\n\tgo m.packetListen()\n\tgo m.packetHandler()\n\treturn m, nil\n}", "func newToDoList() toDoList {\n\treturn toDoList{}\n}", "func newObjectList() *ObjectList {\n\treturn &ObjectList{\n\t\tObjectIDs: make([]int, 0, 200),\n\t}\n}", "func NewList(vs ...Value) Value {\n\treturn StrictPrepend(vs, EmptyList)\n}", "func NewList(list uint32, mode uint32) {\n C.glowNewList(gpNewList, (C.GLuint)(list), (C.GLenum)(mode))\n}", "func NewList(client *secretsapi.Client, p listPrimeable) *List {\n\treturn &List{\n\t\tsecretsClient: client,\n\t\tout: p.Output(),\n\t\tproj: p.Project(),\n\t}\n}", "func NewList(e Type, i *debug.Information) List {\n\treturn List{e, i}\n}", "func NewList() List {\n\treturn List{}\n}", "func newListMetrics() *listMetrics {\n\treturn new(listMetrics)\n}", "func (bil *baseInstanceList) getOrCreateBaseInstance(key *meta.Key) *baseInstance {\n\tbil.lock.Lock()\n\tdefer bil.lock.Unlock()\n\n\tinst, found := bil.instances[*key]\n\tif !found {\n\t\tinst = &baseInstance{name: key.Name, zone: key.Zone}\n\t\tif bil.allocateCIDR {\n\t\t\tnextRange, _ := bil.cidrSet.AllocateNext()\n\t\t\tinst.aliasRange = nextRange.String()\n\t\t}\n\t\tbil.instances[*key] = inst\n\t}\n\treturn inst\n}", "func New(l interface{}) list.Interface {\n\tif reflect.TypeOf(l).Kind() != reflect.Slice {\n\t\tpanic(fmt.Errorf(\"Param must be a slice\"))\n\t}\n\n\ts := reflect.ValueOf(l)\n\titems := make([]interface{}, s.Len())\n\n\tfor i := 0; i < len(items); i++ {\n\t\titems[i] = s.Index(i).Interface()\n\t}\n\n\treturn &randList{\n\t\tlist: 
items,\n\t\tperm: rand.Perm(len(items)),\n\t\toffset: 0,\n\t}\n}", "func createList(arg string) []string {\n\tvar retObject = []string{arg}\n\treturn retObject\n}", "func NewList(parent sparta.Widget, name string, rect image.Rectangle) *List {\n\tl := &List{\n\t\tname: name,\n\t\tparent: parent,\n\t\tgeometry: rect,\n\t\tback: backColor,\n\t\tfore: foreColor,\n\t\ttarget: parent,\n\t}\n\tsparta.NewWindow(l)\n\tl.scroll = NewScroll(l, \"list\"+name+\"Scroll\", 0, 0, Vertical, image.Rect(rect.Dx()-10, 0, rect.Dx(), rect.Dy()))\n\treturn l\n}", "func New(vals ...interface{}) *List {\n\thead := list.New()\n\tfor _, v := range vals {\n\t\thead.PushBack(v)\n\t}\n\treturn &List{head}\n}", "func newListFromUIDs(uids []string) *CSPList {\n\treturn NewListBuilder().WithUIDs(uids...).List()\n}", "func newReassemblyList(epoch int, capacity int, s ingressSender,\n\tframesDiscarded metrics.Counter) *reassemblyList {\n\n\tlist := &reassemblyList{\n\t\tepoch: epoch,\n\t\tcapacity: capacity,\n\t\tsnd: s,\n\t\tmarkedForDeletion: false,\n\t\tentries: list.New(),\n\t\tbuf: bytes.NewBuffer(make([]byte, 0, frameBufCap)),\n\t}\n\tif framesDiscarded != nil {\n\t\tlist.tooOld = framesDiscarded.With(\"reason\", \"too_old\")\n\t\tlist.duplicate = framesDiscarded.With(\"reason\", \"duplicate\")\n\t\tlist.evicted = framesDiscarded.With(\"reason\", \"evicted\")\n\t\tlist.invalid = framesDiscarded.With(\"reason\", \"invalid\")\n\t}\n\treturn list\n}", "func newErrorList() *errorList {\n\treturn &errorList{\n\t\tlist: make([]string, 0, 16),\n\t}\n}", "func newBaseCount() baseCount {\n\treturn baseCount{words: make(map[word]int)}\n}", "func NewList(g ...Getter) *List {\n\tlist := &List{\n\t\tlist: g,\n\t}\n\tlist.GetProxy = NewGetProxy(list) // self\n\treturn list\n}", "func newBGPFilterList() *api.BGPFilterList {\n\treturn &api.BGPFilterList{\n\t\tTypeMeta: metav1.TypeMeta{\n\t\t\tKind: api.KindBGPFilterList,\n\t\t\tAPIVersion: api.GroupVersionCurrent,\n\t\t},\n\t}\n}", "func 
DrawArraysInstancedBaseInstance(mode uint32, first int32, count int32, instancecount int32, baseinstance uint32) {\n\tsyscall.Syscall6(gpDrawArraysInstancedBaseInstance, 5, uintptr(mode), uintptr(first), uintptr(count), uintptr(instancecount), uintptr(baseinstance), 0)\n}", "func InitializeList(uninitializedList *List, itemSize uint64) {\n uninitializedList.itemSize = itemSize;\n uninitializedList.capacity = 16; //Allocate 16 items by default\n uninitializedList.baseAddress = Alloc(uninitializedList.capacity * uninitializedList.itemSize);\n uninitializedList.itemCount = 0; //Reset item count (to zero)\n}", "func NewBasePool() BasePool {\n\treturn BasePool{\n\t\tlastTuneTs: *atomicutil.NewTime(time.Now()),\n\t}\n}", "func DrawArraysInstancedBaseInstance(mode uint32, first int32, count int32, instancecount int32, baseinstance uint32) {\n\tC.glowDrawArraysInstancedBaseInstance(gpDrawArraysInstancedBaseInstance, (C.GLenum)(mode), (C.GLint)(first), (C.GLsizei)(count), (C.GLsizei)(instancecount), (C.GLuint)(baseinstance))\n}", "func DrawArraysInstancedBaseInstance(mode uint32, first int32, count int32, instancecount int32, baseinstance uint32) {\n\tC.glowDrawArraysInstancedBaseInstance(gpDrawArraysInstancedBaseInstance, (C.GLenum)(mode), (C.GLint)(first), (C.GLsizei)(count), (C.GLsizei)(instancecount), (C.GLuint)(baseinstance))\n}", "func (m *MockLoadBalancerServiceIface) NewListLoadBalancerRuleInstancesParams(id string) *ListLoadBalancerRuleInstancesParams {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"NewListLoadBalancerRuleInstancesParams\", id)\n\tret0, _ := ret[0].(*ListLoadBalancerRuleInstancesParams)\n\treturn ret0\n}", "func New(values ...interface{}) *List {\n\tlist := &List{}\n\tif len(values) > 0 {\n\t\tlist.Add(values...)\n\t}\n\treturn list\n}", "func New(values ...interface{}) *List {\n\tlist := &List{}\n\tif len(values) > 0 {\n\t\tlist.Add(values)\n\t}\n\treturn list\n}", "func NewList(args ...interface{}) *List {\n\tl := &List{}\n\tfor _, v := range 
args {\n\t\tl.PushBack(v)\n\t}\n\treturn l\n}", "func DrawElementsInstancedBaseVertex(mode uint32, count int32, xtype uint32, indices unsafe.Pointer, instancecount int32, basevertex int32) {\n C.glowDrawElementsInstancedBaseVertex(gpDrawElementsInstancedBaseVertex, (C.GLenum)(mode), (C.GLsizei)(count), (C.GLenum)(xtype), indices, (C.GLsizei)(instancecount), (C.GLint)(basevertex))\n}", "func NewGraph(base Base) {\n\n}", "func (pr *pluginRegistry) InstanceList() []*Instance {\n\tpr.mut.Lock()\n\tdefer pr.mut.Unlock()\n\n\t// this gets called in the router for every message that comes in, so it\n\t// might come to pass that this will perform poorly, but for now with a\n\t// relatively small number of instances we'll take the copy hit in exchange\n\t// for not having to think about concurrent access to the list\n\tout := make([]*Instance, len(pr.instances))\n\tcopy(out, pr.instances) // intentional shallow copy\n\treturn out\n}", "func newBaseClient() *baseClient {\n\treturn &baseClient{\n\t\thttpClient: http.DefaultClient,\n\t\tmethod: \"GET\",\n\t\theader: make(http.Header),\n\t}\n}", "func newFiltBase(n int, mu float64, w []float64) (AdaptiveFilter, error) {\n\tvar err error\n\tp := new(filtBase)\n\tp.kind = \"Base filter\"\n\tp.n = n\n\tp.muMin = 0\n\tp.muMax = 1000\n\tp.mu, err = p.checkFloatParam(mu, p.muMin, p.muMax, \"mu\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\terr = p.initWeights(w, n)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn p, nil\n}", "func New(elems ...interface{}) List {\n\tl := Mzero()\n\tfor _, elem := range elems {\n\t\tl = Cons(elem, l)\n\t}\n\treturn Reverse(l)\n}", "func newListFromUIDNode(UIDNodeMap map[string]string) *CSPList {\n\treturn NewListBuilder().WithUIDNode(UIDNodeMap).List()\n}", "func newRpcServices(c *RpccontrollerV1Client, namespace string) *rpcServices {\n\treturn &rpcServices{\n\t\tclient: c.RESTClient(),\n\t\tns: namespace,\n\t}\n}", "func (s CHF) NewCustom(n int32) (Custom_List, error) {\n\tl, err := 
NewCustom_List(s.Struct.Segment(), n)\n\tif err != nil {\n\t\treturn Custom_List{}, err\n\t}\n\terr = s.Struct.SetPtr(9, l.List.ToPtr())\n\treturn l, err\n}", "func newCoverageList(name string) *CoverageList {\n\treturn &CoverageList{\n\t\tCoverage: &Coverage{Name: name},\n\t\tGroup: []Coverage{},\n\t}\n}", "func New(maxlevel int, cmpFn CompareFn) *List {\n\treturn NewCustom(maxlevel, DefaultProbability, cmpFn, time.Now().Unix())\n}", "func newVaultLists(c *VaultV1alpha1Client, namespace string) *vaultLists {\n\treturn &vaultLists{\n\t\tclient: c.RESTClient(),\n\t\tns: namespace,\n\t}\n}", "func NewObj_List(s *capnp.Segment, sz int32) (Obj_List, error) {\n\tl, err := capnp.NewCompositeList(s, capnp.ObjectSize{DataSize: 8, PointerCount: 1}, sz)\n\treturn Obj_List{l}, err\n}", "func getInstanceList(nodeNames sets.String) *compute.InstanceGroupsListInstances {\n\tinstanceNames := nodeNames.List()\n\tcomputeInstances := []*compute.InstanceWithNamedPorts{}\n\tfor _, name := range instanceNames {\n\t\tinstanceLink := getInstanceUrl(name)\n\t\tcomputeInstances = append(\n\t\t\tcomputeInstances, &compute.InstanceWithNamedPorts{\n\t\t\t\tInstance: instanceLink})\n\t}\n\treturn &compute.InstanceGroupsListInstances{\n\t\tItems: computeInstances,\n\t}\n}", "func newTestList() LinkedList {\n\treturn newList(16384, 8192, func(baseIdx int, data []int) {\n\t\tfor i := range data {\n\t\t\tdata[i] = baseIdx + i\n\t\t}\n\t})\n}", "func NewList(args ...interface{}) *List {\n\tl := List{}\n\tfor _, data := range args {\n\t\tl.PushBack(data)\n\t}\n\treturn &l\n}", "func NewList(list uint32, mode uint32) {\n\tC.glowNewList(gpNewList, (C.GLuint)(list), (C.GLenum)(mode))\n}", "func NewList(vs ...Value) List {\n\treturn List{&vs}\n}", "func initList(myId id) []id {\n\tvar MembershipList []id\n\tMembershipList = append(MembershipList, myId)\n\treturn MembershipList\n}", "func Constructor() MyHashSet {\n\treturn MyHashSet{make([]list.List, base)}\n\n}", "func NewListCommand(parent 
common.Registerer, globals *config.Data) *ListCommand {\n\tvar c ListCommand\n\tc.Globals = globals\n\tc.manifest.File.SetOutput(c.Globals.Output)\n\tc.manifest.File.Read(manifest.Filename)\n\tc.CmdClause = parent.Command(\"list\", \"List Syslog endpoints on a Fastly service version\")\n\tc.CmdClause.Flag(\"service-id\", \"Service ID\").Short('s').StringVar(&c.manifest.Flag.ServiceID)\n\tc.CmdClause.Flag(\"version\", \"Number of service version\").Required().IntVar(&c.Input.ServiceVersion)\n\treturn &c\n}", "func NewList() *List {\n\tl := List{\n\t\tpostings: make(map[uint64]*Posting),\n\t}\n\treturn &l\n}", "func NewList() List {\n\tl := List{}\n\tl.Set = make(map[string]int)\n\treturn l\n}", "func newBaseRunner(collector *resourceStatusCollector) *baseRunner {\n\treturn &baseRunner{\n\t\tcollector: collector,\n\t}\n}", "func NewListOpts(validator ValidatorFctType) ListOpts {\n var values []string\n return *NewListOptsRef(&values, validator)\n}", "func NewList(initial []W) UpdatableList {\n\tul := &updatableList{}\n\tul.Update(initial)\n\treturn ul\n}", "func newModule(base mb.BaseModule) (mb.Module, error) {\n\t// Validate that at least one host has been specified.\n\tconfig := struct {\n\t\tHosts []string `config:\"hosts\" validate:\"nonzero,required\"`\n\t}{}\n\tif err := base.UnpackConfig(&config); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &base, nil\n}", "func initPeerList() {\n\t// peerList\n\tpeerList = make([]Peer, MaxPeers)\n\n\t// populate peerList with dead peers\n\tfor i := 0; i < MaxPeers; i++ {\n\t\tpeerList[i] = Peer{expirationTimer: 0}\n\t}\n}", "func NewList() List {\n\treturn make(List, 0)\n}", "func (p PageListOrderedItemBlocks) construct() PageListOrderedItemClass { return &p }", "func newInstance(x *runtime.Runtime, p *build.Instance, v *adt.Vertex) *Instance {\n\t// TODO: associate root source with structLit.\n\tinst := &Instance{\n\t\troot: v,\n\t\tinst: p,\n\t}\n\tif p != nil {\n\t\tinst.ImportPath = p.ImportPath\n\t\tinst.Dir = 
p.Dir\n\t\tinst.PkgName = p.PkgName\n\t\tinst.DisplayName = p.ImportPath\n\t\tif p.Err != nil {\n\t\t\tinst.setListOrError(p.Err)\n\t\t}\n\t}\n\n\tx.AddInst(p.ImportPath, v, p)\n\tx.SetBuildData(p, inst)\n\tinst.index = x\n\treturn inst\n}", "func NewList(slice []Unit) List {\n\treturn unitlist{slice}\n}", "func (e *exprHelper) NewList(elems ...ast.Expr) ast.Expr {\n\treturn e.exprFactory.NewList(e.nextMacroID(), elems, []int32{})\n}" ]
[ "0.6095147", "0.6051629", "0.60512936", "0.60462177", "0.6043791", "0.5972696", "0.58985627", "0.5896269", "0.58636135", "0.5861767", "0.57537234", "0.573758", "0.5711063", "0.5693298", "0.5691532", "0.5683488", "0.5661154", "0.5661154", "0.5643412", "0.56272656", "0.5625379", "0.5610555", "0.5580606", "0.5561794", "0.55542386", "0.5538938", "0.55370694", "0.5524617", "0.5523634", "0.5518314", "0.5477948", "0.5472562", "0.53989154", "0.53810954", "0.5375317", "0.53735423", "0.53653544", "0.53653544", "0.5331648", "0.5327969", "0.53155744", "0.5299514", "0.52828664", "0.52628875", "0.52416325", "0.5241076", "0.52374166", "0.5234415", "0.51587874", "0.5158299", "0.51518995", "0.5135716", "0.51280975", "0.51187694", "0.510828", "0.5108234", "0.51073116", "0.5105616", "0.5099046", "0.50945735", "0.50900227", "0.5082316", "0.5082316", "0.5077379", "0.505801", "0.5057991", "0.50555605", "0.50524056", "0.50481474", "0.50480324", "0.50468075", "0.5046412", "0.50434965", "0.5038662", "0.503643", "0.5029013", "0.50274116", "0.50236416", "0.5023445", "0.5017092", "0.50078213", "0.49991798", "0.49977162", "0.4996381", "0.49730483", "0.49658477", "0.496181", "0.49548334", "0.49285877", "0.4925838", "0.4920352", "0.4896176", "0.48828945", "0.48783025", "0.48701972", "0.48685795", "0.48639762", "0.4862713", "0.48625156", "0.48582953" ]
0.8217321
0
getOrCreateBaseInstance lazily creates a new base instance, assigning if allocateCIDR is true
getOrCreateBaseInstance создает новый базовый экземпляр по требованию, присваивая, если allocateCIDR истинно
func (bil *baseInstanceList) getOrCreateBaseInstance(key *meta.Key) *baseInstance { bil.lock.Lock() defer bil.lock.Unlock() inst, found := bil.instances[*key] if !found { inst = &baseInstance{name: key.Name, zone: key.Zone} if bil.allocateCIDR { nextRange, _ := bil.cidrSet.AllocateNext() inst.aliasRange = nextRange.String() } bil.instances[*key] = inst } return inst }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func newBaseInstanceList(allocateCIDR bool, clusterCIDR *net.IPNet, subnetMaskSize int) *baseInstanceList {\n\tcidrSet, _ := cidrset.NewCIDRSet(clusterCIDR, subnetMaskSize)\n\treturn &baseInstanceList{\n\t\tallocateCIDR: allocateCIDR,\n\t\tclusterCIDR: clusterCIDR,\n\t\tsubnetMaskSize: subnetMaskSize,\n\t\tcidrSet: cidrSet,\n\t\tinstances: make(map[meta.Key]*baseInstance),\n\t}\n}", "func newBase() *base {\n\treturn &base{shared.NewUUID(), time.Now().UTC(), time.Now().UTC(), false/*, shared.NewUUID()*/}\n}", "func newProcBase(name, bin, serviceAddr string, loggers []Logger) *procBase {\n\tlog.Infof(\"%s has addr %s\", name, serviceAddr)\n\treturn &procBase{\n\t\tname: name,\n\t\tbin: bin,\n\t\tserviceAddr: serviceAddr,\n\t\tloggers: loggers,\n\t}\n}", "func (instance *Network) Create(ctx context.Context, req abstract.NetworkRequest) (xerr fail.Error) {\n\tdefer fail.OnPanic(&xerr)\n\n\tif instance == nil || instance.IsNull() {\n\t\treturn fail.InvalidInstanceError()\n\t}\n\tif ctx == nil {\n\t\treturn fail.InvalidParameterCannotBeNilError(\"ctx\")\n\t}\n\n\ttask, xerr := concurrency.TaskFromContext(ctx)\n\txerr = debug.InjectPlannedFail(xerr)\n\tif xerr != nil {\n\t\treturn xerr\n\t}\n\n\tif task.Aborted() {\n\t\treturn fail.AbortedError(nil, \"aborted\")\n\t}\n\n\ttracer := debug.NewTracer(task, true, \"('%s', '%s')\", req.Name, req.CIDR).WithStopwatch().Entering()\n\tdefer tracer.Exiting()\n\n\tinstance.lock.Lock()\n\tdefer instance.lock.Unlock()\n\n\t// Check if subnet already exists and is managed by SafeScale\n\tsvc := instance.GetService()\n\tif existing, xerr := LoadNetwork(svc, req.Name); xerr == nil {\n\t\texisting.Released()\n\t\treturn fail.DuplicateError(\"Network '%s' already exists\", req.Name)\n\t}\n\n\tif task.Aborted() {\n\t\treturn fail.AbortedError(nil, \"aborted\")\n\t}\n\n\t// Verify if the subnet already exist and in this case is not managed by SafeScale\n\t_, xerr = svc.InspectNetworkByName(req.Name)\n\txerr = 
debug.InjectPlannedFail(xerr)\n\tif xerr != nil {\n\t\tswitch xerr.(type) {\n\t\tcase *fail.ErrNotFound:\n\t\t\t// continue\n\t\tdefault:\n\t\t\treturn xerr\n\t\t}\n\t} else {\n\t\treturn fail.DuplicateError(\"Network '%s' already exists (not managed by SafeScale)\", req.Name)\n\t}\n\n\t// Verify the CIDR is not routable\n\tif req.CIDR != \"\" {\n\t\troutable, xerr := netretry.IsCIDRRoutable(req.CIDR)\n\t\txerr = debug.InjectPlannedFail(xerr)\n\t\tif xerr != nil {\n\t\t\treturn fail.Wrap(xerr, \"failed to determine if CIDR is not routable\")\n\t\t}\n\n\t\tif routable {\n\t\t\treturn fail.InvalidRequestError(\"cannot create such a Networking, CIDR must not be routable; please choose an appropriate CIDR (RFC1918)\")\n\t\t}\n\t}\n\n\tif task.Aborted() {\n\t\treturn fail.AbortedError(nil, \"aborted\")\n\t}\n\n\t// Create the Network\n\tlogrus.Debugf(\"Creating Network '%s' with CIDR '%s'...\", req.Name, req.CIDR)\n\tan, xerr := svc.CreateNetwork(req)\n\txerr = debug.InjectPlannedFail(xerr)\n\tif xerr != nil {\n\t\treturn xerr\n\t}\n\n\tdefer func() {\n\t\tif xerr != nil && !req.KeepOnFailure {\n\t\t\tderr := svc.DeleteNetwork(an.ID)\n\t\t\tderr = debug.InjectPlannedFail(derr)\n\t\t\tif derr != nil {\n\t\t\t\t_ = xerr.AddConsequence(fail.Wrap(derr, \"cleaning up on failure, failed to delete Network\"))\n\t\t\t}\n\t\t}\n\t}()\n\n\tif task.Aborted() {\n\t\treturn fail.AbortedError(nil, \"aborted\")\n\t}\n\n\t// Write subnet object metadata\n\t// logrus.Debugf(\"Saving subnet metadata '%s' ...\", subnet.GetName)\n\treturn instance.carry(an)\n}", "func (r *Reconciler) create() error {\n\tif err := validateMachine(*r.machine); err != nil {\n\t\treturn fmt.Errorf(\"%v: failed validating machine provider spec: %w\", r.machine.GetName(), err)\n\t}\n\n\tif ipam.HasStaticIPConfiguration(r.providerSpec) {\n\t\tif !r.staticIPFeatureGateEnabled {\n\t\t\treturn fmt.Errorf(\"%v: static IP/IPAM configuration is only available with the VSphereStaticIPs feature gate\", 
r.machine.GetName())\n\t\t}\n\n\t\toutstandingClaims, err := ipam.HasOutstandingIPAddressClaims(\n\t\t\tr.Context,\n\t\t\tr.client,\n\t\t\tr.machine,\n\t\t\tr.providerSpec.Network.Devices,\n\t\t)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tcondition := metav1.Condition{\n\t\t\tType: string(machinev1.IPAddressClaimedCondition),\n\t\t\tReason: machinev1.WaitingForIPAddressReason,\n\t\t\tMessage: \"All IP address claims are bound\",\n\t\t\tStatus: metav1.ConditionFalse,\n\t\t}\n\t\tif outstandingClaims > 0 {\n\t\t\tcondition.Message = fmt.Sprintf(\"Waiting on %d IP address claims to be bound\", outstandingClaims)\n\t\t\tcondition.Status = metav1.ConditionTrue\n\t\t\tklog.Infof(\"Waiting for IPAddressClaims associated with machine %s to be bound\", r.machine.Name)\n\t\t}\n\t\tif err := setProviderStatus(\"\", condition, r.machineScope, nil); err != nil {\n\t\t\treturn fmt.Errorf(\"could not set provider status: %w\", err)\n\t\t}\n\t}\n\n\t// We only clone the VM template if we have no taskRef.\n\tif r.providerStatus.TaskRef == \"\" {\n\t\tif !r.machineScope.session.IsVC() {\n\t\t\treturn fmt.Errorf(\"%v: not connected to a vCenter\", r.machine.GetName())\n\t\t}\n\t\tklog.Infof(\"%v: cloning\", r.machine.GetName())\n\t\ttask, err := clone(r.machineScope)\n\t\tif err != nil {\n\t\t\tmetrics.RegisterFailedInstanceCreate(&metrics.MachineLabels{\n\t\t\t\tName: r.machine.Name,\n\t\t\t\tNamespace: r.machine.Namespace,\n\t\t\t\tReason: \"Clone task finished with error\",\n\t\t\t})\n\t\t\tconditionFailed := conditionFailed()\n\t\t\tconditionFailed.Message = err.Error()\n\t\t\tstatusError := setProviderStatus(task, conditionFailed, r.machineScope, nil)\n\t\t\tif statusError != nil {\n\t\t\t\treturn fmt.Errorf(\"failed to set provider status: %w\", err)\n\t\t\t}\n\t\t\treturn err\n\t\t}\n\t\treturn setProviderStatus(task, conditionSuccess(), r.machineScope, nil)\n\t}\n\n\tmoTask, err := r.session.GetTask(r.Context, r.providerStatus.TaskRef)\n\tif err != nil 
{\n\t\tmetrics.RegisterFailedInstanceCreate(&metrics.MachineLabels{\n\t\t\tName: r.machine.Name,\n\t\t\tNamespace: r.machine.Namespace,\n\t\t\tReason: \"GetTask finished with error\",\n\t\t})\n\t\treturn err\n\t}\n\n\tif moTask == nil {\n\t\t// Possible eventual consistency problem from vsphere\n\t\t// TODO: change error message here to indicate this might be expected.\n\t\treturn fmt.Errorf(\"unexpected moTask nil\")\n\t}\n\n\tif taskIsFinished, err := taskIsFinished(moTask); err != nil {\n\t\tif taskIsFinished {\n\t\t\tmetrics.RegisterFailedInstanceCreate(&metrics.MachineLabels{\n\t\t\t\tName: r.machine.Name,\n\t\t\t\tNamespace: r.machine.Namespace,\n\t\t\t\tReason: \"Task finished with error\",\n\t\t\t})\n\t\t\tconditionFailed := conditionFailed()\n\t\t\tconditionFailed.Message = err.Error()\n\t\t\tstatusError := setProviderStatus(moTask.Reference().Value, conditionFailed, r.machineScope, nil)\n\t\t\tif statusError != nil {\n\t\t\t\treturn fmt.Errorf(\"failed to set provider status: %w\", statusError)\n\t\t\t}\n\t\t\treturn machinecontroller.CreateMachine(err.Error())\n\t\t} else {\n\t\t\treturn fmt.Errorf(\"failed to check task status: %w\", err)\n\t\t}\n\t} else if !taskIsFinished {\n\t\treturn fmt.Errorf(\"%v task %v has not finished\", moTask.Info.DescriptionId, moTask.Reference().Value)\n\t}\n\n\t// if clone task finished successfully, power on the vm\n\tif moTask.Info.DescriptionId == cloneVmTaskDescriptionId {\n\t\tklog.Infof(\"Powering on cloned machine: %v\", r.machine.Name)\n\t\ttask, err := powerOn(r.machineScope)\n\t\tif err != nil {\n\t\t\tmetrics.RegisterFailedInstanceCreate(&metrics.MachineLabels{\n\t\t\t\tName: r.machine.Name,\n\t\t\t\tNamespace: r.machine.Namespace,\n\t\t\t\tReason: \"PowerOn task finished with error\",\n\t\t\t})\n\t\t\tconditionFailed := conditionFailed()\n\t\t\tconditionFailed.Message = err.Error()\n\t\t\tstatusError := setProviderStatus(task, conditionFailed, r.machineScope, nil)\n\t\t\tif statusError != nil {\n\t\t\t\treturn 
fmt.Errorf(\"failed to set provider status: %w\", err)\n\t\t\t}\n\t\t\treturn err\n\t\t}\n\t\treturn setProviderStatus(task, conditionSuccess(), r.machineScope, nil)\n\t}\n\n\t// If taskIsFinished then next reconcile should result in update.\n\treturn nil\n}", "func appNumOnUNetBaseCreate(baseID uuid.UUID) *types.Bitmap {\n\tif appNumOnUNetBaseGet(baseID) == nil {\n\t\tlog.Functionf(\"appNumOnUNetBaseCreate (%s)\", baseID.String())\n\t\tappNumBase[baseID.String()] = new(types.Bitmap)\n\t}\n\treturn appNumOnUNetBaseGet(baseID)\n}", "func newInstance(moduleName, name string, priv interface{}) (*BaseInstance, error) {\n\tm, found := modules[moduleName]\n\tif !found {\n\t\treturn nil, fmt.Errorf(\"No such module: %s\", moduleName)\n\t}\n\n\tif _, exists := m.instance[name]; exists {\n\t\treturn nil, fmt.Errorf(\"%s already exists in %s\", name, moduleName)\n\t}\n\n\tbi := &BaseInstance{name: name, module: m, subinstance: false}\n\n\tringName := fmt.Sprintf(\"input-%s\", name)\n\tbi.input = dpdk.RingCreate(ringName, m.ringParam.Count, m.ringParam.SocketId, dpdk.RING_F_SC_DEQ)\n\tif bi.input == nil {\n\t\treturn nil, fmt.Errorf(\"Input ring creation faild for %s.\\n\", name)\n\t}\n\n\tif m.ringParam.SecondaryInput {\n\t\tringName := fmt.Sprintf(\"input2-%s\", name)\n\t\tbi.input2 = dpdk.RingCreate(ringName, m.ringParam.Count, m.ringParam.SocketId, dpdk.RING_F_SC_DEQ)\n\t\tif bi.input2 == nil {\n\t\t\treturn nil, fmt.Errorf(\"Second input ring creation failed for %s\", name)\n\t\t}\n\t}\n\n\tbi.rules = newRules()\n\n\tif m.moduleType == TypeInterface || m.moduleType == TypeRIF {\n\t\tbi.counter = NewCounter()\n\t}\n\n\tinstance, err := m.factory(bi, priv)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"Creating module '%s' with name '%s' failed: %v\\n\", moduleName, name, err)\n\t}\n\tbi.instance = instance\n\n\t// Set rule observer, if the module complies to RulesNotify.\n\tif rn, ok := instance.(RulesNotify); ok 
{\n\t\tbi.rules.setRulesNotify(rn)\n\t}\n\n\tm.instance[name] = bi\n\n\treturn bi, nil\n}", "func newBaseRuntime(erp *ECALRuntimeProvider, node *parser.ASTNode) *baseRuntime {\n\tinstanceCounter++\n\treturn &baseRuntime{fmt.Sprint(instanceCounter), erp, node, false}\n}", "func newInstance(moduleName, name string, priv interface{}) (*BaseInstance, error) {\n\tfactory, found := instanceFactories[moduleName]\n\tif !found {\n\t\treturn nil, fmt.Errorf(\"Module '%s' doesn't exist.\\n\", moduleName)\n\t}\n\n\trp, ok := ringParams[moduleName]\n\tif !ok {\n\t\trp = defaultRingParam\n\t}\n\n\tbi := &BaseInstance{name: name}\n\n\tringName := fmt.Sprintf(\"input-%s\", name)\n\tbi.input = dpdk.RingCreate(ringName, rp.Count, rp.SocketId, dpdk.RING_F_SC_DEQ)\n\tif bi.input == nil {\n\t\treturn nil, fmt.Errorf(\"Input ring creation faild for %s.\\n\", name)\n\t}\n\n\tif rp.SecondaryInput {\n\t\tringName := fmt.Sprintf(\"input2-%s\", name)\n\t\tbi.input2 = dpdk.RingCreate(ringName, rp.Count, rp.SocketId, dpdk.RING_F_SC_DEQ)\n\t\tif bi.input2 == nil {\n\t\t\treturn nil, fmt.Errorf(\"Second input ring creation failed for %s\", name)\n\t\t}\n\t}\n\n\tbi.rules = newRules()\n\n\tinstance, err := factory(bi, priv)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"Creating module '%s' with name '%s' failed: %v\\n\", moduleName, name, err)\n\t}\n\tbi.instance = instance\n\n\treturn bi, nil\n}", "func NewBase(opt Opts) Dialer {\n\trv := &base{\n\t\tnetDialer: net.Dialer{\n\t\t\tTimeout: opt.GetTimeout(),\n\t\t\tControl: reuseport.Control,\n\t\t},\n\t\ttlsConfigs: cache.New(TLSConfigCacheSize,\n\t\t\tTLSConfigTTL,\n\t\t\tcache.NoopEvictCallback),\n\t\ttlsSkipVerify: opt.GetTLSSkipVerify(),\n\t}\n\n\treturn rv\n}", "func MakeBase(name, key, owner string, defaultValue interface{}, lifetime Lifetime, expose bool) Base {\n\treturn Base{\n\t\tname: name,\n\t\tkey: key,\n\t\towner: owner,\n\t\tdefaultValue: defaultValue,\n\t\tlifetime: lifetime,\n\t\texpose: expose,\n\t}\n}", "func NewBasePool() 
BasePool {\n\treturn BasePool{\n\t\tlastTuneTs: *atomicutil.NewTime(time.Now()),\n\t}\n}", "func newCache(nbClient libovsdbclient.Client) (*LBCache, error) {\n\t// first, list all load balancers\n\tlbs, err := listLBs(nbClient)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tc := LBCache{}\n\tc.existing = make(map[string]*CachedLB, len(lbs))\n\n\tfor i := range lbs {\n\t\tc.existing[lbs[i].UUID] = &lbs[i]\n\t}\n\n\tps := func(item *nbdb.LogicalSwitch) bool {\n\t\treturn len(item.LoadBalancer) > 0\n\t}\n\tswitches, err := libovsdbops.FindLogicalSwitchesWithPredicate(nbClient, ps)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tfor _, ls := range switches {\n\t\tfor _, lbuuid := range ls.LoadBalancer {\n\t\t\tif lb, ok := c.existing[lbuuid]; ok {\n\t\t\t\tlb.Switches.Insert(ls.Name)\n\t\t\t}\n\t\t}\n\t}\n\n\tpr := func(item *nbdb.LogicalRouter) bool {\n\t\treturn len(item.LoadBalancer) > 0\n\t}\n\trouters, err := libovsdbops.FindLogicalRoutersWithPredicate(nbClient, pr)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tfor _, router := range routers {\n\t\tfor _, lbuuid := range router.LoadBalancer {\n\t\t\tif lb, ok := c.existing[lbuuid]; ok {\n\t\t\t\tlb.Routers.Insert(router.Name)\n\t\t\t}\n\t\t}\n\t}\n\n\t// Get non-empty LB groups\n\tpg := func(item *nbdb.LoadBalancerGroup) bool {\n\t\treturn len(item.LoadBalancer) > 0\n\t}\n\tgroups, err := libovsdbops.FindLoadBalancerGroupsWithPredicate(nbClient, pg)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tfor _, group := range groups {\n\t\tfor _, lbuuid := range group.LoadBalancer {\n\t\t\tif lb, ok := c.existing[lbuuid]; ok {\n\t\t\t\tlb.Groups.Insert(group.Name)\n\t\t\t}\n\t\t}\n\t}\n\n\treturn &c, nil\n}", "func newPrimary() *proxy {\n\tvar (\n\t\tp = &proxy{}\n\t\ttracker = mock.NewStatsTracker()\n\t\tsmap = newSmap()\n\t)\n\n\tp.owner.smap = newSmapOwner(cmn.GCO.Get())\n\tp.si = meta.NewSnode(\"primary\", apc.Proxy, meta.NetInfo{}, meta.NetInfo{}, meta.NetInfo{})\n\n\tsmap.addProxy(p.si)\n\tsmap.Primary = 
p.si\n\tp.owner.smap.put(smap)\n\n\tconfig := cmn.GCO.BeginUpdate()\n\tconfig.ConfigDir = \"/tmp/ais-tests\"\n\tconfig.Periodic.RetrySyncTime = cos.Duration(time.Millisecond * 100)\n\tconfig.Keepalive.Proxy.Name = \"heartbeat\"\n\tconfig.Keepalive.Proxy.Interval = cos.Duration(3 * time.Second)\n\tconfig.Timeout.CplaneOperation = cos.Duration(2 * time.Second)\n\tconfig.Timeout.MaxKeepalive = cos.Duration(4 * time.Second)\n\tconfig.Client.Timeout = cos.Duration(10 * time.Second)\n\tconfig.Client.TimeoutLong = cos.Duration(10 * time.Second)\n\tconfig.Cksum.Type = cos.ChecksumXXHash\n\tcmn.GCO.CommitUpdate(config)\n\tcmn.GCO.SetInitialGconfPath(\"/tmp/ais-tests/ais.config\")\n\n\tp.client.data = &http.Client{}\n\tp.client.control = &http.Client{}\n\tp.keepalive = newPalive(p, tracker, atomic.NewBool(true))\n\n\to := newBMDOwnerPrx(config)\n\to.put(newBucketMD())\n\tp.owner.bmd = o\n\n\te := newEtlMDOwnerPrx(config)\n\te.put(newEtlMD())\n\tp.owner.etl = e\n\n\tp.gmm = memsys.PageMM()\n\treturn p\n}", "func newInstances(pod *Pod, prov provider.DataCenter, cfg *config.Instances) (*instances, error) {\n\tlog.Debug(\"Initializing Instances\")\n\n\ti := &instances{\n\t\tResources: resource.NewResources(),\n\t\tpod: pod,\n\t\tinstances: map[string]resource.Instance{},\n\t}\n\n\t// The reference to the network resource.\n\tnet := pod.Cluster().Compute().DataCenter().Network()\n\n\t// The availability zones available to these instances.\n\tavailabilityZones := net.AvailabilityZones()\n\n\t// The subnet group associated with these instances.\n\tsubnetGroup := net.SubnetGroups().Find(pod.SubnetGroup())\n\tif subnetGroup == nil {\n\t\treturn nil, fmt.Errorf(\"Cannot find subnet group %s configured for pod %s\", pod.SubnetGroup(), pod.Name())\n\t}\n\n\t// The keypair to be used with these instances.\n\tkeypair := pod.Cluster().Compute().KeyPair()\n\n\tn := 0\n\tfor _, conf := range *cfg {\n\t\t// Ensure the instance is uniquely named.\n\t\tif i.Find(conf.Name()) != nil 
{\n\t\t\treturn nil, fmt.Errorf(\"Instance name %q must be unique but is used multiple times\", conf.Name())\n\t\t}\n\n\t\t// The availability zone for this instance. Chosing via round robin. Always starting at 0.\n\t\taz := availabilityZones[n%len(availabilityZones)]\n\n\t\t// Get the subnet associated with the AZ.\n\t\tsubnetName := pod.SubnetGroup() + \"-\" + az\n\t\tsubnet := subnetGroup.Find(subnetName)\n\t\tif subnet == nil {\n\t\t\treturn nil, fmt.Errorf(\"Cannot find subnet %s configured for instance %s\", subnetName, conf.Name())\n\t\t}\n\n\t\tinstance, err := newInstance(pod, subnet, keypair, prov, conf)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\ti.instances[instance.Name()] = instance\n\t\ti.Append(instance)\n\n\t\tn++\n\t}\n\treturn i, nil\n}", "func (b *BridgeNetworkDriver) Create(name string, subnet string) (*Network, error) {\n\t// 取到网段字符串中的网关ip地址和网络的ip段\n\tip, IPRange, _ := net.ParseCIDR(subnet)\n\tIPRange.IP = ip\n\n\tn := &Network{\n\t\tName: name,\n\t\tIPRange: IPRange,\n\t\tDriver: b.Name(),\n\t}\n\n\terr := b.initBridge(n)\n\treturn n, err\n}", "func (p *pool) AllocateBlock(ctx context.Context, nodeName, requestUID string) (*coilv2.AddressBlock, error) {\n\tp.mu.Lock()\n\tdefer p.mu.Unlock()\n\n\tnextIndex, ok := p.allocated.NextClear(0)\n\tif !ok {\n\t\tnextIndex = p.allocated.Len()\n\t}\n\n\tap := &coilv2.AddressPool{}\n\terr := p.client.Get(ctx, client.ObjectKey{Name: p.name}, ap)\n\tif err != nil {\n\t\tp.log.Error(err, \"failed to get AddressPool\")\n\t\treturn nil, err\n\t}\n\tif ap.DeletionTimestamp != nil {\n\t\tp.log.Info(\"unable to curve out a block because pool is under deletion\")\n\t\treturn nil, ErrNoBlock\n\t}\n\n\tvar currentIndex uint\n\tfor _, ss := range ap.Spec.Subnets {\n\t\tvar ones, bits int\n\t\tif ss.IPv4 != nil {\n\t\t\t_, n, _ := net.ParseCIDR(*ss.IPv4) // ss was validated\n\t\t\tones, bits = n.Mask.Size()\n\t\t} else {\n\t\t\t_, n, _ := net.ParseCIDR(*ss.IPv6) // ss was validated\n\t\t\tones, bits = 
n.Mask.Size()\n\t\t}\n\t\tsize := uint(1) << (bits - ones - int(ap.Spec.BlockSizeBits))\n\t\tif nextIndex >= (currentIndex + size) {\n\t\t\tcurrentIndex += size\n\t\t\tcontinue\n\t\t}\n\n\t\tipv4, ipv6 := ss.GetBlock(nextIndex-currentIndex, int(ap.Spec.BlockSizeBits))\n\n\t\tr := &coilv2.AddressBlock{}\n\t\tr.Name = fmt.Sprintf(\"%s-%d\", p.name, nextIndex)\n\t\tif err := controllerutil.SetControllerReference(ap, r, p.scheme); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tr.Labels = map[string]string{\n\t\t\tconstants.LabelPool: p.name,\n\t\t\tconstants.LabelNode: nodeName,\n\t\t\tconstants.LabelRequest: requestUID,\n\t\t}\n\t\tcontrollerutil.AddFinalizer(r, constants.FinCoil)\n\t\tr.Index = int32(nextIndex)\n\t\tif ipv4 != nil {\n\t\t\ts := ipv4.String()\n\t\t\tr.IPv4 = &s\n\t\t}\n\t\tif ipv6 != nil {\n\t\t\ts := ipv6.String()\n\t\t\tr.IPv6 = &s\n\t\t}\n\t\tif err := p.client.Create(ctx, r); err != nil {\n\t\t\tp.log.Error(err, \"failed to create AddressBlock\", \"index\", nextIndex, \"node\", nodeName)\n\t\t\treturn nil, err\n\t\t}\n\n\t\tp.log.Info(\"created AddressBlock\", \"index\", nextIndex, \"node\", nodeName)\n\t\tp.allocated.Set(nextIndex)\n\t\tp.allocatedBlocks.Inc()\n\t\treturn r, nil\n\t}\n\n\tp.log.Error(ErrNoBlock, \"no available blocks\")\n\treturn nil, ErrNoBlock\n}", "func TestAllocOnInit(t *testing.T) {\n\tinitDone := make(chan struct{})\n\tfixture := mkTestFixture([]*cilium_api_v2alpha1.CiliumLoadBalancerIPPool{\n\t\tmkPool(poolAUID, \"pool-a\", []string{\"10.0.10.0/24\"}),\n\t}, true, true, func() {\n\t\tclose(initDone)\n\t})\n\n\tpolicy := slim_core_v1.IPFamilyPolicySingleStack\n\tfixture.coreCS.Tracker().Add(\n\t\t&slim_core_v1.Service{\n\t\t\tObjectMeta: slim_meta_v1.ObjectMeta{\n\t\t\t\tName: \"service-a\",\n\t\t\t\tNamespace: \"default\",\n\t\t\t\tUID: serviceAUID,\n\t\t\t},\n\t\t\tSpec: slim_core_v1.ServiceSpec{\n\t\t\t\tType: slim_core_v1.ServiceTypeLoadBalancer,\n\t\t\t\tIPFamilyPolicy: &policy,\n\t\t\t\tIPFamilies: 
[]slim_core_v1.IPFamily{\n\t\t\t\t\tslim_core_v1.IPv4Protocol,\n\t\t\t\t},\n\t\t\t},\n\t\t\tStatus: slim_core_v1.ServiceStatus{\n\t\t\t\tLoadBalancer: slim_core_v1.LoadBalancerStatus{\n\t\t\t\t\tIngress: []slim_core_v1.LoadBalancerIngress{\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tIP: \"10.0.10.123\",\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t)\n\tfixture.coreCS.Tracker().Add(\n\t\t&slim_core_v1.Service{\n\t\t\tObjectMeta: slim_meta_v1.ObjectMeta{\n\t\t\t\tName: \"service-b\",\n\t\t\t\tNamespace: \"default\",\n\t\t\t\tUID: serviceBUID,\n\t\t\t},\n\t\t\tSpec: slim_core_v1.ServiceSpec{\n\t\t\t\tType: slim_core_v1.ServiceTypeLoadBalancer,\n\t\t\t\tLoadBalancerIP: \"10.0.10.124\",\n\t\t\t},\n\t\t\tStatus: slim_core_v1.ServiceStatus{\n\t\t\t\tLoadBalancer: slim_core_v1.LoadBalancerStatus{\n\t\t\t\t\tIngress: []slim_core_v1.LoadBalancerIngress{\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tIP: \"10.0.10.124\",\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t)\n\n\tawait := fixture.AwaitService(func(action k8s_testing.Action) bool {\n\t\tif action.GetResource() != servicesResource || action.GetVerb() != \"patch\" {\n\t\t\treturn false\n\t\t}\n\n\t\tt.Error(\"No service updates expected\")\n\n\t\treturn false\n\t}, 100*time.Millisecond)\n\n\tgo fixture.hive.Start(context.Background())\n\tdefer fixture.hive.Stop(context.Background())\n\n\t<-initDone\n\n\tawait.Block()\n\n\tif !fixture.lbIPAM.rangesStore.ranges[0].allocRange.Has(net.ParseIP(\"10.0.10.123\")) {\n\t\tt.Fatal(\"Expected the imported IP to be allocated\")\n\t}\n\n\tif !fixture.lbIPAM.rangesStore.ranges[0].allocRange.Has(net.ParseIP(\"10.0.10.124\")) {\n\t\tt.Fatal(\"Expected the imported IP to be allocated\")\n\t}\n}", "func (na *cnmNetworkAllocator) allocateVIP(vip *api.Endpoint_VirtualIP) error {\n\tvar opts map[string]string\n\tlocalNet := na.getNetwork(vip.NetworkID)\n\tif localNet == nil {\n\t\treturn errors.New(\"networkallocator: could not find local network state\")\n\t}\n\n\tif 
localNet.isNodeLocal {\n\t\treturn nil\n\t}\n\n\t// If this IP is already allocated in memory we don't need to\n\t// do anything.\n\tif _, ok := localNet.endpoints[vip.Addr]; ok {\n\t\treturn nil\n\t}\n\n\tipam, _, _, err := na.resolveIPAM(localNet.nw)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"failed to resolve IPAM while allocating\")\n\t}\n\n\tvar addr net.IP\n\tif vip.Addr != \"\" {\n\t\tvar err error\n\n\t\taddr, _, err = net.ParseCIDR(vip.Addr)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\tif localNet.nw.IPAM != nil && localNet.nw.IPAM.Driver != nil {\n\t\t// set ipam allocation method to serial\n\t\topts = setIPAMSerialAlloc(localNet.nw.IPAM.Driver.Options)\n\t}\n\n\tfor _, poolID := range localNet.pools {\n\t\tip, _, err := ipam.RequestAddress(poolID, addr, opts)\n\t\tif err != nil && err != ipamapi.ErrNoAvailableIPs && err != ipamapi.ErrIPOutOfRange {\n\t\t\treturn errors.Wrap(err, \"could not allocate VIP from IPAM\")\n\t\t}\n\n\t\t// If we got an address then we are done.\n\t\tif err == nil {\n\t\t\tipStr := ip.String()\n\t\t\tlocalNet.endpoints[ipStr] = poolID\n\t\t\tvip.Addr = ipStr\n\t\t\treturn nil\n\t\t}\n\t}\n\n\treturn errors.New(\"could not find an available IP while allocating VIP\")\n}", "func (b *BaseImpl) New(n Base) Base {\n\treturn n\n}", "func lookupOrAllocateIPv4(\n\tctx *zedrouterContext,\n\tstatus *types.NetworkInstanceStatus,\n\tmac net.HardwareAddr) (string, error) {\n\n\tlog.Infof(\"lookupOrAllocateIPv4(%s-%s): mac:%s\\n\",\n\t\tstatus.DisplayName, status.Key(), mac.String())\n\t// Lookup to see if it exists\n\tif ip, ok := status.IPAssignments[mac.String()]; ok {\n\t\tlog.Infof(\"found Ip addr ( %s) for mac(%s)\\n\",\n\t\t\tip.String(), mac.String())\n\t\treturn ip.String(), nil\n\t}\n\n\tlog.Infof(\"bridgeName %s Subnet %v range %v-%v\\n\",\n\t\tstatus.BridgeName, status.Subnet,\n\t\tstatus.DhcpRange.Start, status.DhcpRange.End)\n\n\tif status.DhcpRange.Start == nil {\n\t\tif status.Type == 
types.NetworkInstanceTypeSwitch {\n\t\t\tlog.Infof(\"%s-%s switch means no bridgeIpAddr\",\n\t\t\t\tstatus.DisplayName, status.Key())\n\t\t\treturn \"\", nil\n\t\t}\n\t\tlog.Fatalf(\"%s-%s: nil DhcpRange.Start\",\n\t\t\tstatus.DisplayName, status.Key())\n\t}\n\n\t// Starting guess based on number allocated\n\tallocated := uint(len(status.IPAssignments))\n\ta := addToIP(status.DhcpRange.Start, allocated)\n\tfor status.DhcpRange.End == nil ||\n\t\tbytes.Compare(a, status.DhcpRange.End) < 0 {\n\n\t\tlog.Infof(\"lookupOrAllocateIPv4(%s) testing %s\\n\",\n\t\t\tmac.String(), a.String())\n\t\tif status.IsIpAssigned(a) {\n\t\t\ta = addToIP(a, 1)\n\t\t\tcontinue\n\t\t}\n\t\tlog.Infof(\"lookupOrAllocateIPv4(%s) found free %s\\n\",\n\t\t\tmac.String(), a.String())\n\t\tstatus.IPAssignments[mac.String()] = a\n\t\t// Publish the allocation\n\t\tpublishNetworkInstanceStatus(ctx, status)\n\t\treturn a.String(), nil\n\t}\n\terrStr := fmt.Sprintf(\"lookupOrAllocateIPv4(%s) no free address in DhcpRange\",\n\t\tstatus.Key())\n\treturn \"\", errors.New(errStr)\n}", "func createSingleHostNetworking(ctx context.Context, svc iaas.Service, singleHostRequest abstract.HostRequest) (_ resources.Subnet, _ func() fail.Error, ferr fail.Error) {\n\t// Build network name\n\tcfg, xerr := svc.GetConfigurationOptions(ctx)\n\tif xerr != nil {\n\t\treturn nil, nil, xerr\n\t}\n\n\tbucketName := cfg.GetString(\"MetadataBucketName\")\n\tif bucketName == \"\" {\n\t\treturn nil, nil, fail.InconsistentError(\"missing service configuration option 'MetadataBucketName'\")\n\t}\n\n\t// Trim and TrimPrefix don't do the same thing\n\tnetworkName := fmt.Sprintf(\"sfnet-%s\", strings.TrimPrefix(bucketName, objectstorage.BucketNamePrefix+\"-\"))\n\n\t// Create network if needed\n\tnetworkInstance, xerr := LoadNetwork(ctx, svc, networkName)\n\tif xerr != nil {\n\t\tswitch xerr.(type) {\n\t\tcase *fail.ErrNotFound:\n\t\t\tnetworkInstance, xerr = NewNetwork(svc)\n\t\t\tif xerr != nil {\n\t\t\t\treturn nil, nil, 
xerr\n\t\t\t}\n\n\t\t\trequest := abstract.NetworkRequest{\n\t\t\t\tName: networkName,\n\t\t\t\tCIDR: abstract.SingleHostNetworkCIDR,\n\t\t\t\tKeepOnFailure: true,\n\t\t\t}\n\t\t\txerr = networkInstance.Create(ctx, &request, nil)\n\t\t\tif xerr != nil {\n\t\t\t\t// handle a particular case of *fail.ErrDuplicate...\n\t\t\t\tswitch cerr := xerr.(type) {\n\t\t\t\tcase *fail.ErrDuplicate:\n\t\t\t\t\tvalue, found := cerr.Annotation(\"managed\")\n\t\t\t\t\tif found && value != nil {\n\t\t\t\t\t\tmanaged, ok := value.(bool)\n\t\t\t\t\t\tif ok && !managed {\n\t\t\t\t\t\t\treturn nil, nil, xerr\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\tdefault:\n\t\t\t\t}\n\t\t\t\t// ... otherwise, try to get Network that is created by another goroutine\n\t\t\t\tswitch xerr.(type) {\n\t\t\t\tcase *fail.ErrDuplicate, *fail.ErrNotAvailable:\n\t\t\t\t\t// If these errors occurred, another goroutine is running to create the same Network, so wait for it\n\t\t\t\t\tnetworkInstance, xerr = LoadNetwork(ctx, svc, networkName)\n\t\t\t\t\tif xerr != nil {\n\t\t\t\t\t\treturn nil, nil, xerr\n\t\t\t\t\t}\n\t\t\t\tdefault:\n\t\t\t\t\treturn nil, nil, xerr\n\t\t\t\t}\n\t\t\t}\n\t\tdefault:\n\t\t\treturn nil, nil, xerr\n\t\t}\n\t}\n\n\tnid, err := networkInstance.GetID()\n\tif err != nil {\n\t\treturn nil, nil, fail.ConvertError(err)\n\t}\n\n\t// Check if Subnet exists\n\tvar (\n\t\tsubnetRequest abstract.SubnetRequest\n\t\tcidrIndex uint\n\t)\n\tsubnetInstance, xerr := LoadSubnet(ctx, svc, nid, singleHostRequest.ResourceName)\n\tif xerr != nil {\n\t\tswitch xerr.(type) {\n\t\tcase *fail.ErrNotFound:\n\t\t\tsubnetInstance, xerr = NewSubnet(svc)\n\t\t\tif xerr != nil {\n\t\t\t\treturn nil, nil, xerr\n\t\t\t}\n\n\t\t\tvar (\n\t\t\t\tsubnetCIDR string\n\t\t\t)\n\n\t\t\tsubnetCIDR, cidrIndex, xerr = ReserveCIDRForSingleHost(ctx, networkInstance)\n\t\t\txerr = debug.InjectPlannedFail(xerr)\n\t\t\tif xerr != nil {\n\t\t\t\treturn nil, nil, xerr\n\t\t\t}\n\n\t\t\tvar dnsServers []string\n\t\t\topts, xerr := 
svc.GetConfigurationOptions(ctx)\n\t\t\txerr = debug.InjectPlannedFail(xerr)\n\t\t\tif xerr != nil {\n\t\t\t\tswitch xerr.(type) {\n\t\t\t\tcase *fail.ErrNotFound:\n\t\t\t\tdefault:\n\t\t\t\t\treturn nil, nil, xerr\n\t\t\t\t}\n\t\t\t} else if servers := strings.TrimSpace(opts.GetString(\"DNSServers\")); servers != \"\" {\n\t\t\t\tdnsServers = strings.Split(servers, \",\")\n\t\t\t}\n\n\t\t\tsubnetRequest.Name = singleHostRequest.ResourceName\n\t\t\tsubnetRequest.NetworkID, err = networkInstance.GetID()\n\t\t\tif err != nil {\n\t\t\t\treturn nil, nil, fail.ConvertError(err)\n\t\t\t}\n\t\t\tsubnetRequest.IPVersion = ipversion.IPv4\n\t\t\tsubnetRequest.CIDR = subnetCIDR\n\t\t\tsubnetRequest.DNSServers = dnsServers\n\t\t\tsubnetRequest.HA = false\n\n\t\t\txerr = subnetInstance.CreateSubnetWithoutGateway(ctx, subnetRequest)\n\t\t\txerr = debug.InjectPlannedFail(xerr)\n\t\t\tif xerr != nil {\n\t\t\t\treturn nil, nil, xerr\n\t\t\t}\n\n\t\t\tdefer func() {\n\t\t\t\tferr = debug.InjectPlannedFail(ferr)\n\t\t\t\tif ferr != nil && !singleHostRequest.KeepOnFailure {\n\t\t\t\t\tderr := subnetInstance.Delete(cleanupContextFrom(ctx))\n\t\t\t\t\tif derr != nil {\n\t\t\t\t\t\t_ = ferr.AddConsequence(\n\t\t\t\t\t\t\tfail.Wrap(\n\t\t\t\t\t\t\t\tderr, \"cleaning up on failure, failed to delete Subnet '%s'\",\n\t\t\t\t\t\t\t\tsingleHostRequest.ResourceName,\n\t\t\t\t\t\t\t),\n\t\t\t\t\t\t)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}()\n\n\t\t\t// Sets the CIDR index in instance metadata\n\t\t\txerr = subnetInstance.Alter(ctx, func(clonable data.Clonable, _ *serialize.JSONProperties) fail.Error {\n\t\t\t\tas, ok := clonable.(*abstract.Subnet)\n\t\t\t\tif !ok {\n\t\t\t\t\treturn fail.InconsistentError(\n\t\t\t\t\t\t\"'*abstract.Subnet' expected, '%s' provided\", reflect.TypeOf(clonable).String(),\n\t\t\t\t\t)\n\t\t\t\t}\n\n\t\t\t\tas.SingleHostCIDRIndex = cidrIndex\n\t\t\t\treturn nil\n\t\t\t})\n\t\t\tif xerr != nil {\n\t\t\t\treturn nil, nil, xerr\n\t\t\t}\n\t\tdefault:\n\t\t\treturn nil, nil, 
xerr\n\t\t}\n\t} else {\n\t\treturn nil, nil, fail.DuplicateError(\"there is already a Subnet named '%s'\", singleHostRequest.ResourceName)\n\t}\n\n\tundoFunc := func() fail.Error {\n\t\tvar errs []error\n\t\tif !singleHostRequest.KeepOnFailure {\n\t\t\tderr := subnetInstance.Delete(cleanupContextFrom(ctx))\n\t\t\tif derr != nil {\n\t\t\t\terrs = append(\n\t\t\t\t\terrs, fail.Wrap(\n\t\t\t\t\t\tderr, \"cleaning up on failure, failed to delete Subnet '%s'\", singleHostRequest.ResourceName,\n\t\t\t\t\t),\n\t\t\t\t)\n\t\t\t}\n\t\t\tderr = FreeCIDRForSingleHost(cleanupContextFrom(ctx), networkInstance, cidrIndex)\n\t\t\tif derr != nil {\n\t\t\t\terrs = append(\n\t\t\t\t\terrs, fail.Wrap(\n\t\t\t\t\t\tderr, \"cleaning up on failure, failed to free CIDR slot in Network '%s'\",\n\t\t\t\t\t\tnetworkInstance.GetName(),\n\t\t\t\t\t),\n\t\t\t\t)\n\t\t\t}\n\t\t}\n\t\tif len(errs) > 0 {\n\t\t\treturn fail.NewErrorList(errs)\n\t\t}\n\t\treturn nil\n\t}\n\n\treturn subnetInstance, undoFunc, nil\n}", "func newSubInstance(parent *BaseInstance, name string) *BaseInstance {\n\tbi := &BaseInstance{\n\t\tname: name,\n\t\tinput: parent.input,\n\t\tinput2: parent.input2,\n\t\trules: newRules(),\n\t\tenabled: false,\n\t\tinstance: parent.instance,\n\t}\n\treturn bi\n}", "func (s *Service) CreateOrUpdate(ctx context.Context, spec azure.Spec) error {\n\tnicSpec, ok := spec.(*Spec)\n\tif !ok {\n\t\treturn errors.New(\"invalid network interface specification\")\n\t}\n\n\tnicConfig := &network.InterfaceIPConfigurationPropertiesFormat{}\n\n\tsubnetInterface, err := subnets.NewService(s.Scope).Get(ctx, &subnets.Spec{Name: nicSpec.SubnetName, VnetName: nicSpec.VnetName})\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tsubnet, ok := subnetInterface.(network.Subnet)\n\tif !ok {\n\t\treturn errors.New(\"subnet get returned invalid network interface\")\n\t}\n\n\tnicConfig.Subnet = &network.Subnet{ID: subnet.ID}\n\tnicConfig.PrivateIPAllocationMethod = network.Dynamic\n\tif nicSpec.StaticIPAddress != 
\"\" {\n\t\tnicConfig.PrivateIPAllocationMethod = network.Static\n\t\tnicConfig.PrivateIPAddress = to.StringPtr(nicSpec.StaticIPAddress)\n\t}\n\n\tbackendAddressPools := []network.BackendAddressPool{}\n\tif nicSpec.PublicLoadBalancerName != \"\" {\n\t\tlbInterface, lberr := publicloadbalancers.NewService(s.Scope).Get(ctx, &publicloadbalancers.Spec{Name: nicSpec.PublicLoadBalancerName})\n\t\tif lberr != nil {\n\t\t\treturn lberr\n\t\t}\n\n\t\tlb, ok := lbInterface.(network.LoadBalancer)\n\t\tif !ok {\n\t\t\treturn errors.New(\"public load balancer get returned invalid network interface\")\n\t\t}\n\n\t\tbackendAddressPools = append(backendAddressPools,\n\t\t\tnetwork.BackendAddressPool{\n\t\t\t\tID: (*lb.BackendAddressPools)[0].ID,\n\t\t\t})\n\t\tnicConfig.LoadBalancerInboundNatRules = &[]network.InboundNatRule{\n\t\t\t{\n\t\t\t\tID: (*lb.InboundNatRules)[nicSpec.NatRule].ID,\n\t\t\t},\n\t\t}\n\t}\n\tif nicSpec.InternalLoadBalancerName != \"\" {\n\t\tinternallbInterface, ilberr := internalloadbalancers.NewService(s.Scope).Get(ctx, &internalloadbalancers.Spec{Name: nicSpec.InternalLoadBalancerName})\n\t\tif ilberr != nil {\n\t\t\treturn ilberr\n\t\t}\n\n\t\tinternallb, ok := internallbInterface.(network.LoadBalancer)\n\t\tif !ok {\n\t\t\treturn errors.New(\"internal load balancer get returned invalid network interface\")\n\t\t}\n\t\tbackendAddressPools = append(backendAddressPools,\n\t\t\tnetwork.BackendAddressPool{\n\t\t\t\tID: (*internallb.BackendAddressPools)[0].ID,\n\t\t\t})\n\t}\n\tnicConfig.LoadBalancerBackendAddressPools = &backendAddressPools\n\n\tf, err := s.Client.CreateOrUpdate(ctx,\n\t\ts.Scope.ClusterConfig.ResourceGroup,\n\t\tnicSpec.Name,\n\t\tnetwork.Interface{\n\t\t\tLocation: to.StringPtr(s.Scope.ClusterConfig.Location),\n\t\t\tInterfacePropertiesFormat: &network.InterfacePropertiesFormat{\n\t\t\t\tIPConfigurations: &[]network.InterfaceIPConfiguration{\n\t\t\t\t\t{\n\t\t\t\t\t\tName: 
to.StringPtr(\"pipConfig\"),\n\t\t\t\t\t\tInterfaceIPConfigurationPropertiesFormat: nicConfig,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t})\n\n\tif err != nil {\n\t\treturn errors.Wrapf(err, \"failed to create network interface %s in resource group %s\", nicSpec.Name, s.Scope.ClusterConfig.ResourceGroup)\n\t}\n\n\terr = f.WaitForCompletionRef(ctx, s.Client.Client)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"cannot create, future response\")\n\t}\n\n\t_, err = f.Result(s.Client)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"result error\")\n\t}\n\tklog.V(2).Infof(\"successfully created network interface %s\", nicSpec.Name)\n\treturn err\n}", "func (s *Pool) ReserveForInstance(insId uint64) (*GroupInstance, error) {\n\tgins, exists := s.getActive(insId)\n\tif !exists {\n\t\treturn nil, fmt.Errorf(\"instance %d not found\", insId)\n\t}\n\n\tif IN_DEPLOYMENT_MIGRATION {\n\t\treturn gins, nil\n\t} else {\n\t\treturn s.ReserveForGroup(gins.group, gins.idx)\n\t}\n}", "func (d *V8interceptor) Base() *BaseRefCounted {\n\treturn (*BaseRefCounted)(&d.base)\n}", "func TestReallocOnInit(t *testing.T) {\n\tfixture := mkTestFixture([]*cilium_api_v2alpha1.CiliumLoadBalancerIPPool{\n\t\tmkPool(poolAUID, \"pool-a\", []string{\"10.0.10.0/24\"}),\n\t}, true, true, nil)\n\n\t// Initially request only an IPv4\n\tpolicy := slim_core_v1.IPFamilyPolicySingleStack\n\tfixture.coreCS.Tracker().Add(\n\t\t&slim_core_v1.Service{\n\t\t\tObjectMeta: slim_meta_v1.ObjectMeta{\n\t\t\t\tName: \"service-a\",\n\t\t\t\tNamespace: \"default\",\n\t\t\t\tUID: serviceAUID,\n\t\t\t},\n\t\t\tSpec: slim_core_v1.ServiceSpec{\n\t\t\t\tType: slim_core_v1.ServiceTypeLoadBalancer,\n\t\t\t\tIPFamilyPolicy: &policy,\n\t\t\t\tIPFamilies: []slim_core_v1.IPFamily{\n\t\t\t\t\tslim_core_v1.IPv4Protocol,\n\t\t\t\t},\n\t\t\t},\n\t\t\tStatus: slim_core_v1.ServiceStatus{\n\t\t\t\tLoadBalancer: slim_core_v1.LoadBalancerStatus{\n\t\t\t\t\tIngress: []slim_core_v1.LoadBalancerIngress{\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tIP: 
\"192.168.1.12\",\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t)\n\n\tawait := fixture.AwaitService(func(action k8s_testing.Action) bool {\n\t\tif action.GetResource() != servicesResource || action.GetVerb() != \"patch\" {\n\t\t\treturn false\n\t\t}\n\n\t\tsvc := fixture.PatchedSvc(action)\n\n\t\tif len(svc.Status.LoadBalancer.Ingress) != 1 {\n\t\t\tt.Error(\"Expected service to receive exactly one ingress IP\")\n\t\t\treturn true\n\t\t}\n\n\t\tif net.ParseIP(svc.Status.LoadBalancer.Ingress[0].IP).To4() == nil {\n\t\t\tt.Error(\"Expected service to receive a IPv4 address\")\n\t\t\treturn true\n\t\t}\n\n\t\tif svc.Status.LoadBalancer.Ingress[0].IP == \"192.168.1.12\" {\n\t\t\tt.Error(\"Expected ingress IP to not be the initial, bad IP\")\n\t\t\treturn true\n\t\t}\n\n\t\tif len(svc.Status.Conditions) != 1 {\n\t\t\tt.Error(\"Expected service to receive exactly one condition\")\n\t\t\treturn true\n\t\t}\n\n\t\tif svc.Status.Conditions[0].Type != ciliumSvcRequestSatisfiedCondition {\n\t\t\tt.Error(\"Expected second condition to be svc-satisfied:true\")\n\t\t\treturn true\n\t\t}\n\n\t\tif svc.Status.Conditions[0].Status != slim_meta_v1.ConditionTrue {\n\t\t\tt.Error(\"Expected second condition to be svc-satisfied:true\")\n\t\t\treturn true\n\t\t}\n\n\t\treturn true\n\t}, time.Second)\n\n\tgo fixture.hive.Start(context.Background())\n\tdefer fixture.hive.Stop(context.Background())\n\n\tif await.Block() {\n\t\tt.Fatal(\"Expected service to be updated\")\n\t}\n}", "func NewIdentityProviderBase()(*IdentityProviderBase) {\n m := &IdentityProviderBase{\n Entity: *NewEntity(),\n }\n return m\n}", "func initPool() {\n\tpool = &sync.Pool{\n\t\tNew: func() interface{} {\n\t\t\tfmt.Println(\"Returning new A\")\n\t\t\treturn new(A)\n\t\t},\n\t}\n}", "func (hd *Datapath) CreateNatPool(np *netproto.NatPool, vrf *netproto.Vrf) error {\n\t// This will ensure that only one datapath config will be active at a time. 
This is a temporary restriction\n\t// to ensure that HAL will use a single config thread , this will be removed prior to FCS to allow parallel configs to go through.\n\t// TODO Remove Global Locking\n\thd.Lock()\n\tdefer hd.Unlock()\n\tvrfKey := &halproto.VrfKeyHandle{\n\t\tKeyOrHandle: &halproto.VrfKeyHandle_VrfId{\n\t\t\tVrfId: vrf.Status.VrfID,\n\t\t},\n\t}\n\n\tipRange := strings.Split(np.Spec.IPRange, \"-\")\n\tif len(ipRange) != 2 {\n\t\treturn fmt.Errorf(\"could not parse IP Range from the NAT Pool IPRange. {%v}\", np.Spec.IPRange)\n\t}\n\n\tstartIP := net.ParseIP(strings.TrimSpace(ipRange[0]))\n\tif len(startIP) == 0 {\n\t\treturn fmt.Errorf(\"could not parse IP from {%v}\", startIP)\n\t}\n\tendIP := net.ParseIP(strings.TrimSpace(ipRange[1]))\n\tif len(endIP) == 0 {\n\t\treturn fmt.Errorf(\"could not parse IP from {%v}\", endIP)\n\t}\n\n\tlowIP := halproto.IPAddress{\n\t\tIpAf: halproto.IPAddressFamily_IP_AF_INET,\n\t\tV4OrV6: &halproto.IPAddress_V4Addr{\n\t\t\tV4Addr: ipv4Touint32(startIP),\n\t\t},\n\t}\n\n\thighIP := halproto.IPAddress{\n\t\tIpAf: halproto.IPAddressFamily_IP_AF_INET,\n\t\tV4OrV6: &halproto.IPAddress_V4Addr{\n\t\t\tV4Addr: ipv4Touint32(endIP),\n\t\t},\n\t}\n\n\taddrRange := &halproto.Address_Range{\n\t\tRange: &halproto.AddressRange{\n\t\t\tRange: &halproto.AddressRange_Ipv4Range{\n\t\t\t\tIpv4Range: &halproto.IPRange{\n\t\t\t\t\tLowIpaddr: &lowIP,\n\t\t\t\t\tHighIpaddr: &highIP,\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\n\tnatPoolReqMsg := &halproto.NatPoolRequestMsg{\n\t\tRequest: []*halproto.NatPoolSpec{\n\t\t\t{\n\t\t\t\tKeyOrHandle: &halproto.NatPoolKeyHandle{\n\t\t\t\t\tKeyOrHandle: &halproto.NatPoolKeyHandle_PoolKey{\n\t\t\t\t\t\tPoolKey: &halproto.NatPoolKey{\n\t\t\t\t\t\t\tVrfKh: vrfKey,\n\t\t\t\t\t\t\tPoolId: np.Status.NatPoolID,\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tAddress: []*halproto.Address{\n\t\t\t\t\t{\n\t\t\t\t\t\tAddress: addrRange,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\n\tif hd.Kind == \"hal\" 
{\n\t\tresp, err := hd.Hal.Natclient.NatPoolCreate(context.Background(), natPoolReqMsg)\n\t\tif err != nil {\n\t\t\tlog.Errorf(\"Error creating nat pool. Err: %v\", err)\n\t\t\treturn err\n\t\t}\n\t\tif !(resp.Response[0].ApiStatus == halproto.ApiStatus_API_STATUS_OK || resp.Response[0].ApiStatus == halproto.ApiStatus_API_STATUS_EXISTS_ALREADY) {\n\t\t\tlog.Errorf(\"HAL returned non OK status. %v\", resp.Response[0].ApiStatus.String())\n\t\t\treturn fmt.Errorf(\"HAL returned non OK status. %v\", resp.Response[0].ApiStatus.String())\n\t\t}\n\t} else {\n\t\t_, err := hd.Hal.Natclient.NatPoolCreate(context.Background(), natPoolReqMsg)\n\t\tif err != nil {\n\t\t\tlog.Errorf(\"Error creating nat pool. Err: %v\", err)\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}", "func (v *Global) tryApplyBase(base *Global) bool {\n\tif !v.addressBase.IsEmpty() {\n\t\treturn false\n\t}\n\n\tif !base.IsSelfScope() {\n\t\tswitch base.GetScope() {\n\t\tcase LocalDomainMember, GlobalDomainMember:\n\t\t\tbreak\n\t\tdefault:\n\t\t\treturn false\n\t\t}\n\t}\n\tv.addressBase = base.addressLocal\n\treturn true\n}", "func newCache(size int, withECS, optimistic bool) (c *cache) {\n\tc = &cache{\n\t\titemsLock: &sync.RWMutex{},\n\t\titemsWithSubnetLock: &sync.RWMutex{},\n\t\titems: createCache(size),\n\t\toptimistic: optimistic,\n\t}\n\n\tif withECS {\n\t\tc.itemsWithSubnet = createCache(size)\n\t}\n\n\treturn c\n}", "func newPeerBase(origCfg *Config, inbound bool) *Peer {\n\t// Default to the max supported protocol version if not specified by the\n\t// caller.\n\tcfg := *origCfg // Copy to avoid mutating caller.\n\tif cfg.ProtocolVersion == 0 {\n\t\tcfg.ProtocolVersion = MaxProtocolVersion\n\t}\n\n\t// Set the chain parameters to testnet if the caller did not specify any.\n\tif cfg.ChainParams == nil {\n\t\tcfg.ChainParams = &chaincfg.TestNet3Params\n\t}\n\n\t// Set the trickle interval if a non-positive value is specified.\n\tif cfg.TrickleInterval <= 0 {\n\t\tcfg.TrickleInterval = 
DefaultTrickleInterval\n\t}\n\n\tp := Peer{\n\t\tinbound: inbound,\n\t\twireEncoding: wire.BaseEncoding,\n\t\tknownInventory: lru.NewCache(maxKnownInventory),\n\t\tstallControl: make(chan stallControlMsg, 1), // nonblocking sync\n\t\toutputQueue: make(chan outMsg, outputBufferSize),\n\t\tsendQueue: make(chan outMsg, 1), // nonblocking sync\n\t\tsendDoneQueue: make(chan struct{}, 1), // nonblocking sync\n\t\toutputInvChan: make(chan *wire.InvVect, outputBufferSize),\n\t\tinQuit: make(chan struct{}),\n\t\tqueueQuit: make(chan struct{}),\n\t\toutQuit: make(chan struct{}),\n\t\tquit: make(chan struct{}),\n\t\tcfg: cfg, // Copy so caller can't mutate.\n\t\tservices: cfg.Services,\n\t\tprotocolVersion: cfg.ProtocolVersion,\n\t}\n\treturn &p\n}", "func (p *connPool) new() (*conn, error) {\n\tif p.rl.Limit() {\n\t\terr := fmt.Errorf(\n\t\t\t\"redis: you open connections too fast (last_error=%q)\",\n\t\t\tp.loadLastErr(),\n\t\t)\n\t\treturn nil, err\n\t}\n\n\tcn, err := p.dialer()\n\tif err != nil {\n\t\tp.storeLastErr(err.Error())\n\t\treturn nil, err\n\t}\n\n\treturn cn, nil\n}", "func (s *Service) CreateOrUpdate(ctx context.Context, spec azure.Spec) error {\n\tinternalLBSpec, ok := spec.(*Spec)\n\tif !ok {\n\t\treturn errors.New(\"invalid internal load balancer specification\")\n\t}\n\tklog.V(2).Infof(\"creating internal load balancer %s\", internalLBSpec.Name)\n\tprobeName := \"tcpHTTPSProbe\"\n\tfrontEndIPConfigName := \"controlplane-internal-lbFrontEnd\"\n\tbackEndAddressPoolName := \"controlplane-internal-backEndPool\"\n\tidPrefix := fmt.Sprintf(\"/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Network/loadBalancers\", s.Scope.SubscriptionID, s.Scope.ClusterConfig.ResourceGroup)\n\tlbName := internalLBSpec.Name\n\n\tklog.V(2).Infof(\"getting subnet %s\", internalLBSpec.SubnetName)\n\tsubnetInterface, err := subnets.NewService(s.Scope).Get(ctx, &subnets.Spec{Name: internalLBSpec.SubnetName, VnetName: internalLBSpec.VnetName})\n\tif err != nil {\n\t\treturn 
err\n\t}\n\n\tsubnet, ok := subnetInterface.(network.Subnet)\n\tif !ok {\n\t\treturn errors.New(\"subnet Get returned invalid interface\")\n\t}\n\tklog.V(2).Infof(\"successfully got subnet %s\", internalLBSpec.SubnetName)\n\n\tfuture, err := s.Client.CreateOrUpdate(ctx,\n\t\ts.Scope.ClusterConfig.ResourceGroup,\n\t\tlbName,\n\t\tnetwork.LoadBalancer{\n\t\t\tSku: &network.LoadBalancerSku{Name: network.LoadBalancerSkuNameStandard},\n\t\t\tLocation: to.StringPtr(s.Scope.ClusterConfig.Location),\n\t\t\tLoadBalancerPropertiesFormat: &network.LoadBalancerPropertiesFormat{\n\t\t\t\tFrontendIPConfigurations: &[]network.FrontendIPConfiguration{\n\t\t\t\t\t{\n\t\t\t\t\t\tName: &frontEndIPConfigName,\n\t\t\t\t\t\tFrontendIPConfigurationPropertiesFormat: &network.FrontendIPConfigurationPropertiesFormat{\n\t\t\t\t\t\t\tPrivateIPAllocationMethod: network.Static,\n\t\t\t\t\t\t\tSubnet: &subnet,\n\t\t\t\t\t\t\tPrivateIPAddress: to.StringPtr(internalLBSpec.IPAddress),\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tBackendAddressPools: &[]network.BackendAddressPool{\n\t\t\t\t\t{\n\t\t\t\t\t\tName: &backEndAddressPoolName,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tProbes: &[]network.Probe{\n\t\t\t\t\t{\n\t\t\t\t\t\tName: &probeName,\n\t\t\t\t\t\tProbePropertiesFormat: &network.ProbePropertiesFormat{\n\t\t\t\t\t\t\tProtocol: network.ProbeProtocolTCP,\n\t\t\t\t\t\t\tPort: to.Int32Ptr(6443),\n\t\t\t\t\t\t\tIntervalInSeconds: to.Int32Ptr(15),\n\t\t\t\t\t\t\tNumberOfProbes: to.Int32Ptr(4),\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tLoadBalancingRules: &[]network.LoadBalancingRule{\n\t\t\t\t\t{\n\t\t\t\t\t\tName: to.StringPtr(\"LBRuleHTTPS\"),\n\t\t\t\t\t\tLoadBalancingRulePropertiesFormat: &network.LoadBalancingRulePropertiesFormat{\n\t\t\t\t\t\t\tProtocol: network.TransportProtocolTCP,\n\t\t\t\t\t\t\tFrontendPort: to.Int32Ptr(6443),\n\t\t\t\t\t\t\tBackendPort: to.Int32Ptr(6443),\n\t\t\t\t\t\t\tIdleTimeoutInMinutes: to.Int32Ptr(4),\n\t\t\t\t\t\t\tEnableFloatingIP: 
to.BoolPtr(false),\n\t\t\t\t\t\t\tLoadDistribution: network.LoadDistributionDefault,\n\t\t\t\t\t\t\tFrontendIPConfiguration: &network.SubResource{\n\t\t\t\t\t\t\t\tID: to.StringPtr(fmt.Sprintf(\"/%s/%s/frontendIPConfigurations/%s\", idPrefix, lbName, frontEndIPConfigName)),\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\tBackendAddressPool: &network.SubResource{\n\t\t\t\t\t\t\t\tID: to.StringPtr(fmt.Sprintf(\"/%s/%s/backendAddressPools/%s\", idPrefix, lbName, backEndAddressPoolName)),\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\tProbe: &network.SubResource{\n\t\t\t\t\t\t\t\tID: to.StringPtr(fmt.Sprintf(\"/%s/%s/probes/%s\", idPrefix, lbName, probeName)),\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t})\n\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"cannot create load balancer\")\n\t}\n\n\terr = future.WaitForCompletionRef(ctx, s.Client.Client)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"cannot get internal load balancer create or update future response\")\n\t}\n\n\t_, err = future.Result(s.Client)\n\tklog.V(2).Infof(\"successfully created internal load balancer %s\", internalLBSpec.Name)\n\treturn err\n}", "func newSubInstance(parent *BaseInstance, name string) *BaseInstance {\n\tsi := parent.module.subinstance\n\tif si == nil {\n\t\tsi = make(map[string][]*BaseInstance)\n\t\tparent.module.subinstance = si\n\t}\n\n\tbi := &BaseInstance{\n\t\tname: name,\n\t\tinput: parent.input,\n\t\tinput2: parent.input2,\n\t\trules: newRules(),\n\t\tenabled: false,\n\t\tsubinstance: true,\n\t\tinstance: parent.instance,\n\t\tmodule: parent.module,\n\t}\n\n\tsi[parent.name] = append(si[parent.name], bi)\n\treturn bi\n}", "func (d *ResourceHandler) Base() *BaseRefCounted {\n\treturn (*BaseRefCounted)(&d.base)\n}", "func New(ringWeight int) LoadBalancer {\n\t// TODO: Implement this!\n\tnewLB := new(loadBalancer)\n\tnewLB.sortedNames = make([]MMENode, 0)\n\tnewLB.weight = ringWeight\n\tnewLB.hashRing = NewRing()\n\tif 7 == 2 {\n\t\tfmt.Println(ringWeight)\n\t}\n\treturn 
newLB\n}", "func NewPooledWrapper(ctx context.Context, base wrapping.Wrapper) (*PooledWrapper, error) {\n\tbaseKeyId, err := base.KeyId(ctx)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// For safety, no real reason this should happen\n\tif baseKeyId == BaseEncryptor {\n\t\treturn nil, fmt.Errorf(\"base wrapper cannot have a key ID of built-in base encryptor\")\n\t}\n\n\tret := &PooledWrapper{\n\t\twrappers: make(map[string]wrapping.Wrapper, 3),\n\t}\n\tret.wrappers[BaseEncryptor] = base\n\tret.wrappers[baseKeyId] = base\n\treturn ret, nil\n}", "func newCockroachDBFromConfig(ctx context.Context, instanceConfig *config.InstanceConfig) (*pgxpool.Pool, error) {\n\tsingletonPoolMutex.Lock()\n\tdefer singletonPoolMutex.Unlock()\n\n\tif singletonPool != nil {\n\t\treturn singletonPool, nil\n\t}\n\n\tcfg, err := pgxpool.ParseConfig(instanceConfig.DataStoreConfig.ConnectionString)\n\tif err != nil {\n\t\treturn nil, skerr.Wrapf(err, \"Failed to parse database config: %q\", instanceConfig.DataStoreConfig.ConnectionString)\n\t}\n\n\tsklog.Infof(\"%#v\", *cfg)\n\tcfg.MaxConns = maxPoolConnections\n\tcfg.ConnConfig.Logger = pgxLogAdaptor{}\n\tsingletonPool, err = pgxpool.ConnectConfig(ctx, cfg)\n\tif err != nil {\n\t\treturn nil, skerr.Wrap(err)\n\t}\n\n\t// Confirm the database has the right schema.\n\texpectedSchema, err := expectedschema.Load()\n\tif err != nil {\n\t\treturn nil, skerr.Wrap(err)\n\t}\n\n\tactual, err := schema.GetDescription(singletonPool, sql.Tables{})\n\tif err != nil {\n\t\treturn nil, skerr.Wrap(err)\n\t}\n\tif diff := assertdeep.Diff(expectedSchema, *actual); diff != \"\" {\n\t\treturn nil, skerr.Fmt(\"Schema needs to be updated: %s.\", diff)\n\t}\n\n\treturn singletonPool, err\n}", "func InitRoundRobin(name string, endpoints []string) {\n\tif len(lb) == 0 {\n\t\tlb = make(map[string]*roundrobin.Balancer)\n\t}\n\n\tlb[name] = roundrobin.New(endpoints)\n}", "func (face *FaceBase) InitFaceBase(id FaceId, sizeofPriv int, socket dpdk.NumaSocket) error 
{\n\tface.id = id\n\n\tif socket == dpdk.NUMA_SOCKET_ANY {\n\t\tif lc := dpdk.GetCurrentLCore(); lc.IsValid() {\n\t\t\tsocket = lc.GetNumaSocket()\n\t\t} else {\n\t\t\tsocket = 0\n\t\t}\n\t}\n\n\tfaceC := face.getPtr()\n\t*faceC = C.Face{}\n\tfaceC.id = C.FaceId(face.id)\n\tfaceC.state = C.FACESTA_UP\n\tfaceC.numaSocket = C.int(socket)\n\n\tsizeofImpl := int(C.sizeof_FaceImpl) + sizeofPriv\n\tfaceC.impl = (*C.FaceImpl)(dpdk.ZmallocAligned(\"FaceImpl\", sizeofImpl, 1, socket))\n\n\treturn nil\n\n}", "func TestCNContainer_Base(t *testing.T) {\n\tassert := assert.New(t)\n\n\tcn, err := cnTestInit()\n\trequire.Nil(t, err)\n\n\tdockerPlugin, err := startDockerPlugin(t)\n\trequire.Nil(t, err)\n\n\t//From YAML on instance init\n\t//Two VNICs on the same tenant subnet\n\tmac, _ := net.ParseMAC(\"CA:FE:00:01:02:03\")\n\tmac2, _ := net.ParseMAC(\"CA:FE:00:02:02:03\")\n\t_, tnet, _ := net.ParseCIDR(\"192.168.111.0/24\")\n\ttip := net.ParseIP(\"192.168.111.100\")\n\ttip2 := net.ParseIP(\"192.168.111.102\")\n\tcip := net.ParseIP(\"192.168.200.200\")\n\n\tvnicCfg := &VnicConfig{\n\t\tVnicRole: TenantContainer,\n\t\tVnicIP: tip,\n\t\tConcIP: cip,\n\t\tVnicMAC: mac,\n\t\tSubnet: *tnet,\n\t\tSubnetKey: 0xF,\n\t\tVnicID: \"vuuid\",\n\t\tInstanceID: \"iuuid\",\n\t\tTenantID: \"tuuid\",\n\t\tSubnetID: \"suuid\",\n\t\tConcID: \"cnciuuid\",\n\t}\n\n\tvnicCfg2 := &VnicConfig{\n\t\tVnicRole: TenantContainer,\n\t\tVnicIP: tip2,\n\t\tConcIP: cip,\n\t\tVnicMAC: mac2,\n\t\tSubnet: *tnet,\n\t\tSubnetKey: 0xF,\n\t\tVnicID: \"vuuid2\",\n\t\tInstanceID: \"iuuid2\",\n\t\tTenantID: \"tuuid\",\n\t\tSubnetID: \"suuid\",\n\t\tConcID: \"cnciuuid\",\n\t}\n\n\tvar subnetID, iface string //Used to check that they match\n\n\t// Create a VNIC: Should create bridge and tunnels\n\tif vnic, ssntpEvent, cInfo, err := cn.CreateVnic(vnicCfg); err != nil {\n\t\tt.Error(err)\n\t} else {\n\t\t// expected SSNTP Event\n\t\tif assert.NotNil(ssntpEvent) {\n\t\t\tassert.Equal(ssntpEvent.Event, SsntpTunAdd)\n\t\t}\n\t\t// 
expected Container Event\n\t\tif assert.NotNil(cInfo) {\n\t\t\tassert.Equal(cInfo.CNContainerEvent, ContainerNetworkAdd)\n\t\t\tassert.NotEqual(cInfo.SubnetID, \"\")\n\t\t\tassert.NotEqual(cInfo.Subnet.String(), \"\")\n\t\t\tassert.NotEqual(cInfo.Gateway.String(), \"\")\n\t\t\tassert.NotEqual(cInfo.Bridge, \"\")\n\t\t}\n\t\tassert.Nil(validSsntpEvent(ssntpEvent, vnicCfg))\n\n\t\t//Cache the first subnet ID we see. All subsequent should have the same\n\t\tsubnetID = cInfo.SubnetID\n\t\tiface = vnic.InterfaceName()\n\t\tassert.NotEqual(iface, \"\")\n\n\t\t//Launcher will attach to this name and send out the event\n\t\t//Launcher will also create the logical docker network\n\t\tdebugPrint(t, \"VNIC created =\", vnic.LinkName, ssntpEvent, cInfo)\n\t\tassert.Nil(linkDump(t))\n\n\t\t//Now kick off the docker commands\n\t\tassert.Nil(dockerNetCreate(cInfo.Subnet, cInfo.Gateway, cInfo.Bridge, cInfo.SubnetID))\n\t\tassert.Nil(dockerNetInfo(cInfo.SubnetID))\n\t\tassert.Nil(dockerRunVerify(vnicCfg.VnicIP.String(), vnicCfg.VnicIP, vnicCfg.VnicMAC, cInfo.SubnetID))\n\t\tassert.Nil(dockerContainerDelete(vnicCfg.VnicIP.String()))\n\t}\n\n\t//Duplicate VNIC creation\n\tif vnic, ssntpEvent, cInfo, err := cn.CreateVnic(vnicCfg); err != nil {\n\t\tt.Error(err)\n\t} else {\n\t\tassert.Nil(ssntpEvent, \"ERROR: DUP unexpected event\")\n\t\tif assert.NotNil(cInfo) {\n\t\t\tassert.Equal(cInfo.SubnetID, subnetID)\n\t\t\tassert.Equal(cInfo.CNContainerEvent, ContainerNetworkInfo)\n\t\t\tassert.Equal(iface, vnic.InterfaceName())\n\t\t}\n\t}\n\n\t//Second VNIC creation - Should succeed\n\tif vnic, ssntpEvent, cInfo, err := cn.CreateVnic(vnicCfg2); err != nil {\n\t\tt.Error(err)\n\t} else {\n\t\tassert.Nil(ssntpEvent)\n\t\tif assert.NotNil(cInfo) {\n\t\t\tassert.Equal(cInfo.SubnetID, subnetID)\n\t\t\tassert.Equal(cInfo.CNContainerEvent, ContainerNetworkInfo)\n\t\t}\n\t\tiface = vnic.InterfaceName()\n\t\tassert.NotEqual(iface, \"\")\n\t\tassert.Nil(dockerRunVerify(vnicCfg2.VnicIP.String(), 
vnicCfg2.VnicIP,\n\t\t\tvnicCfg2.VnicMAC, cInfo.SubnetID))\n\t\tassert.Nil(dockerContainerDelete(vnicCfg2.VnicIP.String()))\n\t}\n\n\t//Duplicate VNIC creation\n\tif vnic, ssntpEvent, cInfo, err := cn.CreateVnic(vnicCfg2); err != nil {\n\t\tt.Error(err)\n\t} else {\n\t\tassert.Nil(ssntpEvent)\n\t\tif assert.NotNil(cInfo) {\n\t\t\tassert.Equal(cInfo.SubnetID, subnetID)\n\t\t\tassert.Equal(cInfo.CNContainerEvent, ContainerNetworkInfo)\n\t\t\tassert.Equal(iface, vnic.InterfaceName())\n\t\t}\n\t}\n\n\t//Destroy the first one\n\tif ssntpEvent, cInfo, err := cn.DestroyVnic(vnicCfg); err != nil {\n\t\tt.Error(err)\n\t} else {\n\t\tassert.Nil(ssntpEvent)\n\t\tassert.Nil(cInfo)\n\t}\n\n\t//Destroy it again\n\tif ssntpEvent, cInfo, err := cn.DestroyVnic(vnicCfg); err != nil {\n\t\tt.Error(err)\n\t} else {\n\t\tassert.Nil(ssntpEvent)\n\t\tassert.Nil(cInfo)\n\t}\n\n\t// Try and destroy - should work - cInfo should be reported\n\tif ssntpEvent, cInfo, err := cn.DestroyVnic(vnicCfg2); err != nil {\n\t\tt.Error(err)\n\t} else {\n\t\tassert.NotNil(ssntpEvent)\n\t\tif assert.NotNil(cInfo) {\n\t\t\tassert.Equal(cInfo.SubnetID, subnetID)\n\t\t\tassert.Equal(cInfo.CNContainerEvent, ContainerNetworkDel)\n\t\t}\n\t}\n\n\t//Has to be called after the VNIC has been deleted\n\tassert.Nil(dockerNetDelete(subnetID))\n\tassert.Nil(dockerNetList())\n\n\t//Destroy it again\n\tif ssntpEvent, cInfo, err := cn.DestroyVnic(vnicCfg2); err != nil {\n\t\tt.Error(err)\n\t} else {\n\t\tassert.Nil(ssntpEvent)\n\t\tassert.Nil(cInfo)\n\t}\n\n\tassert.Nil(stopDockerPlugin(dockerPlugin))\n}", "func newReconciler(mgr manager.Manager) reconcile.Reconciler {\n\tgceNew, err := gce.New(\"\")\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn &ReconcileTargetPool{\n\t\tclient: mgr.GetClient(),\n\t\tscheme: mgr.GetScheme(),\n\t\tgce: gceNew,\n\t\treconcileResult: reconcile.Result{\n\t\t\tRequeueAfter: time.Duration(5 * time.Second),\n\t\t},\n\t\tk8sObject: &computev1.TargetPool{},\n\t}\n}", "func newPool(cfg 
*config) (*miningPool, error) {\n\tp := new(miningPool)\n\tp.cfg = cfg\n\tdcrdRPCCfg := &rpcclient.ConnConfig{\n\t\tHost: cfg.DcrdRPCHost,\n\t\tEndpoint: \"ws\",\n\t\tUser: cfg.RPCUser,\n\t\tPass: cfg.RPCPass,\n\t\tCertificates: cfg.dcrdRPCCerts,\n\t}\n\tp.ctx, p.cancel = context.WithCancel(context.Background())\n\tpowLimit := cfg.net.PowLimit\n\tpowLimitF, _ := new(big.Float).SetInt(powLimit).Float64()\n\titerations := math.Pow(2, 256-math.Floor(math.Log2(powLimitF)))\n\taddPort := func(ports map[string]uint32, key string, entry uint32) error {\n\t\tvar match bool\n\t\tvar miner string\n\t\tfor m, port := range ports {\n\t\t\tif port == entry {\n\t\t\t\tmatch = true\n\t\t\t\tminer = m\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\tif match {\n\t\t\treturn fmt.Errorf(\"%s and %s share port %d\", key, miner, entry)\n\t\t}\n\t\tports[key] = entry\n\t\treturn nil\n\t}\n\n\t// Ensure provided miner ports are unique.\n\tminerPorts := make(map[string]uint32)\n\t_ = addPort(minerPorts, pool.CPU, cfg.CPUPort)\n\terr := addPort(minerPorts, pool.InnosiliconD9, cfg.D9Port)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\terr = addPort(minerPorts, pool.AntminerDR3, cfg.DR3Port)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\terr = addPort(minerPorts, pool.AntminerDR5, cfg.DR5Port)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\terr = addPort(minerPorts, pool.WhatsminerD1, cfg.D1Port)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\terr = addPort(minerPorts, pool.ObeliskDCR1, cfg.DCR1Port)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tdb, err := pool.InitDB(cfg.DBFile, cfg.SoloPool)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\thcfg := &pool.HubConfig{\n\t\tDB: db,\n\t\tActiveNet: cfg.net.Params,\n\t\tPoolFee: cfg.PoolFee,\n\t\tMaxGenTime: cfg.MaxGenTime,\n\t\tPaymentMethod: cfg.PaymentMethod,\n\t\tLastNPeriod: cfg.LastNPeriod,\n\t\tWalletPass: cfg.WalletPass,\n\t\tPoolFeeAddrs: cfg.poolFeeAddrs,\n\t\tSoloPool: cfg.SoloPool,\n\t\tNonceIterations: iterations,\n\t\tMinerPorts: 
minerPorts,\n\t\tMaxConnectionsPerHost: cfg.MaxConnectionsPerHost,\n\t\tWalletAccount: cfg.WalletAccount,\n\t}\n\tp.hub, err = pool.NewHub(p.cancel, hcfg)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Establish a connection to the mining node.\n\tntfnHandlers := p.hub.CreateNotificationHandlers()\n\tnodeConn, err := rpcclient.New(dcrdRPCCfg, ntfnHandlers)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif err := nodeConn.NotifyWork(p.ctx); err != nil {\n\t\tnodeConn.Shutdown()\n\t\treturn nil, err\n\t}\n\tif err := nodeConn.NotifyBlocks(p.ctx); err != nil {\n\t\tnodeConn.Shutdown()\n\t\treturn nil, err\n\t}\n\n\tp.hub.SetNodeConnection(nodeConn)\n\n\t// Establish a connection to the wallet if the pool is mining as a\n\t// publicly available mining pool.\n\tif !cfg.SoloPool {\n\t\tcreds, err := credentials.\n\t\t\tNewClientTLSFromFile(cfg.WalletRPCCert, \"localhost\")\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tgrpc, err := grpc.Dial(cfg.WalletGRPCHost,\n\t\t\tgrpc.WithTransportCredentials(creds))\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\t// Perform a Balance request to check connectivity and account\n\t\t// existence.\n\t\twalletConn := walletrpc.NewWalletServiceClient(grpc)\n\t\treq := &walletrpc.BalanceRequest{\n\t\t\tAccountNumber: cfg.WalletAccount,\n\t\t\tRequiredConfirmations: 1,\n\t\t}\n\t\t_, err = walletConn.Balance(p.ctx, req)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tp.hub.SetWalletConnection(walletConn, grpc.Close)\n\n\t\tconfNotifs, err := walletConn.ConfirmationNotifications(p.ctx)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tp.hub.SetTxConfNotifClient(confNotifs)\n\t}\n\n\terr = p.hub.FetchWork(p.ctx)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\terr = p.hub.Listen()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tcsrfSecret, err := p.hub.CSRFSecret()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tgcfg := &gui.Config{\n\t\tSoloPool: cfg.SoloPool,\n\t\tGUIDir: 
cfg.GUIDir,\n\t\tAdminPass: cfg.AdminPass,\n\t\tGUIPort: cfg.GUIPort,\n\t\tUseLEHTTPS: cfg.UseLEHTTPS,\n\t\tDomain: cfg.Domain,\n\t\tTLSCertFile: cfg.TLSCert,\n\t\tTLSKeyFile: cfg.TLSKey,\n\t\tActiveNet: cfg.net.Params,\n\t\tPaymentMethod: cfg.PaymentMethod,\n\t\tDesignation: cfg.Designation,\n\t\tPoolFee: cfg.PoolFee,\n\t\tCSRFSecret: csrfSecret,\n\t\tMinerPorts: minerPorts,\n\t\tWithinLimit: p.hub.WithinLimit,\n\t\tFetchLastWorkHeight: p.hub.FetchLastWorkHeight,\n\t\tFetchLastPaymentHeight: p.hub.FetchLastPaymentHeight,\n\t\tFetchMinedWork: p.hub.FetchMinedWork,\n\t\tFetchWorkQuotas: p.hub.FetchWorkQuotas,\n\t\tBackupDB: p.hub.BackupDB,\n\t\tFetchClients: p.hub.FetchClients,\n\t\tAccountExists: p.hub.AccountExists,\n\t\tFetchArchivedPayments: p.hub.FetchArchivedPayments,\n\t\tFetchPendingPayments: p.hub.FetchPendingPayments,\n\t\tFetchCacheChannel: p.hub.FetchCacheChannel,\n\t}\n\tp.gui, err = gui.NewGUI(gcfg)\n\tif err != nil {\n\t\tp.hub.CloseListeners()\n\t\treturn nil, err\n\t}\n\treturn p, nil\n}", "func (m *MicroService) getLoadBalancedInstance() (*Instance, int, error) {\r\n\tinstCount := len(m.Instances)\r\n\tif instCount == 0 {\r\n\t\treturn nil, -1, ErrServiceNoInstance\r\n\t}\r\n\r\n\tif len(m.BlackList) == instCount {\r\n\t\treturn nil, -1, ErrAllInstancesDown\r\n\t}\r\n\r\n\tinstances := make([]*Instance, instCount)\r\n\tcopy(instances, m.Instances)\r\n\r\n\tvar idx int\r\n\tvar err error\r\n\tfor {\r\n\t\tswitch m.Strategy {\r\n\t\tcase RoundRobin:\r\n\t\t\tidx, err = getRoundRobinInstIdx(instances, m.LastUsedIdx.Get())\r\n\t\tcase LeastConnected:\r\n\t\t\tidx = getLeastConInstIdx(instances)\r\n\t\tcase Random:\r\n\t\t\tidx = getRandomInstIdx(instances)\r\n\t\tdefault:\r\n\t\t\treturn nil, -1, NewError(ErrInvalidStrategyCode, \"Unexpected strategy \" + string(m.Strategy))\r\n\t\t}\r\n\r\n\t\tif err != nil {\r\n\t\t\treturn nil, -1, err\r\n\t\t}\r\n\r\n\t\tif m.isBlacklisted(idx) {\r\n\t\t\tinstances[idx] = nil\r\n\t\t} else 
{\r\n\t\t\tm.LastUsedIdx.Set(idx)\r\n\t\t\treturn instances[idx], idx, nil\r\n\t\t}\r\n\t}\r\n}", "func New(network, addr string, size int) (*Pool, error) {\n\treturn NewCustom(network, size, SingleAddrFunc(addr), redis.Dial)\n}", "func bindBaseFactory(address common.Address, caller bind.ContractCaller, transactor bind.ContractTransactor, filterer bind.ContractFilterer) (*bind.BoundContract, error) {\n\tparsed, err := ParsedABI(K_BaseFactory)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn bind.NewBoundContract(address, *parsed, caller, transactor, filterer), nil\n}", "func newPool(addr string) (*pool, error) {\n\tp := pool{redis.Pool{\n\t\tMaxActive: 100,\n\t\tWait: true,\n\t\tMaxIdle: 10,\n\t\tIdleTimeout: 240 * time.Second,\n\t\tDial: func() (redis.Conn, error) { return redis.Dial(\"tcp\", addr) },\n\t}}\n\n\t// Test connection\n\tconn := p.Get()\n\tdefer conn.Close()\n\n\t_, err := conn.Do(\"PING\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &p, nil\n}", "func (t *strideTable[T]) getOrCreateChild(addr uint8) *strideTable[T] {\n\tidx := hostIndex(addr)\n\tif t.entries[idx].child == nil {\n\t\tt.entries[idx].child = new(strideTable[T])\n\t\tt.refs++\n\t}\n\treturn t.entries[idx].child\n}", "func (n NetworkTypeWiFi) construct() NetworkTypeClass { return &n }", "func NewKeybase(validatorMoniker, mnemonic, password string) (keyring.Keyring, keyring.Info, error) {\n\tkr := keyring.NewInMemory()\n\thdpath := *hd.NewFundraiserParams(0, sdk.CoinType, 0)\n\tinfo, err := kr.NewAccount(validatorMoniker, mnemonic, password, hdpath.String(), hd.Secp256k1)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\treturn kr, info, nil\n}", "func newPRSignedBaseLayer(baseLayerIdentity PolicyReferenceMatch) (*prSignedBaseLayer, error) {\n\tif baseLayerIdentity == nil {\n\t\treturn nil, InvalidPolicyFormatError(\"baseLayerIdentity not specified\")\n\t}\n\treturn &prSignedBaseLayer{\n\t\tprCommon: prCommon{Type: prTypeSignedBaseLayer},\n\t\tBaseLayerIdentity: 
baseLayerIdentity,\n\t}, nil\n}", "func NewBase(path string, hashName string) (*Base, error) {\n\tfor _, p := range []string{\"blobs/\" + hashName, \"state\", \"tmp\"} {\n\t\tif err := os.MkdirAll(filepath.Join(path, p), 0755); err != nil && !os.IsExist(err) {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\treturn &Base{Path: path, HashName: hashName, Hash: cryptomap.DetermineHash(hashName)}, nil\n}", "func newBaseRunner(collector *resourceStatusCollector) *baseRunner {\n\treturn &baseRunner{\n\t\tcollector: collector,\n\t}\n}", "func (p *Periph) LoadBASE(n int) uint32 {\n\treturn p.base[n].Load()\n}", "func createPerNodePhysicalVIPs(isIPv6 bool, protocol v1.Protocol, sourcePort int32, targetIPs []string, targetPort int32) error {\n\tklog.V(5).Infof(\"Creating Node VIPs - %s, %d, [%v], %d\", protocol, sourcePort, targetIPs, targetPort)\n\t// Each gateway has a separate load-balancer for N/S traffic\n\tgatewayRouters, _, err := gateway.GetOvnGateways()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfor _, gatewayRouter := range gatewayRouters {\n\t\tgatewayLB, err := gateway.GetGatewayLoadBalancer(gatewayRouter, protocol)\n\t\tif err != nil {\n\t\t\tklog.Errorf(\"Gateway router %s does not have load balancer (%v)\",\n\t\t\t\tgatewayRouter, err)\n\t\t\tcontinue\n\t\t}\n\t\tphysicalIPs, err := gateway.GetGatewayPhysicalIPs(gatewayRouter)\n\t\tif err != nil {\n\t\t\tklog.Errorf(\"Gateway router %s does not have physical ip (%v)\", gatewayRouter, err)\n\t\t\tcontinue\n\t\t}\n\t\t// Filter only phyiscal IPs of the same family\n\t\tphysicalIPs, err = util.MatchAllIPStringFamily(isIPv6, physicalIPs)\n\t\tif err != nil {\n\t\t\tklog.Errorf(\"Failed to find node physical IPs, for gateway: %s, error: %v\", gatewayRouter, err)\n\t\t\treturn err\n\t\t}\n\n\t\t// If self ip is in target list, we need to use special IP to allow hairpin back to host\n\t\tnewTargets := util.UpdateIPsSlice(targetIPs, physicalIPs, []string{types.V4HostMasqueradeIP, types.V6HostMasqueradeIP})\n\n\t\terr = 
loadbalancer.CreateLoadBalancerVIPs(gatewayLB, physicalIPs, sourcePort, newTargets, targetPort)\n\t\tif err != nil {\n\t\t\tklog.Errorf(\"Failed to create VIP in load balancer %s - %v\", gatewayLB, err)\n\t\t\treturn err\n\t\t}\n\n\t\tif config.Gateway.Mode == config.GatewayModeShared {\n\t\t\tworkerNode := util.GetWorkerFromGatewayRouter(gatewayRouter)\n\t\t\tworkerLB, err := loadbalancer.GetWorkerLoadBalancer(workerNode, protocol)\n\t\t\tif err != nil {\n\t\t\t\tklog.Errorf(\"Worker switch %s does not have load balancer (%v)\", workerNode, err)\n\t\t\t\treturn err\n\t\t\t}\n\t\t\terr = loadbalancer.CreateLoadBalancerVIPs(workerLB, physicalIPs, sourcePort, targetIPs, targetPort)\n\t\t\tif err != nil {\n\t\t\t\tklog.Errorf(\"Failed to create VIP in load balancer %s - %v\", workerLB, err)\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\treturn nil\n}", "func ensureNetwork(name string) error {\n\t// TODO: the network might already exist and not have ipv6 ... :|\n\t// discussion: https://github.com/kubernetes-sigs/kind/pull/1508#discussion_r414594198\n\texists, err := checkIfNetworkExists(name)\n\tif err != nil {\n\t\treturn err\n\t}\n\t// network already exists, we're good\n\tif exists {\n\t\treturn nil\n\t}\n\n\t// Generate unique subnet per network based on the name\n\t// obtained from the ULA fc00::/8 range\n\t// Make N attempts with \"probing\" in case we happen to collide\n\tsubnet := generateULASubnetFromName(name, 0)\n\terr = createNetwork(name, subnet)\n\tif err == nil {\n\t\t// Success!\n\t\treturn nil\n\t}\n\n\t// On the first try check if ipv6 fails entirely on this machine\n\t// https://github.com/kubernetes-sigs/kind/issues/1544\n\t// Otherwise if it's not a pool overlap error, fail\n\t// If it is, make more attempts below\n\tif isIPv6UnavailableError(err) {\n\t\t// only one attempt, IPAM is automatic in ipv4 only\n\t\treturn createNetwork(name, \"\")\n\t} else if isPoolOverlapError(err) {\n\t\t// unknown error ...\n\t\treturn err\n\t}\n\n\t// keep trying for 
ipv6 subnets\n\tconst maxAttempts = 5\n\tfor attempt := int32(1); attempt < maxAttempts; attempt++ {\n\t\tsubnet := generateULASubnetFromName(name, attempt)\n\t\terr = createNetwork(name, subnet)\n\t\tif err == nil {\n\t\t\t// success!\n\t\t\treturn nil\n\t\t} else if !isPoolOverlapError(err) {\n\t\t\t// unknown error ...\n\t\t\treturn err\n\t\t}\n\t}\n\treturn errors.New(\"exhausted attempts trying to find a non-overlapping subnet\")\n}", "func NewBase(name string) *Base {\n\treturn &Base{name}\n}", "func (n NetworkTypeOther) construct() NetworkTypeClass { return &n }", "func (bi *baseInstance) toGA() *ga.Instance {\n\tinst := &ga.Instance{Name: bi.name, Zone: bi.zone, NetworkInterfaces: []*ga.NetworkInterface{{}}}\n\tif bi.aliasRange != \"\" {\n\t\tinst.NetworkInterfaces[0].AliasIpRanges = []*ga.AliasIpRange{\n\t\t\t{IpCidrRange: bi.aliasRange, SubnetworkRangeName: util.TestSecondaryRangeName},\n\t\t}\n\t}\n\treturn inst\n}", "func (e *GT) Base() *GT {\n\tif e.p == nil {\n\t\te.p = &gfP12{}\n\t}\n\te.p.Set(gfP12Gen)\n\treturn e\n}", "func createDefaultVxlanIPPool(ctx context.Context, client client.Interface, cidr *cnet.IPNet, blockSize int, isNATOutgoingEnabled, checkVxlan bool) error {\n\tvar poolName string\n\tswitch cidr.Version() {\n\tcase 4:\n\t\tpoolName = defaultIpv4PoolName\n\tcase 6:\n\t\tpoolName = defaultIpv6PoolName\n\tdefault:\n\t\treturn fmt.Errorf(\"Unknown IP version for CIDR: %s\", cidr.String())\n\n\t}\n\tpool := &api.IPPool{\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tName: poolName,\n\t\t},\n\t\tSpec: api.IPPoolSpec{\n\t\t\tCIDR: cidr.String(),\n\t\t\tBlockSize: blockSize,\n\t\t\tNATOutgoing: isNATOutgoingEnabled,\n\t\t\tIPIPMode: api.IPIPModeNever,\n\t\t\tVXLANMode: api.VXLANModeAlways,\n\t\t},\n\t}\n\n\tlog.Infof(\"Ensure default IPv%d pool (cidr %s, blockSize %d, nat %t, vxlanMode %s).\", cidr.Version(), cidr.String(), blockSize, isNATOutgoingEnabled, api.VXLANModeAlways)\n\n\tvar defaultPool *api.IPPool\n\tvar err error\n\tcreatePool := 
true\n\tif !checkVxlan {\n\t\t// Canal will always create a default ippool with vxlan disabled.\n\t\tdefaultPool, err = client.IPPools().Get(ctx, poolName, options.GetOptions{})\n\t\tif err == nil {\n\t\t\tif defaultPool.Spec.VXLANMode != api.VXLANModeAlways {\n\t\t\t\t// ippool is created by Canal. Delete it\n\t\t\t\t_, err := client.IPPools().Delete(ctx, poolName, options.DeleteOptions{})\n\t\t\t\tif err != nil {\n\t\t\t\t\tlog.WithError(err).Errorf(\"Failed to delete existing default IPv%d IP pool\", cidr.Version())\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\t// We have a default pool and vxlan mode is enabled.\n\t\t\t\tcreatePool = false\n\t\t\t}\n\t\t} else {\n\t\t\tif _, ok := err.(cerrors.ErrorResourceDoesNotExist); !ok {\n\t\t\t\tlog.WithError(err).Errorf(\"Failed to get default IPv%d pool for Canal\", cidr.Version())\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tlog.WithError(err).Warnf(\"Default IPv%d pool for Canal not exists\", cidr.Version())\n\t\t}\n\t}\n\n\tif createPool {\n\t\t// Create the pool.\n\t\t// Validate if pool already exists.\n\t\t_, err = client.IPPools().Create(ctx, pool, options.SetOptions{})\n\t\tif err == nil {\n\t\t\tlog.Infof(\"Created default IPv%d pool.\", cidr.Version())\n\t\t\treturn nil\n\t\t}\n\n\t\tif _, ok := err.(cerrors.ErrorResourceAlreadyExists); !ok {\n\t\t\tlog.WithError(err).Errorf(\"Failed to create default IPv%d pool (%s)\", cidr.Version(), cidr.String())\n\t\t\treturn err\n\t\t}\n\n\t\t// Default pool exists.\n\t\tdefaultPool, err = client.IPPools().Get(ctx, poolName, options.GetOptions{})\n\t\tif err != nil {\n\t\t\tlog.WithError(err).Errorf(\"Failed to get existing default IPv%d IP pool\", cidr.Version())\n\t\t\treturn err\n\t\t}\n\t}\n\n\t// Check CIDR/blockSize/NATOutgoing for existing pool.\n\tif defaultPool.Spec.CIDR != cidr.String() ||\n\t\tdefaultPool.Spec.BlockSize != blockSize ||\n\t\tdefaultPool.Spec.NATOutgoing != isNATOutgoingEnabled ||\n\t\tdefaultPool.Spec.VXLANMode != api.VXLANModeAlways 
{\n\t\tmsg := fmt.Sprintf(\"current [cidr:%s, blocksize:%d, nat:%t, vxlanMode %s], expected [cidr:%s, blocksize:%d, nat:%t, vxlanMode %s]\",\n\t\t\tdefaultPool.Spec.CIDR, defaultPool.Spec.BlockSize, defaultPool.Spec.NATOutgoing, defaultPool.Spec.VXLANMode,\n\t\t\tcidr.String(), blockSize, isNATOutgoingEnabled, api.VXLANModeAlways)\n\t\tlog.Errorf(\"Failed to validate existing default IPv%d IP pool (cidr/blocksize/nat/vxlanMode) %+v\", cidr.Version(), defaultPool.Spec)\n\t\treturn cerrors.ErrorValidation{\n\t\t\tErroredFields: []cerrors.ErroredField{{\n\t\t\t\tName: \"pool.Spec\",\n\t\t\t\tReason: msg,\n\t\t\t}},\n\t\t}\n\t}\n\n\tlog.Infof(\"Use current default IPv%d pool.\", cidr.Version())\n\treturn nil\n}", "func newVirtualNetworkClient(subID string, authorizer auth.Authorizer) (*client, error) {\n\tc, err := wssdcloudclient.GetVirtualNetworkClient(&subID, authorizer)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &client{c}, nil\n}", "func init() {\n\t_drv = &Drv{}\n\t_drv.locations = make(map[string]*time.Location)\n\t_drv.openEnvs = newEnvList()\n\n\t// init general pools\n\t_drv.listPool = newPool(func() interface{} { return list.New() })\n\t_drv.envPool = newPool(func() interface{} { return &Env{openSrvs: newSrvList(), openCons: newConList()} })\n\t_drv.conPool = newPool(func() interface{} { return &Con{} })\n\t_drv.srvPool = newPool(func() interface{} { return &Srv{openSess: newSesList()} })\n\t_drv.sesPool = newPool(func() interface{} { return &Ses{openStmts: newStmtList(), openTxs: newTxList()} })\n\t_drv.stmtPool = newPool(func() interface{} { return &Stmt{openRsets: newRsetList()} })\n\t_drv.txPool = newPool(func() interface{} { return &Tx{} })\n\t_drv.rsetPool = newPool(func() interface{} { return &Rset{genByPool: true} })\n\n\t// init bind pools\n\t_drv.bndPools = make([]*sync.Pool, bndIdxNil+1)\n\t_drv.bndPools[bndIdxInt64] = newPool(func() interface{} { return &bndInt64{} })\n\t_drv.bndPools[bndIdxInt32] = newPool(func() interface{} { return 
&bndInt32{} })\n\t_drv.bndPools[bndIdxInt16] = newPool(func() interface{} { return &bndInt16{} })\n\t_drv.bndPools[bndIdxInt8] = newPool(func() interface{} { return &bndInt8{} })\n\t_drv.bndPools[bndIdxUint64] = newPool(func() interface{} { return &bndUint64{} })\n\t_drv.bndPools[bndIdxUint32] = newPool(func() interface{} { return &bndUint32{} })\n\t_drv.bndPools[bndIdxUint16] = newPool(func() interface{} { return &bndUint16{} })\n\t_drv.bndPools[bndIdxUint8] = newPool(func() interface{} { return &bndUint8{} })\n\t_drv.bndPools[bndIdxFloat64] = newPool(func() interface{} { return &bndFloat64{} })\n\t_drv.bndPools[bndIdxFloat32] = newPool(func() interface{} { return &bndFloat32{} })\n\t_drv.bndPools[bndIdxNumString] = newPool(func() interface{} { return &bndNumString{} })\n\t_drv.bndPools[bndIdxOCINum] = newPool(func() interface{} { return &bndOCINum{} })\n\t_drv.bndPools[bndIdxInt64Ptr] = newPool(func() interface{} { return &bndInt64Ptr{} })\n\t_drv.bndPools[bndIdxInt32Ptr] = newPool(func() interface{} { return &bndInt32Ptr{} })\n\t_drv.bndPools[bndIdxInt16Ptr] = newPool(func() interface{} { return &bndInt16Ptr{} })\n\t_drv.bndPools[bndIdxInt8Ptr] = newPool(func() interface{} { return &bndInt8Ptr{} })\n\t_drv.bndPools[bndIdxUint64Ptr] = newPool(func() interface{} { return &bndUint64Ptr{} })\n\t_drv.bndPools[bndIdxUint32Ptr] = newPool(func() interface{} { return &bndUint32Ptr{} })\n\t_drv.bndPools[bndIdxUint16Ptr] = newPool(func() interface{} { return &bndUint16Ptr{} })\n\t_drv.bndPools[bndIdxUint8Ptr] = newPool(func() interface{} { return &bndUint8Ptr{} })\n\t_drv.bndPools[bndIdxFloat64Ptr] = newPool(func() interface{} { return &bndFloat64Ptr{} })\n\t_drv.bndPools[bndIdxFloat32Ptr] = newPool(func() interface{} { return &bndFloat32Ptr{} })\n\t_drv.bndPools[bndIdxNumStringPtr] = newPool(func() interface{} { return &bndNumStringPtr{} })\n\t_drv.bndPools[bndIdxOCINumPtr] = newPool(func() interface{} { return &bndOCINumPtr{} })\n\t_drv.bndPools[bndIdxInt64Slice] = 
newPool(func() interface{} { return &bndInt64Slice{} })\n\t_drv.bndPools[bndIdxInt32Slice] = newPool(func() interface{} { return &bndInt32Slice{} })\n\t_drv.bndPools[bndIdxInt16Slice] = newPool(func() interface{} { return &bndInt16Slice{} })\n\t_drv.bndPools[bndIdxInt8Slice] = newPool(func() interface{} { return &bndInt8Slice{} })\n\t_drv.bndPools[bndIdxUint64Slice] = newPool(func() interface{} { return &bndUint64Slice{} })\n\t_drv.bndPools[bndIdxUint32Slice] = newPool(func() interface{} { return &bndUint32Slice{} })\n\t_drv.bndPools[bndIdxUint16Slice] = newPool(func() interface{} { return &bndUint16Slice{} })\n\t_drv.bndPools[bndIdxUint8Slice] = newPool(func() interface{} { return &bndUint8Slice{} })\n\t_drv.bndPools[bndIdxFloat64Slice] = newPool(func() interface{} { return &bndFloat64Slice{} })\n\t_drv.bndPools[bndIdxFloat32Slice] = newPool(func() interface{} { return &bndFloat32Slice{} })\n\t_drv.bndPools[bndIdxNumStringSlice] = newPool(func() interface{} { return &bndNumStringSlice{} })\n\t_drv.bndPools[bndIdxOCINumSlice] = newPool(func() interface{} { return &bndOCINumSlice{} })\n\t_drv.bndPools[bndIdxTime] = newPool(func() interface{} { return &bndTime{} })\n\t_drv.bndPools[bndIdxTimePtr] = newPool(func() interface{} { return &bndTimePtr{} })\n\t_drv.bndPools[bndIdxTimeSlice] = newPool(func() interface{} { return &bndTimeSlice{} })\n\t_drv.bndPools[bndIdxDate] = newPool(func() interface{} { return &bndDate{} })\n\t_drv.bndPools[bndIdxDatePtr] = newPool(func() interface{} { return &bndDatePtr{} })\n\t_drv.bndPools[bndIdxDateSlice] = newPool(func() interface{} { return &bndDateSlice{} })\n\t_drv.bndPools[bndIdxString] = newPool(func() interface{} { return &bndString{} })\n\t_drv.bndPools[bndIdxStringPtr] = newPool(func() interface{} { return &bndStringPtr{} })\n\t_drv.bndPools[bndIdxStringSlice] = newPool(func() interface{} { return &bndStringSlice{} })\n\t_drv.bndPools[bndIdxBool] = newPool(func() interface{} { return &bndBool{} 
})\n\t_drv.bndPools[bndIdxBoolPtr] = newPool(func() interface{} { return &bndBoolPtr{} })\n\t_drv.bndPools[bndIdxBoolSlice] = newPool(func() interface{} { return &bndBoolSlice{} })\n\t_drv.bndPools[bndIdxBin] = newPool(func() interface{} { return &bndBin{} })\n\t_drv.bndPools[bndIdxBinSlice] = newPool(func() interface{} { return &bndBinSlice{} })\n\t_drv.bndPools[bndIdxLob] = newPool(func() interface{} { return &bndLob{} })\n\t_drv.bndPools[bndIdxLobPtr] = newPool(func() interface{} { return &bndLobPtr{} })\n\t_drv.bndPools[bndIdxLobSlice] = newPool(func() interface{} { return &bndLobSlice{} })\n\t_drv.bndPools[bndIdxIntervalYM] = newPool(func() interface{} { return &bndIntervalYM{} })\n\t_drv.bndPools[bndIdxIntervalYMSlice] = newPool(func() interface{} { return &bndIntervalYMSlice{} })\n\t_drv.bndPools[bndIdxIntervalDS] = newPool(func() interface{} { return &bndIntervalDS{} })\n\t_drv.bndPools[bndIdxIntervalDSSlice] = newPool(func() interface{} { return &bndIntervalDSSlice{} })\n\t_drv.bndPools[bndIdxRset] = newPool(func() interface{} { return &bndRset{} })\n\t_drv.bndPools[bndIdxBfile] = newPool(func() interface{} { return &bndBfile{} })\n\t_drv.bndPools[bndIdxNil] = newPool(func() interface{} { return &bndNil{} })\n\n\t// init def pools\n\t_drv.defPools = make([]*sync.Pool, defIdxRset+1)\n\t_drv.defPools[defIdxInt64] = newPool(func() interface{} { return &defInt64{} })\n\t_drv.defPools[defIdxInt32] = newPool(func() interface{} { return &defInt32{} })\n\t_drv.defPools[defIdxInt16] = newPool(func() interface{} { return &defInt16{} })\n\t_drv.defPools[defIdxInt8] = newPool(func() interface{} { return &defInt8{} })\n\t_drv.defPools[defIdxUint64] = newPool(func() interface{} { return &defUint64{} })\n\t_drv.defPools[defIdxUint32] = newPool(func() interface{} { return &defUint32{} })\n\t_drv.defPools[defIdxUint16] = newPool(func() interface{} { return &defUint16{} })\n\t_drv.defPools[defIdxUint8] = newPool(func() interface{} { return &defUint8{} 
})\n\t_drv.defPools[defIdxFloat64] = newPool(func() interface{} { return &defFloat64{} })\n\t_drv.defPools[defIdxFloat32] = newPool(func() interface{} { return &defFloat32{} })\n\t_drv.defPools[defIdxOCINum] = newPool(func() interface{} { return &defOCINum{} })\n\t_drv.defPools[defIdxTime] = newPool(func() interface{} { return &defTime{} })\n\t_drv.defPools[defIdxDate] = newPool(func() interface{} { return &defDate{} })\n\t_drv.defPools[defIdxString] = newPool(func() interface{} { return &defString{} })\n\t_drv.defPools[defIdxNumString] = newPool(func() interface{} { return &defNumString{} })\n\t_drv.defPools[defIdxOCINum] = newPool(func() interface{} { return &defOCINum{} })\n\t_drv.defPools[defIdxBool] = newPool(func() interface{} { return &defBool{} })\n\t_drv.defPools[defIdxLob] = newPool(func() interface{} { return &defLob{} })\n\t_drv.defPools[defIdxRaw] = newPool(func() interface{} { return &defRaw{} })\n\t_drv.defPools[defIdxLongRaw] = newPool(func() interface{} { return &defLongRaw{} })\n\t_drv.defPools[defIdxBfile] = newPool(func() interface{} { return &defBfile{} })\n\t_drv.defPools[defIdxIntervalYM] = newPool(func() interface{} { return &defIntervalYM{} })\n\t_drv.defPools[defIdxIntervalDS] = newPool(func() interface{} { return &defIntervalDS{} })\n\t_drv.defPools[defIdxRowid] = newPool(func() interface{} { return &defRowid{} })\n\t_drv.defPools[defIdxRset] = newPool(func() interface{} { return &defRset{} })\n\n\tvar err error\n\tif _drv.sqlPkgEnv, err = OpenEnv(); err != nil {\n\t\tpanic(fmt.Sprintf(\"OpenEnv: %v\", err))\n\t}\n\t_drv.sqlPkgEnv.isPkgEnv = true\n\t// database/sql/driver expects binaryFloat to return float64 (not the Rset default of float32)\n\tcfg := _drv.sqlPkgEnv.Cfg()\n\tcfg.RsetCfg.binaryFloat = F64\n\t_drv.sqlPkgEnv.SetCfg(cfg)\n\tsql.Register(Name, _drv)\n}", "func LoadExistingBase(id string, dateCreatedUTC time.Time, dateUpdatedUTC time.Time, deleted bool) (*base, *shared.CustomError) {\n\tif uuid.FromStringOrNil(id) == uuid.Nil 
{\n\t\treturn nil, shared.NewCustomError(errInvalidGuid, shared.ErrorTypeSystem)\n\t}\n\n\tif dateCreatedUTC.After(time.Now().UTC()) {\n\t\treturn nil, shared.NewCustomError(errFutureCreated, shared.ErrorTypeSystem)\n\t}\n\n\tif dateUpdatedUTC.After(time.Now().UTC()) {\n\t\treturn nil, shared.NewCustomError(errFutureUpdated, shared.ErrorTypeSystem)\n\t}\n\n\tif dateUpdatedUTC.Before(dateCreatedUTC) {\n\t\treturn nil, shared.NewCustomError(errBeforeCreated, shared.ErrorTypeSystem)\n\t}\n\n\tif deleted != true && deleted != false {\n\t\treturn nil, shared.NewCustomError(errInvalidBool, shared.ErrorTypeSystem)\n\t}\n\treturn &base{\n\t\tIDx: id,\n\t\tDateCreatedUTCx: dateCreatedUTC,\n\t\tDateUpdatedUTCx: dateUpdatedUTC,\n\t\tDeletedx: deleted,\n\t\t//tracerID: eTag,\n\t}, nil\n}", "func NewWithBaseURI(baseURI string, ) BaseClient {\n return BaseClient{\n Client: autorest.NewClientWithUserAgent(UserAgent()),\n BaseURI: baseURI,\n }\n}", "func (n NetworkTypeMobileRoaming) construct() NetworkTypeClass { return &n }", "func Create(cfg *mgrconfig.Config, debug bool) (*Pool, error) {\n\ttyp, ok := vmimpl.Types[cfg.Type]\n\tif !ok {\n\t\treturn nil, fmt.Errorf(\"unknown instance type '%v'\", cfg.Type)\n\t}\n\tenv := &vmimpl.Env{\n\t\tName: cfg.Name,\n\t\tOS: cfg.TargetOS,\n\t\tArch: cfg.TargetVMArch,\n\t\tWorkdir: cfg.Workdir,\n\t\tImage: cfg.Image,\n\t\tSSHKey: cfg.SSHKey,\n\t\tSSHUser: cfg.SSHUser,\n\t\tDebug: debug,\n\t\tConfig: cfg.VM,\n\t}\n\timpl, err := typ.Ctor(env)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &Pool{\n\t\timpl: impl,\n\t\tworkdir: env.Workdir,\n\t}, nil\n}", "func (alloc *RuntimePortAllocator) createAndRestorePortAllocator() (err error) {\n\talloc.pa, err = portallocator.NewPortAllocatorCustom(*alloc.pr, func(max int, rangeSpec string) (allocator.Interface, error) {\n\t\treturn allocator.NewAllocationMap(max, rangeSpec), nil\n\t})\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tports, err := alloc.getReservedPorts(alloc.client)\n\tif err != 
nil {\n\t\treturn err\n\t}\n\talloc.log.Info(\"Found reserved ports\", \"ports\", ports)\n\n\tfor _, port := range ports {\n\t\tif err = alloc.pa.Allocate(port); err != nil {\n\t\t\talloc.log.Error(err, \"can't allocate reserved ports\", \"port\", port)\n\t\t}\n\t}\n\n\treturn nil\n}", "func init() {\n\tpools = make([]*sync.Pool, len(bucketSize))\n\tfor i, v := range bucketSize {\n\t\t// to use new variable inside the New function\n\t\tv1 := v\n\t\tpools[i] = &sync.Pool{\n\t\t\tNew: func() interface{} {\n\t\t\t\treturn make([]byte, v1)\n\t\t\t},\n\t\t}\n\t}\n}", "func ReserveCIDRForSingleHost(ctx context.Context, networkInstance resources.Network) (_ string, _ uint, ferr fail.Error) {\n\tvar index uint\n\txerr := networkInstance.Alter(ctx, func(clonable data.Clonable, props *serialize.JSONProperties) fail.Error {\n\t\treturn props.Alter(networkproperty.SingleHostsV1, func(clonable data.Clonable) fail.Error {\n\t\t\tnshV1, ok := clonable.(*propertiesv1.NetworkSingleHosts)\n\t\t\tif !ok {\n\t\t\t\treturn fail.InconsistentError(\n\t\t\t\t\t\"'*propertiesv1.NetworkSingleHosts' expected, '%s' provided\",\n\t\t\t\t\treflect.TypeOf(clonable).String(),\n\t\t\t\t)\n\t\t\t}\n\n\t\t\tindex = nshV1.ReserveSlot()\n\t\t\treturn nil\n\t\t})\n\t})\n\tif xerr != nil {\n\t\treturn \"\", 0, xerr\n\t}\n\n\tdefer func() {\n\t\tferr = debug.InjectPlannedFail(ferr)\n\t\tif ferr != nil {\n\t\t\tderr := FreeCIDRForSingleHost(cleanupContextFrom(ctx), networkInstance, index)\n\t\t\tif derr != nil {\n\t\t\t\t_ = ferr.AddConsequence(fail.Wrap(derr, \"cleaning up on failure, failed to free CIDR slot '%d' in Network '%s'\", index, networkInstance.GetName()))\n\t\t\t}\n\t\t}\n\t}()\n\n\t_, networkNet, err := net.ParseCIDR(abstract.SingleHostNetworkCIDR)\n\terr = debug.InjectPlannedError(err)\n\tif err != nil {\n\t\treturn \"\", 0, fail.Wrap(err, \"failed to convert CIDR to net.IPNet\")\n\t}\n\n\tresult, xerr := netretry.NthIncludedSubnet(*networkNet, propertiesv1.SingleHostsCIDRMaskAddition, 
index)\n\tif xerr != nil {\n\t\treturn \"\", 0, xerr\n\t}\n\treturn result.String(), index, nil\n}", "func Create(self *IpAddress) (*Memberlist, error) {\n\n\tserviceUrl := os.Getenv(\"GCP_SERVICE_URL\")\n\tif serviceUrl == \"\" {\n\t\treturn nil, fmt.Errorf(\"GCP_SERVICE_URL environment variable unset or missing\")\n\t}\n\n\tid, _ := uuid.NewV4()\n\n\tipAddresses := make(map[string]*IpAddress)\n\tipAddresses[id.String()] = self\n\n\treturn &Memberlist{\n\t\tServiceUrl: serviceUrl,\n\t\tUuid: id.String(),\n\t\tSelf: self,\n\t}, nil\n}", "func DrawElementsInstancedBaseVertexBaseInstance(mode uint32, count int32, xtype uint32, indices unsafe.Pointer, instancecount int32, basevertex int32, baseinstance uint32) {\n C.glowDrawElementsInstancedBaseVertexBaseInstance(gpDrawElementsInstancedBaseVertexBaseInstance, (C.GLenum)(mode), (C.GLsizei)(count), (C.GLenum)(xtype), indices, (C.GLsizei)(instancecount), (C.GLint)(basevertex), (C.GLuint)(baseinstance))\n}", "func (a *PodAllocator) Init() error {\n\tvar err error\n\tif util.DoesNetworkRequireTunnelIDs(a.netInfo) {\n\t\ta.idAllocator, err = id.NewIDAllocator(a.netInfo.GetNetworkName(), types.MaxLogicalPortTunnelKey)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t// Reserve the id 0. 
We don't want to assign this id to any of the pods.\n\t\terr = a.idAllocator.ReserveID(\"zero\", 0)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tif util.DoesNetworkRequireIPAM(a.netInfo) {\n\t\tsubnets := a.netInfo.Subnets()\n\t\tipNets := make([]*net.IPNet, 0, len(subnets))\n\t\tfor _, subnet := range subnets {\n\t\t\tipNets = append(ipNets, subnet.CIDR)\n\t\t}\n\n\t\treturn a.ipAllocator.AddOrUpdateSubnet(a.netInfo.GetNetworkName(), ipNets, a.netInfo.ExcludeSubnets()...)\n\t}\n\n\treturn nil\n}", "func newPool(addr string, num int) *TCPPool{\n\tif(num < 1){\n\t\tfmt.Println(\"Connection pool requires atleast one connection\")\n\t\treturn nil\n\t}\n\ttcpPool := &TCPPool{}\n\tfor i := 0; i < num; i++{\n\t\tconn, err := createConnection(addr);\n\t\tif err != nil{\n\t\t\tfmt.Println(\"Cannot create connection \", err)\n\t\t\treturn nil\n\t\t}\n\t\ttcpPool.putConnection(conn)\t\n\t}\n\treturn tcpPool\n}", "func New(userName string, subuidSrc, subgidSrc io.Reader) (intf.SubidAlloc, error) {\n\n\tfilter := func(entry user.SubID) bool {\n\t\treturn entry.Name == userName\n\t}\n\n\t// read subuid range(s) for userName\n\tuidRanges, err := user.ParseSubIDFilter(subuidSrc, filter)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif len(uidRanges) == 0 {\n\t\treturn nil, fmt.Errorf(\"could not find subuid info for user %s\", userName)\n\t}\n\n\t// read subgid range(s) for userName\n\tgidRanges, err := user.ParseSubIDFilter(subgidSrc, filter)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif len(gidRanges) == 0 {\n\t\treturn nil, fmt.Errorf(\"could not find subgid info for user %s\", userName)\n\t}\n\n\t// we need at least one common subuid and subgid range\n\tcommonRanges := getCommonRanges(uidRanges, gidRanges)\n\tif len(commonRanges) == 0 {\n\t\treturn nil, fmt.Errorf(\"could not find matching subuid and subgids range for user %s\", userName)\n\t}\n\n\tsub := &subidAlloc{}\n\n\t// find a common range that is large enough for the allocation size\n\tfoundRange 
:= false\n\tfor _, subid := range commonRanges {\n\t\tif subid.Count >= int64(allocBlkSize) {\n\t\t\tfoundRange = true\n\t\t\tsub.idRange = subid\n\t\t\tbreak\n\t\t}\n\t}\n\n\tif !foundRange {\n\t\treturn nil, fmt.Errorf(\"did not find a large enough subuid range for user %s (need %v)\", userName, allocBlkSize)\n\t}\n\n\treturn sub, nil\n}", "func (ms *memoryStore) GetWithBase(base string) (*NameSpace, error) {\n\tms.RLock()\n\tdefer ms.RUnlock()\n\tns, ok := ms.base2prefix[base]\n\tif !ok {\n\t\treturn nil, ErrNameSpaceNotFound\n\t}\n\treturn ns, nil\n}", "func (m *InstanceManager) Ensure(ctx context.Context, obj runtime.Object) (bool, error) {\n\tinstance, err := convertInstance(obj)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\n\tregion := scw.Region(instance.Spec.Region)\n\n\t// if instanceID is empty, we need to create the instance\n\tif instance.Spec.InstanceID == \"\" {\n\t\treturn false, m.createInstance(ctx, instance)\n\t}\n\n\trdbInstanceResp, err := m.API.GetInstance(&rdb.GetInstanceRequest{\n\t\tRegion: region,\n\t\tInstanceID: instance.Spec.InstanceID,\n\t})\n\tif err != nil {\n\t\treturn false, err\n\t}\n\n\tneedReturn, err := m.updateInstance(instance, rdbInstanceResp)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\tif needReturn {\n\t\treturn false, nil\n\t}\n\n\tneedReturn, err = m.upgradeInstance(instance, rdbInstanceResp)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\tif needReturn {\n\t\treturn false, nil\n\t}\n\n\tif instance.Spec.ACL != nil {\n\t\terr = m.updateACLs(ctx, instance, rdbInstanceResp)\n\t\tif err != nil {\n\t\t\treturn false, err\n\t\t}\n\t}\n\n\tif rdbInstanceResp.Endpoint != nil {\n\t\tinstance.Status.Endpoint.IP = rdbInstanceResp.Endpoint.IP.String()\n\t\tinstance.Status.Endpoint.Port = int32(rdbInstanceResp.Endpoint.Port)\n\t}\n\n\treturn rdbInstanceResp.Status == rdb.InstanceStatusReady, nil\n}", "func (pool *ComplexPool) New() (Proxy, error) {\n\tlength := pool.SizeUnused()\n\n\tif length == 0 {\n\t\tif 
!pool.Config.ReloadWhenEmpty {\n\t\t\treturn Proxy{}, fmt.Errorf(\"prox (%p): cannot select proxy, no unused proxies left in pool\", pool)\n\t\t}\n\n\t\terr := pool.Load()\n\t\tif err != nil {\n\t\t\treturn Proxy{}, fmt.Errorf(\"prox (%p): cannot select unused proxy, error occurred while reloading pool: %v\", pool, err)\n\t\t}\n\n\t\tlength = pool.SizeUnused()\n\t\tif length == 0 {\n\t\t\treturn Proxy{}, fmt.Errorf(\"prox (%p): cannot select proxy, no unused proxies even after reload\", pool)\n\t\t}\n\t}\n\n\trawProxy := pool.Unused.Random()\n\tpool.Unused.Remove(rawProxy)\n\n\treturn *CastProxy(rawProxy), nil\n}", "func (pool *servicePool) malloc(t reflect.Type) interface{} {\n\t// 判断此 领域服务类型是否存在 pool\n\tsyncpool, ok := pool.pool[t]\n\tif !ok {\n\t\treturn nil\n\t}\n\t// Get 其实是在 BindService 时注入的 生成 service 对象的函数\n\tnewService := syncpool.Get()\n\tif newService == nil {\n\t\tpanic(fmt.Sprintf(\"[Freedom] BindService: func return to empty, %v\", t))\n\t}\n\treturn newService\n}", "func (rf *Factory) Create(address string) (types.Backend, error) {\n\t// No need to add prints in this function.\n\t// Make sure caller of this takes care of printing error\n\tlogrus.Infof(\"Connecting to remote: %s\", address)\n\n\tcontrolAddress, dataAddress, _, err := util.ParseAddresses(address)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tr := &Remote{\n\t\tName: address,\n\t\treplicaURL: fmt.Sprintf(\"http://%s/v1/replicas/1\", controlAddress),\n\t\tpingURL: fmt.Sprintf(\"http://%s/ping\", controlAddress),\n\t\thttpClient: &http.Client{\n\t\t\tTimeout: timeout,\n\t\t},\n\t\t// We don't want sender to wait for receiver, because receiver may\n\t\t// has been already notified\n\t\tcloseChan: make(chan struct{}, 5),\n\t\tmonitorChan: make(types.MonitorChannel, 5),\n\t}\n\n\treplica, err := r.info()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif replica.State != \"closed\" {\n\t\treturn nil, fmt.Errorf(\"Replica must be closed, Can not add in state: %s\", 
replica.State)\n\t}\n\n\tconn, err := net.Dial(\"tcp\", dataAddress)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tremote := rpc.NewClient(conn, r.closeChan)\n\tr.IOs = remote\n\n\tif err := r.open(); err != nil {\n\t\tlogrus.Errorf(\"Failed to open replica, error: %v\", err)\n\t\tremote.Close()\n\t\treturn nil, err\n\t}\n\n\tgo r.monitorPing(remote)\n\n\treturn r, nil\n}", "func (db *DB) allocate(txid txid, count int) (*page, error) {\n\t// Allocate a temporary buffer for the page.\n\tvar buf []byte\n\tif count == 1 {\n\t\tbuf = db.pagePool.Get().([]byte)\n\t} else {\n\t\tbuf = make([]byte, count*db.pageSize)\n\t}\n\tp := (*page)(unsafe.Pointer(&buf[0]))\n\tp.overflow = uint32(count - 1)\n\n\t// Use pages from the freelist if they are available.\n\tif p.id = db.freelist.allocate(txid, count); p.id != 0 {\n\t\treturn p, nil\n\t}\n\n\t// Resize mmap() if we're at the end.\n\tp.id = db.rwtx.meta.pgid\n\tvar minsz = int((p.id+pgid(count))+1) * db.pageSize\n\tif minsz >= db.datasz {\n\t\tif err := db.mmap(minsz); err != nil {\n\t\t\treturn nil, fmt.Errorf(\"mmap allocate error: %s\", err)\n\t\t}\n\t}\n\n\t// Move the page id high water mark.\n\tdb.rwtx.meta.pgid += pgid(count)\n\n\treturn p, nil\n}", "func newBaseConn(conn net.Conn) *BaseConn {\n\n\tb := new(BaseConn)\n\tb.conn = conn\n\n\tb.disconnected = make(chan struct{})\n\tb.send = make(chan *baseproto.Message, 5)\n\tb.stop = make(chan struct{})\n\tb.receivedCapabilities = make(chan *baseproto.Message, 1)\n\tb.received = make(chan *baseproto.Message, 5)\n\n\tb.Received = b.received\n\tb.Disconnected = b.disconnected\n\n\tgo b.readLoop()\n\tgo b.writeLoop()\n\n\treturn b\n}", "func HandleInstanceCreate(w rest.ResponseWriter, r *rest.Request) {\n\t// get ima\n\tima := Ima{}\n\terr := r.DecodeJsonPayload(&ima)\n\tif err != nil {\n\t\trest.Error(w, err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n\tif ima.Ima == \"\" {\n\t\trest.Error(w, \"ima required\", 400)\n\t\treturn\n\t}\n\tif ima.Mem == 0 
{\n\t\trest.Error(w, \"memory required\", 400)\n\t\treturn\n\t}\n\tif ima.Cpu == 0 {\n\t\trest.Error(w, \"cpu required\", 400)\n\t\treturn\n\t}\n\n\t// start the instance\n\tos := getImaOs(ima.Ima)\n\tswitch os {\n\tcase \"freebsd\":\n\t\t// clone ima to instance\n\t\tinstanceid := allocateInstanceId()\n\t\tcloneIma(ima.Ima, instanceid)\n\n\t\t// create network interface and bring up\n\t\ttap := allocateTap()\n\t\tif tap == \"\" {\n\t\t\treturn\n\t\t}\n\t\tsaveTap(tap, instanceid)\n\t\tbridge := findBridge()\n\t\taddTapToBridge(tap, bridge)\n\t\tbridgeUp(bridge)\n\n\t\tnmdm := \"/dev/nmdm-\" + instanceid + \"-A\"\n\t\tsaveCpu(ima.Cpu, instanceid)\n\t\tsaveMem(ima.Mem, instanceid)\n\t\tgo startFreeBSDVM(nmdm, ima.Cpu, ima.Mem, tap, instanceid)\n\t\tw.WriteJson(&instanceid)\n\tcase \"linux\":\n\t\t// clone ima to instance\n\t\tinstanceid := allocateInstanceId()\n\t\tcloneIma(ima.Ima, instanceid)\n\n\t\t// create network interface and bring up\n\t\ttap := allocateTap()\n\t\tif tap == \"\" {\n\t\t\treturn\n\t\t}\n\t\tsaveTap(tap, instanceid)\n\t\tbridge := findBridge()\n\t\taddTapToBridge(tap, bridge)\n\t\tbridgeUp(bridge)\n\n\t\t//nmdm := \"/dev/nmdm-\" + instanceid + \"-A\"\n\t\tsaveCpu(ima.Cpu, instanceid)\n\t\tsaveMem(ima.Mem, instanceid)\n\t\tbhyveDestroy(instanceid)\n\t\tnmdm := \"/dev/nmdm-\" + instanceid + \"-A\"\n\t\tgo startLinuxVM(nmdm, ima.Cpu, ima.Mem, tap, instanceid)\n\t\tw.WriteJson(&instanceid)\n\tdefault:\n\t\trest.Error(w, \"unknown OS\", 400)\n\t}\n}", "func newClient(addr string, max int, discardClientTimeout time.Duration, fn connectRPCFn) (Client, error) {\n\n\trpcClientFactory := func() (interface{}, error) {\n\t\treturn fn(addr)\n\t}\n\trpcPool, err := pool.NewPool(max, rpcClientFactory)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\trc := &reconnectingClient{addr: addr, pool: rpcPool, discardClientTimeout: discardClientTimeout}\n\treturn rc, nil\n}", "func newBaseClient() *baseClient {\n\treturn &baseClient{\n\t\thttpClient: 
http.DefaultClient,\n\t\tmethod: \"GET\",\n\t\theader: make(http.Header),\n\t}\n}", "func NewCustom(network string, size int, af AddrFunc, df DialFunc) (*Pool, error) {\n\tp := Pool{\n\t\tpool: make(chan *redis.Client, size),\n\t\tspare: make(chan string, size),\n\t\tdf: df,\n\t\tstopCh: make(chan bool),\n\t\tnetwork: network,\n\t}\n\n\tclient, err := df(network, af(0))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tp.pool <- client\n\n\tfor i := 1; i < size; i++ {\n\t\tp.spare <- af(i)\n\t}\n\n\t// set up a go-routine which will periodically ping connections in the pool.\n\t// if the pool is idle every connection will be hit once every 10 seconds.\n\tgo func() {\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase <-time.After(10 * time.Second / time.Duration(size - len(p.spare))):\n\t\t\t\tp.ping()\n\t\t\tcase <-p.stopCh:\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}()\n\n\treturn &p, err\n}", "func (bi *baseInstance) toBeta() *beta.Instance {\n\tinst := &beta.Instance{Name: bi.name, Zone: bi.zone, NetworkInterfaces: []*beta.NetworkInterface{{}}}\n\tif bi.aliasRange != \"\" {\n\t\tinst.NetworkInterfaces[0].AliasIpRanges = []*beta.AliasIpRange{\n\t\t\t{IpCidrRange: bi.aliasRange, SubnetworkRangeName: util.TestSecondaryRangeName},\n\t\t}\n\t}\n\treturn inst\n}", "func (face *FaceBase) InitFaceBase(id FaceId, sizeofPriv int, socket eal.NumaSocket) error {\n\tface.id = id\n\n\tif socket.IsAny() {\n\t\tif lc := eal.GetCurrentLCore(); lc.IsValid() {\n\t\t\tsocket = lc.GetNumaSocket()\n\t\t} else {\n\t\t\tsocket = eal.NumaSocketFromID(0) // TODO what if socket 0 is unavailable?\n\t\t}\n\t}\n\n\tfaceC := face.getPtr()\n\t*faceC = C.Face{}\n\tfaceC.id = C.FaceId(face.id)\n\tfaceC.state = C.FACESTA_UP\n\tfaceC.numaSocket = C.int(socket.ID())\n\n\tsizeofImpl := int(C.sizeof_FaceImpl) + sizeofPriv\n\tfaceC.impl = (*C.FaceImpl)(eal.ZmallocAligned(\"FaceImpl\", sizeofImpl, 1, socket))\n\n\treturn nil\n\n}", "func CreateBaseConfigWithReadiness() *dynamic.Configuration {\n\treturn 
&dynamic.Configuration{\n\t\tHTTP: &dynamic.HTTPConfiguration{\n\t\t\tRouters: map[string]*dynamic.Router{\n\t\t\t\t\"readiness\": {\n\t\t\t\t\tRule: \"Path(`/ping`)\",\n\t\t\t\t\tEntryPoints: []string{\"readiness\"},\n\t\t\t\t\tService: \"readiness\",\n\t\t\t\t},\n\t\t\t},\n\t\t\tServices: map[string]*dynamic.Service{\n\t\t\t\t\"readiness\": {\n\t\t\t\t\tLoadBalancer: &dynamic.ServersLoadBalancer{\n\t\t\t\t\t\tPassHostHeader: Bool(true),\n\t\t\t\t\t\tServers: []dynamic.Server{\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\tURL: \"http://127.0.0.1:8080\",\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tMiddlewares: map[string]*dynamic.Middleware{},\n\t\t},\n\t\tTCP: &dynamic.TCPConfiguration{\n\t\t\tRouters: map[string]*dynamic.TCPRouter{},\n\t\t\tServices: map[string]*dynamic.TCPService{},\n\t\t},\n\t}\n}", "func (c BaseConfig) GetBaseConfig() BaseConfig { return c }", "func (c *AntreaIPAMController) preallocateIPPoolForStatefulSet(ss *appsv1.StatefulSet) error {\n\tklog.InfoS(\"Processing create notification\", \"Namespace\", ss.Namespace, \"StatefulSet\", ss.Name)\n\n\tipPools := c.getIPPoolsForStatefulSet(ss)\n\n\tif ipPools == nil {\n\t\t// nothing to preallocate\n\t\treturn nil\n\t}\n\n\tif len(ipPools) > 1 {\n\t\treturn fmt.Errorf(\"annotation of multiple IP Pools is not supported\")\n\t}\n\n\t// Only one pool is supported for now. Dual stack support coming in future.\n\tipPoolName := ipPools[0]\n\tallocator, err := poolallocator.NewIPPoolAllocator(ipPoolName, c.crdClient, c.ipPoolLister)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to find IP Pool %s: %s\", ipPoolName, err)\n\t}\n\n\tsize := int(*ss.Spec.Replicas)\n\t// Note that AllocateStatefulSet would not preallocate IPs if this StatefulSet is already present\n\t// in the pool. This safeguards us from double allocation in case agent allocated IP by the time\n\t// controller task is executed. 
Note also that StatefulSet resize will not be handled.\n\tif size > 0 {\n\t\terr = allocator.AllocateStatefulSet(ss.Namespace, ss.Name, size)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"failed to preallocate continuous IP space of size %d from Pool %s: %s\", size, ipPoolName, err)\n\t\t}\n\t}\n\n\treturn nil\n}", "func (self *RegisObjManager) LoadPlayerBaseObj(id PLAYER_ID) *RedisPlayerBaseObj {\n\tvalue, ok := self.Load(id)\n\tif ok {\n\t\treturn value.(*RedisPlayerBaseObj)\n\t}\n\treturn nil\n}", "func (a *APILoadBalancers) New() (types.Resource, error) {\n\tif err := a.Validate(); err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to validate API Load balancers configuration: %w\", err)\n\t}\n\n\tcc := &container.Containers{\n\t\tPreviousState: a.State,\n\t\tDesiredState: make(container.ContainersState),\n\t}\n\n\tfor i, lb := range a.APILoadBalancers {\n\t\tlb := lb\n\t\ta.propagateInstance(&lb)\n\n\t\tlbx, _ := lb.New()\n\t\tlbxHcc, _ := lbx.ToHostConfiguredContainer()\n\n\t\tcc.DesiredState[strconv.Itoa(i)] = lbxHcc\n\t}\n\n\tc, _ := cc.New()\n\n\treturn &apiLoadBalancers{\n\t\tcontainers: c,\n\t}, nil\n}", "func newNetwork(cfg *config.Network, c *ec2.EC2) (*network, error) {\n\tlog.Debug(\"Initializing AWS Network\")\n\tn := &network{\n\t\tResources: resource.NewResources(),\n\t\tNetwork: cfg,\n\t\tec2: c,\n\t}\n\n\tvpc, err := newVpc(c, n)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tn.vpc = vpc\n\tn.Append(vpc)\n\n\trouteTables, err := newRouteTables(c, n)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tn.routeTables = routeTables\n\tn.Append(routeTables)\n\n\tinternetGateway, err := newInternetGateway(c, n, \"public\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tn.internetGateway = internetGateway\n\tn.Append(internetGateway)\n\n\t// Load the vpc since it is needed for the caches.\n\terr = n.vpc.Load()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tn.subnetCache, err = newSubnetCache(n)\n\tif err != nil {\n\t\treturn nil, 
err\n\t}\n\tn.secgroupCache, err = newSecurityGroupCache(n)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn n, nil\n}", "func newMaglevLoadBalancer(info types.ClusterInfo, set types.HostSet) types.LoadBalancer {\n\tnames := []string{}\n\tfor _, host := range set.Hosts() {\n\t\tnames = append(names, host.AddressString())\n\t}\n\tmgv := &maglevLoadBalancer{\n\t\thosts: set,\n\t}\n\n\tnameCount := len(names)\n\t// if host count > BigM, maglev table building will cross array boundary\n\t// maglev lb will not work in this scenario\n\tif nameCount >= maglev.BigM {\n\t\tlog.DefaultLogger.Errorf(\"[lb][maglev] host count too large, expect <= %d, get %d\",\n\t\t\tmaglev.BigM, nameCount)\n\t\treturn mgv\n\t}\n\tif nameCount == 0 {\n\t\treturn mgv\n\t}\n\n\tmaglevM := maglev.SmallM\n\t// according to test, 30000 host with testing 1e8 times, hash distribution begins to go wrong,\n\t// max=4855, mean=3333.3333333333335, peak-to-mean=1.4565\n\t// so use BigM when host >= 30000\n\tlimit := 30000\n\tif nameCount >= limit {\n\t\tlog.DefaultLogger.Infof(\"[lb][maglev] host count %d >= %d, using maglev.BigM\", nameCount, limit)\n\t\tmaglevM = maglev.BigM\n\t}\n\n\tmgv.maglev = maglev.New(names, uint64(maglevM))\n\treturn mgv\n}", "func (c *Repair) doRunOnce() error {\n\t// TODO: (per smarterclayton) if Get() or ListServices() is a weak consistency read,\n\t// or if they are executed against different leaders,\n\t// the ordering guarantee required to ensure no IP is allocated twice is violated.\n\t// ListServices must return a ResourceVersion higher than the etcd index Get triggers,\n\t// and the release code must not release services that have had IPs allocated but not yet been created\n\t// See #8295\n\n\t// If etcd server is not running we should wait for some time and fail only then. 
This is particularly\n\t// important when we start apiserver and etcd at the same time.\n\tsnapshotByFamily := make(map[v1.IPFamily]*api.RangeAllocation)\n\tstoredByFamily := make(map[v1.IPFamily]ipallocator.Interface)\n\n\terr := wait.PollImmediate(time.Second, 10*time.Second, func() (bool, error) {\n\t\tfor family, allocator := range c.allocatorByFamily {\n\t\t\t// get snapshot if it is not there\n\t\t\tif _, ok := snapshotByFamily[family]; !ok {\n\t\t\t\tsnapshot, err := allocator.Get()\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn false, err\n\t\t\t\t}\n\n\t\t\t\tsnapshotByFamily[family] = snapshot\n\t\t\t}\n\t\t}\n\t\treturn true, nil\n\t})\n\n\tif err != nil {\n\t\treturn fmt.Errorf(\"unable to refresh the service IP block: %v\", err)\n\t}\n\n\t// ensure that ranges are assigned\n\tfor family, snapshot := range snapshotByFamily {\n\t\tif snapshot.Range == \"\" {\n\t\t\tsnapshot.Range = c.networkByFamily[family].String()\n\t\t}\n\t}\n\n\t// Create an allocator because it is easy to use.\n\tfor family, snapshot := range snapshotByFamily {\n\t\tstored, err := ipallocator.NewFromSnapshot(snapshot)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"unable to rebuild allocator from snapshots for family:%v with error:%v\", family, err)\n\t\t}\n\n\t\tstoredByFamily[family] = stored\n\t}\n\n\trebuiltByFamily := make(map[v1.IPFamily]*ipallocator.Range)\n\n\tfor family, network := range c.networkByFamily {\n\t\trebuilt, err := ipallocator.NewInMemory(network)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"unable to create CIDR range for family %v: %v\", family, err)\n\t\t}\n\n\t\trebuiltByFamily[family] = rebuilt\n\t}\n\t// We explicitly send no resource version, since the resource version\n\t// of 'snapshot' is from a different collection, it's not comparable to\n\t// the service collection. 
The caching layer keeps per-collection RVs,\n\t// and this is proper, since in theory the collections could be hosted\n\t// in separate etcd (or even non-etcd) instances.\n\tlist, err := c.serviceClient.Services(metav1.NamespaceAll).List(context.TODO(), metav1.ListOptions{})\n\tif err != nil {\n\t\treturn fmt.Errorf(\"unable to refresh the service IP block: %v\", err)\n\t}\n\n\tgetFamilyByIP := func(ip net.IP) v1.IPFamily {\n\t\tif netutils.IsIPv6(ip) {\n\t\t\treturn v1.IPv6Protocol\n\t\t}\n\t\treturn v1.IPv4Protocol\n\t}\n\n\t// Check every Service's ClusterIP, and rebuild the state as we think it should be.\n\tfor _, svc := range list.Items {\n\t\tif !helper.IsServiceIPSet(&svc) {\n\t\t\t// didn't need a cluster IP\n\t\t\tcontinue\n\t\t}\n\n\t\tfor _, ip := range svc.Spec.ClusterIPs {\n\t\t\tip := netutils.ParseIPSloppy(ip)\n\t\t\tif ip == nil {\n\t\t\t\t// cluster IP is corrupt\n\t\t\t\tc.recorder.Eventf(&svc, nil, v1.EventTypeWarning, \"ClusterIPNotValid\", \"ClusterIPValidation\", \"Cluster IP %s is not a valid IP; please recreate service\", ip)\n\t\t\t\truntime.HandleError(fmt.Errorf(\"the cluster IP %s for service %s/%s is not a valid IP; please recreate\", ip, svc.Name, svc.Namespace))\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tfamily := getFamilyByIP(ip)\n\t\t\tif _, ok := rebuiltByFamily[family]; !ok {\n\t\t\t\t// this service is using an IPFamily no longer configured on cluster\n\t\t\t\tc.recorder.Eventf(&svc, nil, v1.EventTypeWarning, \"ClusterIPNotValid\", \"ClusterIPValidation\", \"Cluster IP %s(%s) is of ip family that is no longer configured on cluster; please recreate service\", ip, family)\n\t\t\t\truntime.HandleError(fmt.Errorf(\"the cluster IP %s(%s) for service %s/%s is of ip family that is no longer configured on cluster; please recreate\", ip, family, svc.Name, svc.Namespace))\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\t// mark it as in-use\n\t\t\tactualAlloc := rebuiltByFamily[family]\n\t\t\tswitch err := actualAlloc.Allocate(ip); err {\n\t\t\tcase 
nil:\n\t\t\t\tactualStored := storedByFamily[family]\n\t\t\t\tif actualStored.Has(ip) {\n\t\t\t\t\t// remove it from the old set, so we can find leaks\n\t\t\t\t\tactualStored.Release(ip)\n\t\t\t\t} else {\n\t\t\t\t\t// cluster IP doesn't seem to be allocated\n\t\t\t\t\tc.recorder.Eventf(&svc, nil, v1.EventTypeWarning, \"ClusterIPNotAllocated\", \"ClusterIPAllocation\", \"Cluster IP [%v]:%s is not allocated; repairing\", family, ip)\n\t\t\t\t\truntime.HandleError(fmt.Errorf(\"the cluster IP [%v]:%s for service %s/%s is not allocated; repairing\", family, ip, svc.Name, svc.Namespace))\n\t\t\t\t}\n\t\t\t\tdelete(c.leaksByFamily[family], ip.String()) // it is used, so it can't be leaked\n\t\t\tcase ipallocator.ErrAllocated:\n\t\t\t\t// cluster IP is duplicate\n\t\t\t\tc.recorder.Eventf(&svc, nil, v1.EventTypeWarning, \"ClusterIPAlreadyAllocated\", \"ClusterIPAllocation\", \"Cluster IP [%v]:%s was assigned to multiple services; please recreate service\", family, ip)\n\t\t\t\truntime.HandleError(fmt.Errorf(\"the cluster IP [%v]:%s for service %s/%s was assigned to multiple services; please recreate\", family, ip, svc.Name, svc.Namespace))\n\t\t\tcase err.(*ipallocator.ErrNotInRange):\n\t\t\t\t// cluster IP is out of range\n\t\t\t\tc.recorder.Eventf(&svc, nil, v1.EventTypeWarning, \"ClusterIPOutOfRange\", \"ClusterIPAllocation\", \"Cluster IP [%v]:%s is not within the service CIDR %s; please recreate service\", family, ip, c.networkByFamily[family])\n\t\t\t\truntime.HandleError(fmt.Errorf(\"the cluster IP [%v]:%s for service %s/%s is not within the service CIDR %s; please recreate\", family, ip, svc.Name, svc.Namespace, c.networkByFamily[family]))\n\t\t\tcase ipallocator.ErrFull:\n\t\t\t\t// somehow we are out of IPs\n\t\t\t\tcidr := actualAlloc.CIDR()\n\t\t\t\tc.recorder.Eventf(&svc, nil, v1.EventTypeWarning, \"ServiceCIDRFull\", \"ClusterIPAllocation\", \"Service CIDR %v is full; you must widen the CIDR in order to create new services for Cluster IP [%v]:%s\", cidr, 
family, ip)\n\t\t\t\treturn fmt.Errorf(\"the service CIDR %v is full; you must widen the CIDR in order to create new services for Cluster IP [%v]:%s\", cidr, family, ip)\n\t\t\tdefault:\n\t\t\t\tc.recorder.Eventf(&svc, nil, v1.EventTypeWarning, \"UnknownError\", \"ClusterIPAllocation\", \"Unable to allocate cluster IP [%v]:%s due to an unknown error\", family, ip)\n\t\t\t\treturn fmt.Errorf(\"unable to allocate cluster IP [%v]:%s for service %s/%s due to an unknown error, exiting: %v\", family, ip, svc.Name, svc.Namespace, err)\n\t\t\t}\n\t\t}\n\t}\n\n\t// leak check\n\tfor family, leaks := range c.leaksByFamily {\n\t\tc.checkLeaked(leaks, storedByFamily[family], rebuiltByFamily[family])\n\t}\n\n\t// save logic\n\t// Blast the rebuilt state into storage.\n\tfor family, rebuilt := range rebuiltByFamily {\n\t\terr = c.saveSnapShot(rebuilt, c.allocatorByFamily[family], snapshotByFamily[family])\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}", "func (na *cnmNetworkAllocator) Allocate(n *api.Network) error {\n\tif _, ok := na.networks[n.ID]; ok {\n\t\treturn fmt.Errorf(\"network %s already allocated\", n.ID)\n\t}\n\n\td, err := na.resolveDriver(n)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tnw := &network{\n\t\tnw: n,\n\t\tendpoints: make(map[string]string),\n\t\tisNodeLocal: d.capability.DataScope == scope.Local,\n\t}\n\n\t// No swarm-level allocation can be provided by the network driver for\n\t// node-local networks. 
Only thing needed is populating the driver's name\n\t// in the driver's state.\n\tif nw.isNodeLocal {\n\t\tn.DriverState = &api.Driver{\n\t\t\tName: d.name,\n\t\t}\n\t\t// In order to support backward compatibility with older daemon\n\t\t// versions which assumes the network attachment to contains\n\t\t// non nil IPAM attribute, passing an empty object\n\t\tn.IPAM = &api.IPAMOptions{Driver: &api.Driver{}}\n\t} else {\n\t\tnw.pools, err = na.allocatePools(n)\n\t\tif err != nil {\n\t\t\treturn errors.Wrapf(err, \"failed allocating pools and gateway IP for network %s\", n.ID)\n\t\t}\n\n\t\tif err := na.allocateDriverState(n); err != nil {\n\t\t\tna.freePools(n, nw.pools)\n\t\t\treturn errors.Wrapf(err, \"failed while allocating driver state for network %s\", n.ID)\n\t\t}\n\t}\n\n\tna.networks[n.ID] = nw\n\n\treturn nil\n}", "func (o BuildSpecRuntimePtrOutput) Base() BuildSpecRuntimeBasePtrOutput {\n\treturn o.ApplyT(func(v *BuildSpecRuntime) *BuildSpecRuntimeBase {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn v.Base\n\t}).(BuildSpecRuntimeBasePtrOutput)\n}" ]
[ "0.5949005", "0.5725722", "0.5369504", "0.5214787", "0.5149179", "0.5123685", "0.5117184", "0.5099963", "0.5049843", "0.5029948", "0.50290453", "0.50138724", "0.49412763", "0.49271756", "0.48621428", "0.48538187", "0.47791496", "0.47764036", "0.47500402", "0.47404003", "0.47392026", "0.47286868", "0.47158033", "0.47081983", "0.4691098", "0.46894893", "0.4674756", "0.46728227", "0.466863", "0.4660493", "0.46567085", "0.46546787", "0.46466398", "0.46423286", "0.46196347", "0.4616663", "0.4585225", "0.45729405", "0.45725137", "0.45479596", "0.4547838", "0.4546467", "0.45397297", "0.45248595", "0.45191827", "0.45146802", "0.45107323", "0.45061502", "0.4505096", "0.4499987", "0.44824037", "0.44774488", "0.44768718", "0.4467408", "0.446287", "0.4462736", "0.44576836", "0.4448222", "0.44353896", "0.44297665", "0.44271377", "0.44262126", "0.44209427", "0.44085407", "0.4397695", "0.43925774", "0.4392097", "0.43910056", "0.43904477", "0.43850523", "0.43801013", "0.43771797", "0.43745402", "0.43695778", "0.4366509", "0.43612984", "0.43545148", "0.43533444", "0.4347718", "0.43467346", "0.43460947", "0.4345409", "0.43446916", "0.43419752", "0.43319753", "0.43311697", "0.43300846", "0.432516", "0.43236843", "0.43221587", "0.43209547", "0.43176502", "0.43176174", "0.4317361", "0.4317", "0.4316545", "0.43143174", "0.43054664", "0.43016604", "0.4300207" ]
0.72381115
0
newGAGetHook creates a new closure with the current baseInstanceList to be used as a MockInstances.GetHook
newGAGetHook создает новый замыкание с текущим baseInstanceList, используемое как MockInstances.GetHook
func (bil *baseInstanceList) newGAGetHook() func(ctx context.Context, key *meta.Key, m *cloud.MockInstances) (bool, *ga.Instance, error) { return func(ctx context.Context, key *meta.Key, m *cloud.MockInstances) (bool, *ga.Instance, error) { m.Lock.Lock() defer m.Lock.Unlock() if _, found := m.Objects[*key]; !found { m.Objects[*key] = &cloud.MockInstancesObj{Obj: bil.getOrCreateBaseInstance(key).toGA()} } return false, nil, nil } }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (bil *baseInstanceList) newBetaGetHook() func(ctx context.Context, key *meta.Key, m *cloud.MockBetaInstances) (bool, *beta.Instance, error) {\n\treturn func(ctx context.Context, key *meta.Key, m *cloud.MockBetaInstances) (bool, *beta.Instance, error) {\n\t\tm.Lock.Lock()\n\t\tdefer m.Lock.Unlock()\n\n\t\tif _, found := m.Objects[*key]; !found {\n\t\t\tm.Objects[*key] = &cloud.MockInstancesObj{Obj: bil.getOrCreateBaseInstance(key).toBeta()}\n\t\t}\n\t\treturn false, nil, nil\n\t}\n}", "func (bil *baseInstanceList) newMockCloud() cloud.Cloud {\n\tc := cloud.NewMockGCE(nil)\n\n\t// insert hooks to lazy create a instance when needed\n\tc.MockInstances.GetHook = bil.newGAGetHook()\n\tc.MockBetaInstances.GetHook = bil.newBetaGetHook()\n\n\treturn c\n}", "func (f *AutoIndexingServiceGetIndexesFunc) PushHook(hook func(context.Context, shared.GetIndexesOptions) ([]types.Index, int, error)) {\n\tf.mutex.Lock()\n\tf.hooks = append(f.hooks, hook)\n\tf.mutex.Unlock()\n}", "func (f *AutoIndexingServiceGetListTagsFunc) PushHook(hook func(context.Context, api.RepoName, ...string) ([]*gitdomain.Tag, error)) {\n\tf.mutex.Lock()\n\tf.hooks = append(f.hooks, hook)\n\tf.mutex.Unlock()\n}", "func (f *ExtensionStoreGetFeaturedExtensionsFunc) PushHook(hook func(context.Context) ([]*stores.Extension, error)) {\n\tf.mutex.Lock()\n\tf.hooks = append(f.hooks, hook)\n\tf.mutex.Unlock()\n}", "func (f *AutoIndexingServiceGetIndexByIDFunc) PushHook(hook func(context.Context, int) (types.Index, bool, error)) {\n\tf.mutex.Lock()\n\tf.hooks = append(f.hooks, hook)\n\tf.mutex.Unlock()\n}", "func (f *AutoIndexingServiceGetIndexesByIDsFunc) PushHook(hook func(context.Context, ...int) ([]types.Index, error)) {\n\tf.mutex.Lock()\n\tf.hooks = append(f.hooks, hook)\n\tf.mutex.Unlock()\n}", "func (f *AutoIndexingServiceGetUnsafeDBFunc) PushHook(hook func() database.DB) {\n\tf.mutex.Lock()\n\tf.hooks = append(f.hooks, hook)\n\tf.mutex.Unlock()\n}", "func newGoGetter() *goGetter {\n\treturn 
&goGetter{}\n}", "func NewHookLister(indexer cache.Indexer) HookLister {\n\treturn &hookLister{indexer: indexer}\n}", "func (f *AutoIndexingServiceNumRepositoriesWithCodeIntelligenceFunc) PushHook(hook func(context.Context) (int, error)) {\n\tf.mutex.Lock()\n\tf.hooks = append(f.hooks, hook)\n\tf.mutex.Unlock()\n}", "func (f *UploadServiceGetListTagsFunc) PushHook(hook func(context.Context, api.RepoName, ...string) ([]*gitdomain.Tag, error)) {\n\tf.mutex.Lock()\n\tf.hooks = append(f.hooks, hook)\n\tf.mutex.Unlock()\n}", "func (f *ReleaseStoreGetLatestFunc) PushHook(hook func(context.Context, int32, string, bool) (*stores.Release, error)) {\n\tf.mutex.Lock()\n\tf.hooks = append(f.hooks, hook)\n\tf.mutex.Unlock()\n}", "func (f *DBStoreGetUploadsFunc) PushHook(hook func(context.Context, dbstore.GetUploadsOptions) ([]dbstore.Upload, int, error)) {\n\tf.mutex.Lock()\n\tf.hooks = append(f.hooks, hook)\n\tf.mutex.Unlock()\n}", "func (f *DBStoreGetUploadsFunc) PushHook(hook func(context.Context, dbstore.GetUploadsOptions) ([]dbstore.Upload, int, error)) {\n\tf.mutex.Lock()\n\tf.hooks = append(f.hooks, hook)\n\tf.mutex.Unlock()\n}", "func (ch *CloudwatchHook) GetHook() (func(zapcore.Entry) error, error) {\n\n\tvar cloudwatchWriter = func(e zapcore.Entry) error {\n\t\tif !ch.isAcceptedLevel(e.Level) {\n\t\t\treturn nil\n\t\t}\n\n\t\tevent := &cloudwatchlogs.InputLogEvent{\n\t\t\tMessage: aws.String(fmt.Sprintf(\"[%s] %s\", e.LoggerName, e.Message)),\n\t\t\tTimestamp: aws.Int64(int64(time.Nanosecond) * time.Now().UnixNano() / int64(time.Millisecond)),\n\t\t}\n\t\tparams := &cloudwatchlogs.PutLogEventsInput{\n\t\t\tLogEvents: []*cloudwatchlogs.InputLogEvent{event},\n\t\t\tLogGroupName: aws.String(ch.GroupName),\n\t\t\tLogStreamName: aws.String(ch.StreamName),\n\t\t\tSequenceToken: ch.nextSequenceToken,\n\t\t}\n\n\t\tif ch.Async {\n\t\t\tgo ch.sendEvent(params)\n\t\t\treturn nil\n\t\t}\n\n\t\treturn ch.sendEvent(params)\n\t}\n\n\tch.svc = 
cloudwatchlogs.New(session.New(ch.AWSConfig))\n\n\tlgresp, err := ch.svc.DescribeLogGroups(&cloudwatchlogs.DescribeLogGroupsInput{LogGroupNamePrefix: aws.String(ch.GroupName), Limit: aws.Int64(1)})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif len(lgresp.LogGroups) < 1 {\n\t\t// we need to create this log group\n\t\t_, err := ch.svc.CreateLogGroup(&cloudwatchlogs.CreateLogGroupInput{LogGroupName: aws.String(ch.GroupName)})\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\tresp, err := ch.svc.DescribeLogStreams(&cloudwatchlogs.DescribeLogStreamsInput{\n\t\tLogGroupName: aws.String(ch.GroupName), // Required\n\t\tLogStreamNamePrefix: aws.String(ch.StreamName),\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// grab the next sequence token\n\tif len(resp.LogStreams) > 0 {\n\t\tch.nextSequenceToken = resp.LogStreams[0].UploadSequenceToken\n\t\treturn cloudwatchWriter, nil\n\t}\n\n\t// create stream if it doesn't exist. the next sequence token will be null\n\t_, err = ch.svc.CreateLogStream(&cloudwatchlogs.CreateLogStreamInput{\n\t\tLogGroupName: aws.String(ch.GroupName),\n\t\tLogStreamName: aws.String(ch.StreamName),\n\t})\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn cloudwatchWriter, nil\n}", "func (f *ReleaseStoreGetLatestBatchFunc) PushHook(hook func(context.Context, []int32, string, bool) ([]*stores.Release, error)) {\n\tf.mutex.Lock()\n\tf.hooks = append(f.hooks, hook)\n\tf.mutex.Unlock()\n}", "func (f *ReleaseStoreGetArtifactsFunc) PushHook(hook func(context.Context, int64) ([]byte, []byte, error)) {\n\tf.mutex.Lock()\n\tf.hooks = append(f.hooks, hook)\n\tf.mutex.Unlock()\n}", "func (f *UploadServiceGetUploadsFunc) PushHook(hook func(context.Context, shared1.GetUploadsOptions) ([]types.Upload, int, error)) {\n\tf.mutex.Lock()\n\tf.hooks = append(f.hooks, hook)\n\tf.mutex.Unlock()\n}", "func (f *AutoIndexingServiceRepositoryIDsWithConfigurationFunc) PushHook(hook func(context.Context, int, int) 
([]shared.RepositoryWithAvailableIndexers, int, error)) {\n\tf.mutex.Lock()\n\tf.hooks = append(f.hooks, hook)\n\tf.mutex.Unlock()\n}", "func (f *DBStoreGetConfigurationPoliciesFunc) PushHook(hook func(context.Context, dbstore.GetConfigurationPoliciesOptions) ([]dbstore.ConfigurationPolicy, int, error)) {\n\tf.mutex.Lock()\n\tf.hooks = append(f.hooks, hook)\n\tf.mutex.Unlock()\n}", "func (f *UploadServiceGetUploadsByIDsFunc) PushHook(hook func(context.Context, ...int) ([]types.Upload, error)) {\n\tf.mutex.Lock()\n\tf.hooks = append(f.hooks, hook)\n\tf.mutex.Unlock()\n}", "func (e *Exporter) NewGaugesFuncWithMultiLabels(name, help string, labels []string, f func() map[string]int64) *stats.GaugesFuncWithMultiLabels {\n\tif e.name == \"\" || name == \"\" {\n\t\tv := stats.NewGaugesFuncWithMultiLabels(name, help, labels, f)\n\t\taddUnnamedExport(name, v)\n\t\treturn v\n\t}\n\tlvar := stats.NewGaugesFuncWithMultiLabels(\"\", help, labels, f)\n\t_ = e.createCountsTracker(name, help, labels, lvar, replaceOnDup, typeGauge)\n\treturn lvar\n}", "func (f *ExtensionStoreGetByIDFunc) PushHook(hook func(context.Context, int32) (*stores.Extension, error)) {\n\tf.mutex.Lock()\n\tf.hooks = append(f.hooks, hook)\n\tf.mutex.Unlock()\n}", "func (f *ExtensionStoreGetByUUIDFunc) PushHook(hook func(context.Context, string) (*stores.Extension, error)) {\n\tf.mutex.Lock()\n\tf.hooks = append(f.hooks, hook)\n\tf.mutex.Unlock()\n}", "func (f *ExtensionStoreGetByExtensionIDFunc) PushHook(hook func(context.Context, string) (*stores.Extension, error)) {\n\tf.mutex.Lock()\n\tf.hooks = append(f.hooks, hook)\n\tf.mutex.Unlock()\n}", "func New(serviceName string, environment string) LoggerWrapper {\n\tlogStore = &loggerWrapper{logrus.New().WithField(\"service\", serviceName).WithField(\"environment\", environment)}\n\tif environment == \"production\" {\n\t\tlogStore.SetFormat(&logrus.JSONFormatter{})\n\t}\n\n\t// fmt.Println(\"Adding hook\")\n\t// hook := 
logrusly.NewLogglyHook(\"71000042-f956-4c7e-987d-8694a20695a8\", \"https://logs-01.loggly.com/bulk/\", logrus.InfoLevel, serviceName)\n\t// logStore.Logger.Hooks.Add(hook)\n\treturn logStore\n}", "func (f *DBStoreSelectRepositoriesForRetentionScanFunc) PushHook(hook func(context.Context, time.Duration, int) ([]int, error)) {\n\tf.mutex.Lock()\n\tf.hooks = append(f.hooks, hook)\n\tf.mutex.Unlock()\n}", "func (f *ExtensionStoreListFunc) PushHook(hook func(context.Context, stores.ExtensionsListOptions) ([]*stores.Extension, error)) {\n\tf.mutex.Lock()\n\tf.hooks = append(f.hooks, hook)\n\tf.mutex.Unlock()\n}", "func (f *PolicyServiceGetRetentionPolicyOverviewFunc) PushHook(hook func(context.Context, types.Upload, bool, int, int64, string, time.Time) ([]types.RetentionPolicyMatchCandidate, int, error)) {\n\tf.mutex.Lock()\n\tf.hooks = append(f.hooks, hook)\n\tf.mutex.Unlock()\n}", "func (f *ResolverGetUploadsByIDsFunc) PushHook(hook func(context.Context, ...int) ([]dbstore.Upload, error)) {\n\tf.mutex.Lock()\n\tf.hooks = append(f.hooks, hook)\n\tf.mutex.Unlock()\n}", "func (this *service) GRPCHook() reflect.Value {\n\treturn reflect.ValueOf(pb.RegisterMiHomeServer)\n}", "func (f *ResolverGetIndexesByIDsFunc) PushHook(hook func(context.Context, ...int) ([]dbstore.Index, error)) {\n\tf.mutex.Lock()\n\tf.hooks = append(f.hooks, hook)\n\tf.mutex.Unlock()\n}", "func NewGet(g Getter) *Get {\n\treturn &Get{g}\n}", "func newMockGetDataFunc(mockStore *mock.NodeStore) func(addr Address) (data []byte, err error) {\n\treturn func(addr Address) (data []byte, err error) {\n\t\tdata, err = mockStore.Get(addr)\n\t\tif err == mock.ErrNotFound {\n\t\t\t// preserve ErrChunkNotFound error\n\t\t\terr = ErrChunkNotFound\n\t\t}\n\t\treturn data, err\n\t}\n}", "func ClosureNew(f interface{}) *C.GClosure {\n\tclosure := C._g_closure_new()\n\tclosures.Lock()\n\tclosures.m[closure] = reflect.ValueOf(f)\n\tclosures.Unlock()\n\treturn closure\n}", "func (f *ExtensionStoreCreateFunc) PushHook(hook 
func(context.Context, int32, int32, string) (int32, error)) {\n\tf.mutex.Lock()\n\tf.hooks = append(f.hooks, hook)\n\tf.mutex.Unlock()\n}", "func (k *kubelet) getHooks() *container.Hooks {\n\treturn &container.Hooks{\n\t\tPostStart: k.postStartHook(),\n\t}\n}", "func NewLifeHook(e *Engine) Hook {\n\treturn Hook{\n\t\tOnStart: OnStart(e),\n\t\tOnStop: OnStop(e),\n\t}\n}", "func newGoFactory() *GOFactory {\n\tgologger.SLogger.Println(\"Init Game Object Factory Singleton\")\n\tfOnce.Do(func() {\n\t\tgofactory = &GOFactory{\n\t\t\tGoCreator: make(map[string]ICreator),\n\t\t}\n\t})\n\treturn gofactory\n}", "func (f *ReleaseStoreCreateFunc) PushHook(hook func(context.Context, *stores.Release) (int64, error)) {\n\tf.mutex.Lock()\n\tf.hooks = append(f.hooks, hook)\n\tf.mutex.Unlock()\n}", "func getHook(data *domain.Data, repo config.Repository) (config.Hook, bool, error) {\n\tfor _, hook := range repo.Hooks {\n\t\tf, err := matchHook(data, hook)\n\t\tif err != nil {\n\t\t\treturn config.Hook{}, false, err\n\t\t}\n\t\tif f {\n\t\t\treturn hook, true, nil\n\t\t}\n\t}\n\treturn config.Hook{}, false, nil\n}", "func GetBindHook() BindHook {\n\treturn bindHook\n}", "func NewHook(peerID int, token string) *VkHook {\n\thook := &VkHook{\n\t\tPeerID: peerID,\n\t\tVK: api.NewVK(token),\n\t\tUseLevels: DefaultLevels,\n\t}\n\n\treturn hook\n}", "func (f *Function) M__get__(instance, owner Object) (Object, error) {\n\tif instance != None {\n\t\treturn NewBoundMethod(instance, f), nil\n\t}\n\treturn f, nil\n}", "func (f *ResolverGetIndexByIDFunc) PushHook(hook func(context.Context, int) (dbstore.Index, bool, error)) {\n\tf.mutex.Lock()\n\tf.hooks = append(f.hooks, hook)\n\tf.mutex.Unlock()\n}", "func RegisterNewGroupHook(fn func(*Group)) {\n\tif newGroupHook != nil {\n\t\tpanic(\"RegisterNewGroupHook called more than once\")\n\t}\n\tnewGroupHook = fn\n}", "func newAlfredWatcher() *alfredWatcher {\n w, _ := inotify.NewWatcher()\n aw := &alfredWatcher{\n watcher: w,\n list: 
make(map[string]uint32),\n }\n return aw\n}", "func (a *App) GetHook(name string) (*Hook, error) {\n\tsp, err := a.GetSnapshot().FastForward()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn getHook(a, name, sp)\n}", "func newListeners() *listeners { return &listeners{m: make(map[string]nl.Listener, 64)} }", "func (ng *AlertNG) GetHooks() *api.Hooks {\n\treturn ng.api.Hooks\n}", "func New() logrus.Hook {\n\treturn &normalCallerHook{}\n}", "func (bi *baseInstance) toGA() *ga.Instance {\n\tinst := &ga.Instance{Name: bi.name, Zone: bi.zone, NetworkInterfaces: []*ga.NetworkInterface{{}}}\n\tif bi.aliasRange != \"\" {\n\t\tinst.NetworkInterfaces[0].AliasIpRanges = []*ga.AliasIpRange{\n\t\t\t{IpCidrRange: bi.aliasRange, SubnetworkRangeName: util.TestSecondaryRangeName},\n\t\t}\n\t}\n\treturn inst\n}", "func New(config Config) zapcore.WriteSyncer {\n\treturn &gelf{Config: config}\n}", "func (f *DBStoreDeleteOldIndexesFunc) PushHook(hook func(context.Context, time.Duration, time.Time) (int, error)) {\n\tf.mutex.Lock()\n\tf.hooks = append(f.hooks, hook)\n\tf.mutex.Unlock()\n}", "func (f *ExtensionStoreGetPublisherFunc) PushHook(hook func(context.Context, string) (*stores.Publisher, error)) {\n\tf.mutex.Lock()\n\tf.hooks = append(f.hooks, hook)\n\tf.mutex.Unlock()\n}", "func (f *ExtensionStoreGetFeaturedExtensionsFunc) PushReturn(r0 []*stores.Extension, r1 error) {\n\tf.PushHook(func(context.Context) ([]*stores.Extension, error) {\n\t\treturn r0, r1\n\t})\n}", "func newLfsHook(writer *rotatelogs.RotateLogs, tf *moduleFormatter) logrus.Hook {\n\tlfsHook := lfshook.NewHook(lfshook.WriterMap{\n\t\tlogrus.DebugLevel: writer,\n\t\tlogrus.InfoLevel: writer,\n\t\tlogrus.WarnLevel: writer,\n\t\tlogrus.ErrorLevel: writer,\n\t\tlogrus.FatalLevel: writer,\n\t\tlogrus.PanicLevel: writer,\n\t}, tf)\n\n\treturn lfsHook\n}", "func newGauge(namespace, subsystem, name string, labelNames []string, client *statsd.Statter, isPrometheusEnabled bool) *Gauge {\n\topts := 
prometheus.GaugeOpts{\n\t\tNamespace: namespace,\n\t\tSubsystem: subsystem,\n\t\tName: name,\n\t}\n\tvec := prometheus.NewGaugeVec(opts, labelNames)\n\tif isPrometheusEnabled {\n\t\tprometheus.MustRegister(vec)\n\t}\n\n\treturn &Gauge{\n\t\twatcher: vec,\n\t\tlabels: labelNames,\n\t\tclient: client,\n\t\tprefix: strings.Join([]string{namespace, subsystem, name}, \".\"),\n\t}\n}", "func NewHook(client *Client, levels []logrus.Level) *Hook {\n\n\treturn &Hook{client, levels}\n}", "func generateListGetter(buf *bytes.Buffer, method *generatedGoListMethod) error {\n\treturn goListGetterTemplate.Execute(buf, method)\n}", "func (s *hookLister) Hooks(namespace string) HookNamespaceLister {\n\treturn hookNamespaceLister{indexer: s.indexer, namespace: namespace}\n}", "func (f *JobRunFunc) PushHook(hook func(context.Context, database.DB, streaming.Sender) (*search.Alert, error)) {\n\tf.mutex.Lock()\n\tf.hooks = append(f.hooks, hook)\n\tf.mutex.Unlock()\n}", "func newGrpcListener(ghandler *GrpcHandler) net.Listener {\n\tl := &grpcListener{\n\t\tGrpcHandler: ghandler,\n\t}\n\tl.listenerCtx, l.listenerCtxCancel = context.WithCancel(ghandler.ctx)\n\treturn l\n}", "func New(executor GetExecutor, lc logger.LoggingClient) *get {\n\treturn &get{\n\t\texecutor: executor,\n\t\tloggingClient: lc,\n\t}\n}", "func newLdbCacheIter(snap *dbCacheSnapshot, slice *util.Range) *ldbCacheIter {\n\titer := snap.pendingKeys.Iterator(slice.Start, slice.Limit)\n\treturn &ldbCacheIter{Iterator: iter}\n}", "func getHandleHookChange(\n\thooks []types.Hook, \n\trules []types.Rule, \n\texits []types.Exit,\n\tcallEvent func(string),\n\texitFunc func(code int),\n) func(string, string) {\n\thookChangeMap := generateHookChangeMap(rules, hooks)\n\thookChange := func(laststate string, newstate string) {\n\t\tif hookChangeMap[laststate] !=nil {\n\t\t\tlabels, hasMapping := hookChangeMap[laststate][newstate]\n\t\t\tif hasMapping {\n\t\t\t\tfor _, label := 
range(labels){\n\t\t\t\t\tcallEvent(label)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\tfor _, exit := range(exits){\n\t\t\tif exit.State == newstate {\n\t\t\t\texitFunc(exit.Exitcode)\n\t\t\t}\n\t\t}\n\t}\n\n\n\treturn hookChange\n}", "func instrumentGet(inner func()) {\n\tTotalRequests.Add(1)\n\tPendingRequests.Add(1)\n\tdefer PendingRequests.Add(-1)\n\n\tstart := time.Now()\n\n\tinner()\n\n\t// Capture the histogram over 18 geometric buckets \n\tdelta := time.Since(start)\n\tswitch {\n\tcase delta < time.Millisecond:\n\t\tLatencies.Add(\"0ms\", 1)\n\tcase delta > 32768*time.Millisecond:\n\t\tLatencies.Add(\">32s\", 1)\n\tdefault:\n\t\tfor i := time.Millisecond; i < 32768*time.Millisecond; i *= 2 {\n\t\t\tif delta >= i && delta < i*2 {\n\t\t\t\tLatencies.Add(i.String(), 1)\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t}\n}", "func (f *ExtensionStoreCountFunc) PushHook(hook func(context.Context, stores.ExtensionsListOptions) (int, error)) {\n\tf.mutex.Lock()\n\tf.hooks = append(f.hooks, hook)\n\tf.mutex.Unlock()\n}", "func mockedGranter(kubeutil *kube.Kube, app *v1.RadixRegistration, namespace string, serviceAccount *corev1.ServiceAccount) error {\n\treturn nil\n}", "func NewGetGeneric[T constraints.Integer](t mockConstructorTestingTNewGetGeneric) *GetGeneric[T] {\n\tmock := &GetGeneric[T]{}\n\tmock.Mock.Test(t)\n\n\tt.Cleanup(func() { mock.AssertExpectations(t) })\n\n\treturn mock\n}", "func (f *ExtensionStoreListPublishersFunc) PushHook(hook func(context.Context, stores.PublishersListOptions) ([]*stores.Publisher, error)) {\n\tf.mutex.Lock()\n\tf.hooks = append(f.hooks, hook)\n\tf.mutex.Unlock()\n}", "func (f *DBStoreDirtyRepositoriesFunc) PushHook(hook func(context.Context) (map[int]int, error)) {\n\tf.mutex.Lock()\n\tf.hooks = append(f.hooks, hook)\n\tf.mutex.Unlock()\n}", "func (f *DBStoreDirtyRepositoriesFunc) PushHook(hook func(context.Context) (map[int]int, error)) {\n\tf.mutex.Lock()\n\tf.hooks = append(f.hooks, hook)\n\tf.mutex.Unlock()\n}", "func (f 
*UploadServiceGetAuditLogsForUploadFunc) PushHook(hook func(context.Context, int) ([]types.UploadLog, error)) {\n\tf.mutex.Lock()\n\tf.hooks = append(f.hooks, hook)\n\tf.mutex.Unlock()\n}", "func (f *DBStoreSoftDeleteOldUploadsFunc) PushHook(hook func(context.Context, time.Duration, time.Time) (int, error)) {\n\tf.mutex.Lock()\n\tf.hooks = append(f.hooks, hook)\n\tf.mutex.Unlock()\n}", "func (f *DBStoreDeleteIndexesWithoutRepositoryFunc) PushHook(hook func(context.Context, time.Time) (map[int]int, error)) {\n\tf.mutex.Lock()\n\tf.hooks = append(f.hooks, hook)\n\tf.mutex.Unlock()\n}", "func NewHook(b BoltHook) (*BoltHook, error) {\n\tboltDB, err := bolt.Open(b.DBLoc, 0600, nil)\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &BoltHook{\n\t\tDBLoc: b.DBLoc,\n\t\tBucket: b.Bucket,\n\t\tFormatter: b.Formatter,\n\t\tdb: boltDB,\n\t}, nil\n}", "func GetNew(target *core.ServiceInstance) (Client, error) {\n\tpool := getPool(target.Kind)\n\treturn connect(pool, target)\n}", "func newSnapshotCache() cache.SnapshotCache {\n\treturn cache.NewSnapshotCache(false, tbnProxyNodeHash{}, consoleLogger{})\n}", "func (f *AutoIndexingServiceGetListTagsFunc) PushReturn(r0 []*gitdomain.Tag, r1 error) {\n\tf.PushHook(func(context.Context, api.RepoName, ...string) ([]*gitdomain.Tag, error) {\n\t\treturn r0, r1\n\t})\n}", "func NewCustom(fieldName string, fullPath bool) logrus.Hook {\n\treturn &customCallerHook{fieldName: fieldName, fullPath: fullPath}\n}", "func (f *ExtensionStoreUpdateFunc) PushHook(hook func(context.Context, int32, *string) error) {\n\tf.mutex.Lock()\n\tf.hooks = append(f.hooks, hook)\n\tf.mutex.Unlock()\n}", "func (gatewayContext *GatewayContext) newGatewayWatch(name string) *cache.ListWatch {\n\tx := gatewayContext.gatewayClient.ArgoprojV1alpha1().RESTClient()\n\tresource := \"gateways\"\n\tfieldSelector := fields.ParseSelectorOrDie(fmt.Sprintf(\"metadata.name=%s\", name))\n\n\tlistFunc := func(options metav1.ListOptions) (runtime.Object, error) 
{\n\t\toptions.FieldSelector = fieldSelector.String()\n\t\treq := x.Get().\n\t\t\tNamespace(gatewayContext.namespace).\n\t\t\tResource(resource).\n\t\t\tVersionedParams(&options, metav1.ParameterCodec)\n\t\treturn req.Do().Get()\n\t}\n\twatchFunc := func(options metav1.ListOptions) (watch.Interface, error) {\n\t\toptions.Watch = true\n\t\toptions.FieldSelector = fieldSelector.String()\n\t\treq := x.Get().\n\t\t\tNamespace(gatewayContext.namespace).\n\t\t\tResource(resource).\n\t\t\tVersionedParams(&options, metav1.ParameterCodec)\n\t\treturn req.Watch()\n\t}\n\treturn &cache.ListWatch{ListFunc: listFunc, WatchFunc: watchFunc}\n}", "func (f *UploadServiceGetCommitGraphMetadataFunc) PushHook(hook func(context.Context, int) (bool, *time.Time, error)) {\n\tf.mutex.Lock()\n\tf.hooks = append(f.hooks, hook)\n\tf.mutex.Unlock()\n}", "func (mrb *MrbState) ProcNewGofunc(f interface{}) RProc {\n\tproc, _ := mrb.ProcNewGofuncWithEnv(f)\n\treturn proc\n}", "func (f *AutoIndexingServiceGetListTagsFunc) SetDefaultHook(hook func(context.Context, api.RepoName, ...string) ([]*gitdomain.Tag, error)) {\n\tf.defaultHook = hook\n}", "func (f *DBStoreUpdateUploadRetentionFunc) PushHook(hook func(context.Context, []int, []int) error) {\n\tf.mutex.Lock()\n\tf.hooks = append(f.hooks, hook)\n\tf.mutex.Unlock()\n}", "func newLabo(s *goquery.Selection, l *Labo) *Labo {\n\tfor _, fn := range laboFn {\n\t\tfn(s, l)\n\t}\n\treturn l\n}", "func NewGaWidget(keyfile string, viewID string) (*gaWidget, error) {\n\tan, err := platform.NewAnalyticsClient(keyfile)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &gaWidget{\n\t\tanalytics: an,\n\t\tviewID: viewID,\n\t}, nil\n}", "func newInfluxDB(config *Config) (hook *InfluxDBHook, err error) {\n if config == nil {\n config = &Config{}\n }\n\n config.defaults()\n\n var client = newInfluxDBClient(config)\n\n // Make sure that we can connect to InfluxDB\n isReady, err := client.Ready(context.Background()) // if this takes more than 5 seconds then 
influxdb is probably down\n if err != nil || !isReady {\n return nil, fmt.Errorf(\"NewInfluxDB: Error connecting to InfluxDB, %v\", err)\n }\n\n hook = &InfluxDBHook{\n client: client,\n database: config.Database,\n measurement: config.Measurement,\n tagList: config.Tags,\n precision: config.Precision,\n syslog: config.Syslog,\n facility: config.Facility,\n facilityCode: config.FacilityCode,\n appName: config.AppName,\n version: config.Version,\n minLevel: config.MinLevel,\n org: config.Org,\n bucket: config.Bucket,\n ch: ringchan.NewRingChan(10, config.MaxBufferLog),\n }\n go hook.process()\n return hook, nil\n}", "func NewHookOptions(options map[string]interface{}) HookOptions {\n\tvar mapper models.Mapper\n\tif mapperOptions, ok := options[\"mapper\"]; ok {\n\t\tif m, ok := mapperOptions.(map[string]interface{}); ok {\n\t\t\tmapper = models.ParseMapper(m)\n\t\t}\n\t}\n\tvar name string\n\tif nameOption, ok := options[\"name\"]; ok {\n\t\tname = nameOption.(string)\n\t} else {\n\t\t//TODO: use hook-index as name\n\t\tname = \"default\"\n\t}\n\treturn HookOptions{\n\t\tName: name,\n\t\tMapper: mapper,\n\t}\n}", "func NewMockHook(ctrl *gomock.Controller) *MockHook {\n\tmock := &MockHook{ctrl: ctrl}\n\tmock.recorder = &MockHookMockRecorder{mock}\n\treturn mock\n}", "func (f *ExtensionStoreCountPublishersFunc) PushHook(hook func(context.Context, stores.PublishersListOptions) (int, error)) {\n\tf.mutex.Lock()\n\tf.hooks = append(f.hooks, hook)\n\tf.mutex.Unlock()\n}", "func GetRpcServiceFb(key string) Factory {\n\treturn rpcServiceFactoryBuilder[key]\n}", "func (v *version) Hooks() HookInformer {\n\treturn &hookInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions}\n}", "func (f *ResolverIndexConfigurationFunc) PushHook(hook func(context.Context, int) ([]byte, error)) {\n\tf.mutex.Lock()\n\tf.hooks = append(f.hooks, hook)\n\tf.mutex.Unlock()\n}", "func (f *DBStoreHandleFunc) PushHook(hook func() *basestore.TransactableHandle) 
{\n\tf.mutex.Lock()\n\tf.hooks = append(f.hooks, hook)\n\tf.mutex.Unlock()\n}", "func NewForTest(t *testing.T) (context.Context, *pgxpool.Pool, *testutils.GitBuilder, []string, provider.Provider, *config.InstanceConfig) {\n\tctx := cipd_git.UseGitFinder(context.Background())\n\tctx, cancel := context.WithCancel(ctx)\n\n\t// Create a git repo for testing purposes.\n\tgb := testutils.GitInit(t, ctx)\n\thashes := []string{}\n\thashes = append(hashes, gb.CommitGenAt(ctx, \"foo.txt\", StartTime))\n\thashes = append(hashes, gb.CommitGenAt(ctx, \"foo.txt\", StartTime.Add(time.Minute)))\n\thashes = append(hashes, gb.CommitGenAt(ctx, \"foo.txt\", StartTime.Add(2*time.Minute)))\n\thashes = append(hashes, gb.CommitGenAt(ctx, \"bar.txt\", StartTime.Add(3*time.Minute)))\n\thashes = append(hashes, gb.CommitGenAt(ctx, \"foo.txt\", StartTime.Add(4*time.Minute)))\n\thashes = append(hashes, gb.CommitGenAt(ctx, \"foo.txt\", StartTime.Add(5*time.Minute)))\n\thashes = append(hashes, gb.CommitGenAt(ctx, \"bar.txt\", StartTime.Add(6*time.Minute)))\n\thashes = append(hashes, gb.CommitGenAt(ctx, \"foo.txt\", StartTime.Add(7*time.Minute)))\n\n\thashes = append(hashes, gb.CommitGenAt(ctx, \"foo.txt\", StartTime.Add(8*time.Minute)))\n\thashes = append(hashes, gb.CommitGenAt(ctx, \"foo.txt\", StartTime.Add(9*time.Minute)))\n\thashes = append(hashes, gb.CommitGenAt(ctx, \"foo.txt\", StartTime.Add(10*time.Minute)))\n\thashes = append(hashes, gb.CommitGenAt(ctx, \"foo.txt\", StartTime.Add(11*time.Minute)))\n\thashes = append(hashes, gb.CommitGenAt(ctx, \"foo.txt\", StartTime.Add(12*time.Minute)))\n\thashes = append(hashes, gb.CommitGenAt(ctx, \"foo.txt\", StartTime.Add(13*time.Minute)))\n\thashes = append(hashes, gb.CommitGenAt(ctx, \"foo.txt\", StartTime.Add(14*time.Minute)))\n\thashes = append(hashes, gb.CommitGenAt(ctx, \"foo.txt\", StartTime.Add(15*time.Minute)))\n\n\thashes = append(hashes, gb.CommitGenAt(ctx, \"foo.txt\", StartTime.Add(16*time.Minute)))\n\thashes = append(hashes, 
gb.CommitGenAt(ctx, \"foo.txt\", StartTime.Add(17*time.Minute)))\n\thashes = append(hashes, gb.CommitGenAt(ctx, \"foo.txt\", StartTime.Add(18*time.Minute)))\n\thashes = append(hashes, gb.CommitGenAt(ctx, \"foo.txt\", StartTime.Add(19*time.Minute)))\n\thashes = append(hashes, gb.CommitGenAt(ctx, \"foo.txt\", StartTime.Add(20*time.Minute)))\n\thashes = append(hashes, gb.CommitGenAt(ctx, \"foo.txt\", StartTime.Add(21*time.Minute)))\n\thashes = append(hashes, gb.CommitGenAt(ctx, \"foo.txt\", StartTime.Add(22*time.Minute)))\n\thashes = append(hashes, gb.CommitGenAt(ctx, \"foo.txt\", StartTime.Add(23*time.Minute)))\n\n\t// Init our sql database.\n\tdb := sqltest.NewCockroachDBForTests(t, \"dbgit\")\n\n\t// Get tmp dir to use for repo checkout.\n\ttmpDir, err := ioutil.TempDir(\"\", \"git\")\n\trequire.NoError(t, err)\n\n\t// Create the cleanup function.\n\tt.Cleanup(func() {\n\t\tcancel()\n\t\terr = os.RemoveAll(tmpDir)\n\t\tassert.NoError(t, err)\n\t\tgb.Cleanup()\n\t})\n\n\tinstanceConfig := &config.InstanceConfig{\n\t\tGitRepoConfig: config.GitRepoConfig{\n\t\t\tURL: gb.Dir(),\n\t\t\tDir: filepath.Join(tmpDir, \"checkout\"),\n\t\t},\n\t}\n\tgp, err := git_checkout.New(ctx, instanceConfig)\n\trequire.NoError(t, err)\n\treturn ctx, db, gb, hashes, gp, instanceConfig\n}", "func (f *DBStoreHandleFunc) PushHook(hook func() basestore.TransactableHandle) {\n\tf.mutex.Lock()\n\tf.hooks = append(f.hooks, hook)\n\tf.mutex.Unlock()\n}" ]
[ "0.73362684", "0.55606365", "0.5346862", "0.53349245", "0.51018983", "0.50766736", "0.50756764", "0.5031727", "0.49362725", "0.49105883", "0.48886675", "0.48711956", "0.48659244", "0.4857758", "0.4857758", "0.4845478", "0.4821866", "0.47960016", "0.47906336", "0.4775277", "0.47730014", "0.47523877", "0.47479406", "0.472726", "0.47114184", "0.46798778", "0.46782318", "0.4665728", "0.4643404", "0.46396217", "0.46335372", "0.4630124", "0.46241173", "0.46231842", "0.46189186", "0.46035084", "0.4603419", "0.4599399", "0.45930314", "0.45843828", "0.4575383", "0.45700067", "0.45693627", "0.45690772", "0.45450547", "0.45424673", "0.4541671", "0.45341465", "0.45293367", "0.45238426", "0.45215097", "0.4521429", "0.4516111", "0.44893545", "0.44876432", "0.44865716", "0.44820508", "0.44794202", "0.4473823", "0.44678614", "0.4451733", "0.44343713", "0.44329304", "0.44183034", "0.44150752", "0.44136", "0.44015387", "0.43969283", "0.43952924", "0.4385314", "0.43804273", "0.43759438", "0.43713394", "0.43713394", "0.4349196", "0.43485594", "0.43373233", "0.43318668", "0.43300954", "0.4328112", "0.4323547", "0.4322696", "0.43063924", "0.43027085", "0.42972922", "0.4294898", "0.42808393", "0.42671457", "0.42611027", "0.4260794", "0.42551446", "0.42520428", "0.42402652", "0.4232919", "0.4228743", "0.4224091", "0.4221166", "0.42158407", "0.42133886", "0.42028248" ]
0.88796777
0
newBetaGetHook creates a new closure with the current baseInstanceList to be used as a MockBetaInstances.GetHook
newBetaGetHook создает новый замыкание с текущим baseInstanceList, которое используется как MockBetaInstances.GetHook
func (bil *baseInstanceList) newBetaGetHook() func(ctx context.Context, key *meta.Key, m *cloud.MockBetaInstances) (bool, *beta.Instance, error) { return func(ctx context.Context, key *meta.Key, m *cloud.MockBetaInstances) (bool, *beta.Instance, error) { m.Lock.Lock() defer m.Lock.Unlock() if _, found := m.Objects[*key]; !found { m.Objects[*key] = &cloud.MockInstancesObj{Obj: bil.getOrCreateBaseInstance(key).toBeta()} } return false, nil, nil } }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (bil *baseInstanceList) newGAGetHook() func(ctx context.Context, key *meta.Key, m *cloud.MockInstances) (bool, *ga.Instance, error) {\n\treturn func(ctx context.Context, key *meta.Key, m *cloud.MockInstances) (bool, *ga.Instance, error) {\n\t\tm.Lock.Lock()\n\t\tdefer m.Lock.Unlock()\n\n\t\tif _, found := m.Objects[*key]; !found {\n\t\t\tm.Objects[*key] = &cloud.MockInstancesObj{Obj: bil.getOrCreateBaseInstance(key).toGA()}\n\t\t}\n\t\treturn false, nil, nil\n\t}\n}", "func (bil *baseInstanceList) newMockCloud() cloud.Cloud {\n\tc := cloud.NewMockGCE(nil)\n\n\t// insert hooks to lazy create a instance when needed\n\tc.MockInstances.GetHook = bil.newGAGetHook()\n\tc.MockBetaInstances.GetHook = bil.newBetaGetHook()\n\n\treturn c\n}", "func NewHookLister(indexer cache.Indexer) HookLister {\n\treturn &hookLister{indexer: indexer}\n}", "func (bi *baseInstance) toBeta() *beta.Instance {\n\tinst := &beta.Instance{Name: bi.name, Zone: bi.zone, NetworkInterfaces: []*beta.NetworkInterface{{}}}\n\tif bi.aliasRange != \"\" {\n\t\tinst.NetworkInterfaces[0].AliasIpRanges = []*beta.AliasIpRange{\n\t\t\t{IpCidrRange: bi.aliasRange, SubnetworkRangeName: util.TestSecondaryRangeName},\n\t\t}\n\t}\n\treturn inst\n}", "func (t *T) Beta(name string, f interface{}) bool {\n\tt.Helper()\n\treturn t.invokeFeature(feature.Beta, name, f)\n}", "func (f *AutoIndexingServiceGetListTagsFunc) PushHook(hook func(context.Context, api.RepoName, ...string) ([]*gitdomain.Tag, error)) {\n\tf.mutex.Lock()\n\tf.hooks = append(f.hooks, hook)\n\tf.mutex.Unlock()\n}", "func NewLifeHook(e *Engine) Hook {\n\treturn Hook{\n\t\tOnStart: OnStart(e),\n\t\tOnStop: OnStop(e),\n\t}\n}", "func NewHook(b BoltHook) (*BoltHook, error) {\n\tboltDB, err := bolt.Open(b.DBLoc, 0600, nil)\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &BoltHook{\n\t\tDBLoc: b.DBLoc,\n\t\tBucket: b.Bucket,\n\t\tFormatter: b.Formatter,\n\t\tdb: boltDB,\n\t}, nil\n}", "func (f *ReleaseStoreGetLatestBatchFunc) PushHook(hook 
func(context.Context, []int32, string, bool) ([]*stores.Release, error)) {\n\tf.mutex.Lock()\n\tf.hooks = append(f.hooks, hook)\n\tf.mutex.Unlock()\n}", "func NewHook(peerID int, token string) *VkHook {\n\thook := &VkHook{\n\t\tPeerID: peerID,\n\t\tVK: api.NewVK(token),\n\t\tUseLevels: DefaultLevels,\n\t}\n\n\treturn hook\n}", "func (f *AutoIndexingServiceGetIndexesFunc) PushHook(hook func(context.Context, shared.GetIndexesOptions) ([]types.Index, int, error)) {\n\tf.mutex.Lock()\n\tf.hooks = append(f.hooks, hook)\n\tf.mutex.Unlock()\n}", "func (ch *CloudwatchHook) GetHook() (func(zapcore.Entry) error, error) {\n\n\tvar cloudwatchWriter = func(e zapcore.Entry) error {\n\t\tif !ch.isAcceptedLevel(e.Level) {\n\t\t\treturn nil\n\t\t}\n\n\t\tevent := &cloudwatchlogs.InputLogEvent{\n\t\t\tMessage: aws.String(fmt.Sprintf(\"[%s] %s\", e.LoggerName, e.Message)),\n\t\t\tTimestamp: aws.Int64(int64(time.Nanosecond) * time.Now().UnixNano() / int64(time.Millisecond)),\n\t\t}\n\t\tparams := &cloudwatchlogs.PutLogEventsInput{\n\t\t\tLogEvents: []*cloudwatchlogs.InputLogEvent{event},\n\t\t\tLogGroupName: aws.String(ch.GroupName),\n\t\t\tLogStreamName: aws.String(ch.StreamName),\n\t\t\tSequenceToken: ch.nextSequenceToken,\n\t\t}\n\n\t\tif ch.Async {\n\t\t\tgo ch.sendEvent(params)\n\t\t\treturn nil\n\t\t}\n\n\t\treturn ch.sendEvent(params)\n\t}\n\n\tch.svc = cloudwatchlogs.New(session.New(ch.AWSConfig))\n\n\tlgresp, err := ch.svc.DescribeLogGroups(&cloudwatchlogs.DescribeLogGroupsInput{LogGroupNamePrefix: aws.String(ch.GroupName), Limit: aws.Int64(1)})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif len(lgresp.LogGroups) < 1 {\n\t\t// we need to create this log group\n\t\t_, err := ch.svc.CreateLogGroup(&cloudwatchlogs.CreateLogGroupInput{LogGroupName: aws.String(ch.GroupName)})\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\tresp, err := ch.svc.DescribeLogStreams(&cloudwatchlogs.DescribeLogStreamsInput{\n\t\tLogGroupName: aws.String(ch.GroupName), // 
Required\n\t\tLogStreamNamePrefix: aws.String(ch.StreamName),\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// grab the next sequence token\n\tif len(resp.LogStreams) > 0 {\n\t\tch.nextSequenceToken = resp.LogStreams[0].UploadSequenceToken\n\t\treturn cloudwatchWriter, nil\n\t}\n\n\t// create stream if it doesn't exist. the next sequence token will be null\n\t_, err = ch.svc.CreateLogStream(&cloudwatchlogs.CreateLogStreamInput{\n\t\tLogGroupName: aws.String(ch.GroupName),\n\t\tLogStreamName: aws.String(ch.StreamName),\n\t})\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn cloudwatchWriter, nil\n}", "func getHook(data *domain.Data, repo config.Repository) (config.Hook, bool, error) {\n\tfor _, hook := range repo.Hooks {\n\t\tf, err := matchHook(data, hook)\n\t\tif err != nil {\n\t\t\treturn config.Hook{}, false, err\n\t\t}\n\t\tif f {\n\t\t\treturn hook, true, nil\n\t\t}\n\t}\n\treturn config.Hook{}, false, nil\n}", "func GetBindHook() BindHook {\n\treturn bindHook\n}", "func NewHook(client *Client, levels []logrus.Level) *Hook {\n\n\treturn &Hook{client, levels}\n}", "func newLabo(s *goquery.Selection, l *Labo) *Labo {\n\tfor _, fn := range laboFn {\n\t\tfn(s, l)\n\t}\n\treturn l\n}", "func getHandleHookChange(\n\thooks []types.Hook, \n\trules []types.Rule, \n\texits []types.Exit,\n\tcallEvent func(string),\n\texitFunc func(code int),\n) func(string, string) {\n\thookChangeMap := generateHookChangeMap(rules, hooks)\n\thookChange := func(laststate string, newstate string) {\n\t\tif hookChangeMap[laststate] !=nil {\n\t\t\tlabels, hasMapping := hookChangeMap[laststate][newstate]\n\t\t\tif hasMapping {\n\t\t\t\tfor _, label := range(labels){\n\t\t\t\t\tcallEvent(label)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\tfor _, exit := range(exits){\n\t\t\tif exit.State == newstate {\n\t\t\t\texitFunc(exit.Exitcode)\n\t\t\t}\n\t\t}\n\t}\n\n\n\treturn hookChange\n}", "func NewBehatGetList(variables templateUtils.TemplateVariables) Template {\n\trawTemplate, err := 
template.New(\"behat_get_list\").Parse(BehatGetListTemplate)\n\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\treturn New(resource.New(geography.BehatDir+string(variables.Entity)+\"/crud/\", \"get_list.feature\"),\n\t\trawTemplate, variables)\n}", "func NewChangelistLandedUpdater(t testing.TB) *ChangelistLandedUpdater {\n\tmock := &ChangelistLandedUpdater{}\n\n\tt.Cleanup(func() { mock.AssertExpectations(t) })\n\n\treturn mock\n}", "func (k *kubelet) getHooks() *container.Hooks {\n\treturn &container.Hooks{\n\t\tPostStart: k.postStartHook(),\n\t}\n}", "func defaultNewExpBundler(uploader func(interface{}), delayThreshold time.Duration, countThreshold int) expBundler {\n\tbndler := bundler.NewBundler((*RowData)(nil), uploader)\n\n\t// Set options for bundler if they are provided by users.\n\tif 0 < delayThreshold {\n\t\tbndler.DelayThreshold = delayThreshold\n\t}\n\tif 0 < countThreshold {\n\t\tbndler.BundleCountThreshold = countThreshold\n\t}\n\n\treturn bndler\n}", "func newAlfredWatcher() *alfredWatcher {\n w, _ := inotify.NewWatcher()\n aw := &alfredWatcher{\n watcher: w,\n list: make(map[string]uint32),\n }\n return aw\n}", "func getHook(w http.ResponseWriter, req *http.Request) {\n\t// Get the JSON and put it into a hook struct\n\tdecoder := json.NewDecoder(req.Body)\n\tvar h github.PostReceiveHook\n\terr := decoder.Decode(&h)\n\n\tif err != nil {\n\t\tfmt.Println(\"ERROR: \", err.Error())\n\t\tfmt.Fprint(w, \"No JSON... what? 
(\"+err.Error()+\")\")\n\n\t\treturn\n\t}\n\n\t// If there is a branch, run the commands\n\tif len(h.Branch()) > 0 {\n\t\trunCommands(h.Branch())\n\t}\n\n\tfmt.Fprint(w, \"OK: \"+h.Ref)\n}", "func (f *ExtensionStoreGetFeaturedExtensionsFunc) PushHook(hook func(context.Context) ([]*stores.Extension, error)) {\n\tf.mutex.Lock()\n\tf.hooks = append(f.hooks, hook)\n\tf.mutex.Unlock()\n}", "func (f *ReleaseStoreGetLatestFunc) PushHook(hook func(context.Context, int32, string, bool) (*stores.Release, error)) {\n\tf.mutex.Lock()\n\tf.hooks = append(f.hooks, hook)\n\tf.mutex.Unlock()\n}", "func (t TestDescription) Beta() TestDescription {\n\treturn t.newLabel(\"BETA\")\n}", "func NewBlueprint(uri string, cache time.Duration) (api.Meter, error) {\n\tlog := util.NewLogger(\"foo\")\n\n\tm := &Blueprint{\n\t\tHelper: request.NewHelper(log),\n\t\tcache: cache,\n\t}\n\n\treturn m, nil\n}", "func newBaseInstanceList(allocateCIDR bool, clusterCIDR *net.IPNet, subnetMaskSize int) *baseInstanceList {\n\tcidrSet, _ := cidrset.NewCIDRSet(clusterCIDR, subnetMaskSize)\n\treturn &baseInstanceList{\n\t\tallocateCIDR: allocateCIDR,\n\t\tclusterCIDR: clusterCIDR,\n\t\tsubnetMaskSize: subnetMaskSize,\n\t\tcidrSet: cidrSet,\n\t\tinstances: make(map[meta.Key]*baseInstance),\n\t}\n}", "func (f *UploadServiceGetListTagsFunc) PushHook(hook func(context.Context, api.RepoName, ...string) ([]*gitdomain.Tag, error)) {\n\tf.mutex.Lock()\n\tf.hooks = append(f.hooks, hook)\n\tf.mutex.Unlock()\n}", "func (f *AutoIndexingServiceGetUnsafeDBFunc) PushHook(hook func() database.DB) {\n\tf.mutex.Lock()\n\tf.hooks = append(f.hooks, hook)\n\tf.mutex.Unlock()\n}", "func newLfsHook(writer *rotatelogs.RotateLogs, tf *moduleFormatter) logrus.Hook {\n\tlfsHook := lfshook.NewHook(lfshook.WriterMap{\n\t\tlogrus.DebugLevel: writer,\n\t\tlogrus.InfoLevel: writer,\n\t\tlogrus.WarnLevel: writer,\n\t\tlogrus.ErrorLevel: writer,\n\t\tlogrus.FatalLevel: writer,\n\t\tlogrus.PanicLevel: writer,\n\t}, tf)\n\n\treturn 
lfsHook\n}", "func (f *AutoIndexingServiceGetIndexByIDFunc) PushHook(hook func(context.Context, int) (types.Index, bool, error)) {\n\tf.mutex.Lock()\n\tf.hooks = append(f.hooks, hook)\n\tf.mutex.Unlock()\n}", "func (f *AutoIndexingServiceGetIndexesByIDsFunc) PushHook(hook func(context.Context, ...int) ([]types.Index, error)) {\n\tf.mutex.Lock()\n\tf.hooks = append(f.hooks, hook)\n\tf.mutex.Unlock()\n}", "func (a *alphaMock) GetList(ctx context.Context, in *alpha.GetListRequest, opts ...grpc.CallOption) (*alpha.List, error) {\n\t// TODO(#2716): Implement me!\n\treturn nil, errors.Errorf(\"Unimplemented -- GetList coming soon\")\n}", "func NewHook(token string, env string, r ...RollrusInitializer) *Hook {\n\th := &Hook{\n\t\tClient: roll.New(token, env),\n\t\ttriggers: defaultTriggerLevels,\n\t\tclosed: make(chan struct{}),\n\t\tentries: channel.NewBuffer(defaultBufferSize),\n\t\tonce: new(sync.Once),\n\t\tpool: make(chan chan job, defaultNumWorkers),\n\t\tnumWorkers: defaultNumWorkers,\n\t\twg: new(sync.WaitGroup),\n\t}\n\n\tfor _, init := range r {\n\t\tinit(h)\n\t}\n\n\tfor i := 0; i < h.numWorkers; i++ {\n\t\th.wg.Add(1)\n\t\tworker := newWorker(h.pool, h.closed, h.wg)\n\t\tworker.Work()\n\t}\n\n\tgo h.dispatch()\n\n\treturn h\n}", "func newListenerBuilder(meshCatalog catalog.MeshCataloger, svcIdentity identity.ServiceIdentity, cfg configurator.Configurator, statsHeaders map[string]string) *listenerBuilder {\n\treturn &listenerBuilder{\n\t\tmeshCatalog: meshCatalog,\n\t\tserviceIdentity: svcIdentity,\n\t\tcfg: cfg,\n\t\tstatsHeaders: statsHeaders,\n\t}\n}", "func NewBetaProposer(alpha, beta float64) *BetaProposer {\n\tp, _ := prob.NewBeta(alpha, beta)\n\treturn &BetaProposer{p}\n}", "func (mock *HarborRepositoryInterfaceMock) AddFeatureLifecycleCalls() []struct {\n\tCtx context.Context\n\tEnabled func() bool\n\tName string\n\tLifecycle v3.HarborRepositoryLifecycle\n} {\n\tvar calls []struct {\n\t\tCtx context.Context\n\t\tEnabled func() bool\n\t\tName 
string\n\t\tLifecycle v3.HarborRepositoryLifecycle\n\t}\n\tlockHarborRepositoryInterfaceMockAddFeatureLifecycle.RLock()\n\tcalls = mock.calls.AddFeatureLifecycle\n\tlockHarborRepositoryInterfaceMockAddFeatureLifecycle.RUnlock()\n\treturn calls\n}", "func (a *App) GetHook(name string) (*Hook, error) {\n\tsp, err := a.GetSnapshot().FastForward()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn getHook(a, name, sp)\n}", "func (v *version) Hooks() HookInformer {\n\treturn &hookInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions}\n}", "func (f *Function) M__get__(instance, owner Object) (Object, error) {\n\tif instance != None {\n\t\treturn NewBoundMethod(instance, f), nil\n\t}\n\treturn f, nil\n}", "func (f *AutoIndexingServiceNumRepositoriesWithCodeIntelligenceFunc) PushHook(hook func(context.Context) (int, error)) {\n\tf.mutex.Lock()\n\tf.hooks = append(f.hooks, hook)\n\tf.mutex.Unlock()\n}", "func TestGetAllBackendServer(t *testing.T) {\n\tcloud, _, resp, err := beforeTestBlb()\n\tif err != nil {\n\t\tt.Errorf(\"beforeTestBlb err, err: %v\", err)\n\t}\n\tctx := context.Background()\n\t// bs is nil\n\tlb := &blb.LoadBalancer{\n\t\tBlbId: resp.LoadBalancerId,\n\t}\n\tbs, err := cloud.getAllBackendServer(ctx, lb)\n\tif err != nil {\n\t\tt.Errorf(\"getAllBackendServer err, err: %v\", err)\n\t}\n\tif len(bs) != 0 {\n\t\tt.Errorf(\"getAllBackendServer err, bs should be nil but get : %v\", bs)\n\t}\n\t// add bs\n\tbsAdd := []blb.BackendServer{\n\t\t{\n\t\t\tInstanceId: \"1\",\n\t\t},\n\t\t{\n\t\t\tInstanceId: \"2\",\n\t\t},\n\t}\n\targs := blb.AddBackendServersArgs{\n\t\tLoadBalancerId: lb.BlbId,\n\t\tBackendServerList: bsAdd,\n\t}\n\terr = cloud.clientSet.BLBClient.AddBackendServers(ctx, &args, &bce.SignOption{\n\t\tCustomSignFunc: CCEServiceSign,\n\t})\n\tif err != nil {\n\t\tt.Errorf(\"AddBackendServers err, err: %v\", err)\n\t}\n\t// get bs\n\tbs, err = cloud.getAllBackendServer(ctx, lb)\n\tif err != nil 
{\n\t\tt.Errorf(\"getAllBackendServer err, err: %v\", err)\n\t}\n\tif len(bs) != 2 {\n\t\tt.Errorf(\"getAllBackendServer err, bs should be nil but get : %v\", bs)\n\t}\n}", "func newAssert(t *testing.T) func(args ...interface{}) *rpcAssert {\n\treturn func(args ...interface{}) *rpcAssert {\n\t\treturn &rpcAssert{\n\t\t\tt: t,\n\t\t\targs: args,\n\t\t}\n\t}\n}", "func getActivatedVersioners(pluginsToActivate []string, args plugins.PluginArgs, versioners *[]*plugins.ActivatedVersioner) func() {\n\tvar err error\n\tvm, err := plugins.NewVersionerManager(getPluginArgs())\n\tif err != nil {\n\t\tlogAndExit(app.ErrorLogger(log.StandardLogger(), err), \"Failed to create lifecycle plugin manager\")\n\t}\n\n\t*versioners, err = vm.ActivatePlugins(pluginsToActivate)\n\tif err != nil {\n\t\tlogAndExit(app.ErrorLogger(log.StandardLogger(), err), \"Failed to activate lifecycle plugins\")\n\t}\n\n\treturn vm.Close\n}", "func (*llcFactory) New(args *xreg.XactArgs) xreg.BucketEntry {\n\treturn &llcFactory{t: args.T, uuid: args.UUID}\n}", "func NewMockHook(ctrl *gomock.Controller) *MockHook {\n\tmock := &MockHook{ctrl: ctrl}\n\tmock.recorder = &MockHookMockRecorder{mock}\n\treturn mock\n}", "func getNewAPI(anonymous bool) API {\n\t// ignore errors for now\n\td, err := xl.New()\n\tfatalIf(err.Trace(), \"Instantiating xl failed.\", nil)\n\n\treturn API{\n\t\tOP: make(chan APIOperation),\n\t\tXL: d,\n\t\tAnonymous: anonymous,\n\t}\n}", "func newBackingServices(c *Client, namespace string) *backingservices {\n\treturn &backingservices{\n\t\tr: c,\n\t\tns: namespace,\n\t}\n}", "func (s *hookLister) Hooks(namespace string) HookNamespaceLister {\n\treturn hookNamespaceLister{indexer: s.indexer, namespace: namespace}\n}", "func (f *AutoIndexingServiceGetListTagsFunc) SetDefaultHook(hook func(context.Context, api.RepoName, ...string) ([]*gitdomain.Tag, error)) {\n\tf.defaultHook = hook\n}", "func getSwarmingRpcsBotList(ctx context.Context, c *Client, call *swarming_api.BotsListCall) 
(*swarming_api.SwarmingRpcsBotList, error) {\n\tvar tl *swarming_api.SwarmingRpcsBotList\n\tf := func() error {\n\t\tvar err error\n\t\ttl, err = call.Context(ctx).Do()\n\t\treturn err\n\t}\n\terr := callWithRetries(ctx, f)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn tl, nil\n}", "func NewGetBalanceCallback(t mockConstructorTestingTNewGetBalanceCallback) *GetBalanceCallback {\n\tmock := &GetBalanceCallback{}\n\tmock.Mock.Test(t)\n\n\tt.Cleanup(func() { mock.AssertExpectations(t) })\n\n\treturn mock\n}", "func newCache(nbClient libovsdbclient.Client) (*LBCache, error) {\n\t// first, list all load balancers\n\tlbs, err := listLBs(nbClient)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tc := LBCache{}\n\tc.existing = make(map[string]*CachedLB, len(lbs))\n\n\tfor i := range lbs {\n\t\tc.existing[lbs[i].UUID] = &lbs[i]\n\t}\n\n\tps := func(item *nbdb.LogicalSwitch) bool {\n\t\treturn len(item.LoadBalancer) > 0\n\t}\n\tswitches, err := libovsdbops.FindLogicalSwitchesWithPredicate(nbClient, ps)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tfor _, ls := range switches {\n\t\tfor _, lbuuid := range ls.LoadBalancer {\n\t\t\tif lb, ok := c.existing[lbuuid]; ok {\n\t\t\t\tlb.Switches.Insert(ls.Name)\n\t\t\t}\n\t\t}\n\t}\n\n\tpr := func(item *nbdb.LogicalRouter) bool {\n\t\treturn len(item.LoadBalancer) > 0\n\t}\n\trouters, err := libovsdbops.FindLogicalRoutersWithPredicate(nbClient, pr)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tfor _, router := range routers {\n\t\tfor _, lbuuid := range router.LoadBalancer {\n\t\t\tif lb, ok := c.existing[lbuuid]; ok {\n\t\t\t\tlb.Routers.Insert(router.Name)\n\t\t\t}\n\t\t}\n\t}\n\n\t// Get non-empty LB groups\n\tpg := func(item *nbdb.LoadBalancerGroup) bool {\n\t\treturn len(item.LoadBalancer) > 0\n\t}\n\tgroups, err := libovsdbops.FindLoadBalancerGroupsWithPredicate(nbClient, pg)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tfor _, group := range groups {\n\t\tfor _, lbuuid := range group.LoadBalancer 
{\n\t\t\tif lb, ok := c.existing[lbuuid]; ok {\n\t\t\t\tlb.Groups.Insert(group.Name)\n\t\t\t}\n\t\t}\n\t}\n\n\treturn &c, nil\n}", "func newLdbCacheIter(snap *dbCacheSnapshot, slice *util.Range) *ldbCacheIter {\n\titer := snap.pendingKeys.Iterator(slice.Start, slice.Limit)\n\treturn &ldbCacheIter{Iterator: iter}\n}", "func (f *AutoIndexingServiceGetListTagsFunc) History() []AutoIndexingServiceGetListTagsFuncCall {\n\tf.mutex.Lock()\n\thistory := make([]AutoIndexingServiceGetListTagsFuncCall, len(f.history))\n\tcopy(history, f.history)\n\tf.mutex.Unlock()\n\n\treturn history\n}", "func NewBleveBackend() BackendFactory {\n\treturn newBleveBackend\n}", "func (d *dataUpdateTracker) newBloomFilter() bloomFilter {\n\treturn bloomFilter{bloom.NewWithEstimates(dataUpdateTrackerEstItems, dataUpdateTrackerFP)}\n}", "func newMockGetDataFunc(mockStore *mock.NodeStore) func(addr Address) (data []byte, err error) {\n\treturn func(addr Address) (data []byte, err error) {\n\t\tdata, err = mockStore.Get(addr)\n\t\tif err == mock.ErrNotFound {\n\t\t\t// preserve ErrChunkNotFound error\n\t\t\terr = ErrChunkNotFound\n\t\t}\n\t\treturn data, err\n\t}\n}", "func (f *ExtensionStoreGetFeaturedExtensionsFunc) History() []ExtensionStoreGetFeaturedExtensionsFuncCall {\n\tf.mutex.Lock()\n\thistory := make([]ExtensionStoreGetFeaturedExtensionsFuncCall, len(f.history))\n\tcopy(history, f.history)\n\tf.mutex.Unlock()\n\n\treturn history\n}", "func NewBoltHook(options ...HookOption) log.Hook {\n\n\tdefaultOptions := &HookOptions{\n\t\tIDGenerator: NewSatoru(),\n\t\tDbpath: \"log.db\",\n\t\tFileMode: 0600,\n\t\tBoltOptions: nil,\n\t}\n\n\tfor _, option := range options {\n\t\toption(defaultOptions)\n\t}\n\n\tboltDb, err := bolt.Open(defaultOptions.Dbpath, defaultOptions.FileMode, defaultOptions.BoltOptions)\n\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\treturn &boltHook{boltDb, defaultOptions.IDGenerator}\n}", "func (bc *BlueprintController) GetBlueprint(bi usecase.BlueprintInteractor) 
func(*gin.Context) {\n\tb, err := bi.GetBlueprint()\n\n\tif err != nil {\n\t\treturn func(c *gin.Context) {\n\t\t\tc.JSON(http.StatusInternalServerError, gin.H{\n\t\t\t\t\"error\": \"error occur\",\n\t\t\t})\n\t\t}\n\t}\n\n\treturn func(c *gin.Context) {\n\t\tkey := \"j9uzyqp6cyzq\"\n\t\tsecret := \"5y485r8nq9jre4fk6anpu59sqdcpq8xdkuqbd5jxqpvw455gek3aw27ysx4uq7tz\"\n\n\t\tclient, _ := stream.NewClient(key, secret)\n\n\t\tnotifFeed := client.NotificationFeed(\"agency\", \"125\")\n\n\t\tactor, _ := json.Marshal(map[string]interface{}{\n\t\t\t\"url\": \"http://example.org/martin\",\n\t\t\t\"objectType\": \"person\",\n\t\t\t\"id\": \"tag:example.org,2011:martin\",\n\t\t\t\"image\": map[string]interface{}{\n\t\t\t\t\"url\": \"http://example.org/martin/image\",\n\t\t\t\t\"width\": 250,\n\t\t\t\t\"height\": 250,\n\t\t\t},\n\t\t\t\"displayName\": \"Martin Smith\",\n\t\t})\n\t\tobject, _ := json.Marshal(map[string]string{\n\t\t\t\"url\": \"http://example.org/blog/2011/02/entry\",\n\t\t\t\"id\": \"tag:example.org,2011:abc123/xyz\",\n\t\t})\n\t\ttarget, _ := json.Marshal(map[string]string{\n\t\t\t\"url\": \"http://example.org/blog/\",\n\t\t\t\"objectType\": \"blog\",\n\t\t\t\"id\": \"tag:example.org,2011:abc123\",\n\t\t\t\"displayName\": \"Martin's Blog\",\n\t\t})\n\n\t\tresp, err := notifFeed.AddActivity(stream.Activity{\n\t\t\tActor: string(actor),\n\t\t\tVerb: \"post\",\n\t\t\tObject: string(object),\n\t\t\tTarget: string(target),\n\t\t})\n\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\n\t\tlog.Printf(\"%v\", resp)\n\n\t\tc.JSON(http.StatusOK, b)\n\t}\n}", "func NewHook(handler HookFunc, kind HookKind) *Hook {\n\thook := &Hook{\n\t\tHandler: handler,\n\t\tKind: kind,\n\t}\n\n\treturn hook\n}", "func (ct *ctrlerCtx) diffBucket(apicl apiclient.Services) {\n\topts := api.ListWatchOptions{}\n\n\t// get a list of all objects from API server\n\tobjlist, err := apicl.ObjstoreV1().Bucket().List(context.Background(), &opts)\n\tif err != nil {\n\t\tct.logger.Errorf(\"Error getting 
a list of objects. Err: %v\", err)\n\t\treturn\n\t}\n\n\tct.logger.Infof(\"diffBucket(): BucketList returned %d objects\", len(objlist))\n\n\t// build an object map\n\tobjmap := make(map[string]*objstore.Bucket)\n\tfor _, obj := range objlist {\n\t\tobjmap[obj.GetKey()] = obj\n\t}\n\n\tlist, err := ct.Bucket().List(context.Background(), &opts)\n\tif err != nil && !strings.Contains(err.Error(), \"not found in local cache\") {\n\t\tct.logger.Infof(\"Failed to get a list of objects. Err: %s\", err)\n\t\treturn\n\t}\n\n\t// if an object is in our local cache and not in API server, trigger delete for it\n\tfor _, obj := range list {\n\t\t_, ok := objmap[obj.GetKey()]\n\t\tif !ok {\n\t\t\tct.logger.Infof(\"diffBucket(): Deleting existing object %#v since its not in apiserver\", obj.GetKey())\n\t\t\tevt := kvstore.WatchEvent{\n\t\t\t\tType: kvstore.Deleted,\n\t\t\t\tKey: obj.GetKey(),\n\t\t\t\tObject: &obj.Bucket,\n\t\t\t}\n\t\t\tct.handleBucketEvent(&evt)\n\t\t}\n\t}\n\n\t// trigger create event for all others\n\tfor _, obj := range objlist {\n\t\tct.logger.Infof(\"diffBucket(): Adding object %#v\", obj.GetKey())\n\t\tevt := kvstore.WatchEvent{\n\t\t\tType: kvstore.Created,\n\t\t\tKey: obj.GetKey(),\n\t\t\tObject: obj,\n\t\t}\n\t\tct.handleBucketEvent(&evt)\n\t}\n}", "func (f *ReleaseStoreGetArtifactsFunc) PushHook(hook func(context.Context, int64) ([]byte, []byte, error)) {\n\tf.mutex.Lock()\n\tf.hooks = append(f.hooks, hook)\n\tf.mutex.Unlock()\n}", "func generateListGetter(buf *bytes.Buffer, method *generatedGoListMethod) error {\n\treturn goListGetterTemplate.Execute(buf, method)\n}", "func (mock *MultiClusterAppInterfaceMock) AddFeatureLifecycleCalls() []struct {\n\tCtx context.Context\n\tEnabled func() bool\n\tName string\n\tLifecycle v31.MultiClusterAppLifecycle\n} {\n\tvar calls []struct {\n\t\tCtx context.Context\n\t\tEnabled func() bool\n\t\tName string\n\t\tLifecycle 
v31.MultiClusterAppLifecycle\n\t}\n\tlockMultiClusterAppInterfaceMockAddFeatureLifecycle.RLock()\n\tcalls = mock.calls.AddFeatureLifecycle\n\tlockMultiClusterAppInterfaceMockAddFeatureLifecycle.RUnlock()\n\treturn calls\n}", "func New(maxlevel int, cmpFn CompareFn) *List {\n\treturn NewCustom(maxlevel, DefaultProbability, cmpFn, time.Now().Unix())\n}", "func BetaDiff(matches []Match, beta float64) []float64 {\n\tvar diffs []float64\n\tplayers := make(map[string]*glicko2.Player)\n\tparams := glicko2.Parameters{\n\t\tInitialDeviation: 27,\n\t\tInitialVolatility: .06,\n\t}\n\tfor _, match := range matches {\n\t\t// Add players as we discover them.\n\t\tp1, ok := players[match.P1name]\n\t\tif !ok {\n\t\t\tparams.InitialRating = match.P1skill\n\t\t\tplayers[match.P1name] = glicko2.NewPlayer(params)\n\t\t\tp1 = players[match.P1name]\n\t\t}\n\t\tp2, ok := players[match.P2name]\n\t\tif !ok {\n\t\t\tparams.InitialRating = match.P2skill\n\t\t\tplayers[match.P2name] = glicko2.NewPlayer(params)\n\t\t\tp2 = players[match.P2name]\n\t\t}\n\n\t\texpected := Pwin(p1, p2, beta)\n\t\tactual := float64(\n\t\t\tfloat64(match.P1got) / float64(match.P1got+match.P2got))\n\t\tdiff := math.Abs(expected - actual)\n\t\tdiffs = append(diffs, diff)\n\t}\n\n\treturn diffs\n}", "func newListenerCfg(config *Config, rpcCfg RPCConfig) *listenerCfg {\n\treturn &listenerCfg{\n\t\tgrpcListener: func() (net.Listener, error) {\n\t\t\t// If a custom RPC listener is set, we will listen on\n\t\t\t// it instead of the regular tcp socket.\n\t\t\tif rpcCfg.RPCListener != nil {\n\t\t\t\treturn rpcCfg.RPCListener, nil\n\t\t\t}\n\n\t\t\treturn net.Listen(\"tcp\", config.RPCListen)\n\t\t},\n\t\trestListener: func() (net.Listener, error) {\n\t\t\t// If a custom RPC listener is set, we disable REST.\n\t\t\tif rpcCfg.RPCListener != nil {\n\t\t\t\treturn nil, nil\n\t\t\t}\n\n\t\t\treturn net.Listen(\"tcp\", config.RESTListen)\n\t\t},\n\t\tgetLnd: func(network lndclient.Network, cfg *lndConfig) 
(\n\t\t\t*lndclient.GrpcLndServices, error) {\n\n\t\t\tsvcCfg := &lndclient.LndServicesConfig{\n\t\t\t\tLndAddress: cfg.Host,\n\t\t\t\tNetwork: network,\n\t\t\t\tMacaroonDir: cfg.MacaroonDir,\n\t\t\t\tTLSPath: cfg.TLSPath,\n\t\t\t\tCheckVersion: LoopMinRequiredLndVersion,\n\t\t\t}\n\n\t\t\t// If a custom lnd connection is specified we use that\n\t\t\t// directly.\n\t\t\tif rpcCfg.LndConn != nil {\n\t\t\t\tsvcCfg.Dialer = func(context.Context, string) (\n\t\t\t\t\tnet.Conn, error) {\n\t\t\t\t\treturn rpcCfg.LndConn, nil\n\t\t\t\t}\n\t\t\t}\n\n\t\t\treturn lndclient.NewLndServices(svcCfg)\n\t\t},\n\t}\n}", "func getBeta(fAlpha, fBeta float64) float64 {\n\tvar fA, fB float64\n\tif fAlpha > fBeta {\n\t\tfA = fAlpha\n\t\tfB = fBeta\n\t} else {\n\t\tfA = fBeta\n\t\tfB = fAlpha\n\t}\n\tconst maxGammaArgument = 171.624376956302\n\tif fA+fB < maxGammaArgument {\n\t\treturn math.Gamma(fA) / math.Gamma(fA+fB) * math.Gamma(fB)\n\t}\n\tfg := 6.024680040776729583740234375\n\tfgm := fg - 0.5\n\tfLanczos := getLanczosSum(fA)\n\tfLanczos /= getLanczosSum(fA + fB)\n\tfLanczos *= getLanczosSum(fB)\n\tfABgm := fA + fB + fgm\n\tfLanczos *= math.Sqrt((fABgm / (fA + fgm)) / (fB + fgm))\n\tfTempA := fB / (fA + fgm)\n\tfTempB := fA / (fB + fgm)\n\tfResult := math.Exp(-fA*math.Log1p(fTempA) - fB*math.Log1p(fTempB) - fgm)\n\tfResult *= fLanczos\n\treturn fResult\n}", "func (o *InlineObject885) GetBeta() AnyOfobject {\n\tif o == nil || o.Beta == nil {\n\t\tvar ret AnyOfobject\n\t\treturn ret\n\t}\n\treturn *o.Beta\n}", "func NewHookOptions(options map[string]interface{}) HookOptions {\n\tvar mapper models.Mapper\n\tif mapperOptions, ok := options[\"mapper\"]; ok {\n\t\tif m, ok := mapperOptions.(map[string]interface{}); ok {\n\t\t\tmapper = models.ParseMapper(m)\n\t\t}\n\t}\n\tvar name string\n\tif nameOption, ok := options[\"name\"]; ok {\n\t\tname = nameOption.(string)\n\t} else {\n\t\t//TODO: use hook-index as name\n\t\tname = \"default\"\n\t}\n\treturn HookOptions{\n\t\tName: 
name,\n\t\tMapper: mapper,\n\t}\n}", "func (c *ClubBranchClient) Hooks() []Hook {\n\treturn c.hooks.ClubBranch\n}", "func NewSlowLogHook(threshold time.Duration) HookFunc {\n\treturn func(ctx context.Context, call hrpc.Call, customName string) func(err error) {\n\t\tstart := time.Now()\n\t\treturn func(error) {\n\t\t\tduration := time.Since(start)\n\t\t\tif duration < threshold {\n\t\t\t\treturn\n\t\t\t}\n\t\t\tlog.Warn(\"hbase slow log: %s %s %s time: %s\", customName, call.Table(), call.Key(), duration)\n\t\t}\n\t}\n}", "func (*llcFactory) PreRenewHook(_ xreg.BucketEntry) (bool, error) { return true, nil }", "func testHook() *Hook {\n\treturn &Hook{\n\t\tID: sql.NullInt64{Int64: 1, Valid: true},\n\t\tRepoID: sql.NullInt64{Int64: 1, Valid: true},\n\t\tBuildID: sql.NullInt64{Int64: 1, Valid: true},\n\t\tNumber: sql.NullInt32{Int32: 1, Valid: true},\n\t\tSourceID: sql.NullString{String: \"c8da1302-07d6-11ea-882f-4893bca275b8\", Valid: true},\n\t\tCreated: sql.NullInt64{Int64: time.Now().UTC().Unix(), Valid: true},\n\t\tHost: sql.NullString{String: \"github.com\", Valid: true},\n\t\tEvent: sql.NullString{String: \"push\", Valid: true},\n\t\tEventAction: sql.NullString{String: \"\", Valid: false},\n\t\tBranch: sql.NullString{String: \"master\", Valid: true},\n\t\tError: sql.NullString{String: \"\", Valid: false},\n\t\tStatus: sql.NullString{String: \"success\", Valid: true},\n\t\tLink: sql.NullString{String: \"https://github.com/github/octocat/settings/hooks/1\", Valid: true},\n\t\tWebhookID: sql.NullInt64{Int64: 123456, Valid: true},\n\t}\n}", "func (mock *HarborRepositoryInterfaceMock) AddClusterScopedFeatureLifecycleCalls() []struct {\n\tCtx context.Context\n\tEnabled func() bool\n\tName string\n\tClusterName string\n\tLifecycle v3.HarborRepositoryLifecycle\n} {\n\tvar calls []struct {\n\t\tCtx context.Context\n\t\tEnabled func() bool\n\t\tName string\n\t\tClusterName string\n\t\tLifecycle 
v3.HarborRepositoryLifecycle\n\t}\n\tlockHarborRepositoryInterfaceMockAddClusterScopedFeatureLifecycle.RLock()\n\tcalls = mock.calls.AddClusterScopedFeatureLifecycle\n\tlockHarborRepositoryInterfaceMockAddClusterScopedFeatureLifecycle.RUnlock()\n\treturn calls\n}", "func (*TestingKnobs) ModuleTestingKnobs() {}", "func (f *ExtensionStoreListFunc) PushHook(hook func(context.Context, stores.ExtensionsListOptions) ([]*stores.Extension, error)) {\n\tf.mutex.Lock()\n\tf.hooks = append(f.hooks, hook)\n\tf.mutex.Unlock()\n}", "func (f *ReleaseStoreGetLatestBatchFunc) SetDefaultHook(hook func(context.Context, []int32, string, bool) ([]*stores.Release, error)) {\n\tf.defaultHook = hook\n}", "func newBlockRetrievalWorker(bg blockGetter, q *blockRetrievalQueue) *blockRetrievalWorker {\n\tbrw := &blockRetrievalWorker{\n\t\tblockGetter: bg,\n\t\tstopCh: make(chan struct{}),\n\t\tqueue: q,\n\t}\n\tgo brw.run()\n\treturn brw\n}", "func (m *MockPoolRegistry) GetNewHostBrickAllocations(ctxt context.Context, hostname string) <-chan registry.BrickAllocation {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"GetNewHostBrickAllocations\", ctxt, hostname)\n\tret0, _ := ret[0].(<-chan registry.BrickAllocation)\n\treturn ret0\n}", "func (o *InlineObject885) SetBeta(v AnyOfobject) {\n\to.Beta = &v\n}", "func initHistlist(ed editor, ev *eval.Evaler, getCmds func() ([]string, error), lsMode *listing.Mode, lsBinding *bindingMap) eval.Ns {\n\tbinding := emptyBindingMap\n\tmode := histlist.Mode{\n\t\tMode: lsMode,\n\t\tKeyHandler: keyHandlerFromBindings(ed, ev, &binding, lsBinding),\n\t}\n\tns := eval.Ns{}.\n\t\tAddGoFn(\"<edit:histlist>\", \"start\", func() {\n\t\t\tstartHistlist(ed, getCmds, &mode)\n\t\t})\n\treturn ns\n}", "func (c *ClientWithResponses) BetaTestersGetInstanceWithResponse(ctx context.Context, id string, params *BetaTestersGetInstanceParams, reqEditors ...RequestEditorFn) (*BetaTestersGetInstanceResponse, error) {\n\trsp, err := c.BetaTestersGetInstance(ctx, id, params, 
reqEditors...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn ParseBetaTestersGetInstanceResponse(rsp)\n}", "func (f *AutoIndexingServiceRepositoryIDsWithConfigurationFunc) PushHook(hook func(context.Context, int, int) ([]shared.RepositoryWithAvailableIndexers, int, error)) {\n\tf.mutex.Lock()\n\tf.hooks = append(f.hooks, hook)\n\tf.mutex.Unlock()\n}", "func newListeners() *listeners { return &listeners{m: make(map[string]nl.Listener, 64)} }", "func (f *DBStoreGetConfigurationPoliciesFunc) PushHook(hook func(context.Context, dbstore.GetConfigurationPoliciesOptions) ([]dbstore.ConfigurationPolicy, int, error)) {\n\tf.mutex.Lock()\n\tf.hooks = append(f.hooks, hook)\n\tf.mutex.Unlock()\n}", "func New() logrus.Hook {\n\treturn &normalCallerHook{}\n}", "func (m *MockPool) Get() MutableList {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"Get\")\n\tret0, _ := ret[0].(MutableList)\n\treturn ret0\n}", "func NewGet(g Getter) *Get {\n\treturn &Get{g}\n}", "func newBinder(chart *chart.Chart, cmIface v1.ConfigMapInterface) (mode.Binder, error) {\n\t// parse the values file for steward-specific config map info\n\tcmNames, err := getStewardConfigMapInfo(chart.Values)\n\tif err != nil {\n\t\tlogger.Errorf(\"getting steward config map info (%s)\", err)\n\t\treturn nil, err\n\t}\n\tlogger.Debugf(\"got config map names for helm chart %s\", cmNames)\n\treturn binder{\n\t\tcmNames: cmNames,\n\t\tcmIface: cmIface,\n\t}, nil\n}", "func generateGetOrCreateList(buf *bytes.Buffer, method *generatedGoListMethod) error {\n\treturn goGetOrCreateListTemplate.Execute(buf, method)\n}", "func (c *TestClient) CreateInstanceBeta(project, zone string, i *computeBeta.Instance) error {\n\tif c.CreateInstanceBetaFn != nil {\n\t\treturn c.CreateInstanceBetaFn(project, zone, i)\n\t}\n\treturn c.client.CreateInstanceBeta(project, zone, i)\n}", "func GetMutableBagForTesting(v map[string]interface{}) *MutableBag {\n\tm := GetMutableBag(nil)\n\tm.values = v\n\treturn m\n}", "func (f 
*PolicyServiceGetRetentionPolicyOverviewFunc) PushHook(hook func(context.Context, types.Upload, bool, int, int64, string, time.Time) ([]types.RetentionPolicyMatchCandidate, int, error)) {\n\tf.mutex.Lock()\n\tf.hooks = append(f.hooks, hook)\n\tf.mutex.Unlock()\n}", "func (c *BillClient) Hooks() []Hook {\n\treturn c.hooks.Bill\n}", "func (c *BillClient) Hooks() []Hook {\n\treturn c.hooks.Bill\n}", "func (c *BillClient) Hooks() []Hook {\n\treturn c.hooks.Bill\n}" ]
[ "0.75942796", "0.5408831", "0.5109261", "0.49638084", "0.4917815", "0.4800882", "0.47378483", "0.46847326", "0.46703503", "0.46522126", "0.46307093", "0.45831934", "0.45636097", "0.45611542", "0.45577788", "0.45528764", "0.45515147", "0.4545838", "0.4534998", "0.4518134", "0.44979674", "0.4494156", "0.44940612", "0.44720718", "0.4467337", "0.44011977", "0.43894717", "0.43653357", "0.43468305", "0.43416816", "0.43360996", "0.43265915", "0.43110937", "0.4306581", "0.4289749", "0.4282926", "0.427915", "0.42504057", "0.42370427", "0.42334604", "0.4231189", "0.42269793", "0.421497", "0.42106593", "0.42046946", "0.4203805", "0.41849655", "0.41782472", "0.41781804", "0.4177024", "0.41654238", "0.41649538", "0.41560593", "0.41554624", "0.41546685", "0.415336", "0.4132749", "0.4131572", "0.41230866", "0.41198125", "0.4119241", "0.4113467", "0.41127306", "0.41097945", "0.4106927", "0.41059798", "0.4094409", "0.40905142", "0.409001", "0.40887478", "0.4084991", "0.4083814", "0.40782285", "0.4076583", "0.4075371", "0.40753412", "0.40749973", "0.4074948", "0.40716353", "0.40679857", "0.40677813", "0.4067077", "0.40657103", "0.4065607", "0.40633082", "0.4061706", "0.406094", "0.40560326", "0.40547928", "0.40527216", "0.40362617", "0.40340793", "0.40319988", "0.4027003", "0.4021774", "0.40187457", "0.40181366", "0.40139788", "0.40139788", "0.40139788" ]
0.87772727
0
newMockCloud returns a mock GCE instance with the appropriate handlers hooks
newMockCloud возвращает мок-экземпляр GCE с соответствующими обработчиками и хуками
func (bil *baseInstanceList) newMockCloud() cloud.Cloud { c := cloud.NewMockGCE(nil) // insert hooks to lazy create a instance when needed c.MockInstances.GetHook = bil.newGAGetHook() c.MockBetaInstances.GetHook = bil.newBetaGetHook() return c }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func NewCloudMock() *CloudMock {\n\taddress, grpcServer, mockTrace := startMockServer()\n\n\tconn, err := grpc.Dial(address, grpc.WithInsecure())\n\tif err != nil {\n\t\tlog.Fatalf(\"did not connect: %s\", err)\n\t}\n\n\ttraceClient := cloudtrace.NewTraceServiceClient(conn)\n\tmetricClient := monitoring.NewMetricServiceClient(conn)\n\treturn &CloudMock{\n\t\tconn,\n\t\tgrpcServer,\n\t\tmockTrace,\n\t\ttraceClient,\n\t\tmetricClient,\n\t}\n}", "func NewMockCloud(ctrl *gomock.Controller) *MockCloud {\n\tmock := &MockCloud{ctrl: ctrl}\n\tmock.recorder = &MockCloudMockRecorder{mock}\n\treturn mock\n}", "func NewFakeGCECloud(vals TestClusterValues) *Cloud {\n\tservice, err := compute.NewService(context.Background(), option.WithoutAuthentication())\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tgce := &Cloud{\n\t\tregion: vals.Region,\n\t\tservice: service,\n\t\tmanagedZones: []string{vals.ZoneName},\n\t\tprojectID: vals.ProjectID,\n\t\tnetworkProjectID: vals.ProjectID,\n\t\tClusterID: fakeClusterID(vals.ClusterID),\n\t\tonXPN: vals.OnXPN,\n\t\tmetricsCollector: newLoadBalancerMetrics(),\n\t\tprojectsBasePath: getProjectsBasePath(service.BasePath),\n\t}\n\tc := cloud.NewMockGCE(&gceProjectRouter{gce})\n\tgce.c = c\n\treturn gce\n}", "func newK8SCloud(opts Options) (CloudProvider, error) {\n\n\tif opts.Name == \"\" {\n\t\treturn nil, errors.New(\"K8SCloud: Invalid cloud name\")\n\t}\n\tif opts.Host == \"\" {\n\t\treturn nil, errors.New(\"K8SCloud: Invalid cloud host\")\n\t}\n\tif opts.K8SNamespace == \"\" {\n\t\topts.K8SNamespace = apiv1.NamespaceDefault\n\t}\n\n\tcloud := &K8SCloud{\n\t\tname: opts.Name,\n\t\thost: opts.Host,\n\t\tbearerToken: opts.K8SBearerToken,\n\t\tnamespace: opts.K8SNamespace,\n\t\tinsecure: opts.Insecure,\n\t}\n\tconfig := &rest.Config{\n\t\tHost: opts.Host,\n\t\tBearerToken: opts.K8SBearerToken,\n\t\tTLSClientConfig: rest.TLSClientConfig{\n\t\t\tInsecure: opts.Insecure,\n\t\t},\n\t}\n\tclientset, err := kubernetes.NewForConfig(config)\n\tif err != 
nil {\n\t\treturn nil, err\n\t}\n\tcloud.client = clientset\n\treturn cloud, nil\n}", "func NewK8sClient(t interface {\n\tmock.TestingT\n\tCleanup(func())\n}) *K8sClient {\n\tmock := &K8sClient{}\n\tmock.Mock.Test(t)\n\n\tt.Cleanup(func() { mock.AssertExpectations(t) })\n\n\treturn mock\n}", "func NewCloud(cfg CloudConfig, metricsRegisterer prometheus.Registerer) (Cloud, error) {\n\tmetadataSess := session.Must(session.NewSession(aws.NewConfig()))\n\tmetadata := services.NewEC2Metadata(metadataSess)\n\tif len(cfg.Region) == 0 {\n\t\tregion, err := metadata.Region()\n\t\tif err != nil {\n\t\t\treturn nil, errors.Wrap(err, \"failed to introspect region from EC2Metadata, specify --aws-region instead if EC2Metadata is unavailable\")\n\t\t}\n\t\tcfg.Region = region\n\t}\n\n\tif len(cfg.VpcID) == 0 {\n\t\tvpcId, err := metadata.VpcID()\n\t\tif err != nil {\n\t\t\treturn nil, errors.Wrap(err, \"failed to introspect vpcID from EC2Metadata, specify --aws-vpc-id instead if EC2Metadata is unavailable\")\n\t\t}\n\t\tcfg.VpcID = vpcId\n\t}\n\n\tawsCFG := aws.NewConfig().WithRegion(cfg.Region).WithSTSRegionalEndpoint(endpoints.RegionalSTSEndpoint).WithMaxRetries(cfg.MaxRetries)\n\tsess := session.Must(session.NewSession(awsCFG))\n\tinjectUserAgent(&sess.Handlers)\n\n\tif cfg.ThrottleConfig != nil {\n\t\tthrottler := throttle.NewThrottler(cfg.ThrottleConfig)\n\t\tthrottler.InjectHandlers(&sess.Handlers)\n\t}\n\tif metricsRegisterer != nil {\n\t\tmetricsCollector, err := metrics.NewCollector(metricsRegisterer)\n\t\tif err != nil {\n\t\t\treturn nil, errors.Wrapf(err, \"failed to initialize sdk metrics collector\")\n\t\t}\n\t\tmetricsCollector.InjectHandlers(&sess.Handlers)\n\t}\n\n\treturn &defaultCloud{\n\t\tcfg: cfg,\n\t\tec2: services.NewEC2(sess),\n\t\telbv2: services.NewELBV2(sess),\n\t\tacm: services.NewACM(sess),\n\t\twafv2: services.NewWAFv2(sess),\n\t\twafRegional: services.NewWAFRegional(sess, cfg.Region),\n\t\tshield: services.NewShield(sess),\n\t\trgt: 
services.NewRGT(sess),\n\t}, nil\n}", "func fakeGcp() (*compute.Service, error) {\n\tclient := &http.Client{}\n\thttpmock.ActivateNonDefault(client)\n\treturn compute.NewService(context.Background(), option.WithoutAuthentication(), option.WithHTTPClient(client))\n}", "func NewMockAzureCloud(location string) *MockAzureCloud {\n\treturn &MockAzureCloud{\n\t\tLocation: location,\n\t\tResourceGroupsClient: &MockResourceGroupsClient{\n\t\t\tRGs: map[string]resources.Group{},\n\t\t},\n\t\tVirtualNetworksClient: &MockVirtualNetworksClient{\n\t\t\tVNets: map[string]network.VirtualNetwork{},\n\t\t},\n\t\tSubnetsClient: &MockSubnetsClient{\n\t\t\tSubnets: map[string]network.Subnet{},\n\t\t},\n\t\tRouteTablesClient: &MockRouteTablesClient{\n\t\t\tRTs: map[string]network.RouteTable{},\n\t\t},\n\t\tNetworkSecurityGroupsClient: &MockNetworkSecurityGroupsClient{\n\t\t\tNSGs: map[string]network.SecurityGroup{},\n\t\t},\n\t\tApplicationSecurityGroupsClient: &MockApplicationSecurityGroupsClient{\n\t\t\tASGs: map[string]network.ApplicationSecurityGroup{},\n\t\t},\n\t\tVMScaleSetsClient: &MockVMScaleSetsClient{\n\t\t\tVMSSes: map[string]compute.VirtualMachineScaleSet{},\n\t\t},\n\t\tVMScaleSetVMsClient: &MockVMScaleSetVMsClient{\n\t\t\tVMs: map[string]compute.VirtualMachineScaleSetVM{},\n\t\t},\n\t\tDisksClient: &MockDisksClient{\n\t\t\tDisks: map[string]compute.Disk{},\n\t\t},\n\t\tRoleAssignmentsClient: &MockRoleAssignmentsClient{\n\t\t\tRAs: map[string]authz.RoleAssignment{},\n\t\t},\n\t\tNetworkInterfacesClient: &MockNetworkInterfacesClient{\n\t\t\tNIs: map[string]network.Interface{},\n\t\t},\n\t\tLoadBalancersClient: &MockLoadBalancersClient{\n\t\t\tLBs: map[string]network.LoadBalancer{},\n\t\t},\n\t\tPublicIPAddressesClient: &MockPublicIPAddressesClient{\n\t\t\tPubIPs: map[string]network.PublicIPAddress{},\n\t\t},\n\t\tNatGatewaysClient: &MockNatGatewaysClient{\n\t\t\tNGWs: map[string]network.NatGateway{},\n\t\t},\n\t}\n}", "func NewCloud(configReader io.Reader) 
(cloudprovider.Interface, error) {\n\taz, err := NewCloudWithoutFeatureGates(configReader)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\taz.ipv6DualStackEnabled = true\n\n\treturn az, nil\n}", "func NewGCEClient() *gce.Cloud {\n\tvar configReader func() io.Reader\n\tif flags.F.ConfigFilePath != \"\" {\n\t\tklog.Infof(\"Reading config from path %q\", flags.F.ConfigFilePath)\n\t\tconfig, err := os.Open(flags.F.ConfigFilePath)\n\t\tif err != nil {\n\t\t\tklog.Fatalf(\"%v\", err)\n\t\t}\n\t\tdefer config.Close()\n\n\t\tallConfig, err := io.ReadAll(config)\n\t\tif err != nil {\n\t\t\tklog.Fatalf(\"Error while reading config (%q): %v\", flags.F.ConfigFilePath, err)\n\t\t}\n\t\tklog.V(4).Infof(\"Cloudprovider config file contains: %q\", string(allConfig))\n\n\t\tconfigReader = generateConfigReaderFunc(allConfig)\n\t} else {\n\t\tklog.V(2).Infof(\"No cloudprovider config file provided, using default values.\")\n\t\tconfigReader = func() io.Reader { return nil }\n\t}\n\n\t// Creating the cloud interface involves resolving the metadata server to get\n\t// an oauth token. If this fails, the token provider assumes it's not on GCE.\n\t// No errors are thrown. So we need to keep retrying till it works because\n\t// we know we're on GCE.\n\tfor {\n\t\tprovider, err := cloudprovider.GetCloudProvider(\"gce\", configReader())\n\t\tif err == nil {\n\t\t\tcloud := provider.(*gce.Cloud)\n\t\t\t// Configure GCE rate limiting\n\t\t\trl, err := ratelimit.NewGCERateLimiter(flags.F.GCERateLimit.Values(), flags.F.GCEOperationPollInterval)\n\t\t\tif err != nil {\n\t\t\t\tklog.Fatalf(\"Error configuring rate limiting: %v\", err)\n\t\t\t}\n\t\t\tcloud.SetRateLimiter(rl)\n\t\t\t// If this controller is scheduled on a node without compute/rw\n\t\t\t// it won't be allowed to list backends. We can assume that the\n\t\t\t// user has no need for Ingress in this case. 
If they grant\n\t\t\t// permissions to the node they will have to restart the controller\n\t\t\t// manually to re-create the client.\n\t\t\t// TODO: why do we bail with success out if there is a permission error???\n\t\t\tif _, err = cloud.ListGlobalBackendServices(); err == nil || utils.IsHTTPErrorCode(err, http.StatusForbidden) {\n\t\t\t\treturn cloud\n\t\t\t}\n\t\t\tklog.Warningf(\"Failed to list backend services, retrying: %v\", err)\n\t\t} else {\n\t\t\tklog.Warningf(\"Failed to get cloud provider, retrying: %v\", err)\n\t\t}\n\t\ttime.Sleep(cloudClientRetryInterval)\n\t}\n}", "func newHTTPCloud(config io.Reader) (*httpCloud, error) {\n\tif config != nil {\n\t\tvar cfg Config\n\t\tif err := gcfg.ReadInto(&cfg, config); err != nil {\n\t\t\treturn nil, fmt.Errorf(\"Couldn't read config: %v\", err)\n\t\t}\n\n\t\tinstancesURL := cfg.Global.InstancesURL\n\t\t// Validate URL\n\t\t_, err := url.ParseRequestURI(instancesURL)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"Can't parse the instances-url provided: %s\", err)\n\t\t}\n\t\t// Handle Trailing slashes\n\t\tinstancesURL = strings.TrimRight(instancesURL, \"/\")\n\n\t\tschedulerExtensionURL := cfg.Global.SchedulerExtensionURL\n\t\t// Validate URL\n\t\t_, err = url.ParseRequestURI(schedulerExtensionURL)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"Can't parse the scheduler-extension-url provided: %s\", err)\n\t\t}\n\t\t// Handle Trailing slashes\n\t\tschedulerExtensionURL = strings.TrimRight(schedulerExtensionURL, \"/\")\n\n\t\treturn &httpCloud{\n\t\t\tinstancesURL: instancesURL,\n\t\t\tinstancesSupported: cfg.Global.InstancesSupported,\n\t\t\ttcpLoadBalancerSupported: cfg.Global.TcpLoadBalancerSupported,\n\t\t\tzonesSupported: cfg.Global.ZonesSupported,\n\t\t\tclustersSupported: cfg.Global.ClustersSupported,\n\t\t\tschedulerExtensionURL: schedulerExtensionURL,\n\t\t\tschedulerExtensionSupported: cfg.Global.SchedulerExtensionSupported,\n\t\t}, nil\n\t}\n\treturn nil, fmt.Errorf(\"Config file is 
empty or is not provided\")\n}", "func newClient() (*storage.Client, error) {\n\tctx := context.Background()\n\n\tbyteKey, err := gcloud.GetDecodedKey()\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to get gcp key, err: %w\", err)\n\t}\n\tclient, err := storage.NewClient(ctx, option.WithCredentialsJSON(byteKey))\n\tif err != nil {\n\t\tlog.Println(\"failed to login with GCP_KEY, trying with default application credentials...\")\n\t\tclient, err = storage.NewClient(ctx)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"failed to open Google Cloud Storage client: %w\", err)\n\t\t}\n\t}\n\n\treturn client, nil\n}", "func NewGCSUploader(t testing.TB) *GCSUploader {\n\tmock := &GCSUploader{}\n\n\tt.Cleanup(func() { mock.AssertExpectations(t) })\n\n\treturn mock\n}", "func New(d diag.Sink, cloudURL string, project *workspace.Project, insecure bool) (Backend, error) {\n\tcloudURL = ValueOrDefaultURL(cloudURL)\n\taccount, err := workspace.GetAccount(cloudURL)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"getting stored credentials: %w\", err)\n\t}\n\tapiToken := account.AccessToken\n\n\tclient := client.NewClient(cloudURL, apiToken, insecure, d)\n\tcapabilities := detectCapabilities(d, client)\n\n\treturn &cloudBackend{\n\t\td: d,\n\t\turl: cloudURL,\n\t\tclient: client,\n\t\tcapabilities: capabilities,\n\t\tcurrentProject: project,\n\t}, nil\n}", "func NewCloudCommunications()(*CloudCommunications) {\n m := &CloudCommunications{\n Entity: *NewEntity(),\n }\n return m\n}", "func newCloudlyckeClient() *http.Client {\n\treturn &http.Client{}\n}", "func configureCloud(res http.ResponseWriter, req *http.Request) (gcs *gcsPhotos) {\n\tctx := appengine.NewContext(req)\n\tclient, err := storage.NewClient(ctx)\n\tif err != nil {\n\t\tlog.Errorf(ctx, \"ERROR handler NewClient: \", err)\n\t\treturn\n\t}\n\tdefer client.Close()\n\t\n\tgcs = &gcsPhotos{\n\t\tctx: ctx,\n\t\tres: res,\n\t\tclient: client,\n\t\tbucket: client.Bucket(gcsBucket),\n\t}\n\treturn\n}", "func 
New() (*mock, error) {\n\treturn &mock{\n\t\tConfigService: ConfigService{},\n\t\tContainerService: ContainerService{},\n\t\tDistributionService: DistributionService{},\n\t\tImageService: ImageService{},\n\t\tNetworkService: NetworkService{},\n\t\tNodeService: NodeService{},\n\t\tPluginService: PluginService{},\n\t\tSecretService: SecretService{},\n\t\tServiceService: ServiceService{},\n\t\tSystemService: SystemService{},\n\t\tSwarmService: SwarmService{},\n\t\tVolumeService: VolumeService{},\n\t\tVersion: Version,\n\t}, nil\n}", "func newCloudConnection(config io.Reader) (cloudprovider.Interface, error) {\n\tklog.V(4).Infof(\"newCloudConnection called with %+v\", config)\n\tif config != nil {\n\t\tklog.Warningf(\"supplied config is not read by this version. Using environment\")\n\t}\n\tnewCloud := &cloud{}\n\t_, err := newCloud.cloudClient()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn newCloud, nil\n}", "func NewCloudStorage(devservers []string,\n\ttlwServer, dutName, dutServer, buildArtifactsURL, swarmingTaskID, buildBucketID string) *CloudStorage {\n\treturn &CloudStorage{\n\t\tnewClient: func(ctx context.Context) (devserver.Client, error) {\n\t\t\treturn newClientForURLs(ctx, devservers, tlwServer, dutName, dutServer, swarmingTaskID, buildBucketID)\n\t\t},\n\t\tbuildArtifactsURL: buildArtifactsURL,\n\t}\n}", "func TestGetCloudProvider(t *testing.T) {\n\tfakeCredFile := \"fake-cred-file.json\"\n\tfakeKubeConfig := \"fake-kube-config\"\n\temptyKubeConfig := \"empty-kube-config\"\n\tfakeContent := `\napiVersion: v1\nclusters:\n- cluster:\n server: https://localhost:8080\n name: foo-cluster\ncontexts:\n- context:\n cluster: foo-cluster\n user: foo-user\n namespace: bar\n name: foo-context\ncurrent-context: foo-context\nkind: Config\nusers:\n- name: foo-user\n user:\n exec:\n apiVersion: client.authentication.k8s.io/v1alpha1\n args:\n - arg-1\n - arg-2\n command: foo-command\n`\n\n\terr := createTestFile(emptyKubeConfig)\n\tif err != nil 
{\n\t\tt.Error(err)\n\t}\n\tdefer func() {\n\t\tif err := os.Remove(emptyKubeConfig); err != nil {\n\t\t\tt.Error(err)\n\t\t}\n\t}()\n\n\ttests := []struct {\n\t\tdesc string\n\t\tcreateFakeCredFile bool\n\t\tcreateFakeKubeConfig bool\n\t\tkubeconfig string\n\t\tnodeID string\n\t\tuserAgent string\n\t\tallowEmptyCloudConfig bool\n\t\texpectedErr error\n\t}{\n\t\t{\n\t\t\tdesc: \"out of cluster, no kubeconfig, no credential file\",\n\t\t\tkubeconfig: \"\",\n\t\t\tnodeID: \"\",\n\t\t\tallowEmptyCloudConfig: true,\n\t\t\texpectedErr: nil,\n\t\t},\n\t\t{\n\t\t\tdesc: \"[failure][disallowEmptyCloudConfig] out of cluster, no kubeconfig, no credential file\",\n\t\t\tkubeconfig: \"\",\n\t\t\tnodeID: \"\",\n\t\t\tallowEmptyCloudConfig: false,\n\t\t\texpectedErr: nil,\n\t\t},\n\t\t{\n\t\t\tdesc: \"[failure] out of cluster & in cluster, specify a non-exist kubeconfig, no credential file\",\n\t\t\tkubeconfig: \"/tmp/non-exist.json\",\n\t\t\tnodeID: \"\",\n\t\t\tallowEmptyCloudConfig: true,\n\t\t\texpectedErr: nil,\n\t\t},\n\t\t{\n\t\t\tdesc: \"[failure] out of cluster & in cluster, specify a empty kubeconfig, no credential file\",\n\t\t\tkubeconfig: emptyKubeConfig,\n\t\t\tnodeID: \"\",\n\t\t\tallowEmptyCloudConfig: true,\n\t\t\texpectedErr: fmt.Errorf(\"failed to get KubeClient: invalid configuration: no configuration has been provided, try setting KUBERNETES_MASTER environment variable\"),\n\t\t},\n\t\t{\n\t\t\tdesc: \"[failure] out of cluster & in cluster, specify a fake kubeconfig, no credential file\",\n\t\t\tcreateFakeKubeConfig: true,\n\t\t\tkubeconfig: fakeKubeConfig,\n\t\t\tnodeID: \"\",\n\t\t\tallowEmptyCloudConfig: true,\n\t\t\texpectedErr: nil,\n\t\t},\n\t\t{\n\t\t\tdesc: \"[success] out of cluster & in cluster, no kubeconfig, a fake credential file\",\n\t\t\tcreateFakeCredFile: true,\n\t\t\tkubeconfig: \"\",\n\t\t\tnodeID: \"\",\n\t\t\tuserAgent: \"useragent\",\n\t\t\tallowEmptyCloudConfig: true,\n\t\t\texpectedErr: nil,\n\t\t},\n\t}\n\n\tfor _, test := range 
tests {\n\t\tif test.createFakeKubeConfig {\n\t\t\tif err := createTestFile(fakeKubeConfig); err != nil {\n\t\t\t\tt.Error(err)\n\t\t\t}\n\t\t\tdefer func() {\n\t\t\t\tif err := os.Remove(fakeKubeConfig); err != nil {\n\t\t\t\t\tt.Error(err)\n\t\t\t\t}\n\t\t\t}()\n\n\t\t\tif err := os.WriteFile(fakeKubeConfig, []byte(fakeContent), 0666); err != nil {\n\t\t\t\tt.Error(err)\n\t\t\t}\n\t\t}\n\t\tif test.createFakeCredFile {\n\t\t\tif err := createTestFile(fakeCredFile); err != nil {\n\t\t\t\tt.Error(err)\n\t\t\t}\n\t\t\tdefer func() {\n\t\t\t\tif err := os.Remove(fakeCredFile); err != nil {\n\t\t\t\t\tt.Error(err)\n\t\t\t\t}\n\t\t\t}()\n\n\t\t\toriginalCredFile, ok := os.LookupEnv(DefaultAzureCredentialFileEnv)\n\t\t\tif ok {\n\t\t\t\tdefer os.Setenv(DefaultAzureCredentialFileEnv, originalCredFile)\n\t\t\t} else {\n\t\t\t\tdefer os.Unsetenv(DefaultAzureCredentialFileEnv)\n\t\t\t}\n\t\t\tos.Setenv(DefaultAzureCredentialFileEnv, fakeCredFile)\n\t\t}\n\t\tcloud, err := getCloudProvider(test.kubeconfig, test.nodeID, \"\", \"\", test.userAgent, test.allowEmptyCloudConfig, 25.0, 50)\n\t\tif !reflect.DeepEqual(err, test.expectedErr) && test.expectedErr != nil && !strings.Contains(err.Error(), test.expectedErr.Error()) {\n\t\t\tt.Errorf(\"desc: %s,\\n input: %q, GetCloudProvider err: %v, expectedErr: %v\", test.desc, test.kubeconfig, err, test.expectedErr)\n\t\t}\n\t\tif cloud == nil {\n\t\t\tt.Errorf(\"return value of getCloudProvider should not be nil even there is error\")\n\t\t} else {\n\t\t\tassert.Equal(t, cloud.Environment.StorageEndpointSuffix, storage.DefaultBaseURL)\n\t\t\tassert.Equal(t, cloud.UserAgent, test.userAgent)\n\t\t}\n\t}\n}", "func New(ip string, user string, name string) *Cloud {\n\treturn &Cloud{\n\t\tIP: ip,\n\t\tUser: user,\n\t\tName: name,\n\t\tType: types.CloudTypeDocker,\n\t}\n}", "func NewMockBackend(conf config.Config) (*MockBackend, error) {\n\t// Set up a GCE scheduler backend that has a mock client\n\t// so that it doesn't actually 
communicate with GCE.\n\n\tgceWrapper := new(gcemock.Wrapper)\n\tgceClient := &gceClient{\n\t\twrapper: gceWrapper,\n\t\tproject: conf.Backends.GCE.Project,\n\t\tzone: conf.Backends.GCE.Zone,\n\t}\n\n\tschedClient, err := scheduler.NewClient(conf.Worker)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &MockBackend{\n\t\tBackend: &Backend{\n\t\t\tconf: conf,\n\t\t\tclient: schedClient,\n\t\t\tgce: gceClient,\n\t\t},\n\t\tWrapper: gceWrapper,\n\t}, nil\n}", "func NewClient(t mockConstructorTestingTNewClient) *Client {\n\tmock := &Client{}\n\tmock.Mock.Test(t)\n\n\tt.Cleanup(func() { mock.AssertExpectations(t) })\n\n\treturn mock\n}", "func NewClient(t mockConstructorTestingTNewClient) *Client {\n\tmock := &Client{}\n\tmock.Mock.Test(t)\n\n\tt.Cleanup(func() { mock.AssertExpectations(t) })\n\n\treturn mock\n}", "func NewClient(t mockConstructorTestingTNewClient) *Client {\n\tmock := &Client{}\n\tmock.Mock.Test(t)\n\n\tt.Cleanup(func() { mock.AssertExpectations(t) })\n\n\treturn mock\n}", "func NewClient(t mockConstructorTestingTNewClient) *Client {\n\tmock := &Client{}\n\tmock.Mock.Test(t)\n\n\tt.Cleanup(func() { mock.AssertExpectations(t) })\n\n\treturn mock\n}", "func NewStorage(t mockConstructorTestingTNewStorage) *Storage {\n\tmock := &Storage{}\n\tmock.Mock.Test(t)\n\n\tt.Cleanup(func() { mock.AssertExpectations(t) })\n\n\treturn mock\n}", "func NewStorage(t mockConstructorTestingTNewStorage) *Storage {\n\tmock := &Storage{}\n\tmock.Mock.Test(t)\n\n\tt.Cleanup(func() { mock.AssertExpectations(t) })\n\n\treturn mock\n}", "func newMockKvCapabilityVerifier(t mockConstructorTestingTnewMockKvCapabilityVerifier) *mockKvCapabilityVerifier {\n\tmock := &mockKvCapabilityVerifier{}\n\tmock.Mock.Test(t)\n\n\tt.Cleanup(func() { mock.AssertExpectations(t) })\n\n\treturn mock\n}", "func NewClient(hCloudToken string) *Client {\n\treturn &Client{\n\t\thCloudToken: hCloudToken,\n\t\thttpClient: &http.Client{Timeout: 3 * time.Second},\n\t}\n}", "func test_cloud() 
{\n\tfmt.Println(\"Testing the clouded background...\")\n\tbg := initBackground()\n\tbg = insertCloud(bg)\n\trender(bg)\n}", "func NewMock(opts ...ClientOpt) (*client, error) {\n\t// create new Docker runtime client\n\tc, err := New(opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// create Docker client from the mock client\n\t//\n\t// https://pkg.go.dev/github.com/go-vela/mock/docker#New\n\t_docker, err := mock.New()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// set the Docker client in the runtime client\n\tc.Docker = _docker\n\n\treturn c, nil\n}", "func New(username, password string) (up *UpCloud, err error) {\n\tvar u UpCloud\n\n\tu.req = requester.New(&http.Client{}, Hostname)\n\n\t// Set username\n\tu.username = username\n\t// Set password\n\tu.password = password\n\t// Assign pointer reference\n\tup = &u\n\treturn\n}", "func NewGCP() (*GCP, error) {\n\n\tctx := context.Background()\n\tclient, err := monitoring.NewMetricClient(ctx)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"NewMetricClient: %v\", err)\n\t}\n\n\treturn &GCP{\n\t\tctx: ctx,\n\t\tclient: client,\n\t}, nil\n}", "func FakeNewStorage() *fakeStorage {\n\treturn &fakeStorage{}\n}", "func newGoogleStorageStore(config *GoogleStorageStoreConfig) (*googleStorageStore, error) {\n\tif config.Bucket == \"\" {\n\t\treturn nil, errors.New(\"bucket required\")\n\t}\n\n\tvar opts []option.ClientOption\n\tvar noAuth bool\n\tcredsPath := getGoogleCredsPath()\n\tif credsPath == \"\" {\n\t\tnoAuth = true\n\t\topts = append(opts, option.WithoutAuthentication())\n\t} else {\n\t\topts = append(opts, option.WithCredentialsFile(credsPath), option.WithScopes(storage.ScopeFullControl))\n\t}\n\n\tvar httpTransport http.Transport\n\tvar err error\n\tctx := context.WithValue(context.Background(), oauth2.HTTPClient, &http.Client{Transport: &httpTransport})\n\tgcpTransport, err := gcphttp.NewTransport(ctx, &httpTransport, opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\thttpClient := 
&http.Client{Transport: gcpTransport}\n\tclientOpt := option.WithHTTPClient(httpClient)\n\tclient, err := storage.NewClient(context.Background(), clientOpt)\n\tif err != nil {\n\t\thttpTransport.CloseIdleConnections()\n\t\tif noAuth {\n\t\t\treturn nil, errors.Wrap(err, 0)\n\t\t}\n\t\thttpClient.Transport, err = gcphttp.NewTransport(ctx, &httpTransport, option.WithoutAuthentication())\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tclient, err = storage.NewClient(context.Background(), clientOpt)\n\t\tif err != nil {\n\t\t\thttpTransport.CloseIdleConnections()\n\t\t\treturn nil, errors.Wrap(err, 0)\n\t\t}\n\t}\n\n\treturn &googleStorageStore{\n\t\tclient: client,\n\t\tbucket: client.Bucket(config.Bucket),\n\t\thttpTransport: &httpTransport,\n\t}, nil\n}", "func newFactory() func(config *client.Config) (client.Client, *probe.Error) {\n\tclientCache := make(map[uint32]minio.CloudStorageAPI)\n\tmutex := &sync.Mutex{}\n\n\t// Return New function.\n\treturn func(config *client.Config) (client.Client, *probe.Error) {\n\t\tu := client.NewURL(config.HostURL)\n\t\ttransport := http.DefaultTransport\n\t\tif config.Debug == true {\n\t\t\tif config.Signature == \"S3v4\" {\n\t\t\t\ttransport = httptracer.GetNewTraceTransport(NewTraceV4(), http.DefaultTransport)\n\t\t\t}\n\t\t\tif config.Signature == \"S3v2\" {\n\t\t\t\ttransport = httptracer.GetNewTraceTransport(NewTraceV2(), http.DefaultTransport)\n\t\t\t}\n\t\t}\n\n\t\t// New S3 configuration.\n\t\ts3Conf := minio.Config{\n\t\t\tAccessKeyID: config.AccessKey,\n\t\t\tSecretAccessKey: config.SecretKey,\n\t\t\tTransport: transport,\n\t\t\tEndpoint: u.Scheme + u.SchemeSeparator + u.Host,\n\t\t\tSignature: func() minio.SignatureType {\n\t\t\t\tif config.Signature == \"S3v2\" {\n\t\t\t\t\treturn minio.SignatureV2\n\t\t\t\t}\n\t\t\t\treturn minio.SignatureV4\n\t\t\t}(),\n\t\t}\n\n\t\ts3Conf.SetUserAgent(config.AppName, config.AppVersion, config.AppComments...)\n\n\t\t// Generate a hash out of s3Conf.\n\t\tconfHash := 
fnv.New32a()\n\t\tconfHash.Write([]byte(s3Conf.Endpoint + s3Conf.AccessKeyID + s3Conf.SecretAccessKey))\n\t\tconfSum := confHash.Sum32()\n\n\t\t// Lookup previous cache by hash.\n\t\tmutex.Lock()\n\t\tdefer mutex.Unlock()\n\t\tvar api minio.CloudStorageAPI\n\t\tfound := false\n\t\tif api, found = clientCache[confSum]; !found {\n\t\t\t// Not found. Instantiate a new minio client.\n\t\t\tvar e error\n\t\t\tapi, e = minio.New(s3Conf)\n\t\t\tif e != nil {\n\t\t\t\treturn nil, probe.NewError(e)\n\t\t\t}\n\t\t\t// Cache the new minio client with hash of config as key.\n\t\t\tclientCache[confSum] = api\n\t\t}\n\n\t\ts3Clnt := &s3Client{\n\t\t\tmu: new(sync.Mutex),\n\t\t\tapi: api,\n\t\t\thostURL: u,\n\t\t\tvirtualStyle: isVirtualHostStyle(u.Host),\n\t\t}\n\t\treturn s3Clnt, nil\n\t}\n}", "func NewForTesting() buckets.Provider {\n\treturn newWithOptions(option.WithoutAuthentication())\n}", "func newWithOptions(options ...option.ClientOption) buckets.Provider {\n\tctx := context.Background()\n\tclient, err := storage.NewClient(ctx, options...)\n\tif err != nil {\n\t\tlogs.Panicf(\"Failed to get GCS client: %v\", err)\n\t}\n\n\treturn &bucketProvider{client}\n}", "func NewMock() *Mock {\n\tc := &Mock{\n\t\tFakeIncoming: func() chan []byte {\n\t\t\treturn make(chan []byte, 2)\n\t\t},\n\t\tFakeName: func() string {\n\t\t\treturn \"TestClient\"\n\t\t},\n\t\tFakeGame: func() string {\n\t\t\treturn \"test\"\n\t\t},\n\t\tFakeClose: func() {\n\t\t\t// Do nothing\n\t\t},\n\t\tFakeStopTimer: func() {\n\t\t\t// Do nothing\n\t\t},\n\t\tFakeRoom: func() interfaces.Room {\n\t\t\treturn nil\n\t\t},\n\t\tFakeSetRoom: func(interfaces.Room) {\n\n\t\t},\n\t}\n\n\tc.FakeWritePump = func() {\n\t\tfor range c.Incoming() {\n\t\t\t// Do nothing\n\t\t}\n\t}\n\n\tc.FakeSetName = func(string) interfaces.Client {\n\t\treturn c\n\t}\n\treturn c\n}", "func newCloudflareClient(ctx *cli.Context) *cloudflareClient {\n\ttoken := ctx.String(cloudflareTokenFlag.Name)\n\tif token == \"\" 
{\n\t\texit(fmt.Errorf(\"need cloudflare API token to proceed\"))\n\t}\n\tapi, err := cloudflare.NewWithAPIToken(token)\n\tif err != nil {\n\t\texit(fmt.Errorf(\"can't create Cloudflare client: %v\", err))\n\t}\n\treturn &cloudflareClient{\n\t\tAPI: api,\n\t\tzoneID: ctx.String(cloudflareZoneIDFlag.Name),\n\t}\n}", "func NewFakeDocker() *FakeDocker {\n dockerClient := &FakeDocker{}\n dockerClient.Containers = make(map[string]*docker.Container)\n return dockerClient\n}", "func NewMockGCS(ctrl *gomock.Controller) *MockGCS {\n\tmock := &MockGCS{ctrl: ctrl}\n\tmock.recorder = &MockGCSMockRecorder{mock}\n\treturn mock\n}", "func NewCloudTestConfig() (result *CloudTestConfig) {\n\tresult = &CloudTestConfig{}\n\tresult.Statistics.Enabled = true\n\tresult.Statistics.Interval = 60\n\treturn result\n}", "func NewMock(serverHost string) (*MockClient, error) {\n\treturn &MockClient{}, nil\n}", "func (r *ResourceManager) Cloud() *CloudServiceClient {\n\treturn &CloudServiceClient{getConn: r.getConn}\n}", "func NewMock(path string, nodes uint, replicas uint, vbuckets uint, specs ...BucketSpec) (m *Mock, err error) {\n\tvar lsn *net.TCPListener\n\tchAccept := make(chan bool)\n\tm = &Mock{}\n\n\tdefer func() {\n\t\tclose(chAccept)\n\t\tif lsn != nil {\n\t\t\tif err := lsn.Close(); err != nil {\n\t\t\t\tlog.Printf(\"Failed to close listener: %v\", err)\n\t\t\t}\n\t\t}\n\t\texc := recover()\n\n\t\tif exc == nil {\n\t\t\t// No errors, everything is OK\n\t\t\treturn\n\t\t}\n\n\t\t// Close mock on error, destroying resources\n\t\tm.Close()\n\t\tif mExc, ok := exc.(mockError); !ok {\n\t\t\tpanic(mExc)\n\t\t} else {\n\t\t\tm = nil\n\t\t\terr = mExc\n\t\t}\n\t}()\n\n\tif lsn, err = net.ListenTCP(\"tcp\", &net.TCPAddr{Port: 0}); err != nil {\n\t\tthrowMockError(\"Couldn't set up listening socket\", err)\n\t}\n\t_, ctlPort, err := net.SplitHostPort(lsn.Addr().String())\n\tif err != nil {\n\t\tlog.Fatalf(\"Failed to split host and port: %v\", err)\n\t}\n\tlog.Printf(\"Listening for control 
connection at %s\\n\", ctlPort)\n\n\tgo func() {\n\t\tvar err error\n\n\t\tdefer func() {\n\t\t\tchAccept <- false\n\t\t}()\n\t\tif m.conn, err = lsn.Accept(); err != nil {\n\t\t\tthrowMockError(\"Couldn't accept incoming control connection from mock\", err)\n\t\t\treturn\n\t\t}\n\t}()\n\n\tif len(specs) == 0 {\n\t\tspecs = []BucketSpec{{Name: \"default\", Type: BCouchbase}}\n\t}\n\n\toptions := []string{\n\t\t\"-jar\", path, \"--harakiri-monitor\", \"localhost:\" + ctlPort, \"--port\", \"0\",\n\t\t\"--replicas\", strconv.Itoa(int(replicas)),\n\t\t\"--vbuckets\", strconv.Itoa(int(vbuckets)),\n\t\t\"--nodes\", strconv.Itoa(int(nodes)),\n\t\t\"--buckets\", m.buildSpecStrings(specs),\n\t}\n\n\tlog.Printf(\"Invoking java %s\", strings.Join(options, \" \"))\n\tm.cmd = exec.Command(\"java\", options...)\n\n\tm.cmd.Stdout = os.Stdout\n\tm.cmd.Stderr = os.Stderr\n\n\tif err = m.cmd.Start(); err != nil {\n\t\tm.cmd = nil\n\t\tthrowMockError(\"Couldn't start command\", err)\n\t}\n\n\tselect {\n\tcase <-chAccept:\n\t\tbreak\n\n\tcase <-time.After(mockInitTimeout):\n\t\tthrowMockError(\"Timed out waiting for initialization\", errors.New(\"timeout\"))\n\t}\n\n\tm.rw = bufio.NewReadWriter(bufio.NewReader(m.conn), bufio.NewWriter(m.conn))\n\n\t// Read the port buffer, which is delimited by a NUL byte\n\tif portBytes, err := m.rw.ReadBytes(0); err != nil {\n\t\tthrowMockError(\"Couldn't get port information\", err)\n\t} else {\n\t\tportBytes = portBytes[:len(portBytes)-1]\n\t\tif entryPort, err := strconv.Atoi(string(portBytes)); err != nil {\n\t\t\tthrowMockError(\"Incorrectly formatted port from mock\", err)\n\t\t} else {\n\t\t\tm.EntryPort = uint16(entryPort)\n\t\t}\n\t}\n\n\tlog.Printf(\"Mock HTTP port at %d\\n\", m.EntryPort)\n\treturn\n}", "func newKeyServerWithMocking(user upspin.UserName, ref string, data []byte) (*server, *storagetest.ExpectDownloadCapturePut) {\n\tmockGCP := &storagetest.ExpectDownloadCapturePut{\n\t\tRef: []string{ref},\n\t\tData: 
[][]byte{data},\n\t\tPutContents: make([][]byte, 0, 1),\n\t\tPutRef: make([]string, 0, 1),\n\t}\n\ts := &server{\n\t\tstorage: mockGCP,\n\t\tuser: user,\n\t\tlookupTXT: mockLookupTXT,\n\t\tlogger: &noopLogger{},\n\t\tcache: cache.NewLRU(10),\n\t\tnegCache: cache.NewLRU(10),\n\t}\n\treturn s, mockGCP\n}", "func init() {\n\tcloudprovider.RegisterCloudProvider(providerName, newCloudConnection)\n}", "func (cloud *Cloud) Initialize(clientBuilder cloudprovider.ControllerClientBuilder, stop <-chan struct{}) {\n\tcloud.kubeClient = clientBuilder.ClientOrDie(\"tencentcloud-cloud-provider\")\n\tcredential := common.NewCredential(\n\t\t//os.Getenv(\"TENCENTCLOUD_SECRET_ID\"),\n\t\t//os.Getenv(\"TENCENTCLOUD_SECRET_KEY\"),\n\t\tcloud.txConfig.SecretId,\n\t\tcloud.txConfig.SecretKey,\n\t)\n\t// 非必要步骤\n\t// 实例化一个客户端配置对象,可以指定超时时间等配置\n\tcpf := profile.NewClientProfile()\n\t// SDK有默认的超时时间,非必要请不要进行调整。\n\t// 如有需要请在代码中查阅以获取最新的默认值。\n\tcpf.HttpProfile.ReqTimeout = 10\n\tcvmClient, err := cvm.NewClient(credential, cloud.txConfig.Region, cpf)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tcloud.cvm = cvmClient\n\tcvmV3Client, err := cvm.NewClient(credential, cloud.txConfig.Region, cpf)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tcloud.cvmV3 = cvmV3Client\n\ttkeClient, err := tke.NewClient(credential, cloud.txConfig.Region, cpf)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tcloud.tke = tkeClient\n\tclbClient, err := clb.NewClient(credential, cloud.txConfig.Region, cpf)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tcloud.clb = clbClient\n\treturn\n}", "func newMemClient(t *testing.T) drive.Client {\n\tc, err := drive.NewClient(drive.Config{Provider: \"memory\"})\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\treturn c\n}", "func NewForge(t interface {\n\tmock.TestingT\n\tCleanup(func())\n}) *Forge {\n\tmock := &Forge{}\n\tmock.Mock.Test(t)\n\n\tt.Cleanup(func() { mock.AssertExpectations(t) })\n\n\treturn mock\n}", "func (m *MockCandidatePropertyGetter) Cloudprovider() *models.SCloudprovider 
{\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"Cloudprovider\")\n\tret0, _ := ret[0].(*models.SCloudprovider)\n\treturn ret0\n}", "func Mock(codec codec.Codec) (*Client, io.Closer, error) {\n\tdir, err := ioutil.TempDir(\"\", \"etcd\")\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tcfg := embed.NewConfig()\n\tcfg.Logger = \"zap\"\n\tcfg.Dir = dir\n\tlpurl, _ := url.Parse(\"http://localhost:0\")\n\tlcurl, _ := url.Parse(\"http://localhost:0\")\n\tcfg.LPUrls = []url.URL{*lpurl}\n\tcfg.LCUrls = []url.URL{*lcurl}\n\n\tetcd, err := embed.StartEtcd(cfg)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tselect {\n\tcase <-etcd.Server.ReadyNotify():\n\tcase <-time.After(etcdStartTimeout):\n\t\tetcd.Server.Stop() // trigger a shutdown\n\t\treturn nil, nil, fmt.Errorf(\"server took too long to start\")\n\t}\n\n\tcloser := CloserFunc(func() error {\n\t\tetcd.Server.Stop()\n\t\treturn nil\n\t})\n\n\tvar config Config\n\tflagext.DefaultValues(&config)\n\n\tclient := &Client{\n\t\tcfg: config,\n\t\tcodec: codec,\n\t\tcli: v3client.New(etcd.Server),\n\t}\n\n\treturn client, closer, nil\n}", "func newClient(auth azure.Authorizer) *azureClient {\n\treturn &azureClient{\n\t\tscalesetvms: newVirtualMachineScaleSetVMsClient(auth.SubscriptionID(), auth.BaseURI(), auth.Authorizer()),\n\t}\n}", "func Mock(objects ...runtime.Object) KubernetesClientLambda {\n\tfakePool, fakeClient := NewFakes(objects...)\n\treturn &kubernetesClientLambdaImpl{\n\t\tclientPool: fakePool,\n\t\tinformerFactory: informers.NewSharedInformerFactory(fakeClient, 0),\n\t}\n}", "func NewMockContiv() *MockContiv {\n\tci := containeridx.NewConfigIndex(logrus.DefaultLogger(), \"title\", nil)\n\treturn &MockContiv{\n\t\tpodIf: make(map[podmodel.ID]string),\n\t\tpodAppNs: make(map[podmodel.ID]uint32),\n\t\tcontainerIndex: ci,\n\t\tserviceLocalEndpointWeight: 1,\n\t}\n}", "func New() *Mock {\n\treturn &Mock{\n\t\tm: mockMap{},\n\t\toldTransport: http.DefaultTransport,\n\t}\n}", "func newClient(conf config) 
(*storage.Client, error) {\n\tdb, err := storage.NewDBClient(conf.MongoURI, conf.DBName, conf.MongoMICol, conf.MongoAgCol)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"error creating DB client: %q\", err)\n\t}\n\tdb.Collection(conf.MongoMICol)\n\tbc := storage.NewCloudClient(conf.SwiftUsername, conf.SwiftAPIKey, conf.SwiftAuthURL, conf.SwiftDomain, conf.SwiftContainer)\n\tclient, err := storage.NewClient(db, bc)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"error creating storage.client: %q\", err)\n\t}\n\treturn client, nil\n}", "func New() (*Client, error) {\n\tstorageClient, err := newClient()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tclient := &Client{\n\t\tClient: *storageClient,\n\t}\n\tclient.SetRetryer()\n\n\treturn client, nil\n}", "func TestNewCache(t *testing.T) {\n\n\t// Test Data\n\tk8sNamespace := \"TestK8SNamespace\"\n\n\t// Create A Context With Test Logger & K8S Client\n\tctx := logging.WithLogger(context.TODO(), logtesting.TestLogger(t))\n\tctx = context.WithValue(ctx, injectionclient.Key{}, fake.NewSimpleClientset())\n\n\t// Perform The Test\n\tcache := NewCache(ctx, k8sNamespace)\n\n\t// Verify The Results\n\tassert.NotNil(t, cache)\n}", "func FakeNew() (*Client, *FakeClientset) {\n\treturn FakeNewWithIngressSupports(false, true)\n}", "func New() Client {\n\treturn &client{\n\t\tControllerParams: nil,\n\t\tCloud: nil,\n\t\tServiceEngineGroup: nil,\n\t\tNetwork: nil,\n\t}\n}", "func (g *FakeClientFactory) New(context.Context, client.Reader, string, string) (capb.ConfigAgentClient, controllers.ConnCloseFunc, error) {\n\tif g.Caclient == nil {\n\t\tg.Reset()\n\t}\n\treturn g.Caclient, emptyConnCloseFunc, nil\n}", "func NewClient() (cloudops.Ops, error) {\n\tvar i = new(instance)\n\tvar err error\n\tif metadata.OnGCE() {\n\t\terr = gceInfo(i)\n\t} else if ok := IsDevMode(); ok {\n\t\terr = gceInfoFromEnv(i)\n\t} else {\n\t\treturn nil, fmt.Errorf(\"instance is not running on GCE\")\n\t}\n\n\tif err != nil {\n\t\treturn nil, 
fmt.Errorf(\"error fetching instance info. Err: %v\", err)\n\t}\n\n\tc, err := google.DefaultClient(context.Background(), compute.ComputeScope)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to authenticate with google api. Err: %v\", err)\n\t}\n\n\tservice, err := compute.New(c)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"unable to create Compute service: %v\", err)\n\t}\n\n\treturn &gceOps{\n\t\tCompute: unsupported.NewUnsupportedCompute(),\n\t\tinst: i,\n\t\tservice: service,\n\t}, nil\n}", "func NewDeviceClient(t mockConstructorTestingTNewDeviceClient) *DeviceClient {\n\tmock := &DeviceClient{}\n\tmock.Mock.Test(t)\n\n\tt.Cleanup(func() { mock.AssertExpectations(t) })\n\n\treturn mock\n}", "func NewFake(force bool) (m starlark.HasAttrs, closeFn func(), err error) {\n\t// Create a fake API store with some endpoints pre-populated\n\tcm := corev1.ConfigMap{\n\t\tTypeMeta: metav1.TypeMeta{\n\t\t\tAPIVersion: \"v1\",\n\t\t\tKind: \"ConfigMap\",\n\t\t},\n\t\tData: map[string]string{\n\t\t\t\"client-ca-file\": \"contents\",\n\t\t},\n\t}\n\tcmData, err := apiruntime.Encode(unstructured.UnstructuredJSONScheme, &cm)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\tfm := map[string][]byte{\n\t\t\"/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\": cmData,\n\t}\n\n\ts := httptest.NewTLSServer(&fakeKube{m: fm})\n\n\tu, err := url.Parse(s.URL)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\th := \"https://\" + u.Host\n\ttlsConfig := rest.TLSClientConfig{\n\t\tInsecure: true,\n\t}\n\trConf := &rest.Config{Host: h, TLSClientConfig: tlsConfig}\n\n\tt, err := rest.TransportFor(rConf)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tk := New(\n\t\th,\n\t\tfakeDiscovery(),\n\t\tdynamic.NewForConfigOrDie(rConf),\n\t\t&http.Client{Transport: t},\n\t\tfalse, /* dryRun */\n\t\tforce,\n\t\tfalse, /* diff */\n\t\tnil, /* diffFilters */\n\t)\n\n\treturn newFakeModule(k.(*kubePackage)), s.Close, nil\n}", "func 
newVirtualMachineClient(subID string, authorizer auth.Authorizer) (*client, error) {\n\tc, err := wssdcloudclient.GetVirtualMachineClient(&subID, authorizer)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &client{c}, nil\n}", "func CreateCloudCredential(provider, name string, uid, orgID string) {\n\tStep(fmt.Sprintf(\"Create cloud credential [%s] in org [%s]\", name, orgID), func() {\n\t\tlogrus.Printf(\"Create credential name %s for org %s provider %s\", name, orgID, provider)\n\t\tbackupDriver := Inst().Backup\n\t\tswitch provider {\n\t\tcase drivers.ProviderAws:\n\t\t\tlogrus.Infof(\"Create creds for aws\")\n\t\t\tid := os.Getenv(\"AWS_ACCESS_KEY_ID\")\n\t\t\texpect(id).NotTo(equal(\"\"),\n\t\t\t\t\"AWS_ACCESS_KEY_ID Environment variable should not be empty\")\n\n\t\t\tsecret := os.Getenv(\"AWS_SECRET_ACCESS_KEY\")\n\t\t\texpect(secret).NotTo(equal(\"\"),\n\t\t\t\t\"AWS_SECRET_ACCESS_KEY Environment variable should not be empty\")\n\n\t\t\tcredCreateRequest := &api.CloudCredentialCreateRequest{\n\t\t\t\tCreateMetadata: &api.CreateMetadata{\n\t\t\t\t\tName: name,\n\t\t\t\t\tUid: uid,\n\t\t\t\t\tOrgId: orgID,\n\t\t\t\t},\n\t\t\t\tCloudCredential: &api.CloudCredentialInfo{\n\t\t\t\t\tType: api.CloudCredentialInfo_AWS,\n\t\t\t\t\tConfig: &api.CloudCredentialInfo_AwsConfig{\n\t\t\t\t\t\tAwsConfig: &api.AWSConfig{\n\t\t\t\t\t\t\tAccessKey: id,\n\t\t\t\t\t\t\tSecretKey: secret,\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t}\n\t\t\t//ctx, err := backup.GetPxCentralAdminCtx()\n\t\t\tctx, err := backup.GetAdminCtxFromSecret()\n\t\t\texpect(err).NotTo(haveOccurred(),\n\t\t\t\tfmt.Sprintf(\"Failed to fetch px-central-admin ctx: [%v]\",\n\t\t\t\t\terr))\n\t\t\t_, err = backupDriver.CreateCloudCredential(ctx, credCreateRequest)\n\t\t\tif err != nil && strings.Contains(err.Error(), \"already exists\") {\n\t\t\t\treturn\n\t\t\t}\n\t\t\texpect(err).NotTo(haveOccurred(),\n\t\t\t\tfmt.Sprintf(\"Failed to create cloud credential [%s] in org [%s]\", name, orgID))\n\t\t// 
TODO: validate CreateCloudCredentialResponse also\n\t\tcase drivers.ProviderAzure:\n\t\t\tlogrus.Infof(\"Create creds for azure\")\n\t\t\ttenantID, clientID, clientSecret, subscriptionID, accountName, accountKey := GetAzureCredsFromEnv()\n\t\t\tcredCreateRequest := &api.CloudCredentialCreateRequest{\n\t\t\t\tCreateMetadata: &api.CreateMetadata{\n\t\t\t\t\tName: name,\n\t\t\t\t\tUid: uid,\n\t\t\t\t\tOrgId: orgID,\n\t\t\t\t},\n\t\t\t\tCloudCredential: &api.CloudCredentialInfo{\n\t\t\t\t\tType: api.CloudCredentialInfo_Azure,\n\t\t\t\t\tConfig: &api.CloudCredentialInfo_AzureConfig{\n\t\t\t\t\t\tAzureConfig: &api.AzureConfig{\n\t\t\t\t\t\t\tTenantId: tenantID,\n\t\t\t\t\t\t\tClientId: clientID,\n\t\t\t\t\t\t\tClientSecret: clientSecret,\n\t\t\t\t\t\t\tAccountName: accountName,\n\t\t\t\t\t\t\tAccountKey: accountKey,\n\t\t\t\t\t\t\tSubscriptionId: subscriptionID,\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t}\n\t\t\t//ctx, err := backup.GetPxCentralAdminCtx()\n\t\t\tctx, err := backup.GetAdminCtxFromSecret()\n\t\t\texpect(err).NotTo(haveOccurred(),\n\t\t\t\tfmt.Sprintf(\"Failed to fetch px-central-admin ctx: [%v]\",\n\t\t\t\t\terr))\n\t\t\t_, err = backupDriver.CreateCloudCredential(ctx, credCreateRequest)\n\t\t\tif err != nil && strings.Contains(err.Error(), \"already exists\") {\n\t\t\t\treturn\n\t\t\t}\n\t\t\texpect(err).NotTo(haveOccurred(),\n\t\t\t\tfmt.Sprintf(\"Failed to create cloud credential [%s] in org [%s]\", name, orgID))\n\t\t\t// TODO: validate CreateCloudCredentialResponse also\n\t\t}\n\t})\n}", "func New(mockenv *common.MockEnvironment, storage storage.Storage) *MockService {\n\ts := &MockService{\n\t\tkube: mockenv.GetKubeClient(),\n\t\tstorage: storage,\n\t\tprojects: mockenv.GetProjects(),\n\t}\n\ts.v1 = &SecretsV1{MockService: s}\n\treturn s\n}", "func Create(rw *RequestWrapper) (*clm.GKECluster, error) {\n\tgkeOps, err := rw.acquire()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif rw.Request.SaveMetaData {\n\t\t// At this point we should 
have a cluster ready to run test. Need to save\n\t\t// metadata so that following flow can understand the context of cluster, as\n\t\t// well as for Prow usage later\n\t\twriteMetaData(gkeOps.Cluster, gkeOps.Project)\n\t}\n\n\t// set up kube config points to cluster\n\tclusterAuthCmd := fmt.Sprintf(\n\t\t\"gcloud beta container clusters get-credentials %s --region %s --project %s\",\n\t\tgkeOps.Cluster.Name, gkeOps.Cluster.Location, gkeOps.Project)\n\tif out, err := cmd.RunCommand(clusterAuthCmd); err != nil {\n\t\treturn nil, fmt.Errorf(\"failed connecting to cluster: %q, %w\", out, err)\n\t}\n\tif out, err := cmd.RunCommand(\"gcloud config set project \" + gkeOps.Project); err != nil {\n\t\treturn nil, fmt.Errorf(\"failed setting project: %q, %w\", out, err)\n\t}\n\n\treturn gkeOps, nil\n}", "func NewCloudProvider(dc *kubermaticv1.Datacenter, secretKeyGetter provider.SecretKeySelectorValueFunc) (*AmazonEC2, error) {\n\tif dc.Spec.AWS == nil {\n\t\treturn nil, errors.New(\"datacenter is not an AWS datacenter\")\n\t}\n\treturn &AmazonEC2{\n\t\tdc: dc.Spec.AWS,\n\t\tsecretKeySelector: secretKeyGetter,\n\t}, nil\n}", "func newKVClient(ctx context.Context, storeType, address string, timeout time.Duration) (kvstore.Client, error) {\n\tlogger.Infow(ctx, \"kv-store-type\", log.Fields{\"store\": storeType})\n\tswitch storeType {\n\tcase \"etcd\":\n\t\treturn kvstore.NewEtcdClient(ctx, address, timeout, log.FatalLevel)\n\t}\n\treturn nil, errors.New(\"unsupported-kv-store\")\n}", "func NewhttpClientMock(valid bool) *HTTPClientMock {\n\treturn &HTTPClientMock{\n\t\tapiKeyPublic: \"apiKeyPublic\",\n\t\tapiKeyPrivate: \"apiKeyPrivate\",\n\t\tclient: http.DefaultClient,\n\t\tvalidCreds: valid,\n\t\tfx: fixtures.New(),\n\t\tCallFunc: func() (int, int, error) {\n\t\t\tif valid == true {\n\t\t\t\treturn 1, 1, nil\n\t\t\t}\n\t\t\treturn 0, 0, errors.New(\"Unexpected error: Unexpected server response code: 401: EOF\")\n\t\t},\n\t\tSendMailV31Func: func(req *http.Request) 
(*http.Response, error) {\n\t\t\treturn nil, errors.New(\"mock send mail function not implemented yet\")\n\t\t},\n\t}\n}", "func NewMock() Client {\n\treturn &mockClient{}\n}", "func NewCloudTunnel(address string) CloudTunnel {\n\ttunnel := &cloudTunnel{\n\t\taddress: address,\n\t\tclusterNameCheck: defaultClusterNameChecker,\n\t\tnotifyClientClosed: func(*config.ClusterRegistry) { return },\n\t\tafterConnectHook: defaultAfterConnectHook,\n\t}\n\n\ttunnel.receiveMessageHandler = func(client string, msg []byte) error {\n\t\treturn nil\n\t}\n\treturn tunnel\n}", "func newCloudFormationTemplates(c *ServiceoperatorV1alpha1Client, namespace string) *cloudFormationTemplates {\n\treturn &cloudFormationTemplates{\n\t\tclient: c.RESTClient(),\n\t\tns: namespace,\n\t}\n}", "func NewK8SCloudInCluster(opts Options) (CloudProvider, error) {\n\tconfig, err := rest.InClusterConfig()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tnamespace, err := ioutil.ReadFile(\"/var/run/secrets/kubernetes.io/serviceaccount/\" + apiv1.ServiceAccountNamespaceKey)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tcloud := &K8SCloud{\n\t\tname: opts.Name,\n\t\thost: config.Host,\n\t\tbearerToken: config.BearerToken,\n\t\tnamespace: string(namespace),\n\t\tinsecure: opts.Insecure,\n\t}\n\tclientset, err := kubernetes.NewForConfig(config)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tcloud.client = clientset\n\treturn cloud, nil\n}", "func newMockClient(doer func(*http.Request) (*http.Response, error)) *http.Client {\n\treturn &http.Client{\n\t\tTransport: transportFunc(doer),\n\t}\n}", "func newGoogleStorageClient(config stow.Config) (*storage.Service, error) {\n\tjson, _ := config.Config(ConfigJSON)\n\tvar httpClient *http.Client\n\tscopes := []string{storage.DevstorageReadWriteScope}\n\tif s, ok := config.Config(ConfigScopes); ok && s != \"\" {\n\t\tscopes = strings.Split(s, \",\")\n\t}\n\tif json != \"\" {\n\t\tjwtConf, err := google.JWTConfigFromJSON([]byte(json), scopes...)\n\t\tif err != 
nil {\n\t\t\treturn nil, err\n\t\t}\n\t\thttpClient = jwtConf.Client(context.Background())\n\n\t} else {\n\t\tcreds, err := google.FindDefaultCredentials(context.Background(), strings.Join(scopes, \",\"))\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\thttpClient = oauth2.NewClient(context.Background(), creds.TokenSource)\n\t}\n\tservice, err := storage.New(httpClient)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn service, nil\n}", "func newHTTPStorageClient(ctx context.Context, opts ...storageOption) (storageClient, error) {\n\ts := initSettings(opts...)\n\to := s.clientOption\n\n\tvar creds *google.Credentials\n\t// In general, it is recommended to use raw.NewService instead of htransport.NewClient\n\t// since raw.NewService configures the correct default endpoints when initializing the\n\t// internal http client. However, in our case, \"NewRangeReader\" in reader.go needs to\n\t// access the http client directly to make requests, so we create the client manually\n\t// here so it can be re-used by both reader.go and raw.NewService. This means we need to\n\t// manually configure the default endpoint options on the http client. Furthermore, we\n\t// need to account for STORAGE_EMULATOR_HOST override when setting the default endpoints.\n\tif host := os.Getenv(\"STORAGE_EMULATOR_HOST\"); host == \"\" {\n\t\t// Prepend default options to avoid overriding options passed by the user.\n\t\to = append([]option.ClientOption{option.WithScopes(ScopeFullControl, \"https://www.googleapis.com/auth/cloud-platform\"), option.WithUserAgent(userAgent)}, o...)\n\n\t\to = append(o, internaloption.WithDefaultEndpoint(\"https://storage.googleapis.com/storage/v1/\"))\n\t\to = append(o, internaloption.WithDefaultMTLSEndpoint(\"https://storage.mtls.googleapis.com/storage/v1/\"))\n\n\t\t// Don't error out here. 
The user may have passed in their own HTTP\n\t\t// client which does not auth with ADC or other common conventions.\n\t\tc, err := transport.Creds(ctx, o...)\n\t\tif err == nil {\n\t\t\tcreds = c\n\t\t\to = append(o, internaloption.WithCredentials(creds))\n\t\t}\n\t} else {\n\t\tvar hostURL *url.URL\n\n\t\tif strings.Contains(host, \"://\") {\n\t\t\th, err := url.Parse(host)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\thostURL = h\n\t\t} else {\n\t\t\t// Add scheme for user if not supplied in STORAGE_EMULATOR_HOST\n\t\t\t// URL is only parsed correctly if it has a scheme, so we build it ourselves\n\t\t\thostURL = &url.URL{Scheme: \"http\", Host: host}\n\t\t}\n\n\t\thostURL.Path = \"storage/v1/\"\n\t\tendpoint := hostURL.String()\n\n\t\t// Append the emulator host as default endpoint for the user\n\t\to = append([]option.ClientOption{option.WithoutAuthentication()}, o...)\n\n\t\to = append(o, internaloption.WithDefaultEndpoint(endpoint))\n\t\to = append(o, internaloption.WithDefaultMTLSEndpoint(endpoint))\n\t}\n\ts.clientOption = o\n\n\t// htransport selects the correct endpoint among WithEndpoint (user override), WithDefaultEndpoint, and WithDefaultMTLSEndpoint.\n\thc, ep, err := htransport.NewClient(ctx, s.clientOption...)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"dialing: %v\", err)\n\t}\n\t// RawService should be created with the chosen endpoint to take account of user override.\n\trawService, err := raw.NewService(ctx, option.WithEndpoint(ep), option.WithHTTPClient(hc))\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"storage client: %v\", err)\n\t}\n\t// Update readHost and scheme with the chosen endpoint.\n\tu, err := url.Parse(ep)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"supplied endpoint %q is not valid: %v\", ep, err)\n\t}\n\n\treturn &httpStorageClient{\n\t\tcreds: creds,\n\t\thc: hc,\n\t\treadHost: u.Host,\n\t\traw: rawService,\n\t\tscheme: u.Scheme,\n\t\tsettings: s,\n\t}, nil\n}", "func New(conf *GCEConfig) 
(*GceImages, error) {\n\tvar err error\n\tif conf.ProjectID == \"\" {\n\t\treturn nil, errors.New(\"ProjectID is not set. Please check your configuration.\")\n\t}\n\n\t// increase the timeout. Also we need to pass the client with the context itself\n\ttimeout := time.Second * 30\n\tctx := context.WithValue(oauth2.NoContext, oauth2.HTTPClient, &http.Client{\n\t\tTransport: &http.Transport{TLSHandshakeTimeout: timeout},\n\t\tTimeout: timeout,\n\t})\n\n\tvar client *http.Client\n\n\t// allowed scopes\n\tscopes := []string{compute.ComputeScope}\n\n\t// Recommended way is explicit passing of credentials json which can be\n\t// downloaded from console.developers.google under APIs & Auth/Credentials\n\t// section\n\tif conf.AccountFile != \"\" {\n\t\t// expand shell meta character\n\t\tpath, err := homedir.Expand(conf.AccountFile)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tjsonContent, err := ioutil.ReadFile(path)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tjtwConfig, err := google.JWTConfigFromJSON(jsonContent, scopes...)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tclient = jtwConfig.Client(ctx)\n\t} else {\n\t\t// Look for application default credentials, for more details, see:\n\t\t// https://developers.google.com/accounts/docs/application-default-credentials\n\t\tclient, err = google.DefaultClient(ctx, scopes...)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\tsvc, err := compute.New(client)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &GceImages{\n\t\tsvc: compute.NewImagesService(svc),\n\t\tconfig: conf,\n\t}, nil\n}", "func newFakeClient() client.Client {\n\treturn fakeclient.NewFakeClient()\n}", "func newFakeReconciler(initObjects ...runtime.Object) *ReconcileMachineRemediation {\n\tfakeClient := fake.NewFakeClient(initObjects...)\n\tremediator := &FakeRemedatior{}\n\treturn &ReconcileMachineRemediation{\n\t\tclient: fakeClient,\n\t\tremediator: remediator,\n\t\tnamespace: 
consts.NamespaceOpenshiftMachineAPI,\n\t}\n}", "func NewMock() *Mock {\n\treturn &Mock{VolumesMock: &VolumesServiceMock{}}\n}", "func NewAuth(users map[string]string) policies.AuthServiceClient {\n\treturn &authServiceMock{users}\n}", "func getCloudCostClient() dbclient.CloudCostClient {\n\tvar cloudCost dbclient.CloudCostClient\n\n\tif strings.EqualFold(*cloud, \"azure\") {\n\t\tlog.Println(\"Initializing Azure client...\")\n\t\tazureClient := initAzureClient()\n\t\tcloudCost = &azureCloudCost{UsageExplorer: &azureClient}\n\t} else if strings.EqualFold(*cloud, \"aws\") {\n\t\tlog.Println(\"Initializing AWS client...\")\n\t\tawsClient := initAwsClient()\n\t\tcloudCost = &awsCloudCost{Client: &awsClient}\n\t} else {\n\t\tlog.Fatalf(\"Cloud provider \\\"%v\\\" is not supported\", *cloud)\n\t}\n\treturn cloudCost\n}", "func NewClient(t string) *gophercloud.ServiceClient {\n\tvar err error\n\tao, region, err := authMethod()\n\tif err != nil {\n\t\tfmt.Printf(\"Error retrieving authentication credentials: %s\\n\", err)\n\t}\n\tif ao.IdentityEndpoint == \"\" {\n\t\tao.IdentityEndpoint = rackspace.RackspaceUSIdentity\n\t}\n\tpc, err := rackspace.AuthenticatedClient(ao)\n\tif err != nil {\n\t\tfmt.Printf(\"Error creating ProviderClient: %s\\n\", err)\n\t\tos.Exit(1)\n\t}\n\tvar sc *gophercloud.ServiceClient\n\tswitch t {\n\tcase \"compute\":\n\t\tsc, err = rackspace.NewComputeV2(pc, gophercloud.EndpointOpts{\n\t\t\tRegion: region,\n\t\t})\n\t\tbreak\n\tcase \"blockstorage\":\n\t\tsc, err = rackspace.NewBlockStorageV1(pc, gophercloud.EndpointOpts{\n\t\t\tRegion: region,\n\t\t})\n\t\tbreak\n\tcase \"networking\":\n\t\tsc, err = rackspace.NewNetworkV2(pc, gophercloud.EndpointOpts{\n\t\t\tRegion: region,\n\t\t})\n\t\tbreak\n\t}\n\tif err != nil {\n\t\tfmt.Printf(\"Error creating ServiceClient (%s): %s\\n\", err, t)\n\t\tos.Exit(1)\n\t}\n\t// sc.UserAgent.Prepend(\"rack/\" + util.Version)\n\treturn sc\n}", "func New(options Options) (TKGClient, error) { //nolint:gocritic\n\tvar 
err error\n\n\t// configure log options for tkg library\n\tconfigureLogging(options.LogOptions)\n\n\tif options.ConfigDir == \"\" {\n\t\treturn nil, errors.New(\"config directory cannot be empty. Please provide config directory when creating tkgctl client\")\n\t}\n\n\tif options.ProviderGetter == nil {\n\t\toptions.ProviderGetter = getDefaultProviderGetter()\n\t}\n\n\tif options.CustomizerOptions.RegionManagerFactory == nil {\n\t\toptions.CustomizerOptions = types.CustomizerOptions{\n\t\t\tRegionManagerFactory: region.NewFactory(),\n\t\t}\n\t}\n\tappConfig := types.AppConfig{\n\t\tTKGConfigDir: options.ConfigDir,\n\t\tProviderGetter: options.ProviderGetter,\n\t\tCustomizerOptions: options.CustomizerOptions,\n\t\tTKGSettingsFile: options.SettingsFile,\n\t}\n\n\terr = ensureTKGConfigFile(options.ConfigDir, options.ProviderGetter)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tallClients, err := clientcreator.CreateAllClients(appConfig, nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar clusterKubeConfig *types.ClusterKubeConfig\n\tif options.KubeConfig != \"\" {\n\t\tclusterKubeConfig = &types.ClusterKubeConfig{\n\t\t\tFile: options.KubeConfig,\n\t\t\tContext: options.KubeContext,\n\t\t}\n\t}\n\n\ttkgClient, err := client.New(client.Options{\n\t\tClusterCtlClient: allClients.ClusterCtlClient,\n\t\tReaderWriterConfigClient: allClients.ConfigClient,\n\t\tRegionManager: allClients.RegionManager,\n\t\tTKGConfigDir: options.ConfigDir,\n\t\tTimeout: constants.DefaultOperationTimeout,\n\t\tFeaturesClient: allClients.FeaturesClient,\n\t\tTKGConfigProvidersClient: allClients.TKGConfigProvidersClient,\n\t\tTKGBomClient: allClients.TKGBomClient,\n\t\tTKGConfigUpdater: allClients.TKGConfigUpdaterClient,\n\t\tTKGPathsClient: allClients.TKGConfigPathsClient,\n\t\tClusterKubeConfig: clusterKubeConfig,\n\t\tClusterClientFactory: clusterclient.NewClusterClientFactory(),\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// ensure BoM and Providers prerequisite files 
are extracted if missing\n\terr = ensureBoMandProvidersPrerequisite(options.ConfigDir, allClients.TKGConfigUpdaterClient, options.ForceUpdateTKGCompatibilityImage)\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"unable to ensure prerequisites\")\n\t}\n\t// Set default BOM name to the config variables to use during template generation\n\tdefaultBoMFileName, err := allClients.TKGBomClient.GetDefaultBoMFileName()\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"unable to get default BOM file name\")\n\t}\n\tallClients.ConfigClient.TKGConfigReaderWriter().Set(constants.ConfigVariableDefaultBomFile, defaultBoMFileName)\n\n\treturn &tkgctl{\n\t\tconfigDir: options.ConfigDir,\n\t\tkubeconfig: options.KubeConfig,\n\t\tkubecontext: options.KubeContext,\n\t\tappConfig: appConfig,\n\t\ttkgBomClient: allClients.TKGBomClient,\n\t\ttkgConfigUpdaterClient: allClients.TKGConfigUpdaterClient,\n\t\ttkgConfigProvidersClient: allClients.TKGConfigProvidersClient,\n\t\ttkgConfigPathsClient: allClients.TKGConfigPathsClient,\n\t\ttkgClient: tkgClient,\n\t\tproviderGetter: options.ProviderGetter,\n\t\ttkgConfigReaderWriter: allClients.ConfigClient.TKGConfigReaderWriter(),\n\t}, nil\n}", "func NewGitClient(t mockConstructorTestingTNewGitClient) *GitClient {\n\tmock := &GitClient{}\n\tmock.Mock.Test(t)\n\n\tt.Cleanup(func() { mock.AssertExpectations(t) })\n\n\treturn mock\n}", "func (c *Client) CloudCreateInstance(projectID, name, pubkeyID, flavorID, imageID, region string) (instance *types.CloudInstance, err error) {\n\tinstanceReq := types.CloudInstance{\n\t\tName: name,\n\t\tSSHKeyID: pubkeyID,\n\t\tFlavorID: flavorID,\n\t\tImageID: imageID,\n\t\tRegion: region,\n\t}\n\terr = c.Post(queryEscape(\"/cloud/project/%s/instance\", projectID), instanceReq, &instance)\n\treturn instance, err\n}", "func NewCloudStore() CloudStore {\n\treturn NewStow()\n}", "func (g *FakeDatabaseClientFactory) New(context.Context, client.Reader, string, string) (dbdpb.DatabaseDaemonClient, func() 
error, error) {\n\tif g.Dbclient == nil {\n\t\tg.Reset()\n\t}\n\treturn g.Dbclient, func() error { return nil }, nil\n}", "func New(ctx context.Context, bucket string) (fs.Interface, error) {\n\tclient, err := storage.NewClient(ctx)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &gcs{\n\t\tbucket: client.Bucket(bucket),\n\t}, nil\n}", "func NewGCPClient(keys, projectName string) (*GCPClient, error) {\n\tlog.Debugf(\"Connecting to GCP\")\n\tctx := context.Background()\n\tvar client *GCPClient\n\tif projectName == \"\" {\n\t\treturn nil, fmt.Errorf(\"the project name is not specified\")\n\t}\n\tif keys != \"\" {\n\t\tlog.Debugf(\"Using Keys %s\", keys)\n\t\tf, err := os.Open(keys)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tjsonKey, err := io.ReadAll(f)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tconfig, err := google.JWTConfigFromJSON(jsonKey,\n\t\t\tstorage.DevstorageReadWriteScope,\n\t\t\tcompute.ComputeScope,\n\t\t)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tclient = &GCPClient{\n\t\t\tclient: config.Client(ctx),\n\t\t\tprojectName: projectName,\n\t\t}\n\t} else {\n\t\tlog.Debugf(\"Using Application Default credentials\")\n\t\tgc, err := google.DefaultClient(\n\t\t\tctx,\n\t\t\tstorage.DevstorageReadWriteScope,\n\t\t\tcompute.ComputeScope,\n\t\t)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tclient = &GCPClient{\n\t\t\tclient: gc,\n\t\t\tprojectName: projectName,\n\t\t}\n\t}\n\n\tvar err error\n\tclient.compute, err = compute.NewService(ctx, option.WithHTTPClient(client.client))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tclient.storage, err = storage.NewService(ctx, option.WithHTTPClient(client.client))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tlog.Debugf(\"Generating SSH Keypair\")\n\tclient.privKey, err = rsa.GenerateKey(rand.Reader, 2048)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn client, nil\n}", "func (f *FactoryFake) New(address string) (client.Interface, error) 
{\n\tc, _ := f.Clients[address]\n\treturn c, nil\n}", "func NewMock(t *testing.T) *MockT { return &MockT{t: t} }", "func providerFactory(_ io.Reader) (cloudprovider.Interface, error) {\n\tlog := klogr.NewWithOptions(klogr.WithFormat(klogr.FormatKlog))\n\tc, err := loadConfig(envconfig.OsLookuper())\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tapiUrl := katapult.DefaultURL\n\tif c.APIHost != \"\" {\n\t\tlog.Info(\"default API base URL overrided\",\n\t\t\t\"url\", c.APIHost)\n\t\tapiUrl, err = url.Parse(c.APIHost)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"failed to parse provided api url: %w\", err)\n\t\t}\n\t}\n\n\trm, err := katapult.New(\n\t\tkatapult.WithAPIKey(c.APIKey),\n\t\tkatapult.WithBaseURL(apiUrl),\n\t\tkatapult.WithUserAgent(\"kce-ccm\"), // TODO: Add version.\n\t)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tclient := core.New(rm)\n\n\treturn &provider{\n\t\tlog: log,\n\t\tkatapult: client,\n\t\tconfig: *c,\n\t\tloadBalancer: &loadBalancerManager{\n\t\t\tlog: log,\n\t\t\tconfig: *c,\n\t\t\tloadBalancerController: client.LoadBalancers,\n\t\t\tloadBalancerRuleController: client.LoadBalancerRules,\n\t\t},\n\t}, nil\n}", "func setupGCP(ctx *context.Context, bucket string) (*blob.Bucket, error) {\n\t// DefaultCredentials assumes a user has logged in with gcloud.\n\t// See here for more information:\n\t// https://cloud.google.com/docs/authentication/getting-started\n\tcreds, err := gcp.DefaultCredentials(*ctx)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tc, err := gcp.NewHTTPClient(gcp.DefaultTransport(), gcp.CredentialsTokenSource(creds))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\t// The bucket name must be globally unique.\n\treturn gcsblob.OpenBucket(*ctx, bucket, c, nil)\n}" ]
[ "0.74995255", "0.69299114", "0.69029814", "0.6815023", "0.6562258", "0.6515134", "0.6346468", "0.62458694", "0.6240434", "0.6203039", "0.61380965", "0.60854924", "0.6003493", "0.59790623", "0.5966205", "0.59641105", "0.5882635", "0.5866135", "0.5820343", "0.58198625", "0.5764206", "0.572279", "0.56885946", "0.5664913", "0.5664913", "0.5664913", "0.5664913", "0.56582886", "0.56582886", "0.56340057", "0.5618201", "0.56142944", "0.5610625", "0.5609929", "0.5604749", "0.5575401", "0.5526576", "0.55188113", "0.55094385", "0.55057156", "0.54962784", "0.54961073", "0.5476079", "0.5468519", "0.54669565", "0.5450706", "0.5449571", "0.54478395", "0.5443971", "0.5436088", "0.54295254", "0.5408054", "0.5394611", "0.53891116", "0.53833276", "0.53734654", "0.53722453", "0.53700125", "0.53413844", "0.5333573", "0.5327173", "0.5326814", "0.53231794", "0.53171074", "0.5314388", "0.5309397", "0.53089", "0.5307246", "0.530722", "0.53021663", "0.52999413", "0.5295033", "0.5287255", "0.52791244", "0.52790326", "0.526684", "0.5249596", "0.52437633", "0.52396643", "0.52368903", "0.5236126", "0.52004826", "0.5197418", "0.51960266", "0.51959544", "0.51945835", "0.518601", "0.5183194", "0.51780796", "0.51771873", "0.5175839", "0.51680297", "0.5167862", "0.51677996", "0.5165721", "0.5165062", "0.5164794", "0.51638633", "0.515727", "0.51536" ]
0.7895326
0
GetTask returns a new task for the action
GetTask возвращает новую задачу для действия
func GetTask(name, action string, conf *config.MountConfig) (iface.Task, error) { switch action { case "", "create": return NewCreateTask(name, conf), nil case "remove", "rm": return NewRemoveTask(name, conf), nil default: return nil, fmt.Errorf("Invalid mount action %q for task %q", action, name) } }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func GetTask(r *http.Request) *task.Task {\n\tif rv := r.Context().Value(model.ApiTaskKey); rv != nil {\n\t\tif t, ok := rv.(*task.Task); ok {\n\t\t\treturn t\n\t\t}\n\t}\n\treturn nil\n}", "func (*FakeReconcilerClient) GetTask(string) (swarm.Task, error) {\n\treturn swarm.Task{}, FakeUnimplemented\n}", "func (a *agent) GetTask(ctx context.Context, msg *api.AgentID) (*api.Task, error) {\n\tvar task *api.Task = new(api.Task)\n\tselect {\n\tcase task, ok := <-a.work[msg.GetAgentID()]:\n\t\tif ok {\n\t\t\treturn task, nil\n\t\t}\n\t\treturn task, errors.New(\"channel closed\")\n\tdefault:\n\t\treturn task, nil\n\t}\n}", "func (ds *DNSSuite) GetTask() *boomer.Task {\n\tvar fn func()\n\n\tswitch ds.Type {\n\tcase dns.TypeA:\n\t\tfn = ds.doA\n\t}\n\n\treturn &boomer.Task{\n\t\tName: \"dns\",\n\t\tOnStart: func() {},\n\t\tOnStop: func() {},\n\t\tFn: fn,\n\t}\n}", "func GetTask(tid, user_token string) (*Task, error) {\n\t// declarations\n\tvar start_time, end_time pq.NullTime\n\tvar exit_status sql.NullInt64\n\tvar output sql.NullString\n\n\t// initialize Task\n\ttask := Task{}\n\t// get task information\n\tif err := db.QueryRow(\"SELECT * FROM tasks WHERE tasks.id=$1\", tid).\n\t\tScan(&task.Id, &task.Gid, &start_time, &end_time, &task.Status,\n\t\t&exit_status, &output, &task.Patch); err != nil {\n\t\treturn nil, err\n\t}\n\t// set remaining fields\n\tif start_time.Valid {\n\t\ttask.Start_time = &start_time.Time\n\t}\n\tif end_time.Valid {\n\t\ttask.End_time = &end_time.Time\n\t}\n\tif exit_status.Valid {\n\t\ttask.Exit_status = exit_status.Int64\n\t}\n\tif output.Valid {\n\t\ttask.Output = output.String\n\t}\n\n\tgroup_task, _ := getGroupTask(task.Gid)\n\ttask.User = group_task.user\n\ttask.Project = group_task.project\n\ttask.Bot = group_task.bot\n\n\treturn &task, nil\n}", "func (d *dispatcher) Get(state string) *Task {\n\ttask, ok := d.Tasks[state]\n\tif !ok {\n\t\treturn &Task{\n\t\t\tHandler: NotFoundHandler,\n\t\t}\n\t}\n\treturn task\n}", "func GetTask(c 
common.Client, uri string) (*Task, error) {\n\tvar task Task\n\treturn &task, task.Get(c, uri, &task)\n}", "func (v1 *V1) GetTask(w http.ResponseWriter, r *http.Request) {\n\ttaskID := chi.URLParam(r, \"taskID\")\n\tshouldDeleteTask := false\n\tdeleteParam := r.URL.Query().Get(\"delete\")\n\tif deleteParam == \"1\" {\n\t\tshouldDeleteTask = true\n\t}\n\n\ttask := v1.metaCrawlSvc.TaskByID(taskID)\n\tif task == nil {\n\t\tv1.responseErrorJSON(w, \"task not found\", 404)\n\t\treturn\n\t}\n\n\ttaskStatus := task.Status()\n\tswitch taskStatus {\n\tcase metacrawl.TaskInProgress:\n\t\tv1.responseJSON(w, \"task in progress\", 204)\n\t\treturn\n\tcase metacrawl.TaskCompleted:\n\t\tif shouldDeleteTask {\n\t\t\tv1.metaCrawlSvc.DeleteTaskByID(taskID)\n\t\t}\n\n\t\tv1.responseCSV(w, taskID, task.Render(), 200)\n\t}\n}", "func (_Contract *ContractCallerSession) GetTask(i *big.Int) (struct {\n\tActive bool\n\tAssignment *big.Int\n\tProposalID *big.Int\n}, error) {\n\treturn _Contract.Contract.GetTask(&_Contract.CallOpts, i)\n}", "func (t *Tasker) Get() *Task {\n\ttask := t.tasks.Pop()\n\tif task == nil {\n\t\treturn nil\n\t}\n\n\ttt := task.(*Task)\n\tlog.Printf(\"tasker get task: %v\\n\", tt)\n\treturn tt\n}", "func (m *Master) GetTask(req *GetTaskReq, rsp *GetTaskRsp) error {\n\tm.mutex.Lock()\n\tdefer m.mutex.Unlock()\n\tfor k := range m.todoMapTask {\n\t\tif m.todoMapTask[k] == 0 {\n\t\t\trsp.Status = \"Task\"\n\t\t\trsp.Filename = m.files[k]\n\t\t\trsp.NReduce = m.nReduce\n\t\t\trsp.TaskID = k\n\t\t\tm.todoMapTask[k] = time.Now().Unix()\n\t\t\treturn nil\n\t\t}\n\t}\n\tif len(m.todoMapTask) != 0 {\n\t\trsp.Status = \"Wait\"\n\t\treturn nil\n\t}\n\n\tfor k := range m.todoReduceTask {\n\t\tif m.todoReduceTask[k] == 0 {\n\t\t\trsp.Status = \"Task\"\n\t\t\trsp.NReduce = m.nReduce\n\t\t\trsp.NMap = len(m.files)\n\t\t\trsp.TaskID = k\n\t\t\tm.todoReduceTask[k] = time.Now().Unix()\n\t\t\treturn nil\n\t\t}\n\t}\n\n\tif len(m.todoReduceTask) != 0 {\n\t\trsp.Status = 
\"Wait\"\n\t\treturn nil\n\t} else {\n\t\trsp.Status = \"Exit\"\n\t\treturn nil\n\t}\n\n\treturn nil\n}", "func (ts *TaskService) Get(reqdata *TaskGetRequest) (*TaskGetResponse, *http.Response, error) {\n\n\tu := fmt.Sprintf(\"tasks/%s\", reqdata.UUID)\n\n\tu, err := addOptions(u, nil)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\treq, err := ts.client.NewRequest(\"GET\", u, nil)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tvar result *TaskGetResponse\n\tresp, err := ts.client.Do(req, &result)\n\tif err != nil {\n\t\treturn nil, resp, err\n\t}\n\n\treturn result, resp, nil\n}", "func (cl *RedisClient) GetTask() (*RedisTask, error) {\n\tval, err := cl.client.Keys(\"tasks:*\").Result()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar result *RedisTask\n\tvar key string\n\ttxf := func(tx *redis.Tx) error {\n\t\tresult = nil\n\t\tstate, err := tx.HGet(key, \"state\").Result()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif state == \"new\" {\n\t\t\tinputfile, err := cl.client.HGet(key, \"inputfile\").Result()\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\toutfile, err := cl.client.HGet(key, \"outfile\").Result()\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tresult = &RedisTask{}\n\t\t\tresult.InputFile = inputfile\n\t\t\tresult.OutFile = outfile\n\t\t\tresult.TaskName = key\n\t\t\t_, err = tx.HSet(key, \"state\", \"holded\").Result()\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t\treturn nil\n\t}\n\n\tfor _, key = range val {\n\t\terr := cl.client.Watch(txf, key)\n\t\tif err == redis.TxFailedErr {\n\t\t\treturn nil, err\n\t\t}\n\t\tif result != nil {\n\t\t\treturn result, nil\n\t\t}\n\t}\n\n\treturn nil, nil\n}", "func (s *state) GetTask(exID string) (*mesos.Task, error) {\n\t// Check if task is in Launched Tasks list\n\tfor _, t := range s.st.GetTasks.LaunchedTasks {\n\t\tif s.isMatchingTask(&t, exID) {\n\t\t\treturn &t, nil\n\t\t}\n\t}\n\n\t// Check if task is in Queued Tasks list\n\tfor _, t := 
range s.st.GetTasks.QueuedTasks {\n\t\tif s.isMatchingTask(&t, exID) {\n\t\t\treturn &t, nil\n\t\t}\n\t}\n\treturn nil, fmt.Errorf(\"unable to find task matching executor id %s\", exID)\n}", "func (_Contract *ContractSession) GetTask(i *big.Int) (struct {\n\tActive bool\n\tAssignment *big.Int\n\tProposalID *big.Int\n}, error) {\n\treturn _Contract.Contract.GetTask(&_Contract.CallOpts, i)\n}", "func (ctrl *TaskController) GetTask(w http.ResponseWriter, r *http.Request) {\n\ttaskId := ParamAsString(\"id\", r)\n\tlogrus.Println(\"task : \", taskId)\n\n\ttask, err := ctrl.taskDao.Get(taskId)\n\tif err != nil {\n\t\tlogrus.Error(err)\n\t\tSendJSONError(w, err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\tlogrus.Println(\"task : \", task)\n\tSendJSONOk(w, task)\n}", "func (_Contract *ContractCaller) GetTask(opts *bind.CallOpts, i *big.Int) (struct {\n\tActive bool\n\tAssignment *big.Int\n\tProposalID *big.Int\n}, error) {\n\tvar out []interface{}\n\terr := _Contract.contract.Call(opts, &out, \"getTask\", i)\n\n\toutstruct := new(struct {\n\t\tActive bool\n\t\tAssignment *big.Int\n\t\tProposalID *big.Int\n\t})\n\tif err != nil {\n\t\treturn *outstruct, err\n\t}\n\n\toutstruct.Active = *abi.ConvertType(out[0], new(bool)).(*bool)\n\toutstruct.Assignment = *abi.ConvertType(out[1], new(*big.Int)).(**big.Int)\n\toutstruct.ProposalID = *abi.ConvertType(out[2], new(*big.Int)).(**big.Int)\n\n\treturn *outstruct, err\n\n}", "func (svc *Service) Get(ctx context.Context, id uuid.UUID) (*domain.Task, error) {\n\tsvc.taskRequestPolled.Inc()\n\treturn svc.taskGateway.FindByID(ctx, id)\n}", "func (t *TaskData) GetTask() Task {\n\treturn Task{\n\t\tLocation: t.Location,\n\t\tDestination: t.Destination,\n\t\tAppID: t.AppID,\n\t\tRequestTime: t.RequestTime,\n\t}\n}", "func GetTask(db *sql.DB, taskuuid string) (*Task, error) {\n\tdbLogger.Debug(\"GetTask...\")\n\tvar task = new(Task)\n\tvar err error\n\tvar stmt *sql.Stmt\n\n\tif err := db.Ping(); err != nil 
{\n\t\tdbLogger.Fatal(ERROR_DB_NOT_CONNECTED)\n\t\treturn nil, errors.New(ERROR_DB_NOT_CONNECTED)\n\t}\n\n\tstmt, err = db.Prepare(\"SELECT rowid, taskuuid, useruuid, keyword, bc_txuuid, type, state, payload FROM task WHERE taskuuid = ? and deleted = 0\")\n\tif err != nil {\n\t\tdbLogger.Errorf(\"Failed preparing statement: %v\", err)\n\t\treturn nil, fmt.Errorf(ERROR_DB_PREPARED + \": %v\", err)\n\t}\n\tdefer stmt.Close()\n\n\tif err := stmt.QueryRow(taskuuid).Scan(&task.RowID, &task.TaskUUID, &task.UserUUID, &task.Keyword, &task.BC_txuuid, &task.Type, &task.State, &task.Payload); err != nil {\n\t\tdbLogger.Errorf(\"Failed getting task by taskuuid %s: %v\", taskuuid, err)\n\t\treturn nil, fmt.Errorf(ERROR_DB_QUERY + \": %v\", err)\n\t}\n\tdbLogger.Debugf(\"Get task by taskuuid %s: \\n%#v\", taskuuid, *task)\n\n\treturn task, nil\n}", "func getTask(c *cli.Context) (string, error) {\n\tif c.NArg() == 0 {\n\t\treturn \"\", fmt.Errorf(\"no task specified\")\n\t}\n\n\treturn c.Args()[0], nil\n}", "func GetTask() *Task {\n\treturn taskPool.Get().(*Task)\n}", "func (s *Storage) GetTask(id uint) (*todopb.TaskResponse, error) {\n\trow := s.db.QueryRow(\"SELECT * FROM tasks WHERE id=$1\", id)\n\n\ttask, err := scan(row)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn task, nil\n}", "func (db *DynamoDB) GetTask(ctx context.Context, req *tes.GetTaskRequest) (*tes.Task, error) {\n\tvar task *tes.Task\n\tvar response *dynamodb.GetItemOutput\n\tvar err error\n\n\tswitch req.View {\n\tcase tes.TaskView_MINIMAL:\n\t\tresponse, err = db.getMinimalView(ctx, req.Id)\n\tcase tes.TaskView_BASIC:\n\t\tresponse, err = db.getBasicView(ctx, req.Id)\n\tcase tes.TaskView_FULL:\n\t\tresponse, err = db.getFullView(ctx, req.Id)\n\t}\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif response.Item == nil {\n\t\treturn nil, grpc.Errorf(codes.NotFound, fmt.Sprintf(\"%v: taskID: %s\", errNotFound.Error(), req.Id))\n\t}\n\n\terr = dynamodbattribute.UnmarshalMap(response.Item, &task)\n\tif 
err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to DynamoDB unmarshal Task, %v\", err)\n\t}\n\n\treturn task, nil\n}", "func (db *DynamoDB) GetTask(ctx context.Context, req *tes.GetTaskRequest) (*tes.Task, error) {\n\tvar task *tes.Task\n\tvar response *dynamodb.GetItemOutput\n\tvar err error\n\n\tswitch req.View {\n\tcase tes.TaskView_MINIMAL:\n\t\tresponse, err = db.getMinimalView(ctx, req.Id)\n\tcase tes.TaskView_BASIC:\n\t\tresponse, err = db.getBasicView(ctx, req.Id)\n\tcase tes.TaskView_FULL:\n\t\tresponse, err = db.getFullView(ctx, req.Id)\n\t}\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif response.Item == nil {\n\t\treturn nil, tes.ErrNotFound\n\t}\n\n\terr = dynamodbattribute.UnmarshalMap(response.Item, &task)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to DynamoDB unmarshal Task, %v\", err)\n\t}\n\n\treturn task, nil\n}", "func (domain *Domain) GetTask(name string) (*Task, error) {\n\t// determine task\n\tdomain.TasksX.RLock()\n\ttask, ok := domain.Tasks[name]\n\tdomain.TasksX.RUnlock()\n\n\tif !ok {\n\t\treturn nil, errors.New(\"task not found\")\n\t}\n\n\t// success\n\treturn task, nil\n}", "func GetTask(ctx *pulumi.Context,\n\tname string, id pulumi.IDInput, state *TaskState, opts ...pulumi.ResourceOption) (*Task, error) {\n\tvar resource Task\n\terr := ctx.ReadResource(\"google-native:cloudtasks/v2:Task\", name, id, state, &resource, opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &resource, nil\n}", "func GetTask(id bson.ObjectId) (*Task, error) {\n\ttask := Task{}\n\terr := sess.DB(\"\").C(taskC).FindId(id).One(&task)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &task, nil\n}", "func (a *Client) GetTask(params *GetTaskParams) (*GetTaskOK, error) {\n\t// TODO: Validate the params before sending\n\tif params == nil {\n\t\tparams = NewGetTaskParams()\n\t}\n\n\tresult, err := a.transport.Submit(&runtime.ClientOperation{\n\t\tID: \"getTask\",\n\t\tMethod: \"GET\",\n\t\tPathPattern: 
\"/TaskService/Tasks/{identifier}\",\n\t\tProducesMediaTypes: []string{\"application/json\"},\n\t\tConsumesMediaTypes: []string{\"application/json\"},\n\t\tSchemes: []string{\"http\", \"https\"},\n\t\tParams: params,\n\t\tReader: &GetTaskReader{formats: a.formats},\n\t\tContext: params.Context,\n\t\tClient: params.HTTPClient,\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn result.(*GetTaskOK), nil\n\n}", "func (p *taskController) GetTask(c echo.Context) error {\n\tid, err := strconv.Atoi(c.Param(\"id\"))\n\tif err != nil {\n\t\treturn echo.NewHTTPError(http.StatusBadRequest, \"Task ID must be int\")\n\t}\n\tctx := c.Request().Context()\n\tif ctx == nil {\n\t\tctx = context.Background()\n\t}\n\ttask, err := p.TaskUseCase.GetTask(ctx, id)\n\tif err != nil {\n\t\treturn echo.NewHTTPError(http.StatusNotFound, \"Task does not exist.\")\n\t}\n\treturn c.JSON(http.StatusOK, task)\n}", "func GetTask(id int) (Task, error) {\n\tvar t Task\n\tvar jsonTask []byte\n\n\terr := db.View(func(tx *bolt.Tx) error {\n\t\tb := tx.Bucket(taskBucket)\n\t\tjsonTask = b.Get(itob(id))\n\t\treturn nil\n\t})\n\tif err != nil {\n\t\treturn t, err\n\t}\n\n\terr = t.ReadFromJSON(jsonTask)\n\tif err != nil {\n\t\treturn t, err\n\t}\n\n\treturn t, nil\n\n}", "func (dtm *DfgetTaskManager) Get(ctx context.Context, clientID, taskID string) (dfgetTask *types.DfGetTask, err error) {\n\treturn dtm.getDfgetTask(clientID, taskID)\n}", "func newTask() task {\n\treturn task{}\n}", "func GetTask(ctx *pulumi.Context,\n\tname string, id pulumi.IDInput, state *TaskState, opts ...pulumi.ResourceOption) (*Task, error) {\n\tvar resource Task\n\terr := ctx.ReadResource(\"aws:datasync/task:Task\", name, id, state, &resource, opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &resource, nil\n}", "func (service *Service) Task(id string) *Task {\n\treturn service.Worker.Task(id).(*Task)\n}", "func GetEventTask(etid int64) (*EventTask, error) {\n\tvar hook_id sql.NullInt64\n\ttask := 
EventTask{}\n\n\tif err := db.QueryRow(\"SELECT * FROM event_tasks WHERE id=$1\", etid).\n\t\tScan(&task.Id, &task.Name, &task.Status, &task.Event,\n\t\t&task.Token, &hook_id); err != nil {\n\t\treturn nil, err\n\t}\n\n\tif hook_id.Valid {\n\t\ttask.HookId = hook_id.Int64\n\t}\n\n\tgroup_task, err := getGroupTask(task.Id)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\ttask.User = group_task.user\n\ttask.Project = group_task.project\n\ttask.Bot = group_task.bot\n\n\treturn &task, nil\n}", "func (cm *Docker) GetTask(id string) (*entity.Task, bool) {\n\tcm.lock.Lock()\n\tt, ok := cm.tasks[id]\n\tcm.lock.Unlock()\n\treturn t, ok\n}", "func (s *Service) GetTask(taskKey string) (*Task, error) {\n\tfor _, task := range s.Tasks {\n\t\tif task.Key == taskKey {\n\t\t\ttask.serviceName = s.Name\n\t\t\treturn task, nil\n\t\t}\n\t}\n\treturn nil, &TaskNotFoundError{\n\t\tTaskKey: taskKey,\n\t\tServiceName: s.Name,\n\t}\n}", "func (d *DeploymentRequest) GetTask() string {\n\tif d == nil || d.Task == nil {\n\t\treturn \"\"\n\t}\n\treturn *d.Task\n}", "func GetTask(mapf func(string, string) []KeyValue,\n\treducef func(string, []string) string) (bool, bool, string, int) {\n\n\t// declare an argument structure.\n\targs := GetTaskArgs{}\n\n\t// fill in the argument(s).\n\n\t// declare a reply structure.\n\treply := GetTaskReply{}\n\n\t// send the RPC request, wait for the reply.\n\tcallSuccess := call(\"Master.GetTask\", &args, &reply)\n\ttaskSuccess := false\n\tif (callSuccess) {\n\t\ttaskSuccess = DoTask(reply, mapf, reducef);\n\t}\n\n\treturn callSuccess, taskSuccess, reply.TaskType, reply.TaskNumber;\n}", "func (c *Client) GetTask(ctx context.Context, in *todopb.TaskQuery, opts ...grpc.CallOption) (*todopb.TaskResponse, error) {\n\treturn c.client.GetTask(ctx, in, opts...)\n}", "func (m *Master) GetTask(_ *ExampleArgs, reply *GetTaskReply) error {\n\tswitch m.masterState {\n\tcase newMaster:\n\t\tfor i, task := range m.mapTask {\n\t\t\tif task.State == initialState 
{\n\t\t\t\treply.Task.Type_ = task.Type_\n\t\t\t\treply.Task.Filename = task.Filename\n\t\t\t\treply.Task.Id = task.Id\n\t\t\t\treply.Task.NReduce = task.NReduce\n\t\t\t\treply.Flag = 0\n\t\t\t\treply.Task.State = task.State\n\n\t\t\t\tm.mapTask[i].State = inProgress\n\t\t\t\tm.mapTask[i].Time = time.Now()\n\t\t\t\t//reply.Task.State=m.mapTask[i].State\n\n\t\t\t\treturn nil\n\t\t\t} else if task.State == inProgress && time.Now().Sub(m.mapTask[i].Time) > time.Duration(5)*time.Second {\n\t\t\t\treply.Task.Type_ = task.Type_\n\t\t\t\treply.Task.Filename = task.Filename\n\t\t\t\treply.Task.Id = task.Id\n\t\t\t\treply.Task.NReduce = task.NReduce\n\t\t\t\treply.Task.State = task.State\n\t\t\t\treply.Flag = 0\n\n\t\t\t\tm.mapTask[i].State = inProgress\n\t\t\t\tm.mapTask[i].Time = time.Now()\n\t\t\t\t//reply.Task.State=m.mapTask[i].State\n\n\t\t\t\treturn nil\n\t\t\t}\n\t\t}\n\t\treply.Flag = 1 // map not finished but in progress\n\tcase completeMap:\n\t\tfor i, task := range m.reduceTask {\n\t\t\tif task.State == initialState {\n\t\t\t\treply.Task.Type_ = task.Type_\n\t\t\t\treply.Task.Filename = task.Filename\n\t\t\t\treply.Task.Id = task.Id\n\t\t\t\treply.Task.NReduce = task.NReduce\n\t\t\t\treply.Flag = 0\n\t\t\t\treply.Task.Files = task.Files\n\t\t\t\treply.Task.State = task.State\n\n\t\t\t\tm.reduceTask[i].State = inProgress\n\t\t\t\tm.reduceTask[i].Time = time.Now()\n\t\t\t\t//reply.Task.State=m.mapTask[i].State\n\n\t\t\t\treturn nil\n\t\t\t} else if task.State == inProgress && time.Now().Sub(m.reduceTask[i].Time) > time.Duration(5)*time.Second {\n\t\t\t\treply.Task.Type_ = task.Type_\n\t\t\t\treply.Task.Filename = task.Filename\n\t\t\t\treply.Task.Id = task.Id\n\t\t\t\treply.Task.NReduce = task.NReduce\n\t\t\t\treply.Flag = 0\n\t\t\t\treply.Task.Files = task.Files\n\t\t\t\treply.Task.State = task.State\n\n\t\t\t\tm.reduceTask[i].State = inProgress\n\t\t\t\tm.reduceTask[i].Time = time.Now()\n\t\t\t\t//reply.Task.State=m.mapTask[i].State\n\n\t\t\t\treturn 
nil\n\n\t\t\t}\n\t\t}\n\t\treply.Flag = 1 // reduce not finished but in progress\n\tcase completeReduce:\n\t\treply.Flag = 2 // all task have been finished\n\n\t}\n\n\treturn nil\n}", "func (m *Master) GetTask(args *GetTaskArgs, reply *GetTaskReply) error {\n\n\tif !m.Done() {\n\t\t//fmt.Println(m.isAllMapCompleted())\n\t\tif !m.isAllMapCompleted() {\n\t\t\tm.Mux.Lock()\n\t\t\tfor i := 0; i < m.M; i += 1 {\n\t\t\t\tif m.IsIdleMaps[i] == 0 {\n\t\t\t\t\tm.IsIdleMaps[i] = 1\n\t\t\t\t\treply.TaskId = i\n\t\t\t\t\treply.TaskType = 1\n\t\t\t\t\tinput := []string{m.MapTasks[i]}\n\t\t\t\t\treply.Input = input\n\t\t\t\t\treply.R = m.R\n\t\t\t\t\tm.MapTasksTime[i] = time.Now().Unix()\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\t\t\tm.Mux.Unlock()\n\t\t} else {\n\t\t\tm.Mux.Lock()\n\t\t\tfor i := 0; i < m.R; i += 1 {\n\t\t\t\tif m.IsIdleReduces[i] == 0 {\n\t\t\t\t\tm.IsIdleReduces[i] = 1\n\t\t\t\t\treply.TaskId = i\n\t\t\t\t\treply.TaskType = 2\n\t\t\t\t\treply.Input = m.ReduceTasks[i]\n\t\t\t\t\tm.ReduceTasksTime[i] = time.Now().Unix()\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\t\t\tm.Mux.Unlock()\n\t\t}\n\t}\n\treturn nil\n}", "func (s *StubTodoStore) GetTask(projectID, taskName string) model.Task {\n\tfor _, t := range s.Tasks {\n\t\tif t.Name == taskName && t.ProjectID == projectID {\n\t\t\treturn wrapStubTask(taskName)\n\t\t}\n\t}\n\treturn model.Task{}\n}", "func (r Ref) GetTask() string {\n\tstart := strings.LastIndex(string(r), \":\")\n\ttask := string(r)[start+1:]\n\n\tif strings.HasPrefix(task, \"//\") {\n\t\t// there is no task because task cannot start with '//'\n\t\treturn \"\"\n\t}\n\treturn task\n}", "func GetTask(id int) (Task, error) {\n\tpath := fmt.Sprintf(\"tasks/%d\", id)\n\tres, err := makeRequest(http.MethodGet, path, nil)\n\tif err != nil {\n\t\treturn Task{}, err\n\t}\n\n\treturn decodeTask(res.Body)\n}", "func (g *getRunTask) Get() (runtask *v1alpha1.RunTask, err error) {\n\tvar allStrategies []runTaskGetterFn\n\tif g.currentStrategy != nil 
{\n\t\tallStrategies = append(allStrategies, g.currentStrategy)\n\t}\n\n\tif len(g.oldStrategies) != 0 {\n\t\tallStrategies = append(allStrategies, g.oldStrategies...)\n\t}\n\n\tif len(allStrategies) == 0 {\n\t\terr = fmt.Errorf(\"no strategies to get runtask: failed to get runtask '%s'\", g.taskName)\n\t\treturn\n\t}\n\n\tfor _, s := range allStrategies {\n\t\truntask, err = s(g.getRunTaskSpec)\n\t\tif err == nil {\n\t\t\treturn\n\t\t}\n\n\t\terr = errors.Wrap(err, fmt.Sprintf(\"failed to get runtask '%s'\", g.taskName))\n\t\tklog.Warningf(\"%s\", err)\n\t}\n\n\t// at this point, we have a real error we can not recover from\n\terr = fmt.Errorf(\"exhausted all strategies to get runtask: failed to get runtask '%s'\", g.taskName)\n\treturn\n}", "func (t *TaskStore) getTask(id int64) *Task {\n\tif id <= 0 {\n\t\t// Invalid ID.\n\t\treturn nil\n\t}\n\tif _, ok := t.delTasks[id]; ok {\n\t\t// Already deleted in the temporary cache.\n\t\treturn nil\n\t}\n\tif t, ok := t.tmpTasks[id]; ok {\n\t\t// Sitting in cache.\n\t\treturn t\n\t}\n\tif t, ok := t.tasks[id]; ok {\n\t\t// Sitting in the main index.\n\t\treturn t\n\t}\n\treturn nil\n}", "func (cm *Docker) MustGetTask(id string) *entity.Task {\n\tn, ok := cm.GetTask(id)\n\tif !ok {\n\t\tcollector := collector.NewDocker(cm.client, id)\n\t\tn = entity.NewTask(id, collector)\n\t\tcm.lock.Lock()\n\t\tcm.tasks[id] = n\n\t\tcm.lock.Unlock()\n\t}\n\treturn n\n}", "func (e *Executor) GetTask(name string) (task Task, ok bool) {\n\te.lock.RLock()\n\tfor i := len(e.tasks) - 1; i >= 0; i-- {\n\t\tif e.tasks[i].Job.Name() == name {\n\t\t\ttask = e.tasks[i].Task()\n\t\t\tok = true\n\t\t\tbreak\n\t\t}\n\t}\n\te.lock.RUnlock()\n\treturn\n}", "func GetOriginTask(ctx context.Context, task *swarmingAPI.SwarmingRpcsTaskResult, swarmSvc *swarmingAPI.Service) (*swarmingAPI.SwarmingRpcsTaskResult, error) {\n\t// If the task was deduped, then the invocation associated with it is just the one associated\n\t// to the task from which it was 
deduped.\n\tfor task.DedupedFrom != \"\" {\n\t\tvar err error\n\t\tif task, err = GetSwarmingTask(ctx, task.DedupedFrom, swarmSvc); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\treturn task, nil\n}", "func (s *RefreshImpl) Task(ctx context.Refresh) *taskDTO.PostTaskRequest {\n\tdto := taskDTO.PostTaskRequest{}\n\tdto.MessageID = constvalue.RefreshTaskID\n\tdto.Name = \"Refresh Enclosure\"\n\tdto.Description = \"Refresh enclosure's settings and component.\"\n\tdto.CreatedByName = \"Enclosure Service\"\n\tdto.CreatedByURI = \"/promise/v1/enclosure\"\n\tdto.TargetName = ctx.GetEnclosure().Name\n\tdto.TargetURI = base.ToEnclosureURI(ctx.GetEnclosure().ID)\n\tfor _, v := range s.sub {\n\t\tstep := taskDTO.PostTaskStepRequest{}\n\t\tstep.MessageID = v.MessageID()\n\t\tstep.Name = v.Name()\n\t\tstep.Description = v.Description()\n\t\tstep.ExpectedExecutionMs = v.ExpectedExecutionMs()\n\t\tdto.TaskSteps = append(dto.TaskSteps, step)\n\t}\n\treturn &dto\n}", "func GetOneTimeTask(otid int64) (*OneTimeTask, error) {\n\ttask := OneTimeTask{}\n\n\tif err := db.QueryRow(\"SELECT * FROM onetime_tasks WHERE id=$1\", otid).\n\t\tScan(&task.Id, &task.Name, &task.Status, &task.Exec_time); err != nil {\n\t\treturn nil, err\n\t}\n\n\tgroup_task, err := getGroupTask(task.Id)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\ttask.User = group_task.user\n\ttask.Project = group_task.project\n\ttask.Bot = group_task.bot\n\n\treturn &task, nil\n}", "func (m *Master) GetTask(args ExampleArgs, reply *Task) error {\n\tif len(m.completedTasks[0]) < m.M {\n\t\tselect {\n\t\tcase reply1 := <-m.idleTasks[0]:\n\t\t\treply.Type = reply1.Type\n\t\t\treply.Filename = reply1.Filename\n\t\t\treply.NReduce = reply1.NReduce\n\t\t\treply.TaskNum = reply1.TaskNum\n\t\t\treply1.StartTime = time.Now().UnixNano()\n\t\t\treply.StartTime = reply1.StartTime\n\t\t\tm.mu.Lock()\n\t\t\tm.inProgress[0][reply1] = true\n\t\t\tm.mu.Unlock()\n\t\t\tgo m.waitForTask(reply1)\n\t\tdefault:\n\t\t\treply.EmptyIdle = 
true\n\t\t}\n\t} else if len(m.completedTasks[1]) < m.R {\n\t\tselect {\n\t\tcase reply1 := <-m.idleTasks[1]:\n\t\t\treply.Type = reply1.Type\n\t\t\treply.Filename = reply1.Filename\n\t\t\treply.NReduce = reply1.NReduce\n\t\t\treply.TaskNum = reply1.TaskNum\n\t\t\treply1.StartTime = time.Now().UnixNano()\n\t\t\treply.StartTime = reply1.StartTime\n\t\t\tm.mu.Lock()\n\t\t\tm.inProgress[1][reply1] = true\n\t\t\tm.mu.Unlock()\n\t\t\tgo m.waitForTask(reply1)\n\t\tdefault:\n\t\t\treply.EmptyIdle = true\n\t\t}\n\t} else {\n\t\treply.Type = \"done\"\n\t}\n\treturn nil\n}", "func (service *Service) Task(id string) *Task {\n\tif t := service.Worker.Task(id); t != nil {\n\t\treturn t.(*Task)\n\t}\n\treturn nil\n}", "func (c *Coordinator) GetTask(args *GetArgs, reply *GetReply) error {\n\tfmt.Printf(\"Worker %v requesting for task\\n\", args.WorkerId)\n\t// If map task available, send map task\n\n\tc.mapLock.Lock()\n\tfor id, _ := range c.availableMapTasks {\n\t\tfmt.Printf(\"Map Task %v given to worker %v\\n\", id, args.WorkerId)\n\n\t\t// Populate reply\n\t\treply.TaskType = 0\n\t\treply.TaskNum = id\n\t\treply.Filename = c.mapTasks[id].filename\n\t\treply.Partitions = len(c.reduceTasks)\n\n\t\t// Fill in maptask details\n\t\tc.mapTasks[id].worker = args.WorkerId\n\n\t\t// Remove from available\n\t\tdelete(c.availableMapTasks, id)\n\n\t\t// Run waiting thread\n\t\tgo waitCheck(c, 0, id)\n\n\t\tc.mapLock.Unlock()\n\n\t\treturn nil\n\t}\n\tc.mapLock.Unlock()\n\n\t// All map tasks not finished yet\n\tif c.mapDoneTasks != len(c.mapTasks) {\n\t\tfmt.Printf(\"No tasks available for worker %v\\n\", args.WorkerId)\n\t\treply.TaskType = 2\n\t\treturn nil\n\t}\n\n\tc.reduceLock.Lock()\n\t// If all map tasks over and reduce task available send reduce task\n\tfor id, _ := range c.availableReduceTasks {\n\t\tfmt.Printf(\"Reduce Task %v given to worker %v\\n\", id, args.WorkerId)\n\n\t\t// Populate reply\n\t\treply.TaskType = 1\n\t\treply.TaskNum = id\n\t\treply.Partitions = 
len(c.mapTasks)\n\n\t\t// Fill in reduce details\n\t\tc.reduceTasks[id].worker = args.WorkerId\n\n\t\t// Remove from available\n\t\tdelete(c.availableReduceTasks, id)\n\n\t\t// Run waiting thread\n\t\tgo waitCheck(c, 1, id)\n\n\t\tc.reduceLock.Unlock()\n\n\t\treturn nil\n\t}\n\tc.reduceLock.Unlock()\n\n\tif c.reduceDoneTasks != len(c.reduceTasks) {\n\t\t// No task available right now\n\t\tfmt.Printf(\"No tasks available for worker %v\\n\", args.WorkerId)\n\t\treply.TaskType = 2\n\t\treturn nil\n\t} else {\n\t\t// No task available right now\n\t\tfmt.Printf(\"All tasks completed, quiting worker %v\\n\", args.WorkerId)\n\t\treply.TaskType = 3\n\t\treturn nil\n\t}\n}", "func (tasks *Tasks) GetTask(name string) (task *Task, err error) {\n\tfor _, task := range tasks.Tasks {\n\t\tif task.Name == name {\n\t\t\treturn task, nil\n\t\t}\n\t}\n\n\treturn nil, fmt.Errorf(\"No Task with name %s\", name)\n}", "func (s Step) Task(ctx context.Context) Task {\n\treturn GetModelContext(ctx).Nodes.MustLoadTask(s.TaskID)\n}", "func (c *jxTasks) Get(name string, options v1.GetOptions) (result *v1alpha1.JxTask, err error) {\n\tresult = &v1alpha1.JxTask{}\n\terr = c.client.Get().\n\t\tNamespace(c.ns).\n\t\tResource(\"jxtasks\").\n\t\tName(name).\n\t\tVersionedParams(&options, scheme.ParameterCodec).\n\t\tDo().\n\t\tInto(result)\n\treturn\n}", "func (c *iperfTasks) Get(name string, options v1.GetOptions) (result *alpha1.IperfTask, err error) {\n\tresult = &alpha1.IperfTask{}\n\terr = c.client.Get().\n\t\tNamespace(c.ns).\n\t\tResource(\"iperftasks\").\n\t\tName(name).\n\t\tVersionedParams(&options, scheme.ParameterCodec).\n\t\tDo().\n\t\tInto(result)\n\treturn\n}", "func GetInstantTask(itid int64) (*InstantTask, error) {\n\ttask := InstantTask{}\n\n\tif err := db.QueryRow(\"SELECT * FROM instant_tasks WHERE id=$1\", itid).\n\t\tScan(&task.Id); err != nil {\n\t\treturn nil, err\n\t}\n\n\tgroup_task, err := getGroupTask(task.Id)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\ttask.User = 
group_task.user\n\ttask.Project = group_task.project\n\ttask.Bot = group_task.bot\n\n\treturn &task, nil\n}", "func (w *worker) requestTask() Task {\n\n\targs := TaskArgs{}\n\targs.WorkerId = w.id\n\treply := TaskReply{}\n\n\tif ok := call(\"Master.GetOneTask\", &args, &reply); !ok {\n\t\tlog.Println(\"Failed to get the Task\")\n\n\t\t// could not find the master process\n\t\t// possible improvements:\n\t\t//\t\t1. Add a retry with a delay, it could be due to network issue\n\t\t//\t\t2. Send graceful termination from `master` to all the registered\n\t\t//\t\t `worker`(s) when all the tasks are completed\n\t\tos.Exit(1)\n\t}\n\tlog.Printf(\"Worker Task: %+v\\n\", reply.Task)\n\treturn *reply.Task\n}", "func (d *Deployment) GetTask() string {\n\tif d == nil || d.Task == nil {\n\t\treturn \"\"\n\t}\n\treturn *d.Task\n}", "func (i *TaskRegisterUpdater) StartTask(ctx context.Context, action string, age time.Duration) (models.Task, error) {\n\n\treturn i.repository.GetTask(ctx, action, age)\n}", "func NewTask() *Task {\n\treturn &Task{}\n}", "func GetTaskConfig(name, action string, conf *config.ComposeConfig) (types.TaskConfig, error) {\n\tact, err := getAction(action, name, conf)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn types.NewTaskConfig(act.name, conf, act.deps, NewTask(act.Run, act.Stop)), nil\n}", "func GetSingleTask(w http.ResponseWriter, r *http.Request) {\n\tparams := mux.Vars(r)\n\n\tif params[\"id\"] == \"\" {\n\t\thttp.Error(w, http.StatusText(400), http.StatusBadRequest)\n\t\treturn\n\t}\n\n\ttaskID := bson.ObjectIdHex(params[\"id\"])\n\n\ttask, err := repository.GetSingleTask(taskID)\n\n\tif err != nil {\n\t\thttp.Error(w, http.StatusText(404), http.StatusNotFound)\n\t\treturn\n\t}\n\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\tw.WriteHeader(http.StatusOK)\n\n\tjson.NewEncoder(w).Encode(task)\n}", "func (c *clustermgrClient) GetMigrateTask(ctx context.Context, taskType proto.TaskType, key string) (task *proto.MigrateTask, err 
error) {\n\tval, err := c.client.GetKV(ctx, key)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\terr = json.Unmarshal(val.Value, &task)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif task.TaskType != taskType {\n\t\treturn nil, errcode.ErrIllegalTaskType\n\t}\n\treturn\n}", "func getOrCreateMigrationTask(kind, name, openebsNamespace string, r Migrator,\n\tclient openebsclientset.Interface) (*v1Alpha1API.MigrationTask, error) {\n\tvar mtaskObj *v1Alpha1API.MigrationTask\n\tvar err error\n\tmtaskObj = buildMigrationTask(kind, name, r)\n\t// the below logic first tries to fetch the CR if not found\n\t// then creates a new CR\n\tmtaskObj1, err1 := client.OpenebsV1alpha1().\n\t\tMigrationTasks(openebsNamespace).\n\t\tGet(context.TODO(), mtaskObj.Name, metav1.GetOptions{})\n\tif err1 != nil {\n\t\tif k8serror.IsNotFound(err1) {\n\t\t\tmtaskObj, err = client.OpenebsV1alpha1().\n\t\t\t\tMigrationTasks(openebsNamespace).Create(context.TODO(),\n\t\t\t\tmtaskObj, metav1.CreateOptions{})\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t} else {\n\t\t\treturn nil, err1\n\t\t}\n\t} else {\n\t\tmtaskObj = mtaskObj1\n\t}\n\n\tif mtaskObj.Status.StartTime.IsZero() {\n\t\tmtaskObj.Status.Phase = v1Alpha1API.MigrateStarted\n\t\tmtaskObj.Status.StartTime = metav1.Now()\n\t}\n\n\tmtaskObj.Status.MigrationDetailedStatuses = []v1Alpha1API.MigrationDetailedStatuses{}\n\tmtaskObj, err = client.OpenebsV1alpha1().\n\t\tMigrationTasks(openebsNamespace).\n\t\tUpdate(context.TODO(), mtaskObj, metav1.UpdateOptions{})\n\tif err != nil {\n\t\treturn nil, errors.Wrapf(err, \"failed to update migratetask\")\n\t}\n\treturn mtaskObj, nil\n}", "func forwardGetTask(restAPIaddress string, guid string) (string, error) {\n\turl := \"http://\" + restAPIaddress + \"/api/v1.0/task/\" + guid\n\tresp, err := http.Get(url)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tdefer resp.Body.Close()\n\tsessionData, err := ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\treturn \"\", 
err\n\t}\n\treturn string(sessionData), err\n}", "func (context Context) CreateTask(task Task) (result Task, err error) {\n\n\t// Validate that the job exists and is running.\n\tvar job Job\n\tjob, err = context.GetJobByID(task.Job)\n\tif err != nil && err != ErrNotFound {\n\t\terr = errors.Wrap(err, \"error while trying to access the referenced job\")\n\t\treturn\n\t} else if err == ErrNotFound || job.Status != JobRunning {\n\t\terr = errors.Wrapf(ErrBadInput,\n\t\t\t\"the referenced objective \\\"%s\\\" does not exist or is running\", task.Job)\n\t}\n\n\t// Validate that the models exist and are active.\n\tvar found bool\n\tfor i := range job.Models {\n\t\tif task.Model == job.Models[i] {\n\t\t\tfound = true\n\t\t\tbreak\n\t\t}\n\t}\n\tif found == false {\n\t\terr = errors.Wrapf(ErrBadInput,\n\t\t\t\"the referenced model \\\"%s\\\" does not appear in the models list of the parent job \\\"%s\\\"\",\n\t\t\ttask.Model, job.ID)\n\t}\n\n\t// Give default values to some fields. Copy some from the job.\n\ttask.ObjectID = bson.NewObjectId()\n\ttask.User = job.User\n\ttask.Dataset = job.Dataset\n\ttask.Objective = job.Objective\n\ttask.AltObjectives = job.AltObjectives\n\ttask.CreationTime = time.Now()\n\ttask.Status = TaskScheduled\n\ttask.Stage = TaskStageBegin\n\ttask.StageTimes = TaskStageIntervals{}\n\ttask.StageDurations = TaskStageDurations{}\n\ttask.RunningDuration = 0\n\ttask.Quality = 0.0\n\ttask.AltQualities = make([]float64, len(task.AltObjectives))\n\n\t// Get next ID.\n\tc := context.Session.DB(context.DBName).C(\"tasks\")\n\tquery := bson.M{\"job\": bson.M{\"$eq\": task.Job}}\n\tvar resultSize int\n\tresultSize, err = c.Find(query).Count()\n\tif err != nil {\n\t\terr = errors.Wrap(err, \"mongo find failed\")\n\t\treturn\n\t}\n\ttask.ID = fmt.Sprintf(\"%s/%010d\", task.Job.Hex(), resultSize+1)\n\n\terr = c.Insert(task)\n\tif err != nil {\n\t\tlastError := err.(*mgo.LastError)\n\t\tif lastError.Code == 11000 {\n\t\t\terr = 
ErrIdentifierTaken\n\t\t\treturn\n\t\t}\n\t\terr = errors.Wrap(err, \"mongo insert failed\")\n\t\treturn\n\t}\n\n\treturn task, nil\n\n}", "func (worker *Worker) Task(taskID string) Task {\n\tworker.Lock()\n\tdefer worker.Unlock()\n\n\tfor _, task := range worker.tasks {\n\t\tif task.ID() == taskID {\n\t\t\treturn task\n\t\t}\n\t}\n\treturn nil\n}", "func (this *TodoList) GetTask(pID string) (rFound *Task) {\n\tif this != nil {\n\t\tfor _, zTask := range this.Tasks {\n\t\t\tif zTask.Id == pID {\n\t\t\t\treturn zTask\n\t\t\t}\n\t\t}\n\t}\n\treturn\n}", "func (s *Service) GetTask(filter filters.Args) ([]swarm.Task, error) {\n\ttasks, err := s.DockerClient.TaskList(context.Background(), types.TaskListOptions{Filters: filter})\n\n\tif err != nil {\n\t\treturn []swarm.Task{}, err\n\t}\n\n\treturn tasks, nil\n}", "func (tp *ThreadPool) getTask() Task {\n\tvar returnIdleTask = true\n\n\t// Check if tasks should be stopped\n\n\ttp.workerMapLock.Lock()\n\tif tp.workerKill > 0 {\n\t\ttp.workerKill--\n\t\ttp.workerMapLock.Unlock()\n\t\treturn nil\n\n\t} else if tp.workerKill == -1 {\n\n\t\t// Check for special worker kill value which is used when workers should\n\t\t// be killed when no more tasks are available.\n\n\t\treturnIdleTask = false\n\t}\n\ttp.workerMapLock.Unlock()\n\n\t// Check if there is a task available\n\n\ttp.queueLock.Lock()\n\ttask := tp.queue.Pop()\n\ttp.queueLock.Unlock()\n\n\tif task != nil {\n\t\treturn task\n\t}\n\n\ttp.RegulationLock.Lock()\n\n\t// Reset too many flag\n\n\tif tp.tooManyTriggered && tp.TooManyThreshold > tp.queue.Size() {\n\t\ttp.tooManyTriggered = false\n\t}\n\n\t// Check too few\n\n\tif !tp.tooFewTriggered && tp.TooFewThreshold >= tp.queue.Size() {\n\t\ttp.tooFewTriggered = true\n\t\ttp.TooFewCallback()\n\t}\n\n\ttp.RegulationLock.Unlock()\n\n\tif returnIdleTask {\n\n\t\t// No new task available return idle task\n\n\t\treturn &idleTask{tp}\n\t}\n\n\treturn nil\n}", "func getTask(w http.ResponseWriter, r *http.Request){\n\tvars := 
mux.Vars(r)\n\ttaskID, err :=strconv.Atoi(vars[\"id\"])\n\tif err != nil {\n\t\tfmt.Fprintf(w, \"Invalid ID\")\n\t\treturn\n\t}\n\n\t//Se busca entre las tasks el ID solicitado y luego se muestra en forma de JSON\n\tfor _, task := range tasks {\n\t\tif task.ID == taskID {\n\t\t\tw.Header().Set(\"Content-Type\", \"application/json\")\n\t\t\tjson.NewEncoder(w).Encode(task)\n\t\t}\n\t}\n}", "func NewTask(action Work) Task {\n\treturn &task{\n\t\taction: action,\n\t\tdone: make(signal, 1),\n\t\tcancel: make(signal, 1),\n\t}\n}", "func (s *Scavenger) newTask(info *p.TaskListInfo) executor.Task {\n\treturn &executorTask{\n\t\ttaskListInfo: *info,\n\t\tscvg: s,\n\t}\n}", "func (s *InMemoryTasksStore) Get(id uuid.UUID) *task.Task {\n\ts.mu.RLock()\n\tdefer s.mu.RUnlock()\n\treturn s.tasks[id.String()]\n}", "func getTaskByID(taskID string) Task {\n\tresp, err := http.Get(URL + \"/tasks/\" + taskID)\n\tif err != nil {\n\t\tfmt.Println(err)\n\t}\n\tdefer resp.Body.Close()\n\n\tvar tasksArray []Task\n\te := json.NewDecoder(resp.Body).Decode(&tasksArray)\n\tif e != nil {\n\t\tfmt.Println(e)\n\t}\n\n\tif len(tasksArray) == 0 {\n\t\tfmt.Println(`\n\t\tTask not found!\n\t\t`)\n\t\tos.Exit(0)\n\t}\n\n\treturn tasksArray[0]\n\n}", "func (ctl *taskController) Task() proto.Message {\n\treturn ctl.task\n}", "func GetTaskController(w http.ResponseWriter, r *http.Request) {\n\t_task := new(Task)\n\tvars := mux.Vars(r)\n\n\ttaskID, err := strconv.Atoi(vars[\"id\"])\n\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusBadRequest)\n\t\treturn\n\t}\n\n\ttask, err := _task.GetTask(taskID)\n\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusBadRequest)\n\t\treturn\n\t}\n\n\tresponse, _ := json.Marshal(task)\n\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\tw.WriteHeader(http.StatusOK)\n\tw.Write(response)\n}", "func (dtw dispatchTaskWrapper) Task() queues.Task {\n return dtw.t\n}", "func (m *Master) RequestTask(args *RequestTaskArgs, reply *RequestTaskReply) 
error {\n\n\tm.mutex.Lock()\n\tdefer m.mutex.Unlock()\n\n\tswitch m.state {\n\tcase Initializing:\n\t\treply.WorkerNextState = Idle\n\tcase MapPhase:\n\t\tfor i, task := range m.mapTasks {\n\t\t\tif task.State == UnScheduled {\n\t\t\t\t//schedule unassigned task\n\t\t\t\ttask.State = InProgress\n\t\t\t\treply.Task = task\n\t\t\t\treply.WorkerNextState = WorkAssigned\n\n\t\t\t\tm.mapTasks[i].State = InProgress\n\t\t\t\tm.mapTasks[i].TimeStamp = time.Now()\n\t\t\t\treturn nil\n\t\t\t} else if task.State == InProgress && time.Now().Sub(task.TimeStamp) > 10*time.Second {\n\t\t\t\t//reassign tasks due to timeout\n\t\t\t\treply.Task = task\n\t\t\t\treply.WorkerNextState = WorkAssigned\n\t\t\t\t//update TimeStamp\n\t\t\t\tm.mapTasks[i].TimeStamp = time.Now()\n\n\t\t\t\treturn nil\n\t\t\t} else if task.State == Done {\n\t\t\t\t//ignore the task\n\t\t\t\t//TODO: array for task is not efficient, maybe change to map?\n\t\t\t}\n\t\t}\n\t\t//no more mapWork, wait for other tasks\n\t\treply.WorkerNextState = Idle\n\n\tcase ReducePhase:\n\t\tfor i, task := range m.reduceTasks {\n\t\t\tif task.State == UnScheduled {\n\t\t\t\t//schedule unassigned task\n\t\t\t\ttask.State = InProgress\n\t\t\t\treply.Task = task\n\t\t\t\treply.WorkerNextState = WorkAssigned\n\n\t\t\t\tm.reduceTasks[i].State = InProgress\n\t\t\t\tm.reduceTasks[i].TimeStamp = time.Now()\n\n\t\t\t\treturn nil\n\t\t\t} else if task.State == InProgress && time.Now().Sub(task.TimeStamp) > 10*time.Second {\n\t\t\t\t//reassign tasks due to timeout\n\t\t\t\treply.Task = task\n\t\t\t\treply.WorkerNextState = WorkAssigned\n\t\t\t\t//update TimeStamp\n\t\t\t\tm.reduceTasks[i].TimeStamp = time.Now()\n\t\t\t\treturn nil\n\t\t\t} else if task.State == Done {\n\t\t\t\t//ignore the task\n\t\t\t\t//TODO: array for task is not efficient, maybe change to map?\n\t\t\t}\n\t\t}\n\t\t//no more reduceWork, wait for other tasks\n\t\treply.WorkerNextState = Idle\n\tdefault:\n\t\t//master gonna be teared down, shut down worker\n\t\t//or 
something weng wrong\n\t\treply.WorkerNextState = NoMoreWork\n\t}\n\n\treturn nil\n}", "func (t Task) Task() string {\n\treturn t.task\n}", "func getTaskByName(app string, c *gin.Context) (*_5xxDBTask, error) {\n\tdb, err := utils.GetDBFromContext(c)\n\tif err != nil {\n\t\treturn nil, errors.New(\"Unable to access database\")\n\t}\n\n\ttask := _5xxDBTask{}\n\n\terr = db.Get(&task, \"SELECT * FROM _5xx_tasks WHERE app=$1\", app)\n\tif err != nil {\n\t\tif err == sql.ErrNoRows {\n\t\t\treturn nil, sql.ErrNoRows\n\t\t}\n\t\treturn nil, errors.New(\"Unable to access database\")\n\t}\n\treturn &task, nil\n}", "func (t *Task) ToHTTPTask() *http.Task {\n\tauthType := v2alpha2.BearerAuthType\n\tauthtype := &authType\n\tsecret := &t.With.Secret\n\n\tref := DefaultRef\n\tif t.With.Ref != nil {\n\t\tref = *t.With.Ref\n\t}\n\n\t// compose body of POST request\n\tbody := \"\"\n\tbody += \"{\"\n\tbody += \"\\\"ref\\\": \\\"\" + ref + \"\\\",\"\n\tbody += \"\\\"inputs\\\": {\"\n\tnumWFInputs := len(t.With.WFInputs)\n\tfor i := 0; i < numWFInputs; i++ {\n\t\tbody += \"\\\"\" + t.With.WFInputs[i].Name + \"\\\": \\\"\" + t.With.WFInputs[i].Value + \"\\\"\"\n\t\tif i+1 < numWFInputs {\n\t\t\tbody += \",\"\n\t\t}\n\t}\n\tbody += \"}\"\n\tbody += \"}\"\n\n\ttSpec := &http.Task{\n\t\tTaskMeta: core.TaskMeta{\n\t\t\tTask: core.StringPointer(TaskName),\n\t\t},\n\t\tWith: http.Inputs{\n\t\t\tURL: \"https://api.github.com/repos/\" + t.With.Repository + \"/actions/workflows/\" + t.With.Workflow + \"/dispatches\",\n\t\t\tAuthType: authtype,\n\t\t\tSecret: secret,\n\t\t\tHeaders: []v2alpha2.NamedValue{{\n\t\t\t\tName: \"Accept\",\n\t\t\t\tValue: \"application/vnd.github.v3+json\",\n\t\t\t}},\n\t\t\tBody: &body,\n\t\t\tIgnoreFailure: t.With.IgnoreFailure,\n\t\t},\n\t}\n\n\tif t.With.IgnoreFailure != nil {\n\t\ttSpec.With.IgnoreFailure = t.With.IgnoreFailure\n\t}\n\n\tlog.Info(\"Dispatching GitHub workflow: \", tSpec.With.URL)\n\tlog.Info(*tSpec.With.Body)\n\n\treturn tSpec\n}", "func (server 
*Server) GetRegisteredTask(name string) interface{} {\n\treturn server.registeredTasks[name]\n}", "func NewGetTaskOK() *GetTaskOK {\n\treturn &GetTaskOK{}\n}", "func (dtm *DfgetTaskManager) getDfgetTask(clientID, taskID string) (*types.DfGetTask, error) {\n\tkey, err := generateKey(clientID, taskID)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tv, err := dtm.dfgetTaskStore.Get(key)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif dfgetTask, ok := v.(*types.DfGetTask); ok {\n\t\treturn dfgetTask, nil\n\t}\n\treturn nil, errors.Wrapf(errorType.ErrConvertFailed, \"clientID: %s, taskID: %s: %v\", clientID, taskID, v)\n}", "func (c *C2Default) GetTasking() interface{} {\n\turl := fmt.Sprintf(\"%sapi/v%s/agent_message\", c.BaseURL, ApiVersion)\n\t//request := structs.Msg{}\n\trequest := structs.TaskRequestMessage{}\n\trequest.Action = \"get_tasking\"\n\trequest.TaskingSize = -1\n\n\traw, err := json.Marshal(request)\n\n\tif err != nil {\n\t\t//log.Printf(\"Error unmarshalling: %s\", err.Error())\n\t}\n\n\trawTask := c.htmlGetData(url, raw)\n\n\ttask := structs.TaskRequestMessageResponse{}\n\terr = json.Unmarshal(rawTask, &task)\n\n\tif err != nil {\n\t\t//log.Printf(\"Error unmarshalling task data: %s\", err.Error())\n\t}\n\n\treturn task\n}", "func (r *DeviceAppManagementTaskRequest) Get(ctx context.Context) (resObj *DeviceAppManagementTask, err error) {\n\tvar query string\n\tif r.query != nil {\n\t\tquery = \"?\" + r.query.Encode()\n\t}\n\terr = r.JSONRequest(ctx, \"GET\", query, nil, &resObj)\n\treturn\n}", "func (s *K8sSvc) GetServiceTask(ctx context.Context, cluster string, service string, containerInstanceID string) (serviceTaskID string, err error) {\n\treturn \"\", common.ErrNotSupported\n}", "func (t *TaskList) Get(name string) Task {\n\tt.RLock()\n\tdefer t.RUnlock()\n\treturn t.taskSet[name]\n}", "func (c *CreateKubernetesTaskRepo) GetLastTask(eid string, providerName string) (*model.CreateKubernetesTask, error) {\n\tvar old 
model.CreateKubernetesTask\n\tif err := c.DB.Where(\"eid = ? and provider_name=?\", eid, providerName).Order(\"created_at desc\").Limit(1).Take(&old).Error; err != nil {\n\t\treturn nil, err\n\t}\n\treturn &old, nil\n}", "func (r *Redis) GetTask(taskID string) (*Task, error) {\n\tconn := r.pool.Get()\n\tdefer conn.Close()\n\n\ttaskFields, err := redis.Values(conn.Do(\"HGETALL\", \"sync_tasks#\"+taskID))\n\tnoticeError(err)\n\tif err != nil {\n\t\tif err == redis.ErrNil {\n\t\t\treturn nil, ErrTaskNotFound\n\t\t}\n\n\t\treturn nil, err\n\t}\n\n\tif len(taskFields) == 0 {\n\t\treturn nil, ErrTaskNotFound\n\t}\n\n\ttask := &Task{}\n\terr = redis.ScanStruct(taskFields, task)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"Error deserializing task entity [%s]: %v\", taskID, err)\n\t}\n\n\treturn task, nil\n}", "func (client *Client) GetTranscodeTask(request *GetTranscodeTaskRequest) (_result *GetTranscodeTaskResponse, _err error) {\n\truntime := &util.RuntimeOptions{}\n\t_result = &GetTranscodeTaskResponse{}\n\t_body, _err := client.GetTranscodeTaskWithOptions(request, runtime)\n\tif _err != nil {\n\t\treturn _result, _err\n\t}\n\t_result = _body\n\treturn _result, _err\n}", "func (ctx *Context) getOrAddTask(a *Application, pod *v1.Pod) *Task {\n\t// using pod UID as taskId\n\t//如果已经存在app中,则直接返回它\n\tif task, err := a.GetTask(string(pod.UID)); err == nil {\n\t\treturn task\n\t}\n\t//不存在,则创建一个新的task\n\tnewTask := createTaskFromPod(a, ctx.kubeClient, ctx.schedulerApi, pod)\n\ta.AddTask(newTask)\n\treturn newTask\n}", "func (r *TaskRepository) GetTask(id int64) (*api.Task, error) {\n\tvar task api.Task\n\tr.DB.First(&task, id)\n\treturn &task, nil\n}", "func (o *TaskRequest) GetTask() TaskTask {\n\tif o == nil || o.Task == nil {\n\t\tvar ret TaskTask\n\t\treturn ret\n\t}\n\treturn *o.Task\n}" ]
[ "0.7213508", "0.71484256", "0.7080093", "0.7022939", "0.70011353", "0.68791914", "0.6873889", "0.68579173", "0.68332416", "0.6799068", "0.67832404", "0.6742814", "0.6718434", "0.670646", "0.6698478", "0.6695807", "0.6687799", "0.665799", "0.6657192", "0.6641929", "0.6606461", "0.65781647", "0.6544192", "0.65326095", "0.65316254", "0.65070736", "0.6493138", "0.6484437", "0.6480699", "0.647932", "0.6457312", "0.64545876", "0.6448082", "0.6434571", "0.6399549", "0.6378033", "0.6369145", "0.6365156", "0.63484883", "0.63438624", "0.63365966", "0.6335778", "0.63135964", "0.6313303", "0.6305743", "0.62376636", "0.622401", "0.61992776", "0.61863536", "0.6182286", "0.6175694", "0.615826", "0.6156162", "0.6154125", "0.6143831", "0.6131809", "0.6122542", "0.6092725", "0.6085765", "0.6081627", "0.6074413", "0.60706574", "0.60629815", "0.6054584", "0.6053985", "0.60478526", "0.60306215", "0.6026293", "0.60196686", "0.60177004", "0.60176617", "0.60136324", "0.5994884", "0.5971288", "0.5968961", "0.5950695", "0.59204066", "0.5918559", "0.59020805", "0.5885462", "0.58825994", "0.58822006", "0.58777946", "0.5860541", "0.5854294", "0.5850355", "0.58493155", "0.58489895", "0.5848823", "0.5844829", "0.58448094", "0.58403033", "0.5823561", "0.582266", "0.5821775", "0.5814983", "0.5803273", "0.5797133", "0.5779612", "0.57767105" ]
0.7655484
0
New returns a new meter with the specified parameters
New возвращает новый счетчик с указанными параметрами
func New(bpm, beatsPerBar float64, beatValue notes.Duration) *Meter { return &Meter{ BeatsPerMinute: bpm, BeatsPerBar: beatsPerBar, BeatValue: beatValue, } }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func NewMeter(name string, options ...Option) Meter {\n\treturn newMeter(name, options...)\n}", "func NewMeter(client Client, name string, tagOptions ...TagOption) (*Meter, error) {\n\tif err := validateMetricName(name); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &Meter{\n\t\tclient: client,\n\t\tname: name,\n\t\ttags: GetTags(tagOptions...),\n\t}, nil\n}", "func NewMeter(name string, snapshotInterval time.Duration) *Meter {\n\tm := Meter{}\n\tm.name = name\n\tm.printInterval = snapshotInterval\n\tm.Reset()\n\treturn &m\n}", "func NewMeter(name string) metics.Meter {\n\tif !Enabled {\n\t\treturn new(metics.NilMeter)\n\t}\n\treturn metics.GetOrRegisterMeter(name, metics.DefaultRegistry)\n}", "func New(p Params) (*Worker, error) {\n\tif p.SampleDir == \"\" {\n\t\treturn nil, fmt.Errorf(\"no sample directory set\")\n\t}\n\tif p.MeterAddr == \"\" {\n\t\treturn nil, fmt.Errorf(\"no meter address set\")\n\t}\n\tif p.Now == nil {\n\t\tp.Now = time.Now\n\t}\n\tif p.Interval == 0 {\n\t\tp.Interval = DefaultInterval\n\t}\n\tctx, cancel := context.WithCancel(context.Background())\n\tw := &Worker{\n\t\tp: p,\n\t\tctx: ctx,\n\t\tclose: cancel,\n\t}\n\tw.wg.Add(1)\n\tgo func() {\n\t\tif err := w.run(); err != nil {\n\t\t\tlog.Printf(\"sample worker for meter at %q failed: %v\", w.p.MeterAddr, err)\n\t\t}\n\t}()\n\treturn w, nil\n}", "func New(name string, rate float64, tags ...string) Metric {\n\treturn Metric{name, rate, tags}\n}", "func NewMeter(options ...meterOption) *ProgressMeter {\n\tm := &ProgressMeter{\n\t\tlogger: &progressLogger{},\n\t\tstartTime: time.Now(),\n\t\tfileIndex: make(map[string]int64),\n\t\tfileIndexMutex: &sync.Mutex{},\n\t\tfinished: make(chan interface{}),\n\t}\n\n\tfor _, opt := range options {\n\t\topt(m)\n\t}\n\n\treturn m\n}", "func New() MME {\n\t// TODO: Implement this!\n\toperationCosts = make(map[rpcs.Operation]int)\n\toperationCosts[rpcs.SMS] = -1\n\toperationCosts[rpcs.Call] = -5\n\toperationCosts[rpcs.Load] = 10\n\tm := 
new(mme)\n\tm.state = make(map[uint64]rpcs.MMEState)\n\tm.stateMutex = new(sync.Mutex)\n\treturn m\n}", "func CreateMeter(numChannels uint32, names []string) (Meter, error) {\n\tnumNames := len(names)\n\tnumNames32 := uint32(numNames)\n\n\t/*\n\t * Check if number of channel names matches number of channels.\n\t */\n\tif numChannels != numNames32 {\n\t\treturn nil, fmt.Errorf(\"Failed to create channel meter. Requested channel meter for %d channels, but provided %d channel names.\", numChannels, numNames)\n\t} else {\n\t\tchannelMeters := make([]*channelMeterStruct, numChannels)\n\n\t\t/*\n\t\t * Create the channel meters.\n\t\t */\n\t\tfor i := range channelMeters {\n\t\t\tname := names[i]\n\n\t\t\t/*\n\t\t\t * Create a new channel meter.\n\t\t\t */\n\t\t\tchannelMeter := &channelMeterStruct{\n\t\t\t\tchannelName: name,\n\t\t\t\tenabled: false,\n\t\t\t\tcurrentValue: 0.0,\n\t\t\t\tpeakValue: 0.0,\n\t\t\t\tsampleCounter: 0,\n\t\t\t}\n\n\t\t\tchannelMeters[i] = channelMeter\n\t\t}\n\n\t\t/*\n\t\t * Create a new level meter.\n\t\t */\n\t\tmeter := meterStruct{\n\t\t\tchannelMeters: channelMeters,\n\t\t\tenabled: false,\n\t\t}\n\n\t\treturn &meter, nil\n\t}\n\n}", "func New(opts ...Option) *Metric {\n\tvar options Options\n\tfor _, opt := range opts {\n\t\topt(&options)\n\t}\n\tm := &Metric{\n\t\tOptions: options,\n\t\thistograms: make(map[string]metrics.Histogram),\n\t\tkeyLabels: make(map[string]map[string]string),\n\t}\n\tgo m.watch()\n\treturn m\n}", "func New(h, m int) Clock {\n\tclock := &Clock{hour: h, minute: m}\n\tclock.normalize()\n\treturn *clock\n}", "func New(h, m int) Clock {\n\tm = (H*h + m) % D\n\tif m < 0 {\n\t\tm += D\n\t}\n\treturn Clock(m)\n}", "func (c *Configuration) NewMeter(name string, options ...Option) (metric.Meter, error) {\n\tif !c.Enabled {\n\t\treturn metric.NoopProvider{}.Meter(name), nil\n\t}\n\n\tif c.AgentEndpoint == \"\" {\n\t\treturn metric.Meter{}, fmt.Errorf(\"missing agent address, please set environment variable %s\", 
envAgentEndpoint)\n\t}\n\n\topts := applyOptions(options...)\n\texporter := sotlp.SingletonExporter()\n\tif exporter == nil {\n\t\texp, err := otlp.NewExporter(otlp.WithInsecure(),\n\t\t\totlp.WithAddress(c.AgentEndpoint),\n\t\t\totlp.WithReconnectionPeriod(time.Minute),\n\t\t\totlp.WithGRPCDialOption(grpc.WithTimeout(5*time.Second)))\n\t\tif err != nil {\n\t\t\treturn metric.Meter{}, fmt.Errorf(\"failed to create the collector exporter: %w\", err)\n\t\t}\n\t\texporter = exp\n\t\tsotlp.SetExporter(exporter)\n\t\topts.Logger.With(zap.String(\"agentEndpoint\", c.AgentEndpoint)).Info(\"success to otlp agent\")\n\t}\n\t// exporter.Stop()\n\n\tif meterPusher == nil {\n\t\tmeterPusher = push.New(\n\t\t\tbasic.New(\n\t\t\t\tsimple.NewWithExactDistribution(),\n\t\t\t\texporter,\n\t\t\t),\n\t\t\texporter,\n\t\t\tpush.WithPeriod(30*time.Second),\n\t\t\t//push.WithTimeout(10*time.Second),\n\t\t)\n\t\tmeterProvider = meterPusher.Provider()\n\t\tmeterPusher.Start()\n\t\topts.Logger.With(zap.String(\"agentEndpoint\", c.AgentEndpoint)).Info(\"success to create metric pusher and start to push metric\")\n\t}\n\n\treturn meterProvider.Meter(name), nil\n}", "func New(name errors.Op) *Metric {\n\treturn &Metric{\n\t\tName: name,\n\t}\n}", "func New(hour, minute int) Clock {\n\t// create a clock with 0 minutes and then add what was passed as arguments\n\treturn Clock{0}.Add((hour * 60) + minute)\n}", "func New(h, m int) Clock {\n\tminutes := h*60 + m\n\tminutes %= 1440\n\tif minutes < 0 {\n\t\tminutes += 1440\n\t}\n\n\treturn Clock{minutes}\n}", "func (m *Manager) Meter(delay time.Duration) *Meter {\n\treturn &Meter{\n\t\tm: m,\n\t\tdelay: delay,\n\t\tnext: time.Now(),\n\t}\n}", "func New() MME {\n\tvar m MME = new(mme)\n\treturn m\n}", "func New(title string, x, y, width, height int) Device { return newDevice(title, x, y, width, height) }", "func NewMeasurement(name string) Measurement {\n\tattrs := make(map[string]interface{})\n\treturn Measurement{\n\t\tName: name,\n\t\tAttributes: 
attrs,\n\t}\n}", "func NewMeasurement(name string) *Measurement {\n\treturn &Measurement{\n\t\tname: name,\n\t\tfields: make(map[string]*Field),\n\t\tseries: make(map[uint32]*Series),\n\t}\n}", "func New(\n\tname string,\n\ttags map[string]string,\n\tmeta map[string]string,\n\tfields map[string]interface{},\n\ttm time.Time,\n) (CCMetric, error) {\n\tm := &ccMetric{\n\t\tname: name,\n\t\ttags: make(map[string]string, len(tags)),\n\t\tmeta: make(map[string]string, len(meta)),\n\t\tfields: make(map[string]interface{}, len(fields)),\n\t\ttm: tm,\n\t}\n\n\t// deep copy tags, meta data tags and fields\n\tfor k, v := range tags {\n\t\tm.tags[k] = v\n\t}\n\tfor k, v := range meta {\n\t\tm.meta[k] = v\n\t}\n\tfor k, v := range fields {\n\t\tv := convertField(v)\n\t\tif v == nil {\n\t\t\tcontinue\n\t\t}\n\t\tm.fields[k] = v\n\t}\n\n\treturn m, nil\n}", "func New(h int, m int) Clock {\n\t// Normalize time.\n\tconst minutesInADay = 24 * 60\n\tminutes := (h*60 + m) % minutesInADay\n\tif minutes < 0 {\n\t\tminutes += minutesInADay\n\t}\n\treturn Clock{\n\t\tminutes: minutes,\n\t}\n}", "func New(hours, minutes int) Clock {\n\treturn Clock(((hours*MinutesInAnHour+minutes)%MinutesInADay + MinutesInADay) % MinutesInADay)\n}", "func New(monster int) *Monster {\n\treturn &Monster{\n\t\tid: monster,\n\t\tInfo: monsterData[monster],\n\t\tDisplaced: Empty{},\n\t}\n}", "func NewPingMeter() (pm *PingMeter) {\n\n\treturn\n}", "func New(n int) MMR {\n\tpeaks, heights := peaksAndHeights(n)\n\treturn MMR{\n\t\tpeaks: peaks,\n\t\theights: heights,\n\t}\n}", "func New(h int, m int) Clock {\n\tm, addHours := normalizeMinutes(m)\n\th = normalizeHours(h + addHours)\n\treturn Clock{h, m}\n}", "func Meter(props *MeterProps, children ...Element) *MeterElem {\n\trProps := &_MeterProps{\n\t\tBasicHTMLElement: newBasicHTMLElement(),\n\t}\n\n\tif props != nil {\n\t\tprops.assign(rProps)\n\t}\n\n\treturn &MeterElem{\n\t\tElement: createElement(\"meter\", rProps, children...),\n\t}\n}", "func 
NewMetric(rtype string) Metric {\n\treturn Metric{\n\t\tType: rtype,\n\t\tCurrent: map[string]int{},\n\t\tOwners: map[string]int{},\n\t}\n}", "func New(hours int, minutes int) Clock {\n\tminutes = hours*minutesPerHour + minutes\n\tminutes %= minutesPerDay\n\tif minutes < 0 {\n\t\tminutes += minutesPerDay\n\t}\n\n\treturn Clock{minutes / minutesPerHour, minutes % minutesPerHour}\n}", "func NewMeterProvider() *MeterProvider {\n\treturn &MeterProvider{}\n}", "func New(h, m int) Clock {\n\tm = h*60 + m\n\tm %= 60 * 24\n\tif m < 0 {\n\t\treturn Clock{m + 60*24}\n\t}\n\treturn Clock{m}\n}", "func NewMeasurement(name string) Measurement {\n\treturn Measurement{\n\t\tname: name,\n\t\ttagSet: map[string]string{},\n\t\tfieldSet: map[string]interface{}{},\n\t\ttimestamp: time.Now(),\n\t}\n}", "func NewMetrics(component string, sampleRate float64, client metrics.Client) BaseMetrics {\n\treturn BaseMetrics{\n\t\tcomponent: component,\n\t\trate: sampleRate,\n\t\tmetrics: client,\n\t\tmetMap: map[string]string{\n\t\t\t\"latency\": \"comp.\" + component + \".requests.latency\",\n\t\t\t\"request\": \"comp.\" + component + \".requests.%d\",\n\t\t\t\"mLatency\": \"comp.\" + component + \".requests.%s.latency\",\n\t\t\t\"mRequest\": \"comp.\" + component + \".requests.%s.%d\",\n\t\t},\n\t}\n}", "func New(hour, minute int) Clock {\n\th, m := normalize(hour, minute)\n\treturn Clock{h, m}\n}", "func (bar *Progress) New(start, total int) {\n\tbar.cur = start\n\tbar.total = total\n\tif bar.graph == \"\" {\n\t\tbar.graph = \"█\"\n\t}\n\tbar.percent = bar.getPercent()\n}", "func New(m int64, c string) *Money {\n\treturn &Money{m, c}\n}", "func New() (*T) {\n\n\tme := T{\n\t\tcount: 0,\n\t\tdatum: \"\",\n\t}\n\n\treturn &me\n}", "func New(hour, minute int) Clock {\n\tminute += hour * 60\n\tminute %= 24 * 60\n\tif minute < 0 {\n\t\tminute += 24 * 60\n\t}\n\treturn Clock{minute}\n}", "func New(hour, minute int) Clock {\n\thour, minute = normalize(hour, minute)\n\treturn Clock{hour, minute}\n}", 
"func NewMeasurement(name string, idx *DatabaseIndex) *Measurement {\n\treturn &Measurement{\n\t\tName: name,\n\t\tfieldNames: make(map[string]struct{}),\n\t\tindex: idx,\n\n\t\tseries: make(map[string]*Series),\n\t\tseriesByID: make(map[uint64]*Series),\n\t\tseriesByTagKeyValue: make(map[string]map[string]seriesIDs),\n\t\tseriesIDs: make(seriesIDs, 0),\n\t}\n}", "func New(ver, mcls, mtype uint8, params ...*params.Param) *Generic {\n\tg := &Generic{\n\t\tHeader: &Header{\n\t\t\tVersion: ver,\n\t\t\tReserved: 0,\n\t\t\tClass: mcls,\n\t\t\tType: mtype,\n\t\t},\n\t\tParams: params,\n\t}\n\tg.SetLength()\n\n\treturn g\n}", "func NewProgressMeter(estFiles int, estBytes int64, dryRun bool) *ProgressMeter {\n\tlogger, err := newProgressLogger()\n\tif err != nil {\n\t\tfmt.Fprintf(os.Stderr, \"Error creating progress logger: %s\\n\", err)\n\t}\n\n\treturn &ProgressMeter{\n\t\tlogger: logger,\n\t\tstartTime: time.Now(),\n\t\tfileIndex: make(map[string]int64),\n\t\tfinished: make(chan interface{}),\n\t\testimatedFiles: estFiles,\n\t\testimatedBytes: estBytes,\n\t\tdryRun: dryRun,\n\t}\n}", "func New(hour, minute int) Clock {\n\ttotalMinutes := (hour*60 + minute) % 1440\n\tif totalMinutes < 0 {\n\t\ttotalMinutes = totalMinutes + 1440\n\t}\n\n\tm := totalMinutes % 60\n\th := totalMinutes / 60 % 24\n\n\treturn Clock{hour: h, minute: m}\n}", "func New() *Metrics {\n\tm := &Metrics{\n\t\tBuildInfo: prometheus.NewGaugeVec(prometheus.GaugeOpts{\n\t\t\tNamespace: Namespace,\n\t\t\tSubsystem: Subsystem,\n\t\t\tName: \"build_info\",\n\t\t\tHelp: \"Build information\",\n\t\t}, []string{\"version\"}),\n\t}\n\n\t_ = prometheus.Register(m.BuildInfo)\n\t// TODO: implement metrics\n\treturn m\n}", "func New(config Config, errCh chan<- error) *Librato {\n\tu, _ := url.Parse(apiEndpoint)\n\tu.User = url.UserPassword(config.Email, config.APIKey)\n\tu.Path = \"/v1/metrics\"\n\n\t// determine queue size\n\tqueueSize := 600\n\tif config.QueueSize > 0 {\n\t\tqueueSize = 
config.QueueSize\n\t}\n\n\t// start the publisher\n\tp := &publisher{\n\t\tmetricsURL: u,\n\t\tqueueSize: queueSize,\n\t\tmeasures: make(chan interface{}, queueSize),\n\t\tshutdown: make(chan chan struct{}),\n\t\terrors: errCh,\n\t}\n\tgo p.run(time.Second * 1)\n\n\treturn &Librato{publisher: p}\n}", "func NewMeasurement(database, name string) Measurement {\n\tm := Measurement{\n\t\tDatabase: database,\n\t\tName: name,\n\t\tTimestamp: time.Now(),\n\t\tValues: make(map[string]string, 0),\n\t\tTags: make(map[string]string, 0),\n\t}\n\treturn m\n}", "func (constructor *Constructor) New(resource string, specs *specs.ParameterMap) (codec.Manager, error) {\n\tif specs == nil {\n\t\treturn nil, ErrUndefinedSpecs{}\n\t}\n\n\treturn &Manager{\n\t\tresource: resource,\n\t\tspecs: specs.Property,\n\t}, nil\n}", "func New(hour, minute int) Clock {\n\tvar c Clock\n\treturn c.Add(hour*60 + minute)\n}", "func NewMetric(name string, fields []MetricField, tags []MetricTag) (Metric, error) {\n\tif err := ValidateMetricName(name, \"metric\"); err != nil {\n\t\treturn Metric{}, err\n\t}\n\n\tif len(fields) == 0 {\n\t\treturn Metric{}, errors.New(\"one or more metric fields are required\")\n\t}\n\n\tfor _, field := range fields {\n\t\tif err := ValidateMetricName(field.Name, \"field\"); err != nil {\n\t\t\treturn Metric{}, err\n\t\t}\n\t}\n\n\tif len(tags) > 0 {\n\t\tfor _, tag := range tags {\n\t\t\tif err := ValidateMetricName(tag.Name, \"tag\"); err != nil {\n\t\t\t\treturn Metric{}, err\n\t\t\t}\n\t\t}\n\t}\n\n\tmetric := Metric{\n\t\tVersionable: common.NewVersionable(),\n\t\tName: name,\n\t\tFields: fields,\n\t\tTimestamp: time.Now().UnixNano(),\n\t\tTags: tags,\n\t}\n\n\treturn metric, nil\n}", "func New() *M {\n\tc := &M{}\n\tc.Component()\n\tc.items = make([]*js.Object, 0)\n\treturn c\n}", "func New(hour, minute int) Clock {\n\treturn Clock(0).Add(hour*60 + minute)\n}", "func New() Battery {\n\treturn Battery{\n\t\tConf: configuration{\n\t\t\tPrintTemplate: \"{{ .Status }} {{ 
.Power }} {{ .Time }}\",\n\t\t},\n\t}\n}", "func New(hour, minute int) Clock {\n\tadjustedHour, adjustedMinute := convertHelper(hour, minute)\n\tc := Clock{adjustedHour, adjustedMinute}\n\treturn c\n}", "func (x *fastReflection_DenomUnit) New() protoreflect.Message {\n\treturn new(fastReflection_DenomUnit)\n}", "func New() handler.Handler {\n\treturn &ceilometerMetricHandler{\n\t\tceilo: ceilometer.New(),\n\t}\n}", "func NewMining(minter sdk.AccAddress, tally int64) Mining {\n\treturn Mining{\n\t\tMinter: minter,\n\t\tLastTime: 0,\n\t\tTally: tally,\n\t}\n}", "func New(issueser getIssueser, metricser metricser, queries map[string]string) *Monitoring {\n\tlastActiveIssues := make(map[string]map[string]model.Issue)\n\tfor queryName := range queries {\n\t\tlastActiveIssues[queryName] = make(map[string]model.Issue)\n\t}\n\n\treturn &Monitoring{\n\t\tissueser: issueser,\n\t\tmetricser: metricser,\n\t\tlastActiveIssues: lastActiveIssues,\n\t\tqueries: queries,\n\t}\n}", "func New() *SystemMetrics {\n\treturn &SystemMetrics{}\n}", "func (m *podMetrics) New() runtime.Object {\n\treturn &metrics.PodMetrics{}\n}", "func NewIskratelMsan() *IskratelMsan {\r\n var t = &IskratelMsan{}\r\n\r\n return t\r\n}", "func newMonitor(ui cli.Ui, client *api.Client, length int) *monitor {\n\tmon := &monitor{\n\t\tui: &cli.PrefixedUi{\n\t\t\tInfoPrefix: \"==> \",\n\t\t\tOutputPrefix: \" \",\n\t\t\tErrorPrefix: \"==> \",\n\t\t\tUi: ui,\n\t\t},\n\t\tclient: client,\n\t\tstate: newEvalState(),\n\t\tlength: length,\n\t}\n\treturn mon\n}", "func New() *Mediator {\n\tconfig := cfg.New()\n\taddress := fmt.Sprintf(\"%s:%s\", config.RPCHost, config.RPCPort)\n\tpool := pools.NewResourcePool(func() (pools.Resource, error) {\n\t\tconn, err := grpc.Dial(address, grpc.WithInsecure())\n\t\tclient := pb.NewDailyBonusClient(conn)\n\t\treturn &ResourceConn{\n\t\t\tconn,\n\t\t\tclient,\n\t\t}, err\n\t}, config.RPCConnectionPool.InitialCapacity, config.RPCConnectionPool.MaxCapacity, 
config.RPCConnectionPool.IdleTimeout)\n\treturn &Mediator{\n\t\tclientPool: pool,\n\t\tconfig: &config,\n\t\tpoolMutex: &sync.Mutex{},\n\t}\n}", "func New(label string, units UnitOfMeasurement, value string) *PerfData {\n\tif value != \"\" && !valueCheck.MatchString(value) {\n\t\tpanic(\"invalid value\")\n\t}\n\tr := &PerfData{}\n\tr.Label = label\n\tr.units = units\n\tif value == \"\" {\n\t\tr.value = \"U\"\n\t} else {\n\t\tr.value = value\n\t}\n\treturn r\n}", "func New() *Metrics {\n\treturn &Metrics{\n\t\tSectionCounts: make(map[string]int),\n\t}\n}", "func New(r *chi.Mux, log *logging.Logger, m servermetrics.Metrics) *API {\n\tapi := &API{\n\t\tmetrics: m,\n\t\tstartedAt: time.Now(),\n\t\tminuteDecValues: make(map[*dmsg.SessionCommon]uint64),\n\t\tminuteEncValues: make(map[*dmsg.SessionCommon]uint64),\n\t\tsecondDecValues: make(map[*dmsg.SessionCommon]uint64),\n\t\tsecondEncValues: make(map[*dmsg.SessionCommon]uint64),\n\t\trouter: r,\n\t}\n\tr.Use(httputil.SetLoggerMiddleware(log))\n\tr.Get(\"/health\", api.health)\n\treturn api\n}", "func NewMetric(id string, name string, uri string) *Metric {\n\tthis := Metric{}\n\tthis.Id = id\n\tthis.Name = name\n\tthis.Uri = uri\n\treturn &this\n}", "func newMetrics() *Metrics {\n\treturn newMetricsFrom(DefaultMetricsOpts)\n}", "func New(hours, minutes int) Time {\n\th := (hours + minutes/60) % 24\n\tm := minutes % 60\n\n\tfor m < 0 {\n\t\tm += 60\n\t\th--\n\t}\n\n\tfor h < 0 {\n\t\th += 24\n\t}\n\n\treturn Time{\n\t\tminutes: h*60 + m,\n\t}\n}", "func New(r, c int) M {\n\tvals := make([]Frac, r*c)\n\tfor i := range vals {\n\t\tvals[i] = NewScalarFrac(0)\n\t}\n\n\treturn M{r: r, c: c, values: vals}\n}", "func Meter(attrs []htmlgo.Attribute, children ...HTML) HTML {\n\treturn &htmlgo.Tree{Tag: \"meter\", Attributes: attrs, Children: children}\n}", "func newMetrics() *metrics {\n\treturn new(metrics)\n}", "func New() Info {\n\treturn &hardwareInfo{}\n}", "func New(tracker tracker.Tracker, opts Opts) Bar 
{\n\topts.ensureDefaults()\n\tunits := united.UnitsNone\n\tif tracker.ByteAmount() != nil {\n\t\tunits = united.UnitsBytes\n\t}\n\n\tb := &bar{\n\t\ttracker: tracker,\n\t\topts: opts,\n\t\ttheme: state.GetTheme(),\n\t\tunits: units,\n\t\tscale: 1.0,\n\n\t\tfinished: false,\n\t\tfinishChan: make(chan struct{}),\n\t}\n\ttracker.OnFinish(b.finish)\n\tgo b.writer()\n\treturn b\n}", "func New(fetcherConfig *config.MetainfoFetcherConfig) (fetcher *MetainfoFetcher, err error) {\n\tclientConfig := torrent.Config{}\n\t// Well, it seems this is the right way to convert speed -> rate.Limiter\n\t// https://github.com/anacrolix/torrent/blob/master/cmd/torrent/main.go\n\tif fetcherConfig.UploadRateLimiter != -1 {\n\t\tclientConfig.UploadRateLimiter = rate.NewLimiter(rate.Limit(fetcherConfig.UploadRateLimiter*1024), 256<<10)\n\t}\n\tif fetcherConfig.DownloadRateLimiter != -1 {\n\t\tclientConfig.DownloadRateLimiter = rate.NewLimiter(rate.Limit(fetcherConfig.DownloadRateLimiter*1024), 1<<20)\n\t}\n\n\tclient, err := torrent.NewClient(&clientConfig)\n\n\tfetcher = &MetainfoFetcher{\n\t\ttorrentClient: client,\n\t\tresults: make(chan Result, fetcherConfig.QueueSize),\n\t\tqueueSize: fetcherConfig.QueueSize,\n\t\ttimeout: fetcherConfig.Timeout,\n\t\tmaxDays: fetcherConfig.MaxDays,\n\t\tnewTorrentsOnly: fetcherConfig.FetchNewTorrentsOnly,\n\t\tbaseFailCooldown: fetcherConfig.BaseFailCooldown,\n\t\tmaxFailCooldown: fetcherConfig.MaxFailCooldown,\n\t\tdone: make(chan int, 1),\n\t\tfailedOperations: make(map[uint]time.Time),\n\t\tnumFails: make(map[uint]int),\n\t\twakeUp: time.NewTicker(time.Second * time.Duration(fetcherConfig.WakeUpInterval)),\n\t}\n\n\treturn\n}", "func New(h int, m int) Clock {\n\tc := (h*60 + m) % minutesInDay\n\n\tfor c < 0 {\n\t\tc += minutesInDay\n\t}\n\n\treturn Clock(c)\n}", "func New(hour, minute int) Clock {\n\treturn Clock(modulus(hour*60+minute, minutesInDay))\n}", "func New(configFile string) (Bench, error) {\n\tb := Bench{}\n\tvar err error\n\tif b.config, 
err = config(configFile); err != nil {\n\t\treturn b, err\n\t}\n\treturn b, nil\n}", "func NewMetal(albedo Color, roughness float64) Metal {\n\treturn Metal{Albedo: albedo, Rough: roughness}\n}", "func New() *Manager {\n\treturn &Manager{\n\t\tdevices: make(map[string]Modem),\n\t\thandleAdd: func(m Modem){_ = m},\n\t\thandleRemove: func(m Modem){_ = m},\n\t\thandleUpdate: func(m Modem){_ = m},\n\t}\n}", "func New(counter metrics.Counter, latency metrics.Histogram, logger log.Logger) Logger {\n\treturn Logger{\n\t\tcallUpdate: make(chan interface{}),\n\t\tcallError: make(chan error),\n\t\trequestCount: counter,\n\t\trequestLatency: latency,\n\t\tlogger: logger,\n\t}\n}", "func New(base mb.BaseMetricSet) (mb.MetricSet, error) {\n\n\tconfig := struct{}{}\n\n\tif err := base.Module().UnpackConfig(&config); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &MetricSet{\n\t\tBaseMetricSet: base,\n\t}, nil\n}", "func New(name string) *Module {\n\tm := &Module{\n\t\tbatteryName: name,\n\t\tscheduler: timing.NewScheduler(),\n\t}\n\tl.Label(m, name)\n\tl.Register(m, \"scheduler\", \"format\")\n\tm.format.Set(format{})\n\tm.RefreshInterval(3 * time.Second)\n\t// Construct a simple template that's just the available battery percent.\n\tm.OutputTemplate(outputs.TextTemplate(`BATT {{.RemainingPct}}%`))\n\treturn m\n}", "func New(hours int, minutes int) Clock {\n\tt := time.Date(0, 0, 0, 0, 0, 0, 0, time.UTC)\n\tt = t.Add(time.Hour * time.Duration(hours))\n\tt = t.Add(time.Minute * time.Duration(minutes))\n\n\treturn Clock{t.Hour(), t.Minute()}\n}", "func New(t time.Time) *Clock {\n\treturn NewWithStep(t, 0)\n}", "func New(c Config) (Monitor, error) {\n\tm := &monitor{\n\t\tblankThreshold: blankThreshold,\n\t\ttip: []string{\"\"},\n\t\tpath: c.Path,\n\t\tscanner: c.Scanner,\n\t\tsorter: c.Sorter,\n\t}\n\n\tif err := m.sync(); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn m, nil\n}", "func New(expire, maid int) (*Cache){\n if expire==0 {\n expire = defaultExpiringDuration\n 
}\n if maid==0 {\n maid = defaultMaidDuration\n }\n\n expireDuration, _ := time.ParseDuration(fmt.Sprintf(\"%dm\", expire))\n maidDuration, _ := time.ParseDuration(fmt.Sprintf(\"%dm\", maid))\n\n //Make sure that no one is calling New at the same time.\n //Lock and Unlock the same mutex and set the old cache as invalid.\n cache.cacheMutex.Lock()\n cache.isValid = false\n cache.cacheMutex.Unlock()\n\n //Create the new cache\n cache = &Cache{\n cache: map[string]value{},\n expire: expireDuration,\n maid: maidDuration,\n isValid: false}\n\n go callMaid(cache)\n\n //Set cache as valid before returning\n cache.isValid = true\n return cache\n}", "func New(base mb.BaseMetricSet) (mb.MetricSet, error) {\n\tvar config Config\n\n\tif err := base.Module().UnpackConfig(&config); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &MetricSet{\n\t\tBaseMetricSet: base,\n\t\tcfg: config,\n\t}, nil\n}", "func newmetric(name string, kind metricKind, tags []string, common bool) *metric {\n\treturn &metric{\n\t\tname: name,\n\t\tkind: kind,\n\t\ttags: append([]string{}, tags...),\n\t\tcommon: common,\n\t}\n}", "func New(bus drivers.I2C) Device {\n\treturn Device{\n\t\tbus: bus,\n\t\tpowerCtl: powerCtl{\n\t\t\tmeasure: 1,\n\t\t},\n\t\tdataFormat: dataFormat{\n\t\t\tsensorRange: RANGE_2G,\n\t\t},\n\t\tbwRate: bwRate{\n\t\t\tlowPower: 1,\n\t\t\trate: RATE_100HZ,\n\t\t},\n\t\tAddress: AddressLow,\n\t}\n}", "func New(minLat, maxLat, minLong, maxLong float64) Client {\n\treturn Client{\n\t\tminLat: minLat,\n\t\tmaxLat: maxLat,\n\t\tminLong: minLong,\n\t\tmaxLong: maxLong,\n\t}\n}", "func Metal(index int32) Device {\n return Device{KDLMetal, index}\n}", "func (s *Service) New(ctx context.Context, params *light.NewParams) (*light.Scan, error) {\n\t_, span := trace.StartSpan(ctx, \"hue.lights.new\")\n\tdefer span.End()\n\n\tctx = context.WithValue(ctx, hue.UserKey{}, params.GetUser())\n\tctx = context.WithValue(ctx, hue.HostKey{}, params.GetHost())\n\n\tres, err := s.hue.NewLights(ctx)\n\tif 
err != nil {\n\t\treturn nil, err\n\t}\n\n\tscan, ok := res.(*light.Scan)\n\tif !ok {\n\t\treturn nil, errors.Errorf(\"failed to convert '%T' to *lights.Scan\", res)\n\t}\n\n\treturn scan, nil\n}", "func New(settings ...Option) *Info {\n\ti := &Info{}\n\ti.Set(settings...)\n\treturn i\n}", "func New(base mb.BaseMetricSet) (mb.MetricSet, error) {\n\n\tconfig := struct{}{}\n\tif err := base.Module().UnpackConfig(&config); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &MetricSet{\n\t\tBaseMetricSet: base,\n\t\tJmxClient: psoft.GetPsoftJMXClient(),\n\t}, nil\n}", "func NewSmart(pickupConfidence float64, callConfidence float64,\n aloneConfidence float64,\n pickupRuns int, pickupDeterminizations int,\n callRuns int, callDeterminizations int,\n playRuns int, playDeterminizations int,\n aloneRuns int, aloneDeterminizations int) (*SmartPlayer) {\n\n return &SmartPlayer{\n pickupConfidence,\n callConfidence,\n aloneConfidence,\n pickupRuns,\n pickupDeterminizations,\n callRuns,\n callDeterminizations,\n playRuns,\n playDeterminizations,\n aloneRuns,\n aloneDeterminizations,\n }\n}", "func NewMetric(name string, prog string, kind Kind, keys ...string) *Metric {\n\tm := &Metric{Name: name, Program: prog, Kind: kind,\n\t\tKeys: make([]string, len(keys), len(keys)),\n\t\tLabelValues: make([]*LabelValue, 0)}\n\tcopy(m.Keys, keys)\n\treturn m\n}", "func New() *Prober {\n\treturn newForTest(time.Now, newRealTicker)\n}", "func NewMetric(asset, method string, backend MetricsBackend) Metric {\n\tm := Metric{}\n\n\tm.backend = backend\n\tm.methodName = method\n\tm.startTime = time.Now()\n\tm.asset = asset\n\n\tm.backend.AddMethod(m.asset, method)\n\treturn m\n}" ]
[ "0.6899709", "0.6802081", "0.6737444", "0.66954213", "0.6548257", "0.6525749", "0.6503611", "0.64407635", "0.63265437", "0.612432", "0.5977774", "0.59515846", "0.59335434", "0.5911891", "0.5897539", "0.5890825", "0.5890561", "0.58305466", "0.58248305", "0.5815958", "0.5770586", "0.5769807", "0.5768072", "0.57628417", "0.5751877", "0.5741924", "0.57267696", "0.5711325", "0.5702508", "0.5695079", "0.5694533", "0.5691486", "0.56862295", "0.5681234", "0.56414366", "0.56233436", "0.56108934", "0.5602679", "0.55888283", "0.55871874", "0.5577002", "0.5574096", "0.5560242", "0.55547506", "0.55357397", "0.5534261", "0.55338657", "0.5526467", "0.5522441", "0.5486933", "0.5467335", "0.5460902", "0.5454856", "0.54514146", "0.5448433", "0.54425776", "0.5439266", "0.5431807", "0.5430335", "0.5419925", "0.5405198", "0.5404749", "0.53934294", "0.5386821", "0.5385955", "0.53760993", "0.53674394", "0.53632045", "0.5347166", "0.5338815", "0.53322494", "0.53208613", "0.53118837", "0.529858", "0.52953136", "0.52929896", "0.5283002", "0.5278523", "0.52778476", "0.527088", "0.52660936", "0.5265931", "0.5257226", "0.5246451", "0.5240478", "0.52385074", "0.5237661", "0.5234075", "0.5232164", "0.5220792", "0.52165073", "0.521261", "0.52124745", "0.52095324", "0.5205043", "0.5201212", "0.51975924", "0.5193682", "0.51857173", "0.5179317" ]
0.71873426
0
NoteToTime converts a notes.Duration to a time.Duration based on the meter
NoteToTime преобразует notes.Duration в time.Duration на основе метра
func (m Meter) NoteToTime(noteVal notes.Duration) time.Duration { return time.Duration((float64(noteVal/m.BeatValue) / m.BeatsPerMinute) * float64(time.Minute)) }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func minutesToDuration(n uint8) Duration {\n\treturn Duration(time.Duration(n) * time.Minute)\n}", "func (e *Exact) convertToDuration() time.Duration {\n\tif isValidUnitOfTime(e.Unit) {\n\t\treturn convertTimeToDuration(e.Quantity, e.Unit)\n\t}\n\tpanic(\"'unit' is not a valid unit of time\")\n}", "func (r Rest) Duration(measure time.Duration) time.Duration {\n\tif Duration(r) == None {\n\t\treturn 0\n\t}\n\t//the fraction of the measure the note takes\n\tfraq := 1. / math.Pow(2., float64(r))\n\n\treturn time.Duration(float64(measure) * fraq)\n}", "func (m Meter) NoteToFreq(noteVal notes.Duration) float64 {\n\tduration := m.NoteToTime(noteVal)\n\treturn 1 / float64(duration.Seconds())\n}", "func ToDuration(value interface{}) (time.Duration, error) {\n\tvalue = indirect(value)\n\n\tvar s string\n\tswitch v := value.(type) {\n\tcase nil:\n\t\treturn 0, nil\n\tcase time.Duration:\n\t\treturn v, nil\n\tcase int, int64, int32, int16, int8, uint, uint64, uint32, uint16, uint8, float32, float64:\n\t\ti, _ := ToInt64(value)\n\t\treturn time.Duration(i), nil\n\tcase string:\n\t\ts = v\n\tcase []byte:\n\t\ts = string(v)\n\tcase fmt.Stringer:\n\t\ts = v.String()\n\tdefault:\n\t\treturn 0, fmt.Errorf(\"unable to cast %#v of type %T to Duration\", v, v)\n\t}\n\n\tif strings.ContainsAny(s, \"nsuµmh\") {\n\t\treturn time.ParseDuration(s)\n\t}\n\treturn time.ParseDuration(s + \"ns\")\n}", "func adjTime(context interface{}, value string) (time.Time, error) {\n\n\t// The default value is in seconds unless overridden.\n\t// #time:0 Current date/time\n\t// #time:-3600 3600 seconds in the past\n\t// #time:3m\t\t3 minutes in the future.\n\n\t// Possible duration types.\n\t// \"ns\": int64(Nanosecond),\n\t// \"us\": int64(Microsecond),\n\t// \"ms\": int64(Millisecond),\n\t// \"s\": int64(Second),\n\t// \"m\": int64(Minute),\n\t// \"h\": int64(Hour),\n\n\t// Do we have a single value?\n\tif len(value) == 1 {\n\t\tval, err := strconv.Atoi(value[0:1])\n\t\tif err != nil {\n\t\t\treturn 
time.Time{}.UTC(), fmt.Errorf(\"Invalid duration : %q\", value[0:1])\n\t\t}\n\n\t\tif val == 0 {\n\t\t\treturn time.Now().UTC(), nil\n\t\t}\n\n\t\treturn time.Now().Add(time.Duration(val) * time.Second).UTC(), nil\n\t}\n\n\t// Do we have a duration type and where does the\n\t// actual duration value end\n\tvar typ string\n\tvar end int\n\n\t// The end byte position for the last character in the string.\n\tePos := len(value) - 1\n\n\t// Look at the very last character.\n\tt := value[ePos:]\n\tswitch t {\n\n\t// Is this a minute or hour? [3m]\n\tcase \"m\", \"h\":\n\t\ttyp = t\n\t\tend = ePos // Position of last chr in value.\n\n\t// Is this a second or other duration? [3s or 3us]\n\tcase \"s\":\n\t\ttyp = t // s for 3s\n\t\tend = ePos // 3 for 3s\n\n\t\t// Is this smaller than a second? [ns, us, ms]\n\t\tif len(value) > 2 {\n\t\t\tt := value[ePos-1 : ePos]\n\t\t\tswitch t {\n\t\t\tcase \"n\", \"u\", \"m\":\n\t\t\t\ttyp = value[ePos-1:] // us for 3us\n\t\t\t\tend = ePos - 1 // 3 for 3us\n\t\t\t}\n\t\t}\n\n\tdefault:\n\t\ttyp = \"s\" // s for 3600\n\t\tend = ePos + 1 // 0 for 3600\n\t}\n\n\t// Check if we are to negative the value.\n\tvar start int\n\tif value[0] == '-' {\n\t\tstart = 1\n\t}\n\n\t// Check the remaining bytes is an integer value.\n\tval, err := strconv.Atoi(value[start:end])\n\tif err != nil {\n\t\treturn time.Time{}.UTC(), fmt.Errorf(\"Invalid duration : %q\", value[start:end])\n\t}\n\n\t// Do we have to negate the value?\n\tif start == 1 {\n\t\tval *= -1\n\t}\n\n\t// Calcuate the time value.\n\tswitch typ {\n\tcase \"ns\":\n\t\treturn time.Now().Add(time.Duration(val) * time.Nanosecond).UTC(), nil\n\tcase \"us\":\n\t\treturn time.Now().Add(time.Duration(val) * time.Microsecond).UTC(), nil\n\tcase \"ms\":\n\t\treturn time.Now().Add(time.Duration(val) * time.Millisecond).UTC(), nil\n\tcase \"m\":\n\t\treturn time.Now().Add(time.Duration(val) * time.Minute).UTC(), nil\n\tcase \"h\":\n\t\treturn time.Now().Add(time.Duration(val) * time.Hour).UTC(), 
nil\n\tdefault:\n\t\treturn time.Now().Add(time.Duration(val) * time.Second).UTC(), nil\n\t}\n}", "func convertTime(time uint64, stream_uot, target_uot UnitOfTime) uint64 {\n\tunitmultiplier := map[UnitOfTime]uint64{\n\t\tUOT_NS: 1000000000,\n\t\tUOT_US: 1000000,\n\t\tUOT_MS: 1000,\n\t\tUOT_S: 1}\n\treturn time / unitmultiplier[stream_uot] * unitmultiplier[target_uot]\n}", "func (pomo *Pomo) GetDuration() string {\n\n\t// if pomo is off do not output anything\n\tif pomo.Status == OFF {\n\t\treturn \"\"\n\t}\n\n\t// if pomo run out of time that was set\n\t// make a blinking animation and send ntification\n\tif pomo.Time < 0 {\n\n\t\t// if user not notified\n\t\tif !pomo.Notified {\n\n\t\t\t// notify the user\n\t\t\tgo notifyUser(NOTIFICATION_MESSAGE)\n\n\t\t\tpomo.Notified = true\n\t\t}\n\n\t\t// emoji_id is a number between 0 and 1\n\t\temoji_id := (pomo.Time.Milliseconds() / 1000 % 2) * (-1)\n\n\t\treturn fmt.Sprintf(\"%s%s\\n\", pomo.Blink[emoji_id], pomo.Time)\n\t}\n\n\treturn fmt.Sprintf(\"%s%s\\n\", pomo.Emoji, pomo.Time)\n}", "func (e PrecisionTiming) durationToMs(x time.Duration) float64 {\n\treturn float64(x) / float64(time.Millisecond)\n}", "func (q MetricTicks) Duration(tempoBPM uint32, deltaTicks uint32) time.Duration {\n\tif q == 0 {\n\t\tq = defaultMetric\n\t}\n\t// (60000 / T) * (d / R) = D[ms]\n\t//\tdurQnMilli := 60000 / float64(tempoBPM)\n\t//\t_4thticks := float64(deltaTicks) / float64(uint16(q))\n\tres := 60000000000 * float64(deltaTicks) / (float64(tempoBPM) * float64(uint16(q)))\n\t//fmt.Printf(\"what: %vns\\n\", res)\n\treturn time.Duration(int64(math.Round(res)))\n\t//\treturn time.Duration(roundFloat(durQnMilli*_4thticks, 0)) * time.Millisecond\n}", "func msToDuration(ms int64) time.Duration {\n\treturn time.Duration(ms * int64(time.Millisecond))\n}", "func durToMsec(dur time.Duration) string {\n\treturn fmt.Sprintf(\"%dms\", dur/time.Millisecond)\n}", "func (d *Delay) TimeDuration() time.Duration {\n\treturn time.Duration(d.Duration*1000) * 
time.Millisecond\n}", "func (n *Note) measure() {\n\tvar samples int\n\tlength := wholeNote + (wholeNote / 100 * 4 * (4 - n.tempo)) // 4% per tempo unit\n\tswitch n.duration {\n\tcase 'W':\n\t\tsamples = length\n\tcase 'H':\n\t\tsamples = length / 2\n\tcase 'Q':\n\t\tsamples = length / 4\n\tcase 'E':\n\t\tsamples = length / 8\n\tcase 'S':\n\t\tsamples = length / 16\n\tcase 'T':\n\t\tsamples = length / 32\n\tcase 'I':\n\t\tsamples = length / 64\n\t}\n\n\tif samples > 0 {\n\t\t// Apply dot measure\n\t\tif n.dotted {\n\t\t\tsamples += samples / 2\n\t\t}\n\t}\n\n\tn.samples = samples\n}", "func (d Duration) TimeDuration() time.Duration {\n\treturn time.Duration(int64(d) / Millisecond * int64(time.Millisecond))\n}", "func ToDuration(i interface{}) (d time.Duration, err error) {\n\ti = indirect(i)\n\n\tswitch s := i.(type) {\n\tcase time.Duration:\n\t\treturn s, nil\n\tcase int64:\n\t\td = time.Duration(s)\n\t\treturn\n\tcase float64:\n\t\td = time.Duration(s)\n\t\treturn\n\tcase string:\n\t\td, err = time.ParseDuration(s)\n\t\treturn\n\tdefault:\n\t\terr = fmt.Errorf(\"unable to cast %#v to Duration\\n\", i)\n\t\treturn\n\t}\n}", "func ToTime(t uint64) time.Time {\n\tseconds := (t & 0xFFFFFFFF00000000) >> 32\n\tfractional := float64(t&0x00000000FFFFFFFF) / float64(0xFFFFFFFF)\n\td := time.Duration(seconds)*time.Second + time.Duration(fractional*1e9)*time.Nanosecond\n\n\treturn time.Unix(0, 0).Add(-2208988800 * time.Second).Add(d)\n}", "func (ts Timespec) ToDuration() time.Duration {\n\treturn time.Duration(ts.ToNsecCapped())\n}", "func toMilliseconds(duration time.Duration) float64 {\n\tif duration < time.Microsecond*10 {\n\t\treturn 0\n\t}\n\n\tms := float64(duration) / float64(time.Millisecond)\n\t// Round time to 0.02 precision\n\treturn math.Round(ms*100) / 100\n}", "func DurationToTimeMillisField(duration time.Duration) zapcore.Field {\n\treturn zap.Float32(\"grpc.time_ms\", durationToMilliseconds(duration))\n}", "func msToTime(t int64) time.Time {\n\treturn 
time.Unix(t/int64(1000), (t%int64(1000))*int64(1000000))\n}", "func secondsToDuration(seconds float64) time.Duration {\n\tttl := seconds * float64(time.Second)\n\treturn time.Duration(ttl)\n}", "func secondsToDuration(seconds float64) time.Duration {\n\tttl := seconds * float64(time.Second)\n\treturn time.Duration(ttl)\n}", "func convertMillToTime(originalTime int64) time.Time {\n\ti := time.Unix(0, originalTime*int64(time.Millisecond))\n\treturn i\n}", "func (gdb *Gdb) getTimeDuration(duration int) int64 {\n\treturn time.Now().Add(time.Duration(duration)*time.Second).Unix() + 8*3600\n}", "func timeToPtr(t time.Duration) *time.Duration {\n\treturn &t\n}", "func tempoToPulseInterval(t Bpm) time.Duration {\n\n\treturn time.Duration((uSecInMin/(t/10.00))/ppqn) * time.Microsecond\n}", "func NoteAtTime(t, sr int, note Note) float64 {\n\tsum := 0.0\n\tmultiplier := (2.0 * math.Pi) / float64(sr)\n\tfor i := 0; i < len(note.Frequency); i++ {\n\t\tsum += math.Sin((multiplier * (note.Frequency[i] * note.Octave)) * float64(t))\n\t}\n\treturn sum\n}", "func ToDurationE(i interface{}) (time.Duration, error) {\n\treturn cast.ToDurationE(i)\n}", "func ToDuration(i interface{}) time.Duration {\n\treturn cast.ToDuration(i)\n}", "func (r *Range) convertToDuration() time.Duration {\n\tvar pick int64\n\tif r.High >= r.Low {\n\t\tpick = rand.Int63n(r.High+1-r.Low) + r.Low\n\t\tif isValidUnitOfTime(r.Unit) {\n\t\t\treturn convertTimeToDuration(pick, r.Unit)\n\t\t}\n\t\tpanic(\"'unit' is not a valid unit of time\")\n\t}\n\tpanic(\"'high' cannot be less than 'low'\")\n}", "func (d *Duration) GetTimeDuration() time.Duration {\n\treturn time.Duration(d.Year * yToNano + d.Month * monthToNano +\n\t\td.Day * dToNano + d.Hour * hToNano + d.Minute * mToNano + \n\t\td.Second * sToNano)\n}", "func (i *InputInlineQueryResultVoiceNote) GetVoiceNoteDuration() (value int32) {\n\tif i == nil {\n\t\treturn\n\t}\n\treturn i.VoiceNoteDuration\n}", "func MicroSecToDuration(msec int) time.Duration 
{\n\treturn time.Duration(msec) * time.Microsecond\n}", "func getDuration(seconds int) time.Duration {\n\treturn time.Duration(seconds) * time.Second\n}", "func (tv Timeval) ToDuration() time.Duration {\n\treturn time.Duration(tv.ToNsecCapped())\n}", "func (e2 *PicoSecondTimeStamp) Duration(e1 *PicoSecondTimeStamp) *PicoSecondDuration {\n\tresult := &PicoSecondDuration{\n\t\tEpoch: int32(e2.Epoch - e1.Epoch),\n\t\tPicoSeconds: int64(e2.PicoSeconds - e1.PicoSeconds),\n\t}\n\n\tif result.PicoSeconds < 0 && result.Epoch > 0 {\n\t\tresult.Epoch = result.Epoch - 1\n\t\tresult.PicoSeconds = result.PicoSeconds + 1000000000000\n\t}\n\treturn result\n}", "func (p *PodStatusInformation) ConvertTime(tlocal *time.Location) {\n\n\tp.FinishedAt = p.FinishedAt.In(tlocal)\n\tp.StartedAt = p.StartedAt.In(tlocal)\n\n}", "func hoursToDuration(n uint8) Duration {\n\treturn Duration(time.Duration(n) * time.Hour)\n}", "func (t timeFlag) Duration() time.Duration {\n\treturn time.Duration(t)\n}", "func durtoTV(d time.Duration) (int64, int64) {\n\tsec := int64(d / nanoPerSec)\n\tmicro := int64((int64(d) - sec*nanoPerSec) / 1000)\n\n\treturn sec, micro\n}", "func timeToMillis(t time.Time) float64 {\n\treturn float64(t.UnixNano() / 1000000)\n}", "func (decoder *Decoder) ByteOffsetToDur(offset int32) time.Duration {\n\treturn time.Duration(offset/decoder.byteRate) * time.Second\n}", "func durationHook(from, to reflect.Type, data reflect.Value) (reflect.Value, error) {\n\tif from.Kind() != reflect.String || to != _typeOfDuration {\n\t\treturn data, nil\n\t}\n\n\td, err := time.ParseDuration(data.String())\n\treturn reflect.ValueOf(d), err\n}", "func halfHoursToDuration(n uint8) Duration {\n\treturn Duration(time.Duration(n) * 30 * time.Minute)\n}", "func (m *RedundantAssignmentAlertConfiguration) GetDuration()(*i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ISODuration) {\n val, err := m.GetBackingStore().Get(\"duration\")\n if err != nil {\n panic(err)\n }\n if val != nil 
{\n return val.(*i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ISODuration)\n }\n return nil\n}", "func (t ntpTime) Duration() time.Duration {\n\tsec := (t >> 32) * nanoPerSec\n\tfrac := (t & 0xffffffff) * nanoPerSec >> 32\n\treturn time.Duration(sec + frac)\n}", "func (t *Track) Duration() float64 {\n\treturn float64(t.duration) / float64(t.globalTimescale)\n}", "func (d Duration) TimeDuration() time.Duration {\n\tdl := ast.DurationLiteral(d)\n\tdd, _ := ast.DurationFrom(&dl, time.Time{})\n\treturn dd\n}", "func (d Duration) TimeDuration() time.Duration {\n\tdl := ast.DurationLiteral(d)\n\tdd, _ := ast.DurationFrom(&dl, time.Time{})\n\treturn dd\n}", "func durationFromMvhdAtom(mvhdStart int64, mvhdLength int64, file *os.File) (int, error) {\n\tbuffer := make([]byte, 8)\n\t_, err := file.ReadAt(buffer, mvhdStart+20) // The timescale field starts at the 21st byte of the mvhd atom\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\n\t// The timescale is bytes 21-24.\n\t// The duration is bytes 25-28\n\ttimescale := convertBytesToInt(buffer[0:4]) // This is in number of units per second\n\tdurationInTimeScale := convertBytesToInt(buffer[4:])\n\treturn int(durationInTimeScale) / int(timescale), nil\n}", "func Duration(i interface{}) time.Duration {\n\t// It's already this type.\n\tif v, ok := i.(time.Duration); ok {\n\t\treturn v\n\t}\n\ts := String(i)\n\tif !utils.IsNumeric(s) {\n\t\td, _ := time.ParseDuration(s)\n\t\treturn d\n\t}\n\treturn time.Duration(Int64(i))\n}", "func ClockTimeDuration(a ClockTime, b ClockTime) ClockTime {\n\ta_I := ClockTimetoI(a)\n\tb_I := ClockTimetoI(b)\n\tduration := a_I - b_I // if a < b then we assume that there's one day difference\n\tif duration < 0 {\n\t\treturn ItoClockTime(1440 - b_I + a_I)\n\t}\n\treturn ItoClockTime(duration)\n}", "func (d *Duration) Time() (time.Duration, error) {\n\tp, err := period.Parse(string(*d))\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\treturn p.DurationApprox(), nil\n}", "func (t Tie) 
Duration(measure time.Duration) time.Duration {\n\tout := time.Duration(0)\n\tfor _, p := range t {\n\t\tout += p.Duration(measure)\n\t}\n\treturn out\n}", "func (t ntpTimeShort) Duration() time.Duration {\n\tsec := uint64(t>>16) * nanoPerSec\n\tfrac := uint64(t&0xffff) * nanoPerSec\n\tnsec := frac >> 16\n\tif uint16(frac) >= 0x8000 {\n\t\tnsec++\n\t}\n\treturn time.Duration(sec + nsec)\n}", "func (t *timer) Measure() time.Duration {\n\treturn t.stopTime.Sub(t.startTime)\n}", "func DurationToTimespec(dur time.Duration) Timespec {\n\treturn NsecToTimespec(dur.Nanoseconds())\n}", "func ToFloat(d time.Duration) (seconds float64) {\n\treturn float64(d) / float64(time.Second)\n}", "func msToTime(ms int64) time.Time {\n\treturn time.Unix(0, ms*int64(time.Millisecond))\n}", "func (t HighresTimestamp) Duration() time.Duration {\n\treturn time.Duration(uint64(t) * uint64(tbinfo.numer) / uint64(tbinfo.denom)))\n}", "func ReadTime() time.Duration {\n\tif drvDMA.timerMemory == nil {\n\t\treturn 0\n\t}\n\tv := uint64(drvDMA.timerMemory.counterHigh)<<32 | uint64(drvDMA.timerMemory.counterLow)\n\tif v == 0 {\n\t\t// BUG(maruel): Implement using AVS_CNT0_REG on A64.\n\t\treturn 0\n\t}\n\t// BUG(maruel): Assumes that counterCtrl & timerPLL6 is not set.\n\tconst tick = time.Microsecond / 24\n\treturn time.Duration(v) * tick\n}", "func (t ntpTime) Duration() time.Duration {\n\tsec := (t >> 32) * nanoPerSec\n\tfrac := (t & 0xffffffff) * nanoPerSec\n\tnsec := frac >> 32\n\tif uint32(frac) >= 0x80000000 {\n\t\tnsec++\n\t}\n\treturn time.Duration(sec + nsec)\n}", "func getNoteTicker(secondsPerNote float64) *time.Ticker {\n\tnoteDurationString := fmt.Sprintf(\"%.5f\", secondsPerNote) + \"s\"\n\tnoteDuration, _ := time.ParseDuration(noteDurationString)\n\n\treturn time.NewTicker(noteDuration)\n}", "func ToMillis(t time.Time) int64 {\n\treturn t.UnixNano() / 1e6\n}", "func calculateDuration(r *record) time.Duration {\n\tdateFormat := \"2006-01-0215:04:05\"\n\n\tstart, err := 
time.Parse(dateFormat, r.date+r.startTime)\n\tcheckErr(err)\n\n\tend, err := time.Parse(dateFormat, r.date+r.endTime)\n\tcheckErr(err)\n\n\tpause, err := time.ParseDuration(r.pause)\n\tcheckErr(err)\n\n\treturn end.Sub(start) - pause\n}", "func (this *StrToMillis) Type() value.Type { return value.NUMBER }", "func (item Item) GetDuration(name string) time.Duration {\n\tduration, _ := time.ParseDuration(\"0h0m0s\")\n\n\tswitch item[name].(type) {\n\tcase time.Duration:\n\t\tduration = item[name].(time.Duration)\n\tcase string:\n\t\tvar matched bool\n\t\tvar re *regexp.Regexp\n\t\tvalue := item[name].(string)\n\n\t\tmatched, _ = regexp.MatchString(`^\\d{2}:\\d{2}:\\d{2}$`, value)\n\n\t\tif matched {\n\t\t\tre, _ = regexp.Compile(`^(\\d{2}):(\\d{2}):(\\d{2})$`)\n\t\t\tall := re.FindAllStringSubmatch(value, -1)\n\n\t\t\tformatted := fmt.Sprintf(\"%sh%sm%ss\", all[0][1], all[0][2], all[0][3])\n\t\t\tduration, _ = time.ParseDuration(formatted)\n\t\t}\n\t}\n\treturn duration\n}", "func MakeDuration(target string, def int) time.Duration {\n\tif !elapso.MatchString(target) {\n\t\treturn time.Duration(def)\n\t}\n\n\tmatchs := elapso.FindAllStringSubmatch(target, -1)\n\n\tif len(matchs) <= 0 {\n\t\treturn time.Duration(def)\n\t}\n\n\tmatch := matchs[0]\n\n\tif len(match) < 3 {\n\t\treturn time.Duration(def)\n\t}\n\n\tdur := time.Duration(ConvertToInt(match[1], def))\n\n\tmtype := match[2]\n\n\tswitch mtype {\n\tcase \"s\":\n\t\treturn dur * time.Second\n\tcase \"mcs\":\n\t\treturn dur * time.Microsecond\n\tcase \"ns\":\n\t\treturn dur * time.Nanosecond\n\tcase \"ms\":\n\t\treturn dur * time.Millisecond\n\tcase \"m\":\n\t\treturn dur * time.Minute\n\tcase \"h\":\n\t\treturn dur * time.Hour\n\tdefault:\n\t\treturn time.Duration(dur) * time.Second\n\t}\n}", "func FootToMeters(f Foot) Meter { return Meter(f * 3) }", "func timeFromJournalInt(t int64) time.Time {\n\tsecs := t / 1000000\n\tms := t % 1000000\n\treturn time.Unix(secs, ms).UTC()\n}", "func Ms(duration time.Duration) 
float64 {\n\treturn float64(duration / time.Millisecond)\n}", "func (t *Time) Diff(from Time) Duration {\n\tsec, nsec := normalizeTemporal(int64(t.Sec)-int64(from.Sec),\n\t\tint64(t.NSec)-int64(from.NSec))\n\treturn Duration{temporal{sec, nsec}}\n}", "func (tv Timeval) ToTime() time.Time {\n\treturn time.Unix(tv.Sec, tv.Usec*1e3)\n}", "func durationToMilliseconds(d time.Duration) (uint64, error) {\n\tif d < 0 {\n\t\treturn 0, fmt.Errorf(\"report period cannot be negative: %v\", d)\n\t}\n\n\treturn uint64(d / time.Millisecond), nil\n}", "func (s Broker) TimingDuration(name string, duration time.Duration) {\n\ttimeMillis := int(duration.Nanoseconds() / 1000000)\n\ts.Timing(name, timeMillis)\n}", "func (d *Duration) UnmarshalTOML(b []byte) error {\n\tvar err error\n\tb = bytes.Trim(b, `'`)\n\n\t// see if we can directly convert it\n\td.Duration, err = time.ParseDuration(string(b))\n\tif err == nil {\n\t\treturn nil\n\t}\n\n\t// Parse string duration, ie, \"1s\"\n\tif uq, err := strconv.Unquote(string(b)); err == nil && len(uq) > 0 {\n\t\td.Duration, err = time.ParseDuration(uq)\n\t\tif err == nil {\n\t\t\treturn nil\n\t\t}\n\t}\n\n\t// First try parsing as integer seconds\n\tsI, err := strconv.ParseInt(string(b), 10, 64)\n\tif err == nil {\n\t\td.Duration = time.Second * time.Duration(sI)\n\t\treturn nil\n\t}\n\t// Second try parsing as float seconds\n\tsF, err := strconv.ParseFloat(string(b), 64)\n\tif err == nil {\n\t\td.Duration = time.Second * time.Duration(sF)\n\t\treturn nil\n\t}\n\n\treturn nil\n}", "func durationToWord(in Interval) string {\n\tswitch in {\n\tcase FifteenSecond:\n\t\treturn \"fifteensecond\"\n\tcase OneMin:\n\t\treturn \"onemin\"\n\tcase ThreeMin:\n\t\treturn \"threemin\"\n\tcase FiveMin:\n\t\treturn \"fivemin\"\n\tcase TenMin:\n\t\treturn \"tenmin\"\n\tcase FifteenMin:\n\t\treturn \"fifteenmin\"\n\tcase ThirtyMin:\n\t\treturn \"thirtymin\"\n\tcase OneHour:\n\t\treturn \"onehour\"\n\tcase TwoHour:\n\t\treturn \"twohour\"\n\tcase 
FourHour:\n\t\treturn \"fourhour\"\n\tcase SixHour:\n\t\treturn \"sixhour\"\n\tcase EightHour:\n\t\treturn \"eighthour\"\n\tcase TwelveHour:\n\t\treturn \"twelvehour\"\n\tcase OneDay:\n\t\treturn \"oneday\"\n\tcase ThreeDay:\n\t\treturn \"threeday\"\n\tcase FifteenDay:\n\t\treturn \"fifteenday\"\n\tcase OneWeek:\n\t\treturn \"oneweek\"\n\tcase TwoWeek:\n\t\treturn \"twoweek\"\n\tcase OneMonth:\n\t\treturn \"onemonth\"\n\tcase OneYear:\n\t\treturn \"oneyear\"\n\tdefault:\n\t\treturn \"notfound\"\n\t}\n}", "func (tj *TensorFlowJob) Duration() time.Duration {\n\tjob := tj.tfjob\n\n\tif job.Status.StartTime == nil ||\n\t\tjob.Status.StartTime.IsZero() {\n\t\treturn 0\n\t}\n\n\tif !job.Status.CompletionTime.IsZero() {\n\t\treturn job.Status.CompletionTime.Time.Sub(job.Status.StartTime.Time)\n\t}\n\n\tif tj.GetStatus() == \"FAILED\" {\n\t\tcond := getPodLatestCondition(tj.chiefPod)\n\t\tif !cond.LastTransitionTime.IsZero() {\n\t\t\treturn cond.LastTransitionTime.Time.Sub(job.Status.StartTime.Time)\n\t\t} else {\n\t\t\tlog.Debugf(\"the latest condition's time is zero of pod %s\", tj.chiefPod.Name)\n\t\t}\n\t}\n\n\treturn metav1.Now().Sub(job.Status.StartTime.Time)\n}", "func ToTimeE(i interface{}) (time.Time, error) {\n\treturn cast.ToTimeE(i)\n}", "func SecondsToTime(n int64) Time {\n\treturn Time(n * 1e6)\n}", "func DurationToTimeval(dur time.Duration) Timeval {\n\treturn NsecToTimeval(dur.Nanoseconds())\n}", "func convertToSeconds(hours, minutes, seconds, microseconds string) {\n\thoursInSeconds, _ := strconv.Atoi(hours)\n\tminutesInSeconds, _ := strconv.Atoi(minutes)\n\tformattedSeconds, _ := strconv.Atoi(seconds)\n\tformattedSeconds = formattedSeconds + (hoursInSeconds * 3600) + (minutesInSeconds * 60)\n\n\tvar buffer bytes.Buffer\n\n\tbuffer.WriteString(strconv.Itoa(formattedSeconds))\n\tbuffer.WriteString(\".\")\n\tbuffer.WriteString(microseconds)\n\n\tfmt.Println(\"BarDuration: \" + buffer.String())\n}", "func stampToTime(quadPart C.LONGLONG) time.Time {\n\tft := 
windows.Filetime{\n\t\tHighDateTime: uint32(quadPart >> 32),\n\t\tLowDateTime: uint32(quadPart & math.MaxUint32),\n\t}\n\treturn time.Unix(0, ft.Nanoseconds())\n}", "func toTimeSeconds(value string) (int64, error) {\n\t//is serial format?\n\tserial, err := strconv.ParseFloat(value, 64)\n\tif err != nil {\n\t\treturn -1, err\n\t}\n\treturn int64(serial * 86400), nil\n}", "func timeDiff(t1 time.Time, t2 time.Time, duration int) string {\n\tif t1.After(t2) {\n\t\ttimeList := diff(t1, t2)\n\t\treturn timeDiffHelper(timeList) + \"AGO\"\n\t}\n\n\ttimeList := diff(t2, t1)\n\tif duration == 1 {\n\t\treturn timeDiffHelper(timeList)\n\t}\n\treturn \"IN \" + timeDiffHelper(timeList)\n\n}", "func (d ISODuration) ToDuration() (time.Duration, error) {\r\n\treturn d.duration.ToDuration()\r\n}", "func (br *BandwidthMeter) Duration() (duration time.Duration) {\n duration = br.lastRead.Sub(br.start)\n return\n}", "func to_ms(nano int64) int64 {\n\treturn nano / int64(time.Millisecond)\n}", "func (ts Timespec) ToTime() time.Time {\n\treturn time.Unix(ts.Sec, ts.Nsec)\n}", "func prettyDuration(t int64) string {\n\tif t > 1000000000 {\n\t\treturn fmt.Sprintf(\"%.2fs\", float64(t)/float64(1000000000))\n\t}\n\treturn fmt.Sprintf(\"%.2fms\", float64(t)/float64(1000000))\n}", "func parseIntoDuration(str string) (time.Duration, error) {\n\tvar d time.Duration\n\t/**\n\t * important! When editing this regex, make sure that you specify the \"or\"s as\n\t * whole -> subset instead of subset -> whole, that is \"second|sec|s\" instead of\n\t * \"s|sec|second\". 
Otherwise, you will find yourself matching \"s\", but with a tailing\n\t * \"econd\"\n\t**/\n\tre := regexp.MustCompile(\"([-+][0-9]+)(hour|hr|h|minute|min|m|second|sec|s|days|day|d)\")\n\tres := re.FindAllStringSubmatch(str, -1)\n\tif len(res) != 1 {\n\t\treturn d, errors.New(\"Invalid timespec: \" + str)\n\t}\n\n\t// handle amount\n\ti, err := strconv.ParseInt(res[0][1], 10, 64)\n\tif err != nil {\n\t\treturn d, err\n\t}\n\td = time.Duration(i)\n\n\t// handle units\n\tswitch res[0][2] {\n\tcase \"h\", \"hr\", \"hour\":\n\t\td *= time.Hour\n\tcase \"m\", \"min\", \"minute\":\n\t\td *= time.Minute\n\tcase \"s\", \"sec\", \"second\":\n\t\td *= time.Second\n\tcase \"d\", \"days\", \"day\":\n\t\td *= 24 * time.Hour\n\tdefault:\n\t\treturn d, errors.New(\"Timespec needs valid units:\" + str)\n\t}\n\n\treturn d, nil\n}", "func (d Duration) StringUsingUnits(unit units.Unit) string {\n\treturn d.convert(units.Second, unit).toString()\n}", "func (note Note) AtTime(t, sr int) float64 {\n\treturn NoteAtTime(t, sr, note)\n}", "func DurationInWords(d time.Duration) string {\n\n\tif d >= time.Second && d <= (time.Second*4) {\n\t\treturn fmt.Sprintf(lssthnd, 5, \"seconds\")\n\t} else if d >= (time.Second*5) && d < (time.Second*10) {\n\t\treturn fmt.Sprintf(lssthnd, 10, \"seconds\")\n\t} else if d >= (time.Second*10) && d < (time.Second*20) {\n\t\treturn fmt.Sprintf(lssthnd, 20, \"seconds\")\n\t} else if d >= (time.Second*20) && d < (time.Second*40) {\n\t\treturn \"half a minute\"\n\t} else if d >= (time.Second*40) && d < (time.Second*60) {\n\t\treturn fmt.Sprintf(lssthns, \"minute\")\n\t} else if d >= (time.Second*60) && d < time.Minute+(time.Second*30) {\n\t\treturn \"1 minute\"\n\t} else if d >= time.Minute+(time.Second*30) && d < (time.Minute*44)+(time.Second*30) {\n\t\treturn fmt.Sprintf(\"%d minutes\", (d / time.Minute))\n\t} else if d >= (time.Minute*44)+(time.Second*30) && d < (time.Minute*89)+(time.Second*30) {\n\t\treturn fmt.Sprintf(aboutnd, d/time.Hour, \"hour\")\n\t} 
else if d >= (time.Minute*89)+(time.Second*30) && d < (time.Hour*29)+(time.Minute*59)+(time.Second*30) {\n\t\treturn fmt.Sprintf(aboutnd, (d / time.Hour), \"hours\")\n\t} else if d >= (time.Hour*23)+(time.Minute*59)+(time.Second*30) && d < (time.Hour*41)+(time.Minute*59)+(time.Second*30) {\n\t\treturn \"1 day\"\n\t} else if d >= (time.Hour*41)+(time.Minute*59)+(time.Second*30) && d < (day*29)+(time.Hour*23)+(time.Minute*59)+(time.Second*30) {\n\t\treturn fmt.Sprintf(\"%d days\", d/(time.Hour*24))\n\t} else if d >= (day*29)+(time.Hour*23)+(time.Minute*59)+(time.Second*30) && d < (day*59)+(time.Hour*23)+(time.Minute*59)+(time.Second*30) {\n\t\treturn fmt.Sprintf(aboutnd, 1, \"month\")\n\t} else if d >= (day*59)+(time.Hour*23)+(time.Minute*59)+(time.Second*30) && d < (year) {\n\t\treturn fmt.Sprintf(aboutnd, d/month+1, \"months\")\n\t} else if d >= year && d < year+(3*month) {\n\t\treturn fmt.Sprintf(aboutnd, 1, \"year\")\n\t} else if d >= year+(3*month) && d < year+(9*month) {\n\t\treturn \"over 1 year\"\n\t} else if d >= year+(9*month) && d < (year*2) {\n\t\treturn \"almost 2 years\"\n\t} else {\n\t\treturn fmt.Sprintf(aboutnd, d/year, \"years\")\n\t}\n}", "func (mes *MarkerEncodingScheme) TimeUnit() Marker { return mes.timeUnit }", "func GetSignalTime(timeUnit int32, refDate time.Time) time.Time {\n\tvar t time.Time\n\tswitch timeUnit {\n\tcase SignalTimeUnit_NOW:\n\t\t{\n\t\t\treturn refDate.UTC().Truncate(time.Hour * 24)\n\t\t}\n\tcase SignalTimeUnit_MONTH:\n\t\t{\n\t\t\tt = refDate.UTC().AddDate(0, 0, -30)\n\t\t}\n\tcase SignalTimeUnit_BIMONTH:\n\t\t{\n\t\t\tt = refDate.UTC().AddDate(0, 0, -60)\n\t\t}\n\tcase SignalTimeUnit_QUARTER:\n\t\t{\n\t\t\tt = refDate.UTC().AddDate(0, 0, -90)\n\t\t}\n\tcase SignalTimeUnit_HALFYEAR:\n\t\t{\n\t\t\tt = refDate.UTC().AddDate(0, 0, -180)\n\t\t}\n\tcase SignalTimeUnit_THIRDQUARTER:\n\t\t{\n\t\t\tt = refDate.UTC().AddDate(0, 0, -270)\n\t\t}\n\tcase SignalTimeUnit_YEAR:\n\t\t{\n\t\t\tt = refDate.UTC().AddDate(0, 0, 
-365)\n\t\t}\n\t}\n\n\treturn t.Truncate(time.Hour * 24)\n}", "func (r Rest) TickDuration(quarter uint16) uint16 {\n\tif Duration(r) == None {\n\t\treturn 0\n\t}\n\t//the fraction of the measure the note takes\n\tfraq := 1. / math.Pow(2., float64(r))\n\n\treturn uint16(float64(4*quarter) * fraq)\n}", "func (p *FrameLease) TimeToLive() time.Duration {\n\tv := binary.BigEndian.Uint32(p.body.Bytes())\n\treturn time.Millisecond * time.Duration(v)\n}", "func StringToTimeDurationHookFunc() DecodeHookFunc {\n\treturn func(\n\t\tf reflect.Type,\n\t\tt reflect.Type,\n\t\tdata interface{}) (interface{}, error) {\n\t\tif f.Kind() != reflect.String {\n\t\t\treturn data, nil\n\t\t}\n\t\tif t != reflect.TypeOf(time.Duration(5)) {\n\t\t\treturn data, nil\n\t\t}\n\n\t\t// Convert it by parsing\n\t\treturn time.ParseDuration(data.(string))\n\t}\n}" ]
[ "0.6077799", "0.5801322", "0.5798061", "0.5757803", "0.5740865", "0.57225984", "0.5601669", "0.5556431", "0.54757607", "0.5466047", "0.54560196", "0.54490495", "0.5432458", "0.5383502", "0.53819966", "0.5361781", "0.53517246", "0.53364056", "0.5326453", "0.5320531", "0.5315199", "0.5305441", "0.5305441", "0.5302505", "0.5270126", "0.52627414", "0.524501", "0.5244543", "0.52388316", "0.5228539", "0.5218616", "0.521388", "0.51982343", "0.51920253", "0.51789004", "0.5165287", "0.5159179", "0.5153292", "0.5131149", "0.51276755", "0.5105739", "0.5093638", "0.50809175", "0.5064148", "0.50355005", "0.50234497", "0.501787", "0.5013835", "0.50025123", "0.50025123", "0.4991857", "0.49719313", "0.497044", "0.49627566", "0.4951521", "0.4949176", "0.49286336", "0.49201325", "0.49190268", "0.4912904", "0.49082246", "0.49065349", "0.4897654", "0.489457", "0.48943564", "0.4893668", "0.48843744", "0.4879211", "0.48695427", "0.48614013", "0.48591074", "0.48539004", "0.48528993", "0.4851961", "0.48487324", "0.4843809", "0.48289412", "0.48275736", "0.48221448", "0.48167086", "0.4815702", "0.48103678", "0.48057002", "0.47917658", "0.47831908", "0.47743097", "0.4769937", "0.47650608", "0.47537443", "0.4748513", "0.47459233", "0.47332752", "0.47317386", "0.47299075", "0.47254565", "0.47192535", "0.47164682", "0.47150022", "0.47093403", "0.47052515" ]
0.8580701
0
NoteToFreq converts a notes.Duration into a frequency with period equal to that note length
NoteToFreq преобразует notes.Duration в частоту с периодом, равным длительности ноты
func (m Meter) NoteToFreq(noteVal notes.Duration) float64 { duration := m.NoteToTime(noteVal) return 1 / float64(duration.Seconds()) }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func toFreq(s semi, tonic freq) freq {\n\treturn tonic * freq(math.Pow(root12, float64(s)))\n}", "func (c *Config) FrequencyDur() time.Duration {\n\tconfigLock.RLock()\n\tdefer configLock.RUnlock()\n\n\tif c.Frequency == 0 {\n\t\treturn callhomeCycleDefault\n\t}\n\n\treturn c.Frequency\n}", "func (m Meter) NoteToTime(noteVal notes.Duration) time.Duration {\n\treturn time.Duration((float64(noteVal/m.BeatValue) / m.BeatsPerMinute) * float64(time.Minute))\n}", "func (m *TermsExpiration) GetFrequency()(*i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ISODuration) {\n return m.frequency\n}", "func NewNote(vol float64, len time.Duration, freq ...float64) *Note {\n\treturn &Note{\n\t\tVolume: vol,\n\t\tFrequency: freq,\n\t\tOctave: 1.0,\n\t\tLength: len,\n\t}\n}", "func (c *Config) GetFrequency() time.Duration {\n\tif c.FrequencyInMS == 0 {\n\t\treturn time.Second\n\t}\n\n\treturn time.Duration(c.FrequencyInMS) * time.Millisecond\n}", "func (n *Note) measure() {\n\tvar samples int\n\tlength := wholeNote + (wholeNote / 100 * 4 * (4 - n.tempo)) // 4% per tempo unit\n\tswitch n.duration {\n\tcase 'W':\n\t\tsamples = length\n\tcase 'H':\n\t\tsamples = length / 2\n\tcase 'Q':\n\t\tsamples = length / 4\n\tcase 'E':\n\t\tsamples = length / 8\n\tcase 'S':\n\t\tsamples = length / 16\n\tcase 'T':\n\t\tsamples = length / 32\n\tcase 'I':\n\t\tsamples = length / 64\n\t}\n\n\tif samples > 0 {\n\t\t// Apply dot measure\n\t\tif n.dotted {\n\t\t\tsamples += samples / 2\n\t\t}\n\t}\n\n\tn.samples = samples\n}", "func frequencyFromSemitone(semitone int) float32 {\n\t// A4 is 440 Hz, 12 semitones per octave\n\treturn float32(440 * math.Pow(2, float64(semitone-69)/12))\n}", "func (o AnomalySubscriptionOutput) Frequency() pulumi.StringOutput {\n\treturn o.ApplyT(func(v *AnomalySubscription) pulumi.StringOutput { return v.Frequency }).(pulumi.StringOutput)\n}", "func (o KubernetesClusterMaintenanceWindowAutoUpgradePtrOutput) Frequency() pulumi.StringPtrOutput {\n\treturn 
o.ApplyT(func(v *KubernetesClusterMaintenanceWindowAutoUpgrade) *string {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn &v.Frequency\n\t}).(pulumi.StringPtrOutput)\n}", "func Freq() float32 {\n\treturn global.Freq()\n}", "func (j *ScheduledJob) Frequency() (time.Duration, error) {\n\tif !j.HasRecurringSchedule() {\n\t\treturn 0, errors.Newf(\n\t\t\t\"schedule %d is not periodic\", j.rec.ScheduleID)\n\t}\n\texpr, err := cronexpr.Parse(j.rec.ScheduleExpr)\n\tif err != nil {\n\t\treturn 0, errors.Wrapf(err,\n\t\t\t\"parsing schedule expression: %q; it must be a valid cron expression\",\n\t\t\tj.rec.ScheduleExpr)\n\t}\n\tnext := expr.Next(j.env.Now())\n\tnextNext := expr.Next(next)\n\treturn nextNext.Sub(next), nil\n}", "func ConvertNanosecondsToHz(val float64) float64 {\n\treturn val / 1e7\n}", "func NoteAtTime(t, sr int, note Note) float64 {\n\tsum := 0.0\n\tmultiplier := (2.0 * math.Pi) / float64(sr)\n\tfor i := 0; i < len(note.Frequency); i++ {\n\t\tsum += math.Sin((multiplier * (note.Frequency[i] * note.Octave)) * float64(t))\n\t}\n\treturn sum\n}", "func (o KubernetesClusterMaintenanceWindowAutoUpgradeOutput) Frequency() pulumi.StringOutput {\n\treturn o.ApplyT(func(v KubernetesClusterMaintenanceWindowAutoUpgrade) string { return v.Frequency }).(pulumi.StringOutput)\n}", "func tempoToPulseInterval(t Bpm) time.Duration {\n\n\treturn time.Duration((uSecInMin/(t/10.00))/ppqn) * time.Microsecond\n}", "func (o ElastigroupScheduledTaskOutput) Frequency() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v ElastigroupScheduledTask) *string { return v.Frequency }).(pulumi.StringPtrOutput)\n}", "func SanitizeFrequency(frequency float64) uint64 {\n\t// 868.1 to 868100000 - but we will lose the decimals\n\tif frequency < 1000.0 {\n\t\tfrequency = frequency * 1000000\n\t}\n\n\t// 868400000000000 to 868400000\n\tif frequency > 1000000000 {\n\t\tfrequency = frequency / 1000000\n\t}\n\n\t// 869099976 to 869100000\n\tfrequency = math.Round(frequency/1000) * 
1000\n\tfrequencyInt := uint64(frequency)\n\n\treturn frequencyInt\n}", "func (o BeanstalkScheduledTaskOutput) Frequency() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v BeanstalkScheduledTask) *string { return v.Frequency }).(pulumi.StringPtrOutput)\n}", "func (o ScheduledAuditOutput) Frequency() ScheduledAuditFrequencyOutput {\n\treturn o.ApplyT(func(v *ScheduledAudit) ScheduledAuditFrequencyOutput { return v.Frequency }).(ScheduledAuditFrequencyOutput)\n}", "func getNoteTicker(secondsPerNote float64) *time.Ticker {\n\tnoteDurationString := fmt.Sprintf(\"%.5f\", secondsPerNote) + \"s\"\n\tnoteDuration, _ := time.ParseDuration(noteDurationString)\n\n\treturn time.NewTicker(noteDuration)\n}", "func (note Note) ToData(index, sr int) float64 {\n\tfreqLen := len(note.Frequency)\n\tvol := note.Volume\n\tif freqLen > 1 {\n\t\tvol = vol / float64(freqLen)\n\t}\n\treturn vol * note.AtTime(index, sr)\n}", "func AutocorrelateFrequency(waveform []float64, sampleRate float64) float64 {\n\n\tsearchSize := len(waveform) / 2\n\n\ttolerance := 0.001\n\trms := 0.0\n\trmsMin := 0.008\n\n\tprevAssessedStrings := assessedStringsInLastFrame\n\n\tfor _, amplitude := range waveform {\n\t\trms += amplitude * amplitude\n\t}\n\n\trms = math.Sqrt(rms / float64(len(waveform)))\n\n\tif rms < rmsMin {\n\t\treturn 0\n\t}\n\n\ttime := (time.Now().UnixNano() / 1000000)\n\n\tif rms > lastRms+rmsThreshold {\n\t\tassessStringsUntilTime = time + 250\n\t}\n\n\tif time < assessStringsUntilTime {\n\t\tassessedStringsInLastFrame = true\n\n\t\tfor i, note := range notes {\n\t\t\toffset := int(math.Round(sampleRate / note.frequency))\n\t\t\tdifference := 0.0\n\n\t\t\tif !prevAssessedStrings {\n\t\t\t\tdifferences[i] = 0\n\t\t\t}\n\n\t\t\tfor j := 0; j < searchSize; j++ {\n\t\t\t\tcurrentAmp := waveform[j]\n\t\t\t\toffsetAmp := waveform[j+offset]\n\t\t\t\tdifference += math.Abs(currentAmp - offsetAmp)\n\t\t\t}\n\n\t\t\tdifference /= float64(searchSize)\n\n\t\t\tdifferences[i] += difference * 
float64(offset)\n\t\t}\n\t} else {\n\t\tassessedStringsInLastFrame = false\n\t}\n\n\tif !assessedStringsInLastFrame && prevAssessedStrings {\n\t\tlastMinDifference = argmin(differences)\n\t}\n\n\tassumedString := notes[lastMinDifference]\n\tsearchRange := 10\n\tactualFrequency := int(math.Round(sampleRate / assumedString.frequency))\n\tsearchStart := actualFrequency - searchRange\n\tsearchEnd := actualFrequency + searchRange\n\tsmallestDifference := math.Inf(1)\n\n\tfor i := searchStart; i < searchEnd; i++ {\n\t\tdifference := 0.0\n\n\t\tfor j := 0; j < searchSize; j++ {\n\t\t\tcurrentAmp := waveform[j]\n\t\t\toffsetAmp := waveform[j+i]\n\t\t\tdifference += math.Abs(currentAmp - offsetAmp)\n\t\t}\n\n\t\tdifference /= float64(searchSize)\n\n\t\tif difference < smallestDifference {\n\t\t\tsmallestDifference = difference\n\t\t\tactualFrequency = i\n\t\t}\n\n\t\tif difference < tolerance {\n\t\t\tactualFrequency = i\n\t\t\tbreak\n\t\t}\n\n\t}\n\n\tlastRms = rms\n\n\treturn sampleRate / float64(actualFrequency)\n}", "func durationToWord(in Interval) string {\n\tswitch in {\n\tcase FifteenSecond:\n\t\treturn \"fifteensecond\"\n\tcase OneMin:\n\t\treturn \"onemin\"\n\tcase ThreeMin:\n\t\treturn \"threemin\"\n\tcase FiveMin:\n\t\treturn \"fivemin\"\n\tcase TenMin:\n\t\treturn \"tenmin\"\n\tcase FifteenMin:\n\t\treturn \"fifteenmin\"\n\tcase ThirtyMin:\n\t\treturn \"thirtymin\"\n\tcase OneHour:\n\t\treturn \"onehour\"\n\tcase TwoHour:\n\t\treturn \"twohour\"\n\tcase FourHour:\n\t\treturn \"fourhour\"\n\tcase SixHour:\n\t\treturn \"sixhour\"\n\tcase EightHour:\n\t\treturn \"eighthour\"\n\tcase TwelveHour:\n\t\treturn \"twelvehour\"\n\tcase OneDay:\n\t\treturn \"oneday\"\n\tcase ThreeDay:\n\t\treturn \"threeday\"\n\tcase FifteenDay:\n\t\treturn \"fifteenday\"\n\tcase OneWeek:\n\t\treturn \"oneweek\"\n\tcase TwoWeek:\n\t\treturn \"twoweek\"\n\tcase OneMonth:\n\t\treturn \"onemonth\"\n\tcase OneYear:\n\t\treturn \"oneyear\"\n\tdefault:\n\t\treturn \"notfound\"\n\t}\n}", "func 
(c *Context) GetTimerFrequency() uint64 {\n\treturn uint64(C.glfwGetTimerFrequency())\n}", "func (c *Context) GetTimerFrequency() uint64 {\n\treturn uint64(C.glfwGetTimerFrequency())\n}", "func (o KubernetesClusterMaintenanceWindowNodeOsPtrOutput) Frequency() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v *KubernetesClusterMaintenanceWindowNodeOs) *string {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn &v.Frequency\n\t}).(pulumi.StringPtrOutput)\n}", "func (o KubernetesClusterMaintenanceWindowNodeOsOutput) Frequency() pulumi.StringOutput {\n\treturn o.ApplyT(func(v KubernetesClusterMaintenanceWindowNodeOs) string { return v.Frequency }).(pulumi.StringOutput)\n}", "func ToFloat(d time.Duration) (seconds float64) {\n\treturn float64(d) / float64(time.Second)\n}", "func (s *Sound) Length() time.Duration {\n\treturn time.Duration(s.snd.Get(\"duration\").Float()) * time.Second\n}", "func (m *TermsExpiration) SetFrequency(value *i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ISODuration)() {\n m.frequency = value\n}", "func Frequency(s string) FreqMap {\n\tm := FreqMap{}\n\tfor _, r := range s {\n\t\tm[r]++\n\t}\n\treturn m\n}", "func Frequency(s string) FreqMap {\n\tm := FreqMap{}\n\tfor _, r := range s {\n\t\tm[r]++\n\t}\n\treturn m\n}", "func Frequency(s string) FreqMap {\n\tm := FreqMap{}\n\tfor _, r := range s {\n\t\tm[r]++\n\t}\n\treturn m\n}", "func Frequency(s string) FreqMap {\n\tm := FreqMap{}\n\tfor _, r := range s {\n\t\tm[r]++\n\t}\n\treturn m\n}", "func Frequency(s string) FreqMap {\n\tm := FreqMap{}\n\tfor _, r := range s {\n\t\tm[r]++\n\t}\n\treturn m\n}", "func Frequency(s string) FreqMap {\n\tm := FreqMap{}\n\tfor _, r := range s {\n\t\tm[r]++\n\t}\n\treturn m\n}", "func Frequency(s string) FreqMap {\n\tm := FreqMap{}\n\tfor _, r := range s {\n\t\tm[r]++\n\t}\n\treturn m\n}", "func Frequency(s string) FreqMap {\n\tm := FreqMap{}\n\tfor _, r := range s {\n\t\tm[r]++\n\t}\n\treturn m\n}", "func Frequency(s string) FreqMap 
{\n\tm := FreqMap{}\n\tfor _, r := range s {\n\t\tm[r]++\n\t}\n\treturn m\n}", "func Frequency(s string) FreqMap {\n\tm := FreqMap{}\n\tfor _, r := range s {\n\t\tm[r]++\n\t}\n\treturn m\n}", "func Frequency(s string) FreqMap {\n\tm := FreqMap{}\n\tfor _, r := range s {\n\t\tm[r]++\n\t}\n\treturn m\n}", "func Frequency(s string) FreqMap {\n\tm := FreqMap{}\n\tfor _, r := range s {\n\t\tm[r]++\n\t}\n\treturn m\n}", "func Frequency(s string) FreqMap {\n\tm := FreqMap{}\n\tfor _, r := range s {\n\t\tm[r]++\n\t}\n\treturn m\n}", "func Frequency(s string) FreqMap {\n\tm := FreqMap{}\n\tfor _, r := range s {\n\t\tm[r]++\n\t}\n\treturn m\n}", "func (d *DFT) ToFreqRange() map[int]float64 {\n\tif d == nil {\n\t\treturn nil\n\t}\n\toutput := make(map[int]float64, len(d.Coefs)/2)\n\tfor i := 0; i < len(d.Coefs)/2; i++ {\n\t\tf := (i * d.SampleRate) / (len(d.Coefs))\n\t\t// calculate the magnitude\n\t\toutput[f] = math.Log10(math.Sqrt(math.Pow(real(d.Coefs[i]), 2) + math.Pow(imag(d.Coefs[i]), 2)))\n\t}\n\treturn output\n}", "func (o InventorySchedulePtrOutput) Frequency() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v *InventorySchedule) *string {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn &v.Frequency\n\t}).(pulumi.StringPtrOutput)\n}", "func (o AnomalySubscriptionOutput) Frequency() AnomalySubscriptionFrequencyOutput {\n\treturn o.ApplyT(func(v *AnomalySubscription) AnomalySubscriptionFrequencyOutput { return v.Frequency }).(AnomalySubscriptionFrequencyOutput)\n}", "func fseconds(d time.Duration) float64 { return float64(d) / float64(time.Second) }", "func (e PrecisionTiming) durationToMs(x time.Duration) float64 {\n\treturn float64(x) / float64(time.Millisecond)\n}", "func WithFrequency(f int32) Option {\n\treturn func(d *Daemon) {\n\t\td.Frequency = f\n\t\td.frequency = time.Duration(time.Duration(f) * time.Second)\n\t}\n}", "func (c *Context) GetFreqCorrection() (freq int) {\n\treturn int(C.rtlsdr_get_freq_correction((*C.rtlsdr_dev_t)(c.dev)))\n}", "func 
(r Rest) TickDuration(quarter uint16) uint16 {\n\tif Duration(r) == None {\n\t\treturn 0\n\t}\n\t//the fraction of the measure the note takes\n\tfraq := 1. / math.Pow(2., float64(r))\n\n\treturn uint16(float64(4*quarter) * fraq)\n}", "func Frequency(frequency uint32) PmMetricsOption {\n\treturn func(args *PmMetrics) {\n\t\targs.frequency = frequency\n\t}\n}", "func (r *FTW) FreqTuningWord() uint32 {\n\treturn binary.BigEndian.Uint32(r[:])\n}", "func Frequency(s string) FreqMap {\n m := FreqMap{}\n for _, r := range s {\n m[r]++\n }\n return m\n}", "func (r Rest) Duration(measure time.Duration) time.Duration {\n\tif Duration(r) == None {\n\t\treturn 0\n\t}\n\t//the fraction of the measure the note takes\n\tfraq := 1. / math.Pow(2., float64(r))\n\n\treturn time.Duration(float64(measure) * fraq)\n}", "func (o InventoryScheduleOutput) Frequency() pulumi.StringOutput {\n\treturn o.ApplyT(func(v InventorySchedule) string { return v.Frequency }).(pulumi.StringOutput)\n}", "func (s *ReplicationJob) SetFrequency(v int64) *ReplicationJob {\n\ts.Frequency = &v\n\treturn s\n}", "func (m *Mixer) Frequency() uint {\n\treturn uint(C.al_get_mixer_frequency((*C.ALLEGRO_MIXER)(m)))\n}", "func durationToMilliseconds(d time.Duration) (uint64, error) {\n\tif d < 0 {\n\t\treturn 0, fmt.Errorf(\"report period cannot be negative: %v\", d)\n\t}\n\n\treturn uint64(d / time.Millisecond), nil\n}", "func (s *CreateReplicationJobInput) SetFrequency(v int64) *CreateReplicationJobInput {\n\ts.Frequency = &v\n\treturn s\n}", "func (i *InputInlineQueryResultVoiceNote) GetVoiceNoteDuration() (value int32) {\n\tif i == nil {\n\t\treturn\n\t}\n\treturn i.VoiceNoteDuration\n}", "func (f *Fs) Precision() time.Duration {\n\treturn time.Second\n}", "func (f *Fs) Precision() time.Duration {\n\treturn time.Second\n}", "func NewFrequency(v float64, s string) Frequency {\n\treturn Frequency(v) * frequency[s]\n}", "func (r *rPIO) UpdatePollFreq(d time.Duration) error {\n\tif !r.open {\n\t\treturn 
fmt.Errorf(\"polling has not yet started\")\n\t}\n\n\t// Update the poller frequency\n\tr.poller.newPollFreq <- d\n\n\treturn nil\n}", "func (r *Radio) Frequency() uint32 {\n\treturn r.freq\n}", "func toMilliseconds(duration time.Duration) float64 {\n\tif duration < time.Microsecond*10 {\n\t\treturn 0\n\t}\n\n\tms := float64(duration) / float64(time.Millisecond)\n\t// Round time to 0.02 precision\n\treturn math.Round(ms*100) / 100\n}", "func (o VolumeGroupSapHanaVolumeDataProtectionReplicationPtrOutput) ReplicationFrequency() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v *VolumeGroupSapHanaVolumeDataProtectionReplication) *string {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn &v.ReplicationFrequency\n\t}).(pulumi.StringPtrOutput)\n}", "func (mh *MidiLogger) HandleNote(n gm.Note) {\n\tif n.Vel == 0 {\n\t\tn.On = false\n\t}\n\tnotes := mh.keys[int(n.Ch)]\n\n\t// Update our note maps maps\n\tif n.On {\n\t\tnotes[n.Note] = n.Vel\n\t} else {\n\t\tnotes[n.Note] = 0\n\t}\n\tfmt.Printf(\"%s - %d.%d (%v)\\n\", n, mh.sixteenthsElapsed, mh.beatPosition, time.Since(mh.startTime))\n}", "func (sound L8) Duration() float32 {\n\tif sound.SampleRate <= 0 {\n\t\treturn 0.0\n\t}\n\treturn float32(len(sound.Samples)) / sound.SampleRate\n}", "func (r *RAM) FreqTuningWord() uint32 {\n\treturn binary.BigEndian.Uint32(r[0:4])\n}", "func (l DNA8List) BaseFreq() (b [4]float64) {\n\tvar n [7]int\n\tfor _, s := range l {\n\t\tfor _, b := range s {\n\t\t\tn[b&6]++\n\t\t}\n\t}\n\tc := 1 / float64(n[0]+n[2]+n[4]+n[6])\n\tfor i := range b {\n\t\tb[i] = float64(n[i*2]) * c\n\t}\n\treturn\n}", "func (fn *formulaFuncs) duration(settlement, maturity, coupon, yld, frequency, basis formulaArg) formulaArg {\n\tfrac := yearFrac(settlement.Number, maturity.Number, int(basis.Number))\n\tif frac.Type != ArgNumber {\n\t\treturn frac\n\t}\n\targumments := 
list.New().Init()\n\targumments.PushBack(settlement)\n\targumments.PushBack(maturity)\n\targumments.PushBack(frequency)\n\targumments.PushBack(basis)\n\tcoups := fn.COUPNUM(argumments)\n\tduration := 0.0\n\tp := 0.0\n\tcoupon.Number *= 100 / frequency.Number\n\tyld.Number /= frequency.Number\n\tyld.Number++\n\tdiff := frac.Number*frequency.Number - coups.Number\n\tfor t := 1.0; t < coups.Number; t++ {\n\t\ttDiff := t + diff\n\t\tadd := coupon.Number / math.Pow(yld.Number, tDiff)\n\t\tp += add\n\t\tduration += tDiff * add\n\t}\n\tadd := (coupon.Number + 100) / math.Pow(yld.Number, coups.Number+diff)\n\tp += add\n\tduration += (coups.Number + diff) * add\n\tduration /= p\n\tduration /= frequency.Number\n\treturn newNumberFormulaArg(duration)\n}", "func (s *UpdateReplicationJobInput) SetFrequency(v int64) *UpdateReplicationJobInput {\n\ts.Frequency = &v\n\treturn s\n}", "func (s *ServerReplicationParameters) SetFrequency(v int64) *ServerReplicationParameters {\n\ts.Frequency = &v\n\treturn s\n}", "func (d TimeDivision) TicksPerQuarterNote() uint16 {\n\tif (d & 0x8000) != 0 {\n\t\treturn 0\n\t}\n\treturn uint16(d)\n}", "func SilentNote(length time.Duration) *Note {\n\treturn &Note{\n\t\tVolume: 0.0,\n\t\tFrequency: []float64{0.0},\n\t\tOctave: 1.0,\n\t\tLength: length,\n\t}\n}", "func microsecondsPerPulse(bpm float32) time.Duration {\n\treturn time.Duration((float32(Minute) * float32(Microsecond)) / (float32(Ppqn) * bpm))\n}", "func (g *Keyboard) Add(freq float64) {\n\tg.l.Lock()\n\tdefer g.l.Unlock()\n\n\tbaseT := float64(g.totalSamples) / float64(g.sr)\n\tdurT := float64(g.dur) / float64(time.Second)\n\n\t// todo (bs): this fixed gain is pretty clumsy. It acts as something of a\n\t// safeguard to ensure that multiple notes can be played at the same time\n\t// without overwhelming the volume. I'd kinda guess this should be more\n\t// adaptive based on the number of concurrent notes - e.g. 
make a mapping\n\t// like: 1 note -> 0.4 gain; 2 notes -> 0.35 gain each; 3 notes -> 0.28 gain\n\t// each; 4 notes -> 0.25 gain each\n\t//\n\t// and have further notes have a fixed fraction of 1. This would need some\n\t// good internal smarts about how to downscale past gains for existing notes;\n\t// I'd say it'd require some better struct-based functions to make variability\n\t// easier to manage.\n\t// w := AmplifyWave(Gain(0.4), PianoNote(g.dur, freq))\n\n\tw := AmplifyWave(\n\t\tGain(0.4),\n\t\tPianoNote(g.dur, freq),\n\t)\n\n\tg.waves = append(g.waves, func(t float64) (float64, bool) {\n\t\tif t > baseT+durT {\n\t\t\treturn 0, true\n\t\t}\n\t\treturn w(t - baseT), false\n\t})\n}", "func (t *Track) Duration() float64 {\n\treturn float64(t.duration) / float64(t.globalTimescale)\n}", "func wordFreq(filename string, word string)(int, time.Duration){\n\tstartTime := time.Now()\n\tfile, err := os.Open(filename)\n\tif err != nil {\n\t\treturn -1000, time.Since(startTime)\n\t}\n\tdefer file.Close()\n\n\tscanner := bufio.NewScanner(file)\n\twc := 0\n\tfor scanner.Scan() {\n\t\twords := strings.Fields(scanner.Text())\n\t\tfor _, lword := range words {\n\t\t\tif lword == word || lword == word+\",\" || lword == word+\".\"{\n\t\t\t\twc += 1\n\t\t\t}\n\t\t}\n\t}\n\n\tdur := time.Since(startTime)\n\treturn wc, dur\n}", "func (p *Posting) Frequency() uint64 {\n\treturn p.freq\n}", "func tickspersecond() int64", "func fmtDuration(d time.Duration) string {\n\treturn fmt.Sprintf(\"%.2fs\", d.Seconds())\n}", "func (decoder *Decoder) ByteOffsetToDur(offset int32) time.Duration {\n\treturn time.Duration(offset/decoder.byteRate) * time.Second\n}", "func (q MetricTicks) FractionalDuration(fractionalBPM float64, deltaTicks uint32) time.Duration {\n\tif q == 0 {\n\t\tq = defaultMetric\n\t}\n\t// (60000 / T) * (d / R) = D[ms]\n\t//\tdurQnMilli := 60000 / float64(tempoBPM)\n\t//\t_4thticks := float64(deltaTicks) / float64(uint16(q))\n\tres := 60000000000 * float64(deltaTicks) / 
(fractionalBPM * float64(uint16(q)))\n\t//fmt.Printf(\"what: %vns\\n\", res)\n\treturn time.Duration(int64(math.Round(res)))\n\t//\treturn time.Duration(roundFloat(durQnMilli*_4thticks, 0)) * time.Millisecond\n}", "func (pm *PmMetrics) UpdateFrequency(frequency uint32) {\n\tpm.frequency = frequency\n}", "func (r *TTNRandom) Freq() float32 {\n\treturn freqs[r.Interface.Intn(len(freqs))]\n}", "func (w *wavData) writeNote(note string, time float32, amplitude float32, channels []int, blend bool, reset bool, relativeDuration int) {\n\tvar (\n\t\tnumChannels = w.numChannels\n\t\tsampleRate = w.sampleRate\n\n\t\t// to prevent sound artifacts\n\t\tfadeSeconds float32 = 0.001\n\n\t\t// calculating properties of given note\n\t\tsemitone, _ = semitoneFromNote(note)\n\t\tfrequency = float32(frequencyFromSemitone(semitone)) * math.Pi * 2 / float32(sampleRate)\n\n\t\t// amount of blocks to be written\n\t\tblocksOut = int(math.Round(float64(sampleRate) * float64(time)))\n\t\t// reduces sound artifacts by fading at last fadeSeconds\n\t\tnonZero = float32(blocksOut) - float32(sampleRate)*fadeSeconds\n\t\t// fade interval in samples\n\t\tfade = float32(sampleRate)*fadeSeconds + 1\n\n\t\t// index of start and stop samples\n\t\tstart = int(w.pointer)\n\t\tstop = len(w.data)\n\n\t\t// k = cached index of data\n\t\t// d = sample data value\n\t\tk int\n\t\td float32\n\t)\n\n\t// by default write to all channels\n\tif len(channels) == 0 {\n\t\tfor i := 0; i < int(numChannels); i++ {\n\t\t\tchannels = append(channels, i)\n\t\t}\n\t}\n\n\tskipChannels := make([]bool, numChannels)\n\tfor i := 0; i < len(skipChannels); i++ {\n\t\tskipChannels[i] = channels[i] == -1\n\t}\n\n\t// update existing data\n\tfor i := 0; i < blocksOut; i++ {\n\t\t// iterate through specified channels\n\t\tfor j := 0; j < len(channels); j++ {\n\t\t\tk = start + i*int(numChannels) + channels[j]\n\t\t\td = 0\n\n\t\t\tif frequency > 0 {\n\t\t\t\td = amplitude * float32(math.Sin(float64(frequency)*float64(i)))\n\t\t\t\tif 
float32(i) < fade {\n\t\t\t\t\td *= float32(i) / fade\n\t\t\t\t} else if float32(i) > nonZero {\n\t\t\t\t\td *= float32(blocksOut-i+1) / fade\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tif blend {\n\t\t\t\tw.data[k] = d + w.data[k]\n\t\t\t} else {\n\t\t\t\tw.data[k] = d\n\t\t\t}\n\t\t}\n\t}\n\n\tend := maxInt(start+blocksOut*int(numChannels), stop) * (w.bitsPerSample >> 3)\n\tw.chunkSize = uint32(end + len(w.header) - 8)\n\tw.subChunk2Size = uint32(end)\n\n\tbinary.LittleEndian.PutUint32(w.header[4:8], w.chunkSize)\n\tbinary.LittleEndian.PutUint32(w.header[40:44], w.subChunk2Size)\n\n\tif !reset {\n\t\tw.pointer = uint(start + blocksOut*int(numChannels))\n\t}\n}", "func updatePollFrequency(url string) int8 {\n\tvar (\n\t\tlastChange sql.NullString\n\t)\n\n\terr := db.QueryRow(\"SELECT last_change FROM podcasts WHERE feed_url = $1;\", url).Scan(&lastChange)\n\tif err != nil {\n\t\tlog.Println(err)\n\t}\n\t// Setup time for comparison\n\tt := time.Now()\n\n\t// no last change date, set default time\n\tif !lastChange.Valid {\n\t\treturn 4\n\t}\n\n\t// Parse lastChange into time\n\tlastChangeTime, err := time.Parse(time.RFC3339, lastChange.String)\n\tif err != nil {\n\t\tlog.Println(err)\n\t}\n\n\t// get the difference from now to lastChange\n\tdiff := t.Sub(lastChangeTime).Hours()\n\n\tif diff > 730 {\n\t\treturn 48\n\t} else if diff > 168 {\n\t\treturn 24\n\t} else if diff > 48 {\n\t\treturn 16\n\t} else if diff > 24 {\n\t\treturn 8\n\t} else {\n\t\treturn 4\n\t}\n}", "func (p *Pitch) FrequencyInHertz(concertPitch float64) float64 {\n\treturn math.Pow(2, float64(A4().GetDistanceTo(p))/12.0) * concertPitch\n}", "func (q *IntervalQueryRuleFuzzy) PrefixLength(prefixLength int) *IntervalQueryRuleFuzzy {\n\tq.prefixLength = &prefixLength\n\treturn q\n}", "func (p *Periph) StoreFREQUENCY(f Freq) {\n\tp.frequency.Store(uint32(f))\n}", "func (f *Fs) Precision() time.Duration {\n\treturn f.precision\n}", "func GetTimeFrequency(lines []string) (timeFrequence map[string]int) 
{\n\ttimeFrequence = map[string]int{\n\t\t\"Morning\": 0,\n\t\t\"Noon\": 0,\n\t\t\"Afternoon\": 0,\n\t\t\"Evening\": 0,\n\t\t\"Night\": 0,\n\t}\n\n\tfor _, line := range lines {\n\t\thourTime := getHourTime(parseHour(line))\n\t\tif _, ok := timeFrequence[hourTime]; ok {\n\t\t\ttimeFrequence[hourTime]++\n\t\t}\n\t}\n\n\treturn\n}", "func (t *ToneGenerator) Tone(freq, seconds float64, vol int32) []int32 {\n\tvar synthArray = make([]int32, int(seconds*t.sampleRate))\n\tdelta := freq * t.step\n\n\tfor i := 0; i < len(synthArray); i++ {\n\t\tsynthArray[i] = int32(t.wave(float64(i)*delta) * float64(vol))\n\n\t}\n\treturn synthArray\n}", "func countRuneFreq(work [][]rune, acc *FreqMap, mu *sync.Mutex, wg *sync.WaitGroup) {\n\tfreq := FreqMap{} // this worker's count\n\n\tfor _, runes := range work {\n\t\tfor _, r := range runes {\n\t\t\tfreq[r]++\n\t\t}\n\t}\n\n\t// atomic update\n\tmu.Lock()\n\tfor r, count := range freq {\n\t\t(*acc)[r] += count\n\t}\n\tmu.Unlock()\n\n\twg.Done()\n}", "func (o *PayPeriodDetails) GetPayFrequency() string {\n\tif o == nil || o.PayFrequency.Get() == nil {\n\t\tvar ret string\n\t\treturn ret\n\t}\n\treturn *o.PayFrequency.Get()\n}" ]
[ "0.6109282", "0.56692827", "0.5592371", "0.54737693", "0.5341398", "0.5270148", "0.52006036", "0.5196711", "0.50826305", "0.5026439", "0.5008344", "0.50019354", "0.49847287", "0.4954853", "0.4947973", "0.4922762", "0.49062866", "0.48975572", "0.48922068", "0.48828414", "0.48826703", "0.48503333", "0.4807786", "0.4796074", "0.47953132", "0.47953132", "0.47924343", "0.4768234", "0.47653648", "0.4731927", "0.4721248", "0.4715538", "0.4715538", "0.4715538", "0.4715538", "0.4715538", "0.4715538", "0.4715538", "0.4715538", "0.4715538", "0.4715538", "0.4715538", "0.4715538", "0.4715538", "0.4715538", "0.4714661", "0.46773726", "0.46529272", "0.46274608", "0.46207258", "0.462014", "0.4612961", "0.4601178", "0.45749488", "0.45575896", "0.45520902", "0.45391828", "0.45371342", "0.4528572", "0.45119452", "0.45013675", "0.44932637", "0.4485349", "0.44812185", "0.44812185", "0.44774956", "0.44760126", "0.4468291", "0.44388062", "0.44294864", "0.44292873", "0.4422195", "0.4419397", "0.4413722", "0.44132966", "0.4409625", "0.44007844", "0.439679", "0.43836528", "0.43821883", "0.4358468", "0.4357613", "0.43272364", "0.4326961", "0.4309018", "0.4304459", "0.42959937", "0.42715394", "0.42690802", "0.42679313", "0.42483518", "0.42415392", "0.4227476", "0.42173022", "0.42141944", "0.41979897", "0.41942605", "0.41896075", "0.41890633", "0.41696638" ]
0.82673764
0
GenerateJWTToken generates a JWT token with the username and singed by the given secret key
GenerateJWTToken генерирует JWT-токен с именем пользователя и подписывает его с помощью заданного секретного ключа
func GenerateJWTToken(userName, jwtAccSecretKey string) (string, error) { claims := jwt.MapClaims{ "username": userName, "ExpiresAt": jwt.TimeFunc().Add(1 * time.Minute).Unix(), "IssuedAt": jwt.TimeFunc().Unix(), } token := jwt.NewWithClaims(jwt.SigningMethodHS256, claims) return token.SignedString([]byte(jwtAccSecretKey)) }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func GenerateJWTToken(username string) (string, error) {\n\t// Create token\n\ttoken := jwt.NewWithClaims(jwt.SigningMethodHS256, jwt.MapClaims{\n\t\t\"username\": username,\n\t\t\"exp\": time.Now().Add(time.Minute * 5).Unix(),\n\t})\n\n\tt, err := token.SignedString([]byte(jwtsecret))\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn t, err\n}", "func GenerateToken(username, dept_id string) (string, error) {\n\tnowTime := time.Now()\n\texpireTime := nowTime.Add(330 * 24 * time.Hour)\n\n\tclaims := CustomClaims{\n\t\tusername,\n\t\tdept_id,\n\t\tjwt.StandardClaims{\n\t\t\tExpiresAt: expireTime.Unix(),\n\t\t\tIssuer: \"dingtalk\",\n\t\t},\n\t}\n\n\ttokenClaims := jwt.NewWithClaims(jwt.SigningMethodHS256, claims)\n\ttoken, err := tokenClaims.SignedString(jwtSecret)\n\n\treturn token, err\n}", "func GenerateJWT(t models.User) (string, error) {\n\n\tJWTSECRET := config.Get(\"JWTSECRET\")\n\tsecret := []byte(JWTSECRET)\n\n\tpayload := jwt.MapClaims{\n\t\t\"_id\": t.ID.Hex(),\n\t\t\"email\": t.Email,\n\t\t\"name\": t.Name,\n\t\t\"lastname\": t.Lastname,\n\t\t\"bio\": t.Biography,\n\t\t\"website\": t.Website,\n\t\t\"birthDate\": t.BirthDate,\n\t\t\"location\": t.Location,\n\n\t\t\"exp\": time.Now().Add(24 * time.Hour).Unix(),\n\t}\n\n\ttoken := jwt.NewWithClaims(jwt.SigningMethodHS256, payload)\n\ttokenStr, err := token.SignedString(secret)\n\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\ttokenStr = \"Bearer \" + tokenStr\n\n\treturn tokenStr, nil\n}", "func GenerateJWTToken(info *TokenInfo, expiresAt int64) (string, error) {\n\tinfo.ExpiresAt = expiresAt\n\ttoken := jwt.NewWithClaims(jwt.SigningMethodHS256, info)\n\tencryptedToken, err := token.SignedString([]byte(secretKey))\n\tif err != nil {\n\t\treturn \"\", errors.Customize(500, \"failed to sign on token\", err)\n\t}\n\treturn encryptedToken, nil\n}", "func GenerateJWT(t models.User) (string, error) {\n\tmySecret := []byte(\"learningaboutgobybuildingatwittercloneusingmongodb\")\n\n\tpayload := 
jwt.MapClaims{\n\t\t\"email\": t.Email,\n\t\t\"name\": t.Name,\n\t\t\"Last\": t.Last,\n\t\t\"bod\": t.Bod,\n\t\t\"location\": t.Location,\n\t\t\"website\": t.Website,\n\t\t\"biography\": t.Biography,\n\t\t\"_id\": t.ID.Hex(),\n\t\t\"exp\": time.Now().Add(time.Hour * 24).Unix(),\n\t}\n\n\ttoken := jwt.NewWithClaims(jwt.SigningMethodHS256, payload)\n\ttokenStr, err := token.SignedString(mySecret)\n\tif err != nil {\n\t\treturn tokenStr, err\n\t}\n\treturn tokenStr, nil\n}", "func GenerateJWT(username string, session *r.Session) string {\n\tvar jwt string\n\tdb := os.Getenv(\"DB\")\n\ttokenTable := os.Getenv(\"TOKENTABLE\")\n\tsalt := randStringBytes(32)\n\tu64 := b64.URLEncoding.EncodeToString([]byte(username))\n\ts64 := b64.URLEncoding.EncodeToString([]byte(salt))\n\thash := computeHMAC(u64 + \".\" + s64)\n\th := u64 + \".\" + s64 + \".\" + b64.URLEncoding.EncodeToString([]byte(hash))\n\t// Write to token table\n\tif !CheckUserExists(username, tokenTable, session) {\n\t\tauth := AuthToken{username, h}\n\t\t// fmt.Println(auth)\n\t\tr.DB(db).Table(tokenTable).Insert(auth).Run(session)\n\t\tjwt = h\n\t}\n\n\treturn jwt\n}", "func GenerateToken(username string, isAdmin bool, expires int, signingKey []byte) (string, error) {\n\tiat := time.Now()\n\texpirationTime := iat.Add(time.Duration(expires) * time.Second)\n\t// Create the JWT claims, which includes the username and expiry time\n\tclaims := &CustomClaims{\n\t\tUsername: username,\n\t\tIsAdmin: isAdmin,\n\t\tIssuedAt: iat.Unix(),\n\t\tStandardClaims: jwt.StandardClaims{\n\t\t\t// In JWT, the expiry time is expressed as unix milliseconds\n\t\t\tExpiresAt: expirationTime.Unix(),\n\t\t},\n\t}\n\n\t// Declare the token with the algorithm used for signing, and the claims\n\ttoken := jwt.NewWithClaims(jwt.SigningMethodHS256, claims)\n\t// Create the JWT string.\n\treturn token.SignedString(signingKey)\n}", "func (engine ssoEngineImpl) generateJWTToken(authenticatedUser *authenticatedUser) (*common.CustomClaims, string, 
error) {\n\n\t// Build the claims\n\tclaims := &common.CustomClaims{\n\t\tUser: authenticatedUser.UserName,\n\t\tRoles: authenticatedUser.Roles,\n\t\tStandardClaims: jwt.StandardClaims{\n\t\t\tExpiresAt: time.Now().Unix() + engine.tokenSecondsToLive,\n\t\t\tIssuedAt: time.Now().Unix(),\n\t\t\tIssuer: \"EasySSO Server\",\n\t\t},\n\t}\n\t// Build the token\n\ttoken := jwt.NewWithClaims(jwt.SigningMethodRS512, claims)\n\n\t// Convert the token to a string\n\ttokenString, err := token.SignedString(engine.privateKey)\n\tif err != nil {\n\t\tlog.Error(\"Unable to sign generated token\", err)\n\t\treturn nil, \"\", err\n\t}\n\treturn claims, tokenString, nil\n}", "func GenerateJWT() (string, error) {\n\ttoken := jwt.New(jwt.SigningMethodHS256)\n\tclaims := token.Claims.(jwt.MapClaims)\n\tclaims[\"authorized\"] = true\n\tclaims[\"user\"] = \"heheh\"\n\tclaims[\"exp\"] = time.Now().Add(time.Minute * 30).Unix()\n\ttokenString, err := token.SignedString(secretKey)\n\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn tokenString, nil\n}", "func GenerateJWT() (string, error) {\n\tlog.Printf(\"Generating new JWT\")\n\ttoken := jwt.New(jwt.SigningMethodHS256)\n\n\tclaims := token.Claims.(jwt.MapClaims)\n\n\tclaims[\"authorized\"] = true\n\tclaims[\"exp\"] = time.Now().Add(time.Minute * 30).Unix()\n\n\ttokenString, err := token.SignedString(signingKey)\n\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\treturn tokenString, nil\n}", "func GenerateJWT(t models.User) (string, error) {\n\tcode := []byte(\"lmSeryiJuasJuas\")\n\tpayload := jwt.MapClaims{\n\t\t\"_id\": t.ID.Hex(),\n\t\t\"email\": t.Email,\n\t\t\"name\": t.Name,\n\t\t\"lastName\": t.LastName,\n\t\t\"birthday\": t.Birthday,\n\t\t\"biography\": t.Biography,\n\t\t\"location\": t.Location,\n\t\t\"webSite\": t.WebSite,\n\t\t\"exp\": time.Now().Add(time.Hour * 24).Unix(),\n\t}\n\ttoken := jwt.NewWithClaims(jwt.SigningMethodHS256, payload)\n\ttokenStr, err := token.SignedString(code)\n\n\tif err != nil {\n\t\treturn 
tokenStr, err\n\t}\n\treturn tokenStr, nil\n}", "func GenerateJWT() (string, error) {\n\ttoken := jwt.New(jwt.SigningMethodHS256)\n\tclaims := token.Claims.(jwt.MapClaims)\n\n\tclaims[\"authorized\"] = true\n\tclaims[\"user\"] = \"Wyllis Monteiro\"\n\tclaims[\"exp\"] = time.Now().Add(time.Minute * 30).Unix()\n\n\ttokenString, err := token.SignedString(MySigningKey)\n\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\treturn tokenString, nil\n}", "func (m *manager) GenerateToken(userID string, username string, roles []string) (string, error) {\n nowTime := time.Now()\n expireTime := nowTime.Add(m.expireTime * time.Second)\n\n claims := Token{\n UserID: userID,\n Name: m.hashService.Make(username),\n Roles: roles,\n StandardClaims: &jwt.StandardClaims{\n ExpiresAt: expireTime.Unix(),\n Issuer: m.issuer,\n Audience: m.audience,\n },\n }\n\n tokenClaims := jwt.NewWithClaims(jwt.SigningMethodHS256, claims)\n token, err := tokenClaims.SignedString(m.jwtSecret)\n\n return token, err\n}", "func GenerateToken(user string) (string, error) {\n\tvar err error\n\tsecret := \"secret\"\n\n\ttoken := jwt.NewWithClaims(jwt.SigningMethodHS256, jwt.MapClaims{\n\t\t\"username\": user,\n\t\t\"iss\": strconv.FormatInt(GetCurrentTimeMillis(), 10),\n\t})\n\ttokenString, err := token.SignedString([]byte(secret))\n\n\treturn tokenString, err\n}", "func generateJwtToken(login, fgp string, api *UserAPIHandler) (string, error) {\n\tvar claims models.TokenClaims\n\n\t// set required claims\n\tclaims.ExpiresAt = time.Now().Add(1 * time.Hour).Unix()\n\tclaims.Fingerprint = fgp\n\tif IsUserAdmin(login, api.admins) {\n\t\tclaims.Role = roleAdmin\n\t} else {\n\t\tclaims.Role = roleUser\n\t}\n\n\t// generate and sign the token\n\ttoken := jwt.NewWithClaims(jwt.SigningMethodHS256, claims)\n\treturn token.SignedString(api.jwtSecret)\n}", "func GenerateJWT(user models.User) (resp LoginResponse, err error) {\n\tclaims := jwt.MapClaims{}\n\n\t// set our claims\n\tclaims[\"User\"] = 
user\n\tclaims[\"Name\"] = user.Name\n\n\t// set the expire time\n\n\tclaims[\"exp\"] = time.Now().Add(time.Hour * 24 * 30 * 12).Unix() //24 hours inn a day, in 30 days * 12 months = 1 year in milliseconds\n\n\t// create a signer for rsa 256\n\tt := jwt.NewWithClaims(jwt.GetSigningMethod(\"RS256\"), claims)\n\n\tpub, err := jwt.ParseRSAPrivateKeyFromPEM(config.GetConf().Encryption.Private)\n\tif err != nil {\n\t\treturn\n\t}\n\ttokenString, err := t.SignedString(pub)\n\n\tif err != nil {\n\t\treturn\n\t}\n\n\tresp = LoginResponse{\n\t\tUser: user,\n\t\tMessage: \"Token succesfully generated\",\n\t\tToken: tokenString,\n\t}\n\n\treturn\n\n}", "func GenerateJWT(userID string) (string, error) {\n\tsigningKey := []byte(\"havealookatbath\")\n\ttoken := jwt.New(jwt.SigningMethodHS256)\n\tclaims := token.Claims.(jwt.MapClaims)\n\n\tclaims[\"authorised\"] = true\n\tclaims[\"user_id\"] = userID\n\tclaims[\"exp\"] = time.Now().Add(time.Minute * 30).UnixNano()\n\n\ttokenString, err := token.SignedString(signingKey)\n\tif err != nil {\n\t\tfmt.Printf(err.Error())\n\t\treturn \"\", err\n\t}\n\treturn tokenString, nil\n}", "func GenerateToken(secret []byte, aud, sub string) (string, error) {\n\n\ttok := jwt.NewWithClaims(jwt.SigningMethodHS256, &jwt.RegisteredClaims{\n\t\tIssuer: TokenIssuer,\n\t\tAudience: []string{aud},\n\t\tSubject: sub,\n\t\tIssuedAt: jwt.NewNumericDate(time.Now()),\n\t\tNotBefore: jwt.NewNumericDate(time.Now().Add(-15 * time.Minute)),\n\t})\n\n\treturn tok.SignedString(secret)\n}", "func GenerateToken(key []byte, userID int64, credential string) (string, error) {\n\n\t//new token\n\ttoken := jwt.New(jwt.SigningMethodHS256)\n\n\t// Claims\n\tclaims := make(jwt.MapClaims)\n\tclaims[\"user_id\"] = userID\n\tclaims[\"credential\"] = credential\n\tclaims[\"exp\"] = time.Now().Add(time.Hour*720).UnixNano() / int64(time.Millisecond)\n\ttoken.Claims = claims\n\n\t// Sign and get as a string\n\ttokenString, err := token.SignedString(key)\n\treturn tokenString, 
err\n}", "func GenerateToken(userID uint) (string, error) {\n\ttoken := jwt.NewWithClaims(jwt.SigningMethodHS256, jwt.MapClaims{\n\t\t\"userID\": userID,\n\t})\n\n\ttokenStr, err := token.SignedString([]byte(secret))\n\n\treturn tokenStr, err\n}", "func GenerateToken(c *gin.Context, user *models.UserResource) string {\n\tclaims := jwt.NewWithClaims(jwt.SigningMethodHS256, &jwt.StandardClaims{\n\t\tIssuer: user.ID,\n\t\tExpiresAt: jwt.NewTime(float64(time.Now().Add(24 * time.Hour).UnixNano())),\n\t})\n\n\ttoken, err := claims.SignedString([]byte(SecretKey))\n\tif err != nil {\n\t\tc.JSON(http.StatusInternalServerError, gin.H{\"error\": \"Unable to authonticate\"})\n\t\treturn \"\"\n\t}\n\tc.SetCookie(\n\t\t\"jwt\", token, int(time.Now().Add(24*time.Hour).UnixNano()), \"/\", \"localhost\", false, true,\n\t)\n\treturn token\n}", "func GenerateJWT(user models.User) (string, error) {\n\tscopes, err := executedao.PermissionDAO.GetScopes(user.Profile.ID)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\n\tclaims := models.AppClaims{\n\t\tUser: user,\n\t\tScopes: scopes,\n\t\tStandardClaims: jwt.StandardClaims{\n\t\t\tExpiresAt: time.Now().Add(time.Hour * 8).Unix(),\n\t\t\tIssuer: \"Contabilidad por Alexys\",\n\t\t},\n\t}\n\tlog.Printf(\"Creando un token a: %s\\n\", user.Username)\n\ttoken := jwt.NewWithClaims(jwt.SigningMethodRS256, claims)\n\tlog.Println(\"Firmando el token...\")\n\tss, err := token.SignedString(signKey)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn ss, nil\n}", "func GenerateJWT() (string, error) {\n\ttoken := jwt.New(jwt.SigningMethodHS256)\n\tclaims := token.Claims.(jwt.MapClaims)\n\tclaims[\"authorized\"] = true\n\tclaims[\"user\"] = \"Niroop\"\n\tclaims[\"exp\"] = time.Now().Add(time.Minute * 30).Unix()\n\ttokenString, err := token.SignedString(mySigningKey)\n\tif err != nil {\n\t\tfmt.Println(err)\n\t\treturn \"\", err\n\t}\n\n\treturn tokenString, nil\n}", "func GenerateJWT() (string, error) {\n\n\tkey := 
os.Getenv(\"JWT_SECRETKEY\")\n\n\ttoken := jwt.New(jwt.SigningMethodHS256)\n\tclaims := token.Claims.(jwt.MapClaims)\n\n\tclaims[\"authorized\"] = true\n\tclaims[\"user\"] = \"Client\"\n\n\ttokenString, err := token.SignedString([]byte(key))\n\n\tif err != nil {\n\t\tlog.Warning.Println(err)\n\t\treturn \"\", err\n\t}\n\n\treturn tokenString, nil\n}", "func GenerateJWT(user *models.User) string {\n\tclaims := models.Claim{\n\t\tUser: *user,\n\t\tStandardClaims: jwt.StandardClaims{\n\t\t\tExpiresAt: time.Now().Add(time.Hour * 10).Unix(),\n\t\t\tIssuer: \"marold97@outlook.com\",\n\t\t},\n\t}\n\n\ttoken := jwt.NewWithClaims(jwt.SigningMethodRS256, claims)\n\tresult, err := token.SignedString(PrivateKey)\n\tif err != nil {\n\t\tlog.Println(\"No se ha podido firmar el token: \", err)\n\t}\n\n\treturn result\n}", "func generateJWT(u Model) (string, error) {\n\tvar token string\n\tc := Claim{\n\t\tUsuario: u,\n\t\tStandardClaims: jwt.StandardClaims{\n\t\t\t// Tiempo de expiración del token: 1 semana\n\t\t\tExpiresAt: time.Now().Add(time.Hour * 24 * 1).Unix(),\n\t\t\tIssuer: \"Cursos EDteam\",\n\t\t},\n\t}\n\n\tt := jwt.NewWithClaims(jwt.SigningMethodRS256, c)\n\ttoken, err := t.SignedString(SignKey)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\treturn token, nil\n}", "func GenToken(id uint) string {\n\tjwt_token := jwt.New(jwt.GetSigningMethod(\"HS256\"))\n\t// Set some claims\n\tjwt_token.Claims = jwt.MapClaims{\n\t\t\"id\": id,\n\t\t\"exp\": time.Now().Add(time.Hour * 24).Unix(),\n\t}\n\t// Sign and get the complete encoded token as a string\n\ttoken, _ := jwt_token.SignedString([]byte(NBSecretPassword))\n\treturn token\n}", "func GenerateToken(secretKey string, validDays int) string {\n\n\tif validDays < 1 {\n\t\tvalidDays = 1\n\t}\n\n\tclaims := sjwt.New() // Issuer of the token\n\tclaims.SetIssuer(\"goUpload\")\n\t/*\n\t\tclaims.SetTokenID() // UUID generated\n\t\tclaims.SetSubject(\"Bearer Token\") // Subject of the 
token\n\t\tclaims.SetAudience([]string{\"Prometeus\"}) // Audience the toke is for\n\t\tclaims.SetIssuedAt(time.Now()) // IssuedAt in time, value is set in unix\n\t*/\n\tclaims.SetNotBeforeAt(time.Now()) // Token valid now\n\tclaims.SetExpiresAt(time.Now().Add(time.Hour * 24 * time.Duration(validDays))) // Token expires in 24 hours\n\tjwt := claims.Generate([]byte(secretKey))\n\treturn jwt\n}", "func createJwtToken(u user.User) (string, error) {\n\t// Set custom claims\n\tclaims := &middleware.LoginCustomClaims{\n\t\tu.Username,\n\t\tfalse,\n\t\tjwt.StandardClaims{\n\t\t\tExpiresAt: time.Now().Add(time.Hour * 72).Unix(),\n\t\t},\n\t}\n\n\t// Create token with claims\n\ttoken := jwt.NewWithClaims(jwt.SigningMethodHS256, claims)\n\n\t// Generate encoded token and send it as response.\n\tkey := viper.GetString(\"auth.signkey\")\n\tt, err := token.SignedString([]byte(key))\n\treturn t, err\n\n}", "func (j *Jwt) GenerateToken() string {\n\ttoken := jwt.NewWithClaims(jwt.SigningMethodHS256, jwt.MapClaims{\n\t\t\"exp\": json.Number(strconv.FormatInt(time.Now().AddDate(0, 0, 1).Unix(), 10)),\n\t\t\"iat\": json.Number(strconv.FormatInt(time.Now().Unix(), 10)),\n\t\t\"uid\": j.UID,\n\t\t\"name\": j.Name,\n\t\t\"username\": j.Username,\n\t})\n\n\ttokenStr, err := token.SignedString(JWTSecret)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn tokenStr\n}", "func GenerateJWT(u models.User) (string, error) {\n\tmyKey := []byte(\"LaClaveSecretaQueUsoParaCrearUnJWT\")\n\n\tpayload := jwt.MapClaims{\n\t\t\"email\": u.Email,\n\t\t\"name\": u.Name,\n\t\t\"lastname\": u.Lastname,\n\t\t\"birthday\": u.Birthday,\n\t\t\"location\": u.Location,\n\t\t\"biography\": u.Biography,\n\t\t\"website\": u.Website,\n\t\t\"_id\": u.ID.Hex(),\n\t\t\"exp\": time.Now().Add(24 * time.Hour).Unix(),\n\t}\n\n\ttoken := jwt.NewWithClaims(jwt.SigningMethodHS256, payload)\n\n\ttokenStr, err := token.SignedString(myKey)\n\n\tif err != nil {\n\t\treturn tokenStr, err\n\t}\n\n\treturn tokenStr, nil\n}", "func 
GenerateJWT(initialToken string, validDuration int) (string, error) {\n\n\tloginKey := []byte(initialToken)\n\ttoken := jwt.New(jwt.SigningMethodHS256)\n\tclaims := token.Claims.(jwt.MapClaims)\n\n\tclaims[\"authorized\"] = true\n\tclaims[\"exp\"] = time.Now().Add(time.Minute * time.Duration(validDuration))\n\n\tjwtToken, jwtErr := token.SignedString(loginKey)\n\n\tif jwtErr != nil {\n\t\tlog.Println(\"Error creating jwt Token : \", jwtErr)\n\t\treturn \"\", jwtErr\n\t}\n\n\treturn jwtToken, nil\n}", "func (s service) generateJWT(identity Identity) (string, error) {\n\treturn jwt.NewWithClaims(jwt.SigningMethodHS256, jwt.MapClaims{\n\t\t\"id\": identity.ID(),\n\t\t\"isAdmin\": identity.IsAdmin(),\n\t\t\"exp\": time.Now().Add(time.Duration(s.tokenExpiration) * time.Hour).Unix(),\n\t}).SignedString([]byte(s.signingKey))\n}", "func (t *Jwt) GenerateToken(userID uint, expiredAt time.Duration) (accessToken string, err error) {\n\texp := time.Now().Add(expiredAt)\n\t// jwt token\n\ttoken := jwt.NewWithClaims(jwt.SigningMethodRS256, jwt.MapClaims{\"exp\": exp.Unix(), \"userID\": userID})\n\t// sign the jwt token\n\taccessToken, err = token.SignedString(t.PrivateKey)\n\tif err != nil {\n\t\t// todo: log error\n\t}\n\treturn\n}", "func GenerateJWT(id, role string, signinigKey []byte) (access string, err error) {\n\tvar (\n\t\taccessToken *jwt.Token\n\t\tclaims jwt.MapClaims\n\t)\n\taccessToken = jwt.New(jwt.SigningMethodHS256)\n\n\tclaims = accessToken.Claims.(jwt.MapClaims)\n\tclaims[\"iss\"] = \"user\"\n\tclaims[\"sub\"] = id\n\tclaims[\"role\"] = role\n\tclaims[\"iat\"] = time.Now().Unix()\n\n\taccessTokenString, err := accessToken.SignedString(signinigKey)\n\tif err != nil {\n\t\terr = fmt.Errorf(\"access_token generating error: %s\", err)\n\t\treturn\n\t}\n\n\treturn accessTokenString, nil\n}", "func GenerateJWT(name, role string) (string, error) {\n\t// Create the Claims\n\tclaims := AppClaims{\n\t\tUserName: name,\n\t\tRole: role,\n\t\tStandardClaims: 
jwt.StandardClaims{\n\t\t\tExpiresAt: time.Now().Add(time.Minute * 20).Unix(),\n\t\t\tIssuer: \"admin\",\n\t\t},\n\t}\n\ttoken := jwt.NewWithClaims(jwt.SigningMethodRS256, claims)\n\tss, err := token.SignedString(signKey)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn ss, nil\n}", "func GenerateToken(jwtSecret string, claims InvoicesClaims) string {\n\thmacSampleSecret := []byte(jwtSecret)\n\n\ttype Claims struct {\n\t\tInvoicesClaims\n\t\tjwt.StandardClaims\n\t}\n\n\ttoken := jwt.NewWithClaims(jwt.SigningMethodHS256, Claims{\n\t\tInvoicesClaims{\n\t\t\tGetInvoices: true,\n\t\t\tGetInvoice: true,\n\t\t\tCreateInvoice: true,\n\t\t},\n\t\tjwt.StandardClaims{\n\t\t\tExpiresAt: getExpiry(),\n\t\t},\n\t})\n\n\ttokenString, err := token.SignedString(hmacSampleSecret)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\treturn tokenString\n}", "func GenerateJWT(user string) *jwtgo.Token {\n\n\ttoken := jwtgo.New(jwtgo.SigningMethodRS512)\n\tin10m := time.Now().Add(time.Duration(10) * time.Minute).Unix()\n\ttoken.Claims = jwtgo.MapClaims{\n\t\t\"iss\": \"Issuer\", // who creates the token and signs it\n\t\t\"aud\": \"Audience\", // to whom the token is intended to be sent\n\t\t\"exp\": in10m, // time when the token will expire (10 minutes from now)\n\t\t\"jti\": uuid.Must(uuid.NewV4()).String(), // a unique identifier for the token\n\t\t\"iat\": time.Now().Unix(), // when the token was issued/created (now)\n\t\t\"nbf\": 2, // time before which the token is not yet valid (2 minutes ago)\n\t\t\"sub\": \"subject\", // the subject/principal is whom the token is about\n\t\t\"scopes\": \"api:access\", // token scope - not a standard claim\n\t\t\"user\": user, // username\n\t}\n\treturn token\n}", "func GenerateJWT(user interface{}) (string, error) {\n\t// 4380 hours = 6 months\n\texpireToken := time.Now().Add(time.Hour * 4380).Unix()\n\ttoken := jwt.NewWithClaims(jwt.SigningMethodHS256, &model.User{\n\t\tId: user.(model.User).Id,\n\t\tStandardClaims: 
jwt.StandardClaims{\n\t\t\tExpiresAt: expireToken,\n\t\t},\n\t})\n\tsignedToken, err := token.SignedString(server.JwtSecret)\n\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\treturn signedToken, nil\n}", "func (s service) generateJWT(identity Identity) (string, error) {\n\treturn jwt.NewWithClaims(jwt.SigningMethodHS256, jwt.MapClaims{\n\t\t\"id\": identity.GetID(),\n\t\t\"name\": identity.GetName(),\n\t\t\"exp\": time.Now().Add(time.Duration(s.tokenExpiration) * time.Hour).Unix(),\n\t}).SignedString([]byte(s.signingKey))\n}", "func GenerateJWT(user models.User) (string, error) {\n\n\tmyKey := []byte(\"MastersdelDesarrollo_grupodeFacebook\")\n\n\tpayload := jwt.MapClaims{\n\t\t\"email\": user.Email,\n\t\t\"nombre\": user.UserName,\n\t\t\"userid\": user.UserID,\n\t\t\"phone\": user.PhoneID,\n\t\t\"Password\": user.Password,\n\t\t\"_id\": user.ID.Hex(),\n\t}\n\n\ttoken := jwt.NewWithClaims(jwt.SigningMethodHS256, payload)\n\ttokenStr, err := token.SignedString(myKey)\n\tif err != nil {\n\t\treturn tokenStr, err\n\t}\n\treturn tokenStr, nil\n}", "func (a *Service) GenerateJweToken(customClaims map[string]interface{}) (string, *time.Time, *error_utils.ApiError) {\n\n\tenc, err := jose.NewEncrypter(\n\t\tjose.ContentEncryption(a.encryptionAlgorithm),\n\t\tjose.Recipient{Algorithm: jose.DIRECT, Key: a.encryptionKey},\n\t\t(&jose.EncrypterOptions{}).WithType(\"JWT\"),\n\t)\n\tif err != nil {\n\t\treturn \"\", nil, error_utils.NewInternalServerError(err.Error())\n\t}\n\n\texpire := a.timeFunc().UTC().Add(a.timeout)\n\n\tclaims := map[string]interface{} { }\n\tclaims[\"exp\"] = expire.Unix()\n\tclaims[\"orig_iat\"] = a.timeFunc().Unix()\n\tclaims[\"iss\"] = a.issuer\n\n\tif customClaims != nil {\n\t\tfor key, value := range customClaims {\n\t\t\tclaims[key] = value\n\t\t}\n\t}\n\n\ttoken, err := jwt.Encrypted(enc).Claims(claims).CompactSerialize()\n\tif err != nil {\n\t\treturn \"\", nil, error_utils.NewInternalServerError(err.Error())\n\t}\n\n\treturn token, &expire, 
nil\n}", "func generateUserToken(identity *Identity) *jwt.Token {\n\ttoken := jwt.New(jwt.SigningMethodRS256)\n\tclaims := token.Claims.(jwt.MapClaims)\n\tclaims[\"jti\"] = uuid.NewV4().String()\n\tiat := time.Now().Unix()\n\tclaims[\"exp\"] = 0\n\tclaims[\"iat\"] = iat\n\tclaims[\"typ\"] = \"Bearer\"\n\tclaims[\"preferred_username\"] = identity.Username\n\tclaims[\"sub\"] = identity.ID.String()\n\tclaims[\"email\"] = identity.Email\n\n\ttoken.Header[\"kid\"] = \"test-key\"\n\n\treturn token\n}", "func (op *AuthOperations) HandleJWTGenerate(w http.ResponseWriter, r *http.Request) {\n\tvar input jwt.General\n\t//fid := r.Header.Get(\"x-fid\")\n\tiid := r.Header.Get(\"x-iid\")\n\terr := json.NewDecoder(r.Body).Decode(&input)\n\tif err != nil {\n\t\tLOGGER.Warningf(\"Error while validating token body : %v\", err)\n\t\tjwt.ResponseError(w, http.StatusInternalServerError, err)\n\t\treturn\n\t}\n\n\tLOGGER.Debugf(\"%s, %s\", iid, input.JTI)\n\n\tvar token jwt.Info\n\tinfoCollection, ctx := op.session.GetSpecificCollection(AuthDBName, JWTInfoCollection)\n\terr = infoCollection.FindOne(ctx,\n\t\tbson.M{\n\t\t\t\"institution\": iid,\n\t\t\t\"jti\": input.JTI,\n\t\t}).Decode(&token)\n\tif err != nil {\n\t\tLOGGER.Errorf(\"Error getting JWT info from query: %s\", err.Error())\n\t\tjwt.ResponseError(w, http.StatusInternalServerError, err)\n\t\treturn\n\t}\n\tLOGGER.Debugf(\"%+v\", token)\n\n\t// if token exists\n\tif &token == nil {\n\t\tLOGGER.Errorf(\"Token info not found\")\n\t\tjwt.ResponseError(w, http.StatusInternalServerError, errors.New(\"token info not found\"))\n\t\treturn\n\t}\n\n\t// only generate if stage is currently approved\n\tif token.Stage != jwt.Approved {\n\t\tLOGGER.Errorf(\"Token is not currently approved\")\n\t\tjwt.ResponseError(w, http.StatusForbidden, errors.New(\"token is not currently approved\"))\n\t\treturn\n\t}\n\n\temail := r.Header.Get(\"email\")\n\t// check to make sure the authenticated user is the same user who requested the token\n\tif 
email == \"\" || email != token.CreatedBy {\n\t\tLOGGER.Errorf(\"User who requested the token must be the same user to generate the token\")\n\t\tjwt.ResponseError(w, http.StatusForbidden, errors.New(\"user who requested the token must be the same user to generate the token\"))\n\t\treturn\n\t}\n\n\t// ensure that the approved request includes a jti\n\tif token.JTI != input.JTI {\n\t\tLOGGER.Errorf(\"Unknown token id\")\n\t\tjwt.ResponseError(w, http.StatusForbidden, errors.New(\"unknown token id\"))\n\t\treturn\n\t}\n\n\t// update token info\n\ttoken.Stage = jwt.Ready\n\n\t// set default expiration time\n\t//initExp := \"15m\" //os.Getenv(\"initial_mins\") + \"m\"\n\t//if initExp == \"\" {\n\t//\tinitExp = \"1h\"\n\t//}\n\n\t// generate the token with payload and claims\n\t// initialize to expire in n1 hrs and not before n2 seconds from now\n\t//encodedToken := jwt.GenerateToken(payload, initExp, \"0s\")\n\ttokenSecret := stringutil.RandStringRunes(64, false)\n\n\tkeyID := primitive.NewObjectIDFromTimestamp(time.Now())\n\tjwtSecure := jwt.IJWTSecure{\n\t\tID: keyID,\n\t\tSecret: tokenSecret,\n\t\tJTI: input.JTI,\n\t\tNumber: 0,\n\t}\n\n\tsecureCollection, secureCtx := op.session.GetSpecificCollection(AuthDBName, JWTSecureCollection)\n\t_, err = secureCollection.InsertOne(secureCtx, jwtSecure)\n\tif err != nil {\n\t\tLOGGER.Errorf(\"Insert JWT secure failed: %+v\", err)\n\t\tjwt.ResponseError(w, http.StatusInternalServerError, err)\n\t\treturn\n\t}\n\n\t// convert the interface type ID to string\n\tLOGGER.Debugf(\"New generate ID: %s\" , keyID.Hex())\n\n\tcount := 0\n\t// define payload\n\tpayload := jwt.CreateClaims(token, count, iid, keyID.Hex())\n\tpayload.ExpiresAt = time.Now().Add(time.Minute * 60).Unix()\n\tpayload.NotBefore = time.Now().Unix()\n\n\tencodedToken, _ := jwt.CreateAndSign(payload, tokenSecret, keyID.Hex())\n\n\t// save updated token info\n\tupdateResult, updateInfoErr := infoCollection.UpdateOne(ctx, bson.M{\"institution\": iid, \"jti\": 
input.JTI}, bson.M{\"$set\": &token})\n\tif updateInfoErr != nil || updateResult.MatchedCount < 1{\n\t\tLOGGER.Errorf(\"Error update token info: %+v\", updateInfoErr)\n\t\tjwt.ResponseError(w, http.StatusInternalServerError, err)\n\t\treturn\n\t}\n\n\tLOGGER.Debugf(\"Successfully generate JWT token\")\n\tjwt.ResponseSuccess(w, encodedToken)\n\treturn\n}", "func GenerateToken(info Jwt) (string, error) {\n\ttoken := jwt.NewWithClaims(jwt.SigningMethodHS256, jwt.MapClaims{\n\t\t\"id\": info.ID,\n\t\t\"email\": info.Email,\n\t\t\"name\": info.Name,\n\t\t\"nbf\": time.Date(2015, 10, 10, 12, 0, 0, 0, time.UTC).Unix(),\n\t})\n\n\t// Sign and get the complete encoded token as a string using the secret\n\treturn token.SignedString(secret)\n}", "func createJwToken(user models.User) (string, error) {\n\n\tjwtExpired, _ := strconv.ParseInt(os.Getenv(\"JWT_EXPIRED_MINUTES\"), 10, 64)\n\n\tclaims := models.JwtClaims{\n\t\tName: user.Name,\n\t\tEmail: user.Email,\n\t\tStandardClaims: jwt.StandardClaims{\n\t\t\tId: strconv.Itoa(user.ID),\n\t\t\tExpiresAt: time.Now().Add(time.Duration(jwtExpired) * time.Minute).Unix(),\n\t\t},\n\t}\n\n\trawToken := jwt.NewWithClaims(jwt.SigningMethodHS512, claims)\n\n\ttoken, err := rawToken.SignedString([]byte(os.Getenv(\"JWT_SECRET\")))\n\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\treturn token, nil\n}", "func GenerateToken(m *models.User) (*AuthToken, error) {\n\tnowTime := time.Now()\n\texpireTime := nowTime.Add(24 * time.Hour)\n\n\tclaims := userStdClaims{\n\t\tUser: m,\n\t\tStandardClaims: jwt.StandardClaims{\n\t\t\tExpiresAt: expireTime.Unix(),\n\t\t\tIssuedAt: time.Now().Unix(),\n\t\t\tIssuer: \"gin-server-api\",\n\t\t},\n\t}\n\n\ttokenClaims := jwt.NewWithClaims(jwt.SigningMethodHS256, claims)\n\ttoken, err := tokenClaims.SignedString(jwtSecret)\n\n\tauthToken := &AuthToken{Token: token, ExpiresAt: expireTime.Format(\"2006-01-02 15:04:05\")}\n\treturn authToken, err\n}", "func (a *Authenticator) generateJwt(p *Profile) ([]byte, 
error) {\n\t// Create the token\n\ttoken := jwt.New(jwt.SigningMethodHS256)\n\t// Set some claims\n\t// TODO: complete the claims.\n\ttoken.Claims[\"exp\"] = time.Now().Add(time.Hour * 72).Unix()\n\t// User is valid. Create a jwt response.\n\ttoken.Claims[\"kid\"] = 0\n\ttoken.Claims[\"userid\"] = p.UserName\n\ttoken.Claims[\"scopes\"] = p.Scopes\n\tts, err := token.SignedString(a.secret)\n\t// Sign and get the complete encoded token as a string\n\n\tts, err = token.SignedString(a.secret)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn []byte(ts), nil\n}", "func GenerateToken(payload interface{}) string {\n\ttokenContent := jwt.MapClaims{\n\t\t\"payload\": payload,\n\t\t\"exp\": time.Now().Add(time.Second * TokenExpiredTime).Unix(),\n\t}\n\tjwtToken := jwt.NewWithClaims(jwt.GetSigningMethod(\"HS256\"), tokenContent)\n\ttoken, err := jwtToken.SignedString([]byte(\"TokenPassword\"))\n\tif err != nil {\n\t\tlogger.Error(\"Failed to generate token: \", err)\n\t\treturn \"\"\n\t}\n\n\treturn token\n}", "func (this *Token) CreateJWTToken(typeUser string, user interface{}) string {\n\n\t// Create new JWT token for the newly registered account\n\tvar id uint64\n\tswitch typeUser {\n\tcase \"user_buyers\":\n\t\tid = user.(*UserBuyers).ID\n\t}\n\n\ttk := &Token{UserId: id, UserType: typeUser, UserDetail: user}\n\ttoken := jwt.NewWithClaims(jwt.GetSigningMethod(\"HS256\"), tk)\n\ttokenString, _ := token.SignedString([]byte(os.Getenv(\"TOKEN_PASSWORD\")))\n\n\treturn tokenString\n}", "func CreateToken(userId uint64, secret_name string) (string, error) {\n\n //Retrieve secret value from secrets manager\n\tsecret, err := getSecretValue(secret_name);\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n atClaims := jwt.MapClaims{}\n atClaims[\"authorized\"] = true\n atClaims[\"user_id\"] = userId\n atClaims[\"exp\"] = time.Now().Add(time.Minute * 15).Unix()\n at := jwt.NewWithClaims(jwt.SigningMethodHS256, atClaims)\n\ttoken, err := at.SignedString([]byte(secret))\n if err != 
nil {\n return \"\", err\n }\n\tlog.Println(\"Token is successfully created\")\n return token, nil\n}", "func (j *JWTUtil) CreateToken(userID uint) (string, error) {\n\n\tclaims := jwt.MapClaims{}\n\n\tvar duration time.Duration\n\tdurationStr := os.Getenv(\"JWT_LIFESPAN_MINUTES\")\n\tif durationStr == \"\" {\n\t\tduration = DefaultTokenLifeSpan\n\t} else {\n\t\td, _ := strconv.ParseInt(durationStr, 10, 64)\n\t\tduration = time.Duration(d) * time.Minute\n\t}\n\n\tclaims[USER_ID] = userID\n\tclaims[\"authorized\"] = true\n\tclaims[\"exp\"] = time.Now().Add(duration).Unix()\n\tjwtToken := jwt.NewWithClaims(jwt.SigningMethodHS256, claims)\n\tsecret := os.Getenv(\"JWT_SECRET\")\n\tif secret == \"\" {\n\t\treturn \"\", errors.New(\"missing jwt token secret\")\n\t}\n\ttoken, err := jwtToken.SignedString([]byte(secret))\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\treturn token, nil\n}", "func GenerateJWT(privateKey []byte, fields map[string]interface{}) (string, error) {\n\n\ttype CustomClaims struct {\n\t\tUserId int64 `json:\"user_id,omitempty\"`\n\t\tjwt.StandardClaims\n\t}\n\n\tclaims := &CustomClaims{}\n\n\tfor k, v := range fields {\n\t\tswitch k {\n\t\tcase \"aud\":\n\t\t\tclaims.Audience = cast.ToString(v)\n\t\tcase \"sub\":\n\t\t\tclaims.Subject = cast.ToString(v)\n\t\tcase \"iss\":\n\t\t\tclaims.Issuer = cast.ToString(v)\n\t\tcase \"id\":\n\t\t\tclaims.Id = cast.ToString(v)\n\t\tcase \"exp\":\n\t\t\tclaims.ExpiresAt = cast.ToInt64(v)\n\t\tcase \"user_id\":\n\t\t\tclaims.UserId = cast.ToInt64(v)\n\t\tcase \"nbf\":\n\t\t\tclaims.NotBefore = cast.ToInt64(v)\n\t\tcase \"iat\":\n\t\t\tclaims.IssuedAt = time.Now().Unix()\n\t\t}\n\t}\n\n\tvar (\n\t\tsPrivateKey interface{}\n\t\terr error\n\t)\n\n\tsPrivateKey, err = jwt.ParseRSAPrivateKeyFromPEM(privateKey)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\ttoken := jwt.NewWithClaims(jwt.SigningMethodRS256, claims)\n\tss, err := token.SignedString(sPrivateKey)\n\n\tif err != nil {\n\n\t\treturn \"\", 
err\n\t}\n\treturn ss, nil\n}", "func (user *User) GenerateToken() {\n\n\tvalue, _ := strconv.Atoi(os.Getenv(\"token_exp\"))\n\n\t//Create new JWT token for the newly registered account\n\ttk := &Token{UserID: uint(user.ID), ExpirationTime: time.Now().Add(time.Duration(value) * time.Second).Unix()}\n\n\ttoken := jwt.NewWithClaims(jwt.GetSigningMethod(\"HS256\"), tk)\n\ttokenString, _ := token.SignedString([]byte(os.Getenv(\"token_password\")))\n\tuser.Token = tokenString\n\n}", "func GenerateJWTforUser(user models.User) (string, error) {\n\ttoken := jwt.NewWithClaims(jwt.SigningMethodHS256, jwt.MapClaims{\n\t\t\"userId\": user.UserID,\n\t\t\"username\": user.Username,\n\t\t\"isAdmin\": user.IsAdmin,\n\t\t\"expires\": strconv.FormatInt(time.Now().Add(time.Minute*time.Duration(1)).Unix(), 10),\n\t})\n\treturn token.SignedString([]byte(os.Getenv(\"JWT_SECRET\")))\n}", "func GenerateJWT(currentUserEmail string) (string, error) {\n\t// Create the Claims\n\tclaims := AppClaims{\n\t\tcurrentUserEmail,\n\t\tjwt.StandardClaims{\n\t\t\tExpiresAt: time.Now().Add(time.Minute * 60).Unix(),\n\t\t\tIssuer: \"admin\",\n\t\t},\n\t}\n\ttoken := jwt.NewWithClaims(jwt.SigningMethodRS256, claims)\n\tss, err := token.SignedString(signKey)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn ss, nil\n}", "func CreateToken(user model.User, jwtKey string) (string, error) {\n\n\texpireToken := time.Now().Add(time.Hour * 48).Unix()\n\n\t// Set-up claims\n\tclaims := model.TokenClaims{\n\t\tID: user.ID,\n\t\tUsername: user.Username,\n\t\tName: user.Name,\n\t\tEmail: user.Email,\n\t\tStandardClaims: jwt.StandardClaims{\n\t\t\tExpiresAt: expireToken,\n\t\t\tIssuer: \"smartdashboard-backend-auth\",\n\t\t},\n\t}\n\n\ttoken := jwt.NewWithClaims(jwt.SigningMethodHS256, claims)\n\n\ttokenString, err := token.SignedString([]byte(jwtKey))\n\n\treturn tokenString, err\n}", "func GenerateToken(payload PayLoad, expireTime int64) (string, error) {\n\n\tclaims := 
Claims{\n\t\tpayload.ID,\n\t\tpayload.Account,\n\t\tEncodeMD5(payload.Password),\n\t\tpayload.Scope,\n\t\tpayload.IsSuper,\n\t\tjwt.StandardClaims{\n\t\t\tExpiresAt: expireTime,\n\t\t\tIssuer: \"liaoliao\",\n\t\t},\n\t}\n\n\ttokenClaims := jwt.NewWithClaims(jwt.SigningMethodHS256, claims)\n\ttoken, err := tokenClaims.SignedString(jwtSecret)\n\n\treturn token, err\n}", "func GenerateToken(id int, account string, role string) (token string, err error) {\n nowTime := time.Now()\n expireTime := nowTime.Add(3 * time.Hour) // token發放後多久過期\n\n claims := Claims{\n ID: id,\n Account: account,\n Role: role,\n StandardClaims: jwt.StandardClaims{\n ExpiresAt: expireTime.Unix(),\n IssuedAt: nowTime.Unix(),\n Issuer: \"go-gin-cli\",\n },\n }\n\n tokenClaims := jwt.NewWithClaims(jwt.SigningMethodHS256, claims)\n token, err = tokenClaims.SignedString(jwtSecret)\n if err != nil {\n log.Println(err)\n return\n }\n\n return\n}", "func (h *Helper) generateToken(tokentype int, expiresInSec time.Duration, id, role, username, email, picturepath string, createdAt, modifiedAt int64) (string, error) {\n\t// Create the Claims\n\tclaims := AppClaims{\n\t\tStandardClaims: jwt.StandardClaims{\n\t\t\tAudience: helper.TokenAudience,\n\t\t\tSubject: id,\n\t\t\tIssuedAt: time.Now().Unix(),\n\t\t\t//1Day\n\t\t\tExpiresAt: time.Now().Add(expiresInSec).Unix(),\n\t\t\tIssuer: helper.TokenIssuer,\n\t\t},\n\t\tRole: role,\n\t}\n\tswitch tokentype {\n\tcase ID_TOKEN:\n\t\tclaims.Type = \"id_token\"\n\t\tclaims.User = &TokenUser{username, email, picturepath, createdAt, modifiedAt}\n\tcase REFRESH_TOKEN:\n\t\tclaims.Type = \"refresh\"\n\tcase ACCESS_TOKEN:\n\t\tclaims.Type = \"bearer\"\n\t}\n\ttoken := jwt.NewWithClaims(jwt.SigningMethodRS256, claims)\n\tss, err := token.SignedString(h.signKey)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn ss, nil\n}", "func (j *JWT) GenerateToken(user models.User) (string, error) {\n\texpirationTime := time.Now().Add(7 * 24 * time.Hour)\n\tclaims := 
&requset.CustomClaims{\n\t\tTelephone: user.Telephone,\n\t\tUserName: user.Username,\n\t\tStandardClaims: jwt.StandardClaims{\n\t\t\tExpiresAt: expirationTime.Unix(),\n\t\t\tIssuedAt: time.Now().Unix(),\n\t\t\tIssuer: \"y\",\n\t\t},\n\t}\n\ttoken := jwt.NewWithClaims(jwt.SigningMethodHS256, claims)\n\treturn token.SignedString(j.JwtSecret)\n}", "func (t *jwtMgr) createJWTToken(user *auth.User, privateClaims map[string]interface{}) (string, time.Time, error) {\n\tcurrTime := time.Now()\n\texp := currTime.Add(t.expiration)\n\tif user == nil || user.Name == \"\" {\n\t\tt.logger.Errorf(\"User information is required to create a JWT token\")\n\t\treturn \"\", exp, ErrMissingUserInfo\n\t}\n\t// standard jwt claims like sub, iss, exp\n\tclaims := jwt.Claims{\n\t\tSubject: user.Name,\n\t\tIssuer: issuerClaimValue,\n\t\tExpiry: jwt.NewNumericDate(exp),\n\t\tIssuedAt: jwt.NewNumericDate(currTime),\n\t}\n\t// venice custom claims\n\tif privateClaims == nil {\n\t\tprivateClaims = make(map[string]interface{})\n\t}\n\tprivateClaims[TenantClaim] = user.GetTenant()\n\tprivateClaims[RolesClaim] = user.Status.GetRoles()\n\t// create signed JWT\n\ttoken, err := jwt.Signed(t.signer).Claims(claims).Claims(privateClaims).CompactSerialize()\n\tif err != nil {\n\t\tt.logger.Errorf(\"Unable to create JWT token: Err: %v\", err)\n\t\treturn \"\", exp, err\n\t}\n\treturn token, exp, err\n}", "func createJWT(userID int) (string, int64, error) {\n\t// expirationTime := time.Now().Add(7 * 24 * 60 * time.Minute)\n\t// expirationTime := time.Now().Add(1 * time.Minute)\n\texpirationTime := time.Now().Add(24 * 60 * time.Minute)\n\tclaims := &Claims{\n\t\tUserID: userID,\n\t\tStandardClaims: jwt.StandardClaims{\n\t\t\tExpiresAt: expirationTime.Unix(),\n\t\t},\n\t}\n\n\ttoken := jwt.NewWithClaims(jwt.SigningMethodHS256, claims)\n\ttokenString, err := token.SignedString(jwtSecretKey)\n\tif err == nil {\n\t\treturn tokenString, expirationTime.Unix(), nil\n\t}\n\treturn \"\", 0, err\n}", "func 
createJWT(secret map[string]interface{}, scope string, pkey pkeyInterface) (string, error) {\n\t// A valid JWT has an \"iat\" timestamp and an \"exp\" timestamp. Get the current\n\t// time to create these timestamps.\n\tnow := int(time.Now().Unix())\n\n\t// Construct the JWT header, which contains the private key id in the service\n\t// account secret.\n\theader := map[string]string{\n\t\t\"typ\": \"JWT\",\n\t\t\"alg\": \"RS256\",\n\t\t\"kid\": toString(secret[\"private_key_id\"]),\n\t}\n\n\t// Construct the JWT payload.\n\tpayload := map[string]string{\n\t\t\"aud\": toString(secret[\"token_uri\"]),\n\t\t\"scope\": scope,\n\t\t\"iat\": strconv.Itoa(now),\n\t\t\"exp\": strconv.Itoa(now + 3600),\n\t\t\"iss\": toString(secret[\"client_email\"]),\n\t}\n\n\t// Convert header and payload to base64-encoded JSON.\n\theaderB64, err := mapToJsonBase64(header)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tpayloadB64, err := mapToJsonBase64(payload)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\t// The first two segments of the JWT are signed. 
The signature is the third\n\t// segment.\n\tsegments := headerB64 + \".\" + payloadB64\n\n\t// sign the hash, instead of the actual segments.\n\thashed := sha256.Sum256([]byte(segments))\n\tsignedBytes, err := pkey.Sign(rand.Reader, hashed[:], crypto.SignerOpts(sha256Opts{}))\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\t// Generate the final JWT as\n\t// base64(header) + \".\" + base64(payload) + \".\" + base64(signature)\n\treturn segments + \".\" + base64Encode(signedBytes), nil\n}", "func GenerateToken() (string, int64, error) {\n\tnow := time.Now()\n\tnow = now.UTC()\n\n\texpiration := now.Add(TokenLifespan).Unix()\n\tclaims := &jwt.StandardClaims{\n\t\tIssuer: \"auth.evanmoncuso.com\",\n\t\tAudience: \"*\",\n\t\tExpiresAt: expiration,\n\t\tIssuedAt: now.Unix(),\n\t}\n\n\ttoken := jwt.NewWithClaims(SigningMethod, claims)\n\ttokenString, err := token.SignedString(TokenSecret)\n\n\treturn tokenString, expiration, err\n}", "func CreateToken(userId primitive.ObjectID) (tokenString string, err error) {\n\n\t// Get config file\n\tconfig, err := ConfigHelper.GetConfig()\n\tif err != nil {\n\t\tlog.Println(err)\n\t\treturn\n\t}\n\n\ttype MyCustomClaims struct {\n\t\tUserId primitive.ObjectID `json:\"userId\"`\n\t\tjwt.StandardClaims\n\t}\n\n\ttoken := jwt.NewWithClaims(jwt.SigningMethodHS256, MyCustomClaims{\n\t\tuserId,\n\t\tjwt.StandardClaims{\n\t\t\tExpiresAt: time.Now().Unix() + (config.JwtExpHours * 3600),\n\t\t},\n\t})\n\n\t// Sign and get the complete encoded token as a string using the secret\n\ttokenString, err = token.SignedString([]byte(config.JwtSecret))\n\tif err != nil {\n\t\tlog.Println(err)\n\t\treturn\n\t}\n\n\treturn\n}", "func GenerateJwtToken(email string) (SignedToken, error) {\n\tclaims := &jwtClaim{\n\t\tEmail: email,\n\t\tStandardClaims: jwt.StandardClaims{\n\t\t\tIssuedAt: time.Now().Unix(),\n\t\t\tIssuer: os.Getenv(\"JWT_ISSUER\"),\n\t\t},\n\t}\n\n\ttoken := jwt.NewWithClaims(jwt.SigningMethodHS256, claims)\n\tsignedToken, err := 
token.SignedString([]byte(os.Getenv(\"JWT_KEY\")))\n\n\treturn SignedToken{Token: signedToken}, err\n}", "func createJwt(payload *JWTUser) (string, error) {\n\t// if the Expires isn't set, we need to set it to the expiration from the config\n\t// the only time it may be set is during test\n\t// generally, if you find yourself setting this by hand, you're doing it wrong\n\tif payload.Expires == \"\" {\n\t\tpayload.Expires = time.Now().Add(Config.TokenExpiresMinutes).Format(\"2006-01-02T15:04:05Z\")\n\t}\n\ttoken := jwt.NewWithClaims(jwt.SigningMethodHS256, jwt.MapClaims{\n\t\t\"user\": payload,\n\t})\n\ttokenString, err := token.SignedString([]byte(Config.TokenSalt))\n\n\treturn tokenString, err\n}", "func GeneroJWT(t models.Usuario)(string,error){\n\tmiClave:=[]byte(\"MastersDelDesarrollo\")\n\tpayload:=jwt.MapClaims{\n\t\t\"email\":t.Email,\n\t\t\"nombres\":t.Nombre,\n\t\t\"apellidos\":t.Apellidos,\n\t\t\"fecha_nacimiento\":t.FechaNacimiento,\n\t\t\"biografia\":t.Biografia,\n\t\t\"ubicacion\":t.Ubicacion,\n\t\t\"sitioweb\":t.SitioWeb,\n\t\t\"_id\":t.ID.Hex(),\n\t\t\"exp\":time.Now().Add(time.Hour*24).Unix(),\n\t}\n\n\ttoken:=jwt.NewWithClaims(jwt.SigningMethodHS256,payload)\n\ttokenStr,err:=token.SignedString(miClave)\n\tif err!=nil{\n\t\treturn tokenStr,err\n\t}\n\treturn tokenStr,nil\n}", "func CreateJWTToken(email string, provider string, providerID string, group string) (string, error) {\n\tclaims := Claims{\n\t\tEmail: email,\n\t\tProvider: provider,\n\t\tGroup: group,\n\t\tStandardClaims: jwt.StandardClaims{\n\t\t\tExpiresAt: time.Now().Add(time.Hour * 24).Unix(),\n\t\t\tId: providerID,\n\t\t},\n\t}\n\ttoken := jwt.NewWithClaims(jwt.SigningMethodHS256, claims)\n\ttokenString, err := token.SignedString(jwtSecret)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn tokenString, nil\n}", "func Generate(payload map[string]interface{}, privateKey *rsa.PrivateKey) []byte {\n\tpayload[\"date\"] = time.Now().UTC().Format(\"02-01-2006\")\n\n\ttoken, err := 
jws.NewJWT(payload, crypto.SigningMethodRS512).Serialize(privateKey)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn token\n}", "func GenerateToken(c *gin.Context) {\n\tcurrentUser := GetCurrentUser(c.Request)\n\tif currentUser == nil {\n\t\terr := c.AbortWithError(http.StatusUnauthorized, fmt.Errorf(\"Invalid session\"))\n\t\tbanzaiUtils.LogInfo(banzaiConstants.TagAuth, c.ClientIP(), err.Error())\n\t\treturn\n\t}\n\n\ttokenID := uuid.NewV4().String()\n\n\t// Create the Claims\n\tclaims := &ScopedClaims{\n\t\tjwt.StandardClaims{\n\t\t\tIssuer: auth0ApiIssuer,\n\t\t\tAudience: auth0ApiAudiences[0],\n\t\t\tIssuedAt: time.Now().UnixNano(),\n\t\t\tExpiresAt: time.Now().UnixNano() * 2,\n\t\t\tSubject: strconv.Itoa(int(currentUser.ID)),\n\t\t\tId: tokenID,\n\t\t},\n\t\t\"api:invoke\",\n\t}\n\n\ttoken := jwt.NewWithClaims(jwt.SigningMethodHS256, claims)\n\tsignedToken, err := token.SignedString(signingKey)\n\n\tif err != nil {\n\t\terr = c.AbortWithError(http.StatusInternalServerError, fmt.Errorf(\"Failed to sign token: %s\", err))\n\t\tbanzaiUtils.LogInfo(banzaiConstants.TagAuth, c.ClientIP(), err.Error())\n\t} else {\n\t\terr = tokenStore.Store(strconv.Itoa(int(currentUser.ID)), tokenID)\n\t\tif err != nil {\n\t\t\terr = c.AbortWithError(http.StatusInternalServerError, fmt.Errorf(\"Failed to store token: %s\", err))\n\t\t\tbanzaiUtils.LogInfo(banzaiConstants.TagAuth, c.ClientIP(), err.Error())\n\t\t} else {\n\t\t\tc.JSON(http.StatusOK, gin.H{\"token\": signedToken})\n\t\t}\n\t}\n}", "func (r *Repository) CreateJWT(u *User) (string, error) {\n\tclaims := r.GetClaims(u)\n\ttoken := jwt.NewWithClaims(jwt.SigningMethodHS256, claims)\n\n\t// Sign and get the complete encoded token as a string using the secret\n\ttokenString, err := token.SignedString([]byte(r.secretForToken))\n\treturn tokenString, err\n}", "func (c *Claim) GenerateJWT(signingString string) (string, error) {\n\t// set the expiration time to 15'\n\tc.ExpiresAt = time.Now().Add(time.Minute * 
15).Unix()\n\n\ttoken := jwt.NewWithClaims(jwt.SigningMethodHS256, c)\n\treturn token.SignedString([]byte(signingString))\n}", "func CreateToken(username string) (string, error) {\n\n\tclaims := jwt.MapClaims{\n\t\t\"user_id\": username,\n\t\t\"exp\": time.Now().Add(12 * time.Hour).Unix(),\n\t\t\"authorized\": true,\n\t}\n\n\ttoken := jwt.NewWithClaims(jwt.SigningMethodHS256, claims)\n\n\treturn token.SignedString([]byte(SECRET))\n}", "func createJWT(SID string) (string, error) {\n\t// Create custom claims value\n\tclaims := MyCustomClaims{\n\t\tSID,\n\t\tjwt.StandardClaims{\n\t\t\tExpiresAt: expire,\n\t\t},\n\t}\n\t// Create a jwt tokenizer\n\ttokenizer := jwt.NewWithClaims(jwt.SigningMethodHS512, &claims)\n\t// create a token and sign it with your key\n\tss, err := tokenizer.SignedString(key)\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"Error in SignedString while signing: %w\", err)\n\t}\n\treturn ss, nil\n}", "func GenerateAuthToken(claims *JWTClaims, expiry time.Duration, jwtKey []byte) (string, time.Time, error) {\n\tissuedTime := time.Now()\n\texpirationTime := issuedTime.Add(expiry)\n\tclaims.StandardClaims = jwt.StandardClaims{\n\t\t// In JWT, the expiry time is expressed as unix milliseconds\n\t\tExpiresAt: expirationTime.Unix(),\n\t\t// Can be used to blacklist in the future. 
Needs to hold state\n\t\t// in that case :/\n\t\tId: uuid.NewV4().String(),\n\t\tIssuedAt: issuedTime.Unix(),\n\t}\n\n\ttoken := jwt.NewWithClaims(jwt.SigningMethodHS256, claims)\n\tres, err := token.SignedString(jwtKey)\n\treturn res, expirationTime, err\n}", "func (s *Setup) GenerateToken(info *model.Auth) (string, error) {\n\tcfg, err := config.Load()\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tsecret := []byte(cfg.JWTSecret)\n\n\tvar claims model.AuthClaims\n\n\tclaims.ID = info.ID\n\tclaims.Name = info.Name\n\tclaims.Email = info.Email\n\tclaims.StandardClaims = jwt.StandardClaims{\n\t\tExpiresAt: time.Now().Add(time.Hour * 2).Unix(),\n\t\tIssuer: cfg.AppName,\n\t}\n\n\ttoken := jwt.NewWithClaims(jwt.SigningMethodHS256, claims)\n\n\tsignedString, err := token.SignedString(secret)\n\tif err != nil {\n\t\treturn \"\", errors.New(errGeneratingToken)\n\t}\n\n\treturn signedString, nil\n}", "func (handler *AuthHandler) GenerateToken(w http.ResponseWriter, r *http.Request) {\n\ttokenString, err := GenerateJWT()\n\tif err != nil {\n\t\tfmt.Println(\"error occured while generating the token string\")\n\t}\n\n\tfmt.Fprintf(w, tokenString)\n}", "func GenerateToken(payload map[string]interface{}) (string, error) {\n\treturn GenerateCustomToken(payload, defaultSecret, defaultExpireTime)\n}", "func JWTCreate(userID int, expiredAt int64) string {\n\tclaims := UserClaims{\n\t\tuserID,\n\t\tjwt.StandardClaims{\n\t\t\tExpiresAt: expiredAt,\n\t\t\tIssuer: \"proton\",\n\t\t},\n\t}\n\ttoken := jwt.NewWithClaims(jwt.SigningMethodHS256, claims)\n\tsignedToken, _ := token.SignedString(mySigningKey)\n\treturn signedToken\n}", "func GenerateNewAccessToken(u *domain.User) (string, error) {\n\t// Set secret key from .env file.\n\tsecret := os.Getenv(\"JWT_SECRET_KEY\")\n\n\t// Set expires minutes count for secret key from .env file.\n\tminutesCount, _ := strconv.Atoi(os.Getenv(\"JWT_SECRET_KEY_EXPIRE_MINUTES\"))\n\n\t// Create token\n\ttoken := 
jwt.New(jwt.SigningMethodHS256)\n\n\t// Set claims\n\tclaims := token.Claims.(jwt.MapClaims)\n\tclaims[\"id\"] = u.ID\n\tclaims[\"email\"] = u.Email\n\tclaims[\"username\"] = u.Username\n\tclaims[\"full_name\"] = u.FullName\n\tclaims[\"exp\"] = time.Now().Add(time.Minute * time.Duration(minutesCount)).Unix()\n\n\t// Generate encoded token and send it as response.\n\tt, err := token.SignedString([]byte(secret))\n\tif err != nil {\n\t\t// Return error, it JWT token generation failed.\n\t\treturn \"\", err\n\t}\n\n\treturn t, nil\n}", "func createToken(user *models.User) string {\n\tvar store models.Store\n\tvar storeID uint\n\n\tif user.HaveStore == true {\n\t\tif config.DB.First(&store, \"user_id = ?\", user.ID).RecordNotFound() {\n\t\t\tstoreID = 0\n\t\t}\n\t\tstoreID = store.ID\n\t} else {\n\t\tstoreID = 0\n\t}\n\t// to send time expire, issue at (iat)\n\tjwtToken := jwt.NewWithClaims(jwt.SigningMethodHS256, jwt.MapClaims{\n\t\t\"user_id\": user.ID,\n\t\t\"user_role\": user.Role,\n\t\t\"user_store\": user.HaveStore,\n\t\t\"store_id\": storeID,\n\t\t\"exp\": time.Now().AddDate(0, 0, 7).Unix(),\n\t\t\"iat\": time.Now().Unix(),\n\t})\n\n\t// Sign and get the complete encoded token as a string using the secret\n\ttokenString, err := jwtToken.SignedString([]byte(os.Getenv(\"JWT_SECRET\")))\n\n\tif err != nil {\n\t\tfmt.Println(err)\n\t}\n\n\treturn tokenString\n}", "func generateAndSaveJWT(secrets secret.Interface, cfg ServerConfig) error {\n\tclaims := jg.MapClaims{\n\t\t\"iss\": fmt.Sprintf(\"kube-arangodb/%s\", cfg.ServerName),\n\t\t\"iat\": time.Now().Unix(),\n\t}\n\terr := k8sutil.CreateJWTFromSecret(context.Background(), secrets, secrets, cfg.JWTSecretName, cfg.JWTKeySecretName, claims, nil)\n\tif err != nil {\n\t\treturn errors.WithStack(err)\n\t}\n\treturn err\n}", "func CreateToken(id, username string) (string, error) {\n\tvar err error\n\tatClaims := jwt.MapClaims{}\n\tatClaims[\"authorized\"] = true\n\tatClaims[\"ID\"] = id\n\tatClaims[\"username\"] = 
username\n\tatClaims[\"exp\"] = time.Now().Add(time.Hour * 23).Unix()\n\tat := jwt.NewWithClaims(jwt.SigningMethodHS256, atClaims)\n\ttoken, err := at.SignedString([]byte(os.Getenv(\"jwtsecret\")))\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn token, nil\n}", "func GenerateOAuthToken(isSignUp bool, timeZone string, typeVal string, host string) (string, error) {\n\t//compute the expiration\n\texpiration := time.Now().Unix() + JWTOAuthExpirationSec\n\n\t//create the claims\n\tclaims := &OAuthClaims{\n\t\tStandardClaims: jwt.StandardClaims{\n\t\t\tExpiresAt: expiration,\n\t\t},\n\t\tIsSignUp: isSignUp,\n\t\tTimeZone: timeZone,\n\t\tType: typeVal,\n\t\tHost: host,\n\t}\n\n\t//create the token\n\talgorithm := jwt.GetSigningMethod(JWTSigningAlgorithm)\n\ttoken := jwt.NewWithClaims(algorithm, claims)\n\n\t//create the signed string\n\ttokenStr, err := token.SignedString([]byte(GetJWTKey()))\n\tif err != nil {\n\t\treturn \"\", errors.Wrap(err, \"sign oauth token\")\n\t}\n\treturn tokenStr, nil\n}", "func GetJWT(claims *UserAuthClaims, keyID string) string {\n\thmacSecret := os.Getenv(env.HmacSecret)\n\n\ttoken := jwt.NewWithClaims(jwt.SigningMethodHS256, claims)\n\ttoken.Header[\"kid\"] = keyID\n\n\ttokenString, err := token.SignedString([]byte(hmacSecret))\n\n\tif err != nil {\n\t\tlog.Println(err.Error())\n\t\treturn \"\"\n\t}\n\n\treturn tokenString\n}", "func GenerateToken(uuid string, name string, email string, role string) (string, error) {\n\texpirationTime := time.Now().Add(24 * time.Hour)\n\tclaims := &Claims{\n\t\tUUID: uuid,\n\t\tName: name,\n\t\tEmail: email,\n\t\tRole: role,\n\t\tStandardClaims: jwt.StandardClaims{\n\t\t\tExpiresAt: expirationTime.Unix(),\n\t\t},\n\t}\n\n\ttoken := jwt.NewWithClaims(jwt.SigningMethodHS256, claims)\n\ttokenString, err := token.SignedString(jwtKey)\n\tif err != nil {\n\t\treturn err.Error(), err\n\t}\n\treturn tokenString, nil\n}", "func prepareJWTToken(config *Config) (string, error) {\n\tpubBytes, err := 
x509.MarshalPKIXPublicKey(config.PrivateKey.Public())\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\thash := sha256.Sum256(pubBytes)\n\n\taccountName := strings.ToUpper(config.Account)\n\tuserName := strings.ToUpper(config.User)\n\n\tissueAtTime := time.Now().UTC()\n\ttoken := jwt.NewWithClaims(jwt.SigningMethodRS256, jwt.MapClaims{\n\t\t\"iss\": fmt.Sprintf(\"%s.%s.%s\", accountName, userName, \"SHA256:\"+base64.StdEncoding.EncodeToString(hash[:])),\n\t\t\"sub\": fmt.Sprintf(\"%s.%s\", accountName, userName),\n\t\t\"iat\": issueAtTime.Unix(),\n\t\t\"nbf\": time.Date(2015, 10, 10, 12, 0, 0, 0, time.UTC).Unix(),\n\t\t\"exp\": issueAtTime.Add(config.JWTExpireTimeout).Unix(),\n\t})\n\n\ttokenString, err := token.SignedString(config.PrivateKey)\n\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\treturn tokenString, err\n}", "func mustAuthCreateJWTToken() string {\n\tauthOptions.jwtSecretFile = mustExpand(authOptions.jwtSecretFile)\n\n\tif authOptions.jwtSecretFile == \"\" {\n\t\tlog.Fatal().Msg(\"A JWT secret file is required. 
Set --auth.jwt-secret option.\")\n\t}\n\tcontent, err := ioutil.ReadFile(authOptions.jwtSecretFile)\n\tif err != nil {\n\t\tlog.Fatal().Err(err).Msgf(\"Failed to read JWT secret file '%s'\", authOptions.jwtSecretFile)\n\t}\n\tjwtSecret := strings.TrimSpace(string(content))\n\ttoken, err := service.CreateJwtToken(jwtSecret, authOptions.user)\n\tif err != nil {\n\t\tlog.Fatal().Err(err).Msg(\"Failed to create JWT token\")\n\t}\n\treturn token\n}", "func generateAuthToken(u *db.UserModel) (*types.AuthorizedUser, error) {\n\tc := make(chan *types.TokenOutput)\n\n\te := time.Now().Add(time.Hour * 72).Unix()\n\n\tclaims := &types.JwtUserClaims{\n\t\tCurrentUser: types.CurrentUser{Name: u.Username, Email: u.Email, Id: u.ID},\n\t\tStandardClaims: jwt.StandardClaims{\n\t\t\tExpiresAt: e,\n\t\t},\n\t}\n\n\tt := jwt.NewWithClaims(jwt.SigningMethodHS256, claims)\n\n\ts, err := t.SignedString([]byte(config.JWT_SECRET))\n\n\tif err != nil {\n\t\treturn nil, errors.New(utils.StatusMessage(500))\n\t}\n\n\tgo tokenModel.Create(\n\t\t&types.Token{UserId: u.ID, Token: s, Expiration: e},\n\t\tc,\n\t)\n\n\tif r := <-c; r.Err != nil {\n\t\treturn nil, errors.New(utils.StatusMessage(500))\n\t}\n\n\treturn &types.AuthorizedUser{Token: s}, nil\n}", "func (t *TokenClaims) GenerateToken(key []byte) (string, error) {\n\treturn jwt.\n\t\tNewWithClaims(jwt.SigningMethodHS256, t).\n\t\tSignedString(key)\n}", "func (u *User) CreateJWTToken() error {\n\tdb.First(&u.UserGroup, u.UserGroupID)\n\tnow := time.Now()\n\tclaims := JWTClaims{\n\t\tStandardClaims: jwt.StandardClaims{\n\t\t\tIssuedAt: now.Unix(),\n\t\t\tId: createJWTID(u.ID),\n\t\t\tIssuer: \"pm\",\n\t\t\tExpiresAt: now.AddDate(0, 0, 365).Unix(),\n\t\t\tSubject: u.Email,\n\t\t},\n\t\tUserID: u.ID,\n\t\tName: u.Name,\n\t\tRole: u.UserGroup.Name,\n\t}\n\trawToken := jwt.NewWithClaims(jwt.SigningMethodHS256, claims)\n\tvar err error\n\tu.JWTToken, err = rawToken.SignedString([]byte(config.Settings.JWTSecret))\n\treturn err\n}", "func 
getJWT(msg string) (string, error) {\n\n\t// create a new claim\n\tclaims := myClaims{\n\t\tEmail: msg,\n\t\tStandardClaims: jwt.StandardClaims{\n\t\t\t// expires in 5 minutes from now\n\t\t\tExpiresAt: time.Now().Add(5 * time.Minute).Unix(),\n\t\t},\n\t}\n\n\t// create token from newly created claims\n\ttoken := jwt.NewWithClaims(jwt.SigningMethodHS256, &claims)\n\n\t// sign the token\n\tss, err := token.SignedString([]byte(key))\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"error getting signed string from token\")\n\t}\n\n\treturn ss, nil\n}", "func (manager *JWTManager) Generate(user *User) (string, error) {\n\tclaims := UserClaims{\n\t\tStandardClaims: jwt.StandardClaims{\n\t\t\tExpiresAt: time.Now().Add(manager.tokenDuration).Unix(),\n\t\t},\n\t\tUsername: user.Username,\n\t\tRole: user.Role,\n\t}\n\n\ttoken := jwt.NewWithClaims(jwt.SigningMethodHS256, claims)\n\treturn token.SignedString([]byte(manager.secretKey))\n}", "func CreateToken(username, secret string) (string, time.Time, error) {\n\texpTime := time.Now().Add(tokenTTL * time.Hour)\n\n\ttoken := jwt.NewWithClaims(jwt.SigningMethodHS256, jwt.MapClaims{\n\t\t\"username\": username,\n\t\t\"exp\": expTime.Unix(),\n\t})\n\n\tstoken, err := token.SignedString([]byte(secret))\n\treturn stoken, expTime, err\n}", "func BuildJWT(us models.User) (string, error) {\n\n\tpayload := jwt.MapClaims{\n\t\t\"email\": us.Email,\n\t\t\"name\": us.Name,\n\t\t\"lastName\": us.LastName,\n\t\t\"birthDate\": us.BirthDate,\n\t\t\"_id\": us.ID.Hex(),\n\t\t\"exp\": time.Now().Add(time.Hour * 24).Unix(),\n\t}\n\n\ttoken := jwt.NewWithClaims(jwt.SigningMethodHS256, payload)\n\n\tjwtKey, err := token.SignedString(Pass)\n\tif err != nil {\n\t\treturn jwtKey, err\n\t}\n\n\treturn jwtKey, nil\n}", "func NewJWT(claims map[string]interface{}, validFor time.Duration) (string, error) {\n\ttoken := jwt.New(jwt.SigningMethodHS256)\n\n\tfor k, v := range claims {\n\t\ttoken.Claims[k] = v\n\t}\n\n\ttoken.Claims[\"exp\"] = 
time.Now().UTC().Add(validFor).Unix()\n\treturn token.SignedString([]byte(JWTPrivateKey))\n}", "func (a *API) createJWT(claims jwt.MapClaims) (string, error) {\n\ttoken := jwt.NewWithClaims(jwt.SigningMethodHS256, claims)\n\n\ttokenString, err := token.SignedString([]byte(a.config.SigningSecret))\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\treturn tokenString, nil\n}", "func CreateToken(user *models.User, ExpiresAt int64) (string, error) {\n\n\tclaims := &models.Claims{\n\t\tID: user.ID,\n\t\tStandardClaims: jwt.StandardClaims{\n\t\t\tExpiresAt: ExpiresAt,\n\t\t},\n\t}\n\n\ttoken := jwt.NewWithClaims(jwt.SigningMethodHS256, claims)\n\n\treturn token.SignedString([]byte(\"pingouin123\"))\n}" ]
[ "0.8215574", "0.78082126", "0.7724785", "0.77192235", "0.771866", "0.7673677", "0.7650788", "0.7650347", "0.76278704", "0.7624205", "0.761858", "0.76141596", "0.76023495", "0.7587422", "0.7586905", "0.7568152", "0.75659156", "0.75551575", "0.75548047", "0.7543786", "0.7528981", "0.7524678", "0.75244164", "0.7514457", "0.74895495", "0.7487728", "0.7440466", "0.74403876", "0.744", "0.741801", "0.74047357", "0.7359958", "0.73590213", "0.73575294", "0.73181087", "0.7317995", "0.731551", "0.7313919", "0.7283807", "0.72584724", "0.7251446", "0.72204876", "0.7205572", "0.71996665", "0.7191368", "0.7173905", "0.7172686", "0.7099483", "0.7090258", "0.706544", "0.70422524", "0.7029094", "0.7023916", "0.700178", "0.6981462", "0.69594556", "0.6958624", "0.69478303", "0.694667", "0.69274664", "0.69220674", "0.69162625", "0.68926543", "0.6889208", "0.6885338", "0.6852373", "0.682971", "0.68182045", "0.6801709", "0.6790563", "0.6789386", "0.6770377", "0.67510265", "0.6739653", "0.6734999", "0.67264044", "0.6725783", "0.6712088", "0.66923213", "0.66867864", "0.6686479", "0.6670591", "0.6638893", "0.6629424", "0.6628068", "0.66222537", "0.66173935", "0.66005933", "0.66004825", "0.65875965", "0.65852576", "0.65739745", "0.6561735", "0.65458506", "0.65446305", "0.6540049", "0.6531577", "0.65255296", "0.651029", "0.65089244" ]
0.8048552
1
WriteCloserWithContext converts ContextCloser to io.Closer, whenever new Close method will be called, the ctx will be passed to it
WriteCloserWithContext преобразует ContextCloser в io.Closer, при вызове нового метода Close контекст будет передан ему
func WriteCloserWithContext(ctx context.Context, closer WriteContextCloser) io.WriteCloser { return &closerWithContext{ WriteContextCloser: closer, ctx: ctx, } }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (c *closerWithContext) Close() error {\n\treturn c.WriteContextCloser.Close(c.ctx)\n}", "func (c *Context) Close() error {\n\treturn c.writer.Close()\n}", "func (fw *FileWriter) CloseWithContext(ctx context.Context, opts ...FlushRowGroupOption) error {\n\tif fw.schemaWriter.rowGroupNumRecords() > 0 {\n\t\tif err := fw.FlushRowGroup(opts...); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tkv := make([]*parquet.KeyValue, 0, len(fw.kvStore))\n\tfor i := range fw.kvStore {\n\t\tv := fw.kvStore[i]\n\t\taddr := &v\n\t\tif v == \"\" {\n\t\t\taddr = nil\n\t\t}\n\t\tkv = append(kv, &parquet.KeyValue{\n\t\t\tKey: i,\n\t\t\tValue: addr,\n\t\t})\n\t}\n\tmeta := &parquet.FileMetaData{\n\t\tVersion: fw.version,\n\t\tSchema: fw.schemaWriter.getSchemaArray(),\n\t\tNumRows: fw.totalNumRecords,\n\t\tRowGroups: fw.rowGroups,\n\t\tKeyValueMetadata: kv,\n\t\tCreatedBy: &fw.createdBy,\n\t\tColumnOrders: nil,\n\t}\n\n\tpos := fw.w.Pos()\n\tif err := writeThrift(ctx, meta, fw.w); err != nil {\n\t\treturn err\n\t}\n\n\tln := int32(fw.w.Pos() - pos)\n\tif err := binary.Write(fw.w, binary.LittleEndian, &ln); err != nil {\n\t\treturn err\n\t}\n\n\tif err := writeFull(fw.w, magic); err != nil {\n\t\treturn err\n\t}\n\n\treturn fw.bw.Flush()\n}", "func WithWriterContext(ctx context.Context) FileWriterOption {\n\treturn func(fw *FileWriter) {\n\t\tfw.ctx = ctx\n\t}\n}", "func WithContext(response http.ResponseWriter, request *http.Request, ctx context.Context) (http.ResponseWriter, *http.Request) {\n\tif ca, ok := response.(ContextAware); ok {\n\t\tca.SetContext(ctx)\n\t\treturn response, request.WithContext(ctx)\n\t}\n\n\tif ctx == nil {\n\t\tctx = context.Background()\n\t}\n\n\treturn &contextAwareResponseWriter{response, ctx}, request.WithContext(ctx)\n}", "func DelayedCtxCloser(ctx context.Context, delay time.Duration) context.Context {\n\tdelayedCtx, cancel := context.WithCancel(context.Background())\n\n\tgo func() 
{\n\t\t<-ctx.Done()\n\t\ttime.Sleep(delay)\n\t\tcancel()\n\t}()\n\n\treturn delayedCtx\n}", "func CloseContext(ctx *ContextT) {\n\tC.yices_free_context(yctx(*ctx))\n\tctx.raw = 0\n}", "func (o *WriteOptions) Context() context.Context {\n\tif o != nil && o.ctx != nil {\n\t\treturn o.ctx\n\t}\n\treturn context.Background()\n}", "func (lc *Closer) Ctx() context.Context {\n\treturn lc.ctx\n}", "func (o *WriteOptions) WithContext(ctx context.Context) *WriteOptions {\n\to2 := new(WriteOptions)\n\tif o != nil {\n\t\t*o2 = *o\n\t}\n\to2.ctx = ctx\n\treturn o2\n}", "func WriteStructWithContext(ctx context.Context, p thrift.TProtocol, value thrift.TStruct, name string, field int16) error {\n\tif err := p.WriteFieldBegin(ctx, name, thrift.STRUCT, field); err != nil {\n\t\treturn thrift.PrependError(\"write field begin error: \", err)\n\t}\n\tif err := value.Write(ctx, p); err != nil {\n\t\treturn thrift.PrependError(\"field write error: \", err)\n\t}\n\tif err := p.WriteFieldEnd(ctx); err != nil {\n\t\treturn thrift.PrependError(\"write field end error: \", err)\n\t}\n\treturn nil\n}", "func PipeWithContext(\n\tctx context.Context,\n\tsamplesPerSecond uint,\n\tformat SampleFormat,\n) (PipeReader, PipeWriter) {\n\tctx, cancel := context.WithCancel(ctx)\n\tp := &pipe{\n\t\tcontext: ctx,\n\t\tcancel: cancel,\n\t\tformat: format,\n\t\tsamplesPerSecond: samplesPerSecond,\n\t\tsamplesCh: make(chan Samples),\n\t\treadSamplesCh: make(chan int),\n\t}\n\treturn p, p\n}", "func ctx(out io.Writer, debug bool) context.Context {\n\tif !debug {\n\t\treturn orascontext.Background()\n\t}\n\tctx := orascontext.WithLoggerFromWriter(context.Background(), out)\n\torascontext.GetLogger(ctx).Logger.SetLevel(logrus.DebugLevel)\n\treturn ctx\n}", "func (ctx *ResourceContext) SafeClose() {\n}", "func (c *client) exportContext(parent context.Context) (context.Context, context.CancelFunc) {\n\tvar (\n\t\tctx context.Context\n\t\tcancel context.CancelFunc\n\t)\n\n\tif c.exportTimeout > 0 {\n\t\tctx, 
cancel = context.WithTimeout(parent, c.exportTimeout)\n\t} else {\n\t\tctx, cancel = context.WithCancel(parent)\n\t}\n\n\tif c.metadata.Len() > 0 {\n\t\tctx = metadata.NewOutgoingContext(ctx, c.metadata)\n\t}\n\n\treturn ctx, cancel\n}", "func WrapCancel(cancel context.CancelFunc) io.Closer {\n\treturn Wrap(func() error {\n\t\tcancel()\n\t\treturn nil\n\t})\n}", "func (m *MQTT) WriteWithContext(ctx context.Context, msg *message.Batch) error {\n\treturn m.Write(msg)\n}", "func runWithContext(fun func(ctx context.Context) error) (context.CancelFunc, chan error) {\n\tctx, cancel := context.WithCancel(context.Background())\n\tdone := make(chan error, 1)\n\tgo func() {\n\t\tdefer close(done)\n\t\tdone <- fun(ctx)\n\t}()\n\n\treturn cancel, done\n}", "func (h ContextHandlerFunc) ServeHTTPWithContext(ctx context.Context, w http.ResponseWriter, req *http.Request) {\n\th(ctx, w, req)\n}", "func (a *AzureBlobStorage) WriteWithContext(_ context.Context, msg *message.Batch) error {\n\treturn IterateBatchedSend(msg, func(i int, p *message.Part) error {\n\t\tc := a.client.GetContainerReference(a.container.String(i, msg))\n\t\tb := c.GetBlobReference(a.path.String(i, msg))\n\t\tif err := a.uploadBlob(b, a.blobType.String(i, msg), p.Get()); err != nil {\n\t\t\tif containerNotFound(err) {\n\t\t\t\tif cerr := a.createContainer(c, a.accessLevel.String(i, msg)); cerr != nil {\n\t\t\t\t\ta.log.Debugf(\"error creating container: %v.\", cerr)\n\t\t\t\t\treturn cerr\n\t\t\t\t}\n\t\t\t\terr = a.uploadBlob(b, a.blobType.String(i, msg), p.Get())\n\t\t\t\tif err != nil {\n\t\t\t\t\ta.log.Debugf(\"error retrying to upload blob: %v.\", err)\n\t\t\t\t}\n\t\t\t}\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t})\n}", "func (g *Glue) newContext(w http.ResponseWriter, r *http.Request) *Context {\n\tctx := &Context{inj.New(), g, newResponseWriter(w)}\n\n\tctx.Register(r)\n\tctx.Register(ctx.rw)\n\t// register our ResponseWriter as an http.ResponseWriter as well for\n\t// net/http HandlerFunc 
compatibility\n\tctx.RegisterAs(ctx.rw, (*http.ResponseWriter)(nil))\n\t// register this instance with itself\n\tctx.Register(*ctx)\n\treturn ctx\n}", "func WithContext(ctx context.Context) Option {\n\treturn func(o *Registry) { o.ctx = ctx }\n}", "func (zw *ZerologWriter) WithContext(ctx context.Context) ZerologWriter {\n\treturn ZerologWriter{w: zw.w.WithContext(ctx)}\n}", "func (zw *ZerologWriter) WithContext(ctx context.Context) ZerologWriter {\n\treturn ZerologWriter{w: zw.w.WithContext(ctx)}\n}", "func archiveContext(ctx context.Context, root string, writer io.Writer) (err error) {\n\n\t// Create a buffered writer.\n\tbufWriter := bufio.NewWriter(writer)\n\tdefer bufWriter.Flush()\n\n\t// Create a zipped writer on the bufferd writer.\n\tzipWriter, err := gzip.NewWriterLevel(bufWriter, gzip.BestCompression)\n\tif err != nil {\n\t\treturn\n\t}\n\tdefer zipWriter.Close()\n\n\t// Create a tarball writer on the zipped writer.\n\ttarWriter := tar.NewWriter(zipWriter)\n\tdefer tarWriter.Close()\n\n\t// Create a tarball.\n\tsources, err := ioutil.ReadDir(root)\n\tif err != nil {\n\t\treturn\n\t}\n\tfor _, info := range sources {\n\n\t\tselect {\n\t\tcase <-ctx.Done():\n\t\t\treturn ctx.Err()\n\n\t\tdefault:\n\t\t\t// Write a file header.\n\t\t\theader, err := tar.FileInfoHeader(info, info.Name())\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\ttarWriter.WriteHeader(header)\n\n\t\t\t// Write the body.\n\t\t\tif err = copyFile(filepath.Join(root, info.Name()), tarWriter); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\n\treturn\n\n}", "func (h *Handler) WithContext(ctx context.Context) {\n\tgo func() {\n\t\t<-ctx.Done()\n\t\th.terminating = true\n\t}()\n}", "func (c MethodsCollection) WithContext() pWithContext {\n\treturn pWithContext{\n\t\tMethod: c.MustGet(\"WithContext\"),\n\t}\n}", "func SerializeCtx(ctx context.Context, opts ...SerializeOpts) ([]byte, error) {\n\tbuf := new(bytes.Buffer)\n\te := gob.NewEncoder(buf)\n\n\ts := 
contextData{\n\t\tValues: make(map[interface{}]interface{}),\n\t\tHasCancel: false,\n\t\tDeadline: time.Time{},\n\t}\n\n\tserialized := buildMap(ctx, s)\n\n\t// if options were passed\n\tif len(opts) > 0 {\n\t\t// override cancel/deadline\n\t\tif !opts[0].RetainCancel {\n\t\t\tserialized.HasCancel = false\n\t\t}\n\t\tif !opts[0].RetainDeadline {\n\t\t\tserialized.HasDeadline = false\n\t\t}\n\t\t// ignore functions to allow serialization to pass\n\t\tif opts[0].IgnoreFunctions {\n\t\t\tfor key, val := range serialized.Values {\n\t\t\t\tif reflect.TypeOf(key).Kind() == reflect.Func || reflect.TypeOf(val).Kind() == reflect.Func {\n\t\t\t\t\tdelete(serialized.Values, key)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\t// Encoding the map\n\terr := e.Encode(serialized)\n\treturn buf.Bytes(), err\n}", "func WithCloser(closeableObject io.Closer, action func()) {\n\tdefer func() {\n\t\terr := closeableObject.Close()\n\t\tif err != nil {\n\t\t\tlog.Printf(\"failed to close %T: %s\", closeableObject, err)\n\t\t}\n\t}()\n\taction()\n}", "func CopyWithContext(ctx context.Context, dst *Writer, src Stream) error {\n\tif err := src.Open(); err != nil {\n\t\treturn err\n\t}\n\tvar err error\n\tfor ctx.Err() == nil {\n\t\tvar pair Pair\n\t\tpair, err = src.Read()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif pair.Key == nil {\n\t\t\tbreak\n\t\t}\n\t\terr = dst.Write(pair)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn ctx.Err()\n}", "func NewTickerWithContext(ctx context.Context, d time.Duration) *Ticker {\n\tt := NewTicker(d)\n\tif ctx.Done() != nil {\n\t\tgo func() {\n\t\t\t<-ctx.Done()\n\t\t\tt.Stop()\n\t\t}()\n\t}\n\treturn t\n}", "func DoWithContext(ctx context.Context, do func(ctx context.Context) error, fallback func(err error)) (err error) {\n\terrorChannel := make(chan error)\n\tvar contextHasBeenDone = false\n\tgo func() {\n\t\terr := do(ctx)\n\t\tif contextHasBeenDone {\n\t\t\tif fallback != nil 
{\n\t\t\t\tfallback(err)\n\t\t\t}\n\t\t\treturn\n\t\t}\n\t\terrorChannel <- err\n\t}()\n\tselect {\n\tcase err = <-errorChannel:\n\t\treturn err\n\tcase <-ctx.Done():\n\t\tcontextHasBeenDone = true\n\t\treturn ctx.Err()\n\t}\n}", "func With(ctx context.Context, kvs ...interface{}) context.Context {\n\tl := fromCtx(ctx)\n\tl = l.With(kvs...)\n\treturn toCtx(ctx, l)\n}", "func (c *ProjectsLocationsInsightsWriteInsightCall) Context(ctx context.Context) *ProjectsLocationsInsightsWriteInsightCall {\n\tc.ctx_ = ctx\n\treturn c\n}", "func (r *StreamingRuntime) ExecWithContext(\n\tctx context.Context,\n\tcontainerID string,\n\tcmd []string,\n\tin io.Reader,\n\tout, errw io.WriteCloser,\n\ttty bool,\n\tresize <-chan remotecommand.TerminalSize,\n\ttimeout time.Duration,\n) error {\n\tcontainer, err := libdocker.CheckContainerStatus(r.Client, containerID)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn r.ExecHandler.ExecInContainer(\n\t\tctx,\n\t\tr.Client,\n\t\tcontainer,\n\t\tcmd,\n\t\tin,\n\t\tout,\n\t\terrw,\n\t\ttty,\n\t\tresize,\n\t\ttimeout,\n\t)\n}", "func WithCtx(c *Controller) func(http.Handler) http.Handler {\n\treturn func(next http.Handler) http.Handler {\n\t\treturn http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\t\t*r = *r.WithContext(context.WithValue(r.Context(), helpers.RenderCtxKey, c))\n\t\t\tnext.ServeHTTP(w, r)\n\t\t})\n\t}\n}", "func ContextResponseWriter(ctx context.Context) (rw ResponseWriter) {\n\tif d, ok := ctx.Value(contextHandlerDetailsKey).(*handlerDetails); ok {\n\t\trw, _ = d.rw.(ResponseWriter)\n\t}\n\treturn\n}", "func (rw *RWLock) LockWithCtx(ctx context.Context) error {\n\trw.lock.Lock()\n\tif !rw.init() {\n\t\trw.lock.Unlock()\n\t\treturn errors.ClosedState\n\t}\n\n\trw.writers++\n\tif rw.writers == 1 {\n\t\tif atomic.AddInt32(&rw.readers, -rwLockMaxReaders) == -rwLockMaxReaders+1 {\n\t\t\t// happy to lock\n\t\t\trw.state = stateLocked\n\t\t\trw.lock.Unlock()\n\t\t\treturn nil\n\t\t}\n\t}\n\tif rw.state == 
stateInit {\n\t\trw.state = stateWaiting\n\t}\n\trw.lock.Unlock()\n\n\tif ctx == nil {\n\t\tctx = context.Background()\n\t}\n\n\tselect {\n\tcase <-rw.clsCh:\n\t\trw.cancelWriter()\n\t\treturn errors.ClosedState\n\tcase <-ctx.Done():\n\t\trw.cancelWriter()\n\t\treturn ctx.Err()\n\tcase <-rw.wrCh:\n\t\trw.lock.Lock()\n\t\trw.state = stateLocked\n\t\trw.lock.Unlock()\n\t}\n\n\treturn nil\n}", "func (e *Encoder) EncodeContext(ctx context.Context, v interface{}, optFuncs ...EncodeOptionFunc) error {\n\trctx := encoder.TakeRuntimeContext()\n\trctx.Option.Flag = 0\n\trctx.Option.Flag |= encoder.ContextOption\n\trctx.Option.Context = ctx\n\n\terr := e.encodeWithOption(rctx, v, optFuncs...)\n\n\tencoder.ReleaseRuntimeContext(rctx)\n\treturn err\n}", "func closeCtx(k *http.Request) {\n\tpk := privateKey(k)\n\tif _, has := internalCtx.Get(pk); has {\n\t\tinternalCtx.Remove(pk)\n\t}\n}", "func CreticalfWithContext(ctx context.Context, format string, args ...interface{}) {\n\tif hub := sentry.GetHubFromContext(ctx); hub != nil {\n\t\tcreticaldeps(hub.CaptureMessage, 3, format, args...)\n\t\treturn\n\t}\n\n\tcreticaldeps(sentry.CaptureMessage, 3, format, args...)\n}", "func (q *ChannelQueue) FlushWithContext(ctx context.Context) error {\n\tlog.Trace(\"ChannelQueue: %d Flush\", q.qid)\n\tpaused, _ := q.IsPausedIsResumed()\n\tfor {\n\t\tselect {\n\t\tcase <-paused:\n\t\t\treturn nil\n\t\tcase data, ok := <-q.dataChan:\n\t\t\tif !ok {\n\t\t\t\treturn nil\n\t\t\t}\n\t\t\tif unhandled := q.handle(data); unhandled != nil {\n\t\t\t\tlog.Error(\"Unhandled Data whilst flushing queue %d\", q.qid)\n\t\t\t}\n\t\t\tatomic.AddInt64(&q.numInQueue, -1)\n\t\tcase <-q.baseCtx.Done():\n\t\t\treturn q.baseCtx.Err()\n\t\tcase <-ctx.Done():\n\t\t\treturn ctx.Err()\n\t\tdefault:\n\t\t\treturn nil\n\t\t}\n\t}\n}", "func (wss *WrappedServerStream) Context() context.Context {\n\treturn wss.context\n}", "func WriteBinaryWithContext(ctx context.Context, p thrift.TProtocol, value []byte, name string, field 
int16) error {\n\tif err := p.WriteFieldBegin(ctx, name, thrift.STRING, field); err != nil {\n\t\treturn thrift.PrependError(\"write field begin error: \", err)\n\t}\n\tif err := p.WriteBinary(ctx, value); err != nil {\n\t\treturn thrift.PrependError(\"field write error: \", err)\n\t}\n\tif err := p.WriteFieldEnd(ctx); err != nil {\n\t\treturn thrift.PrependError(\"write field end error: \", err)\n\t}\n\treturn nil\n}", "func Contexter() func(next http.Handler) http.Handler {\n\trnd := templates.HTMLRenderer()\n\tcsrfOpts := CsrfOptions{\n\t\tSecret: setting.SecretKey,\n\t\tCookie: setting.CSRFCookieName,\n\t\tSetCookie: true,\n\t\tSecure: setting.SessionConfig.Secure,\n\t\tCookieHTTPOnly: setting.CSRFCookieHTTPOnly,\n\t\tHeader: \"X-Csrf-Token\",\n\t\tCookieDomain: setting.SessionConfig.Domain,\n\t\tCookiePath: setting.SessionConfig.CookiePath,\n\t\tSameSite: setting.SessionConfig.SameSite,\n\t}\n\tif !setting.IsProd {\n\t\tCsrfTokenRegenerationInterval = 5 * time.Second // in dev, re-generate the tokens more aggressively for debug purpose\n\t}\n\treturn func(next http.Handler) http.Handler {\n\t\treturn http.HandlerFunc(func(resp http.ResponseWriter, req *http.Request) {\n\t\t\tctx := Context{\n\t\t\t\tResp: NewResponse(resp),\n\t\t\t\tCache: mc.GetCache(),\n\t\t\t\tLocale: middleware.Locale(resp, req),\n\t\t\t\tLink: setting.AppSubURL + strings.TrimSuffix(req.URL.EscapedPath(), \"/\"),\n\t\t\t\tRender: rnd,\n\t\t\t\tSession: session.GetSession(req),\n\t\t\t\tRepo: &Repository{\n\t\t\t\t\tPullRequest: &PullRequest{},\n\t\t\t\t},\n\t\t\t\tOrg: &Organization{},\n\t\t\t\tData: middleware.GetContextData(req.Context()),\n\t\t\t}\n\t\t\tdefer ctx.Close()\n\n\t\t\tctx.Data.MergeFrom(middleware.CommonTemplateContextData())\n\t\t\tctx.Data[\"Context\"] = &ctx\n\t\t\tctx.Data[\"CurrentURL\"] = setting.AppSubURL + req.URL.RequestURI()\n\t\t\tctx.Data[\"Link\"] = ctx.Link\n\t\t\tctx.Data[\"locale\"] = ctx.Locale\n\n\t\t\t// PageData is passed by reference, and it will be 
rendered to `window.config.pageData` in `head.tmpl` for JavaScript modules\n\t\t\tctx.PageData = map[string]any{}\n\t\t\tctx.Data[\"PageData\"] = ctx.PageData\n\n\t\t\tctx.Req = WithContext(req, &ctx)\n\t\t\tctx.Csrf = PrepareCSRFProtector(csrfOpts, &ctx)\n\n\t\t\t// Get the last flash message from cookie\n\t\t\tlastFlashCookie := middleware.GetSiteCookie(ctx.Req, CookieNameFlash)\n\t\t\tif vals, _ := url.ParseQuery(lastFlashCookie); len(vals) > 0 {\n\t\t\t\t// store last Flash message into the template data, to render it\n\t\t\t\tctx.Data[\"Flash\"] = &middleware.Flash{\n\t\t\t\t\tDataStore: &ctx,\n\t\t\t\t\tValues: vals,\n\t\t\t\t\tErrorMsg: vals.Get(\"error\"),\n\t\t\t\t\tSuccessMsg: vals.Get(\"success\"),\n\t\t\t\t\tInfoMsg: vals.Get(\"info\"),\n\t\t\t\t\tWarningMsg: vals.Get(\"warning\"),\n\t\t\t\t}\n\t\t\t}\n\n\t\t\t// prepare an empty Flash message for current request\n\t\t\tctx.Flash = &middleware.Flash{DataStore: &ctx, Values: url.Values{}}\n\t\t\tctx.Resp.Before(func(resp ResponseWriter) {\n\t\t\t\tif val := ctx.Flash.Encode(); val != \"\" {\n\t\t\t\t\tmiddleware.SetSiteCookie(ctx.Resp, CookieNameFlash, val, 0)\n\t\t\t\t} else if lastFlashCookie != \"\" {\n\t\t\t\t\tmiddleware.SetSiteCookie(ctx.Resp, CookieNameFlash, \"\", -1)\n\t\t\t\t}\n\t\t\t})\n\n\t\t\t// If request sends files, parse them here otherwise the Query() can't be parsed and the CsrfToken will be invalid.\n\t\t\tif ctx.Req.Method == \"POST\" && strings.Contains(ctx.Req.Header.Get(\"Content-Type\"), \"multipart/form-data\") {\n\t\t\t\tif err := ctx.Req.ParseMultipartForm(setting.Attachment.MaxSize << 20); err != nil && !strings.Contains(err.Error(), \"EOF\") { // 32MB max size\n\t\t\t\t\tctx.ServerError(\"ParseMultipartForm\", err)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t}\n\n\t\t\thttpcache.SetCacheControlInHeader(ctx.Resp.Header(), 0, \"no-transform\")\n\t\t\tctx.Resp.Header().Set(`X-Frame-Options`, setting.CORSConfig.XFrameOptions)\n\n\t\t\tctx.Data[\"CsrfToken\"] = 
ctx.Csrf.GetToken()\n\t\t\tctx.Data[\"CsrfTokenHtml\"] = template.HTML(`<input type=\"hidden\" name=\"_csrf\" value=\"` + ctx.Data[\"CsrfToken\"].(string) + `\">`)\n\n\t\t\t// FIXME: do we really always need these setting? There should be someway to have to avoid having to always set these\n\t\t\tctx.Data[\"DisableMigrations\"] = setting.Repository.DisableMigrations\n\t\t\tctx.Data[\"DisableStars\"] = setting.Repository.DisableStars\n\t\t\tctx.Data[\"EnableActions\"] = setting.Actions.Enabled\n\n\t\t\tctx.Data[\"ManifestData\"] = setting.ManifestData\n\n\t\t\tctx.Data[\"UnitWikiGlobalDisabled\"] = unit.TypeWiki.UnitGlobalDisabled()\n\t\t\tctx.Data[\"UnitIssuesGlobalDisabled\"] = unit.TypeIssues.UnitGlobalDisabled()\n\t\t\tctx.Data[\"UnitPullsGlobalDisabled\"] = unit.TypePullRequests.UnitGlobalDisabled()\n\t\t\tctx.Data[\"UnitProjectsGlobalDisabled\"] = unit.TypeProjects.UnitGlobalDisabled()\n\t\t\tctx.Data[\"UnitActionsGlobalDisabled\"] = unit.TypeActions.UnitGlobalDisabled()\n\n\t\t\tctx.Data[\"AllLangs\"] = translation.AllLangs()\n\n\t\t\tnext.ServeHTTP(ctx.Resp, ctx.Req)\n\t\t})\n\t}\n}", "func WriteI32WithContext(ctx context.Context, p thrift.TProtocol, value int32, name string, field int16) error {\n\tif err := p.WriteFieldBegin(ctx, name, thrift.I32, field); err != nil {\n\t\treturn thrift.PrependError(\"write field begin error: \", err)\n\t}\n\tif err := p.WriteI32(ctx, value); err != nil {\n\t\treturn thrift.PrependError(\"field write error: \", err)\n\t}\n\tif err := p.WriteFieldEnd(ctx); err != nil {\n\t\treturn thrift.PrependError(\"write field end error: \", err)\n\t}\n\treturn nil\n}", "func (w *wrappedServerStream) Context() context.Context {\n\treturn w.WrappedContext\n}", "func WithContext(ctx context.Context, fnctx Context) context.Context {\n\treturn context.WithValue(ctx, ctxKey, fnctx)\n}", "func (x Go) WithContext(ctx context.Context, f func(context.Context)) error {\n\t_ctx := ctx\n\tvar _cancel context.CancelFunc\n\tvar started, funcDone chan 
struct{}\n\tif x.ensureStarted {\n\t\tstarted = make(chan struct{})\n\t}\n\tif x.timeout != 0 {\n\t\tif x.timeout > 0 {\n\t\t\t_ctx, _cancel = context.WithCancel(ctx)\n\t\t\tdefer _cancel()\n\t\t}\n\t\tfuncDone = make(chan struct{})\n\t}\n\tif x.wg != nil {\n\t\tx.wg.Add(1)\n\t}\n\n\tgo func() {\n\t\tif started != nil {\n\t\t\tclose(started)\n\t\t}\n\t\tif x.wg != nil {\n\t\t\tdefer x.wg.Done()\n\t\t}\n\t\tif funcDone != nil {\n\t\t\tdefer close(funcDone)\n\t\t}\n\t\tif x.recoverFunc != nil {\n\t\t\tdefer func() {\n\t\t\t\tif e := recover(); e != nil {\n\t\t\t\t\tx.recoverFunc(e)\n\t\t\t\t}\n\t\t\t}()\n\t\t}\n\t\tif x.before != nil {\n\t\t\tx.before()\n\t\t}\n\t\tif x.after != nil && x.deferAfter {\n\t\t\tdefer x.after()\n\t\t}\n\n\t\tf(_ctx)\n\n\t\tif x.after != nil && !x.deferAfter {\n\t\t\tx.after()\n\t\t}\n\t}()\n\n\tif started != nil {\n\t\t<-started\n\t}\n\tif funcDone != nil {\n\t\tif x.timeout > 0 {\n\t\t\ttm := time.NewTimer(x.timeout)\n\t\t\tdefer func() {\n\t\t\t\tif !tm.Stop() {\n\t\t\t\t\tselect {\n\t\t\t\t\tcase <-tm.C:\n\t\t\t\t\tdefault:\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}()\n\t\t\tselect {\n\t\t\tcase <-funcDone:\n\t\t\tcase <-tm.C:\n\t\t\t\treturn ErrTimeout\n\t\t\t}\n\t\t} else if x.timeout < 0 {\n\t\t\t<-funcDone\n\t\t}\n\t}\n\n\treturn nil\n}", "func WriteI16WithContext(ctx context.Context, p thrift.TProtocol, value int16, name string, field int16) error {\n\tif err := p.WriteFieldBegin(ctx, name, thrift.I16, field); err != nil {\n\t\treturn thrift.PrependError(\"write field begin error: \", err)\n\t}\n\tif err := p.WriteI16(ctx, value); err != nil {\n\t\treturn thrift.PrependError(\"field write error: \", err)\n\t}\n\tif err := p.WriteFieldEnd(ctx); err != nil {\n\t\treturn thrift.PrependError(\"write field end error: \", err)\n\t}\n\treturn nil\n}", "func (s *DeterminationWithContext) Close() error {\n\treturn s.close(s.trigger)\n}", "func (tx *WriteTx) RunWithContext(ctx context.Context) error {\n\tif tx.err != nil {\n\t\treturn 
tx.err\n\t}\n\tinput, err := tx.input()\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = retry(ctx, func() error {\n\t\tout, err := tx.db.client.TransactWriteItemsWithContext(ctx, input)\n\t\tif tx.cc != nil && out != nil {\n\t\t\tfor _, cc := range out.ConsumedCapacity {\n\t\t\t\taddConsumedCapacity(tx.cc, cc)\n\t\t\t}\n\t\t}\n\t\treturn err\n\t})\n\treturn err\n}", "func (k *KeepAliveConn) KeepAliveContext(ctx context.Context) {\n\tk.Lock()\n\tdefer k.Unlock()\n\n\tif k.cancel != nil || k.isClosed {\n\t\treturn\n\t}\n\n\tderivedCtx, cancel := context.WithCancel(ctx)\n\tk.cancel = cancel\n\n\tgo k.readContext(derivedCtx)\n\tgo k.writeContext(derivedCtx)\n\tgo k.keepAliveContext(derivedCtx)\n\n}", "func ctxForOp() (context.Context, func()) {\n\treturn context.WithTimeout(context.Background(), timeoutOp)\n}", "func sendWithContext(ctx context.Context, httpClient *http.Client, url string, body io.Reader, opt *Options) (*http.Response, error) {\n\tv, _ := query.Values(opt)\n\n\t// fmt.Print(v.Encode()) will output: \"city=0&mr=1&pb=4&pro=0&yys=0\"\n\tAPIEndpoint := fmt.Sprintf(\"%s&%s\", url, v.Encode())\n\tfmt.Println(APIEndpoint)\n\t// Change NewRequest to NewRequestWithContext and pass context it\n\treq, err := http.NewRequestWithContext(ctx, http.MethodGet, APIEndpoint, body)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\t// http.DefaultClient\n\tres, err := httpClient.Do(req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn res, nil\n}", "func (s *Stream) WithContext(ctx context.Context) *Stream {\n\ts.ctx = ctx\n\treturn s\n}", "func MetaWithContext(ctx context.Context, newMeta map[string]interface{}) context.Context {\n\tprevMeta := MetaFromContext(ctx)\n\n\tif prevMeta == nil {\n\t\tprevMeta = make(map[string]interface{})\n\t}\n\n\tfor k, v := range newMeta {\n\t\tprevMeta[k] = v\n\t}\n\n\treturn context.WithValue(ctx, MetaCtxKey, prevMeta)\n}", "func AsContext(d Doner) context.Context {\n\tc, cancel := 
context.WithCancel(context.Background())\n\tDefer(d, cancel)\n\treturn c\n}", "func (el *ZapEventLogger) SerializeContext(ctx context.Context) ([]byte, error) {\n\tgTracer := opentrace.GlobalTracer()\n\tb := make([]byte, 0)\n\tcarrier := bytes.NewBuffer(b)\n\tspan := opentrace.SpanFromContext(ctx)\n\tif err := gTracer.Inject(span.Context(), opentrace.Binary, carrier); err != nil {\n\t\treturn nil, err\n\t}\n\treturn carrier.Bytes(), nil\n}", "func (c *minecraftConn) newContext(parent context.Context) (ctx context.Context, cancel func()) {\n\tctx, cancel = context.WithCancel(parent)\n\tgo func() {\n\t\tselect {\n\t\tcase <-ctx.Done():\n\t\tcase <-c.closed:\n\t\t\tcancel()\n\t\t}\n\t}()\n\treturn ctx, cancel\n}", "func WithContext(ctx context.Context) (Interface, context.Context) {\n\tctx, cancel := context.WithCancel(ctx)\n\treturn (&contextBreaker{ctx, cancel}).trigger(), ctx\n}", "func WithContext(context string) StructuredLogger {\n\treturn factory.WithContext(context)\n}", "func (op *Operation) With(ctx context.Context, err *error, args Args) (context.Context, FinishFunc) {\n\tctx, _, endObservation := op.WithAndLogger(ctx, err, args)\n\treturn ctx, endObservation\n}", "func (op *Operation) With(ctx context.Context, err *error, args Args) (context.Context, FinishFunc) {\n\tctx, _, endObservation := op.WithAndLogger(ctx, err, args)\n\treturn ctx, endObservation\n}", "func (blk *Block) DrawWithContext(d Drawable, ctx DrawContext) error {\n\tblocks, _, err := d.GeneratePageBlocks(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif len(blocks) != 1 {\n\t\treturn errors.New(\"too many output blocks\")\n\t}\n\n\tfor _, newBlock := range blocks {\n\t\tif err := blk.mergeBlocks(newBlock); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}", "func (obj *ConfigWriter) AddServantWithContext(imp impConfigWriterWithContext, objStr string) {\n\ttars.AddServantWithContext(obj, imp, objStr)\n}", "func WithContext(ctx context.Context, db interfaces.DB) interfaces.DB 
{\n\t// the cases on if/else are actually transactions: noop\n\tif sdb, ok := db.(*DB); ok {\n\t\tif sdb.tx != nil {\n\t\t\treturn db\n\t\t}\n\t} else if _, ok := db.(interface{ Rollback() error }); ok {\n\t\treturn db\n\t}\n\n\treturn &DB{\n\t\tinner: db.WithContext(ctx),\n\t}\n}", "func WithContext(ctx context.Context, opts ...TreeOption) context.Context {\n\tchosenName := fmt.Sprintf(\"tree-%d\", rand.Uint64())\n\tbaseOpts := append([]TreeOption{\n\t\toversight.WithRestartStrategy(oversight.OneForAll()),\n\t\toversight.NeverHalt(),\n\t}, opts...)\n\ttree := oversight.New(baseOpts...)\n\n\tmu.Lock()\n\ttrees[chosenName] = tree\n\tmu.Unlock()\n\n\twrapped := context.WithValue(ctx, treeName, chosenName)\n\tgo tree.Start(wrapped)\n\treturn wrapped\n}", "func (manager *transportManager) cancelCtxCloseTransport() {\n\t// Grab the notification subscriber lock so new subscribers will not get added\n\t// without seeing the context cancel.\n\tmanager.notificationSubscriberLock.Lock()\n\n\t// Cancel the context the tryReconnect this closure will cause exits.\n\tmanager.cancelFunc()\n\n\t// Release the notification lock. 
Not doing so before we grab the livesOnce lock\n\t// can result in a deadlock if a redial is in process (since the redial needs to\n\t// grab the subscribers lock to notify them).\n\tmanager.notificationSubscriberLock.Unlock()\n\n\t// Take control of the connection lock to ensure all in-process operations have\n\t// completed.\n\tmanager.transportLock.Lock()\n\tdefer manager.transportLock.Unlock()\n\n\t// Close the current connection on exit\n\tdefer manager.transport.underlyingTransport().Close()\n}", "func WriteDoubleWithContext(ctx context.Context, p thrift.TProtocol, value float64, name string, field int16) error {\n\tif err := p.WriteFieldBegin(ctx, name, thrift.DOUBLE, field); err != nil {\n\t\treturn thrift.PrependError(\"write field begin error: \", err)\n\t}\n\tif err := p.WriteDouble(ctx, value); err != nil {\n\t\treturn thrift.PrependError(\"field write error: \", err)\n\t}\n\tif err := p.WriteFieldEnd(ctx); err != nil {\n\t\treturn thrift.PrependError(\"write field end error: \", err)\n\t}\n\treturn nil\n}", "func (f HandlerFunc) Handle(ctx context.Context, c WriteCloser) {\n\tf(ctx, c)\n}", "func (_obj *DataService) CreateApplyWithContext(tarsCtx context.Context, wx_id string, club_id string, _opt ...map[string]string) (ret int32, err error) {\n\n\tvar length int32\n\tvar have bool\n\tvar ty byte\n\t_os := codec.NewBuffer()\n\terr = _os.Write_string(wx_id, 1)\n\tif err != nil {\n\t\treturn ret, err\n\t}\n\n\terr = _os.Write_string(club_id, 2)\n\tif err != nil {\n\t\treturn ret, err\n\t}\n\n\tvar _status map[string]string\n\tvar _context map[string]string\n\tif len(_opt) == 1 {\n\t\t_context = _opt[0]\n\t} else if len(_opt) == 2 {\n\t\t_context = _opt[0]\n\t\t_status = _opt[1]\n\t}\n\t_resp := new(requestf.ResponsePacket)\n\n\terr = _obj.s.Tars_invoke(tarsCtx, 0, \"createApply\", _os.ToBytes(), _status, _context, _resp)\n\tif err != nil {\n\t\treturn ret, err\n\t}\n\n\t_is := codec.NewReader(tools.Int8ToByte(_resp.SBuffer))\n\terr = _is.Read_int32(&ret, 
0, true)\n\tif err != nil {\n\t\treturn ret, err\n\t}\n\n\tif len(_opt) == 1 {\n\t\tfor k := range _context {\n\t\t\tdelete(_context, k)\n\t\t}\n\t\tfor k, v := range _resp.Context {\n\t\t\t_context[k] = v\n\t\t}\n\t} else if len(_opt) == 2 {\n\t\tfor k := range _context {\n\t\t\tdelete(_context, k)\n\t\t}\n\t\tfor k, v := range _resp.Context {\n\t\t\t_context[k] = v\n\t\t}\n\t\tfor k := range _status {\n\t\t\tdelete(_status, k)\n\t\t}\n\t\tfor k, v := range _resp.Status {\n\t\t\t_status[k] = v\n\t\t}\n\n\t}\n\t_ = length\n\t_ = have\n\t_ = ty\n\treturn ret, nil\n}", "func genContext(r *http.Request) (context.Context, context.CancelFunc) {\n\twriteTimeout := r.Context().Value(http.ServerContextKey).(*http.Server).WriteTimeout\n\treturn context.WithTimeout(context.Background(), writeTimeout*80/100)\n}", "func withContext(borrower ContextBorrower, worker Worker) Worker {\n\n\treturn func(t *T, _ Context) {\n\n\t\tif t.Failed() {\n\t\t\treturn\n\t\t}\n\n\t\tctx, release, err := borrower.Borrow()\n\t\tif err != nil {\n\t\t\tt.Errorf(\"%s\", err)\n\t\t\tt.FailNow()\n\t\t}\n\n\t\tdefer release()\n\t\tworkerRunner(nil, worker, t, ctx)\n\t}\n}", "func DoCtx(ctx context.Context, req *http.Request, resp interface{}) (*http.Response, error) {\n\tr := req.Clone(ctx)\n\n\treturn Do(r, resp)\n}", "func Context(canc Canceller) context.Context {\n\treturn ctxWrap{Canceller: canc}\n}", "func ForWithContext(c context.Context, begin int, end int, f ForLoop) {\n\tlength := end - begin\n\n\tif length > 0 {\n\t\tctx, cacnel := context.WithCancel(c)\n\t\tgo doLoop(cacnel, begin, end, f)\n\t\t<-ctx.Done()\n\t}\n}", "func (c *ConnUDP) WriteWithContext(ctx context.Context, udpCtx *ConnUDPContext, buffer []byte) error {\n\tif udpCtx == nil {\n\t\treturn fmt.Errorf(\"cannot write with context: invalid udpCtx\")\n\t}\n\tif udpCtx.raddr.IP.IsMulticast() {\n\t\treturn c.writeMulticastWithContext(ctx, udpCtx, buffer)\n\t}\n\n\twritten := 0\n\tc.lock.Lock()\n\tdefer c.lock.Unlock()\n\tfor written 
< len(buffer) {\n\t\tselect {\n\t\tcase <-ctx.Done():\n\t\t\treturn ctx.Err()\n\t\tdefault:\n\t\t}\n\t\terr := c.connection.SetWriteDeadline(time.Now().Add(c.heartBeat))\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"cannot set write deadline for udp connection: %v\", err)\n\t\t}\n\t\tn, err := WriteToSessionUDP(c.connection, udpCtx, buffer[written:])\n\t\tif err != nil {\n\t\t\tif isTemporary(err) {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\treturn fmt.Errorf(\"cannot write to udp connection: %v\", err)\n\t\t}\n\t\twritten += n\n\t}\n\n\treturn nil\n}", "func (config *DialConfig) DialContext(ctx context.Context, path string) (net.Conn, error) {\n\tvar err error\n\tvar h windows.Handle\n\th, err = tryDialPipe(ctx, &path)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif config.ExpectedOwner != nil {\n\t\tsd, err := windows.GetSecurityInfo(h, windows.SE_FILE_OBJECT, windows.OWNER_SECURITY_INFORMATION)\n\t\tif err != nil {\n\t\t\twindows.Close(h)\n\t\t\treturn nil, err\n\t\t}\n\t\trealOwner, _, err := sd.Owner()\n\t\tif err != nil {\n\t\t\twindows.Close(h)\n\t\t\treturn nil, err\n\t\t}\n\t\tif !realOwner.Equals(config.ExpectedOwner) {\n\t\t\twindows.Close(h)\n\t\t\treturn nil, windows.ERROR_ACCESS_DENIED\n\t\t}\n\t}\n\n\tvar flags uint32\n\terr = windows.GetNamedPipeInfo(h, &flags, nil, nil, nil)\n\tif err != nil {\n\t\twindows.Close(h)\n\t\treturn nil, err\n\t}\n\n\tf, err := makeFile(h)\n\tif err != nil {\n\t\twindows.Close(h)\n\t\treturn nil, err\n\t}\n\n\t// If the pipe is in message mode, return a message byte pipe, which\n\t// supports CloseWrite.\n\tif flags&windows.PIPE_TYPE_MESSAGE != 0 {\n\t\treturn &messageBytePipe{\n\t\t\tpipe: pipe{file: f, path: path},\n\t\t}, nil\n\t}\n\treturn &pipe{file: f, path: path}, nil\n}", "func (e *Encoder) EncodeContext(ctx context.Context, v interface{}) error {\n\tnode, err := e.EncodeToNodeContext(ctx, v)\n\tif err != nil {\n\t\treturn errors.Wrapf(err, \"failed to encode to node\")\n\t}\n\tif err := 
e.setCommentByCommentMap(node); err != nil {\n\t\treturn errors.Wrapf(err, \"failed to set comment by comment map\")\n\t}\n\tif !e.written {\n\t\te.written = true\n\t} else {\n\t\t// write document separator\n\t\te.writer.Write([]byte(\"---\\n\"))\n\t}\n\tvar p printer.Printer\n\te.writer.Write(p.PrintNode(node))\n\treturn nil\n}", "func (cw compressingWriter) Close() error {\n\tz := cw.WriteCloser.(*gzip.Writer)\n\terr := z.Flush()\n\tcw.p.Put(z)\n\treturn err\n}", "func (cdp *Client) Context(ctx context.Context) *Client {\n\tctx, cancel := context.WithCancel(ctx)\n\tcdp.ctx = ctx\n\tcdp.ctxCancel = cancel\n\treturn cdp\n}", "func WithCancel(ctx Context) (Context, context.CancelFunc) {\n\tstdCtx, cancel := context.WithCancel(ctx.StdContext())\n\treturn withStdCancel(ctx, stdCtx), cancel\n}", "func wrapContext(ctx context.Context, adapter Adapter) contextWrapper {\n\treturn contextWrapper{\n\t\tctx: context.WithValue(ctx, ctxKey, adapter),\n\t\tadapter: adapter,\n\t}\n}", "func WriteByteWithContext(ctx context.Context, p thrift.TProtocol, value int8, name string, field int16) error {\n\tif err := p.WriteFieldBegin(ctx, name, thrift.BYTE, field); err != nil {\n\t\treturn thrift.PrependError(\"write field begin error: \", err)\n\t}\n\tif err := p.WriteByte(ctx, value); err != nil {\n\t\treturn thrift.PrependError(\"field write error: \", err)\n\t}\n\tif err := p.WriteFieldEnd(ctx); err != nil {\n\t\treturn thrift.PrependError(\"write field end error: \", err)\n\t}\n\treturn nil\n}", "func (ctx *Context) Close() error {\n\tif ctx == nil {\n\t\treturn nil\n\t}\n\n\treturn ctx.yum.Close()\n}", "func (ctx *Context) Close() error {\n\tvar err error\n\tif ctx.Req != nil && ctx.Req.MultipartForm != nil {\n\t\terr = ctx.Req.MultipartForm.RemoveAll() // remove the temp files buffered to tmp directory\n\t}\n\t// TODO: close opened repo, and more\n\treturn err\n}", "func (_obj *Apichannels) Channels_joinChannelWithContext(tarsCtx context.Context, params *TLchannels_joinChannel, 
_opt ...map[string]string) (ret Updates, err error) {\n\n\tvar length int32\n\tvar have bool\n\tvar ty byte\n\t_os := codec.NewBuffer()\n\terr = params.WriteBlock(_os, 1)\n\tif err != nil {\n\t\treturn ret, err\n\t}\n\n\tvar _status map[string]string\n\tvar _context map[string]string\n\tif len(_opt) == 1 {\n\t\t_context = _opt[0]\n\t} else if len(_opt) == 2 {\n\t\t_context = _opt[0]\n\t\t_status = _opt[1]\n\t}\n\t_resp := new(requestf.ResponsePacket)\n\n\terr = _obj.s.Tars_invoke(tarsCtx, 0, \"channels_joinChannel\", _os.ToBytes(), _status, _context, _resp)\n\tif err != nil {\n\t\treturn ret, err\n\t}\n\n\t_is := codec.NewReader(tools.Int8ToByte(_resp.SBuffer))\n\terr = ret.ReadBlock(_is, 0, true)\n\tif err != nil {\n\t\treturn ret, err\n\t}\n\n\tif len(_opt) == 1 {\n\t\tfor k := range _context {\n\t\t\tdelete(_context, k)\n\t\t}\n\t\tfor k, v := range _resp.Context {\n\t\t\t_context[k] = v\n\t\t}\n\t} else if len(_opt) == 2 {\n\t\tfor k := range _context {\n\t\t\tdelete(_context, k)\n\t\t}\n\t\tfor k, v := range _resp.Context {\n\t\t\t_context[k] = v\n\t\t}\n\t\tfor k := range _status {\n\t\t\tdelete(_status, k)\n\t\t}\n\t\tfor k, v := range _resp.Status {\n\t\t\t_status[k] = v\n\t\t}\n\n\t}\n\t_ = length\n\t_ = have\n\t_ = ty\n\treturn ret, nil\n}", "func (m *Macross) ReleaseContext(c *Context) {\n\tc.Response.Header.SetServer(\"Macross\")\n\tm.pool.Put(c)\n}", "func WithContext(ctx context.Context) OptFn {\n\treturn func(o *Opt) {\n\t\to.ctx = ctx\n\t}\n}", "func withInterrupt(parent context.Context) (_ context.Context, stop func()) {\n\tsig := make(chan os.Signal, 1)\n\tsignal.Notify(sig, interruptSignals()...)\n\tctx, cancel := context.WithCancel(parent)\n\tdone := make(chan struct{})\n\tgo func() {\n\t\tselect {\n\t\tcase <-sig:\n\t\t\tcancel()\n\t\tcase <-done:\n\t\t}\n\t}()\n\treturn ctx, func() {\n\t\tcancel()\n\t\tsignal.Stop(sig)\n\t\tclose(done)\n\t}\n}", "func SetContext(response http.ResponseWriter, ctx context.Context) http.ResponseWriter {\n\tif ca, 
ok := response.(ContextAware); ok {\n\t\tca.SetContext(ctx)\n\t\treturn response\n\t}\n\n\tif ctx == nil {\n\t\tpanic(\"nil context\")\n\t}\n\n\treturn &contextAwareResponseWriter{response, ctx}\n}", "func (req *UpsertObjectRequest) Context(ctx context.Context) *UpsertObjectRequest {\n\treq.impl = req.impl.Context(ctx)\n\n\treturn req\n}", "func (_obj *Apichannels) Channels_createChannelWithContext(tarsCtx context.Context, params *TLchannels_createChannel, _opt ...map[string]string) (ret Updates, err error) {\n\n\tvar length int32\n\tvar have bool\n\tvar ty byte\n\t_os := codec.NewBuffer()\n\terr = params.WriteBlock(_os, 1)\n\tif err != nil {\n\t\treturn ret, err\n\t}\n\n\tvar _status map[string]string\n\tvar _context map[string]string\n\tif len(_opt) == 1 {\n\t\t_context = _opt[0]\n\t} else if len(_opt) == 2 {\n\t\t_context = _opt[0]\n\t\t_status = _opt[1]\n\t}\n\t_resp := new(requestf.ResponsePacket)\n\n\terr = _obj.s.Tars_invoke(tarsCtx, 0, \"channels_createChannel\", _os.ToBytes(), _status, _context, _resp)\n\tif err != nil {\n\t\treturn ret, err\n\t}\n\n\t_is := codec.NewReader(tools.Int8ToByte(_resp.SBuffer))\n\terr = ret.ReadBlock(_is, 0, true)\n\tif err != nil {\n\t\treturn ret, err\n\t}\n\n\tif len(_opt) == 1 {\n\t\tfor k := range _context {\n\t\t\tdelete(_context, k)\n\t\t}\n\t\tfor k, v := range _resp.Context {\n\t\t\t_context[k] = v\n\t\t}\n\t} else if len(_opt) == 2 {\n\t\tfor k := range _context {\n\t\t\tdelete(_context, k)\n\t\t}\n\t\tfor k, v := range _resp.Context {\n\t\t\t_context[k] = v\n\t\t}\n\t\tfor k := range _status {\n\t\t\tdelete(_status, k)\n\t\t}\n\t\tfor k, v := range _resp.Status {\n\t\t\t_status[k] = v\n\t\t}\n\n\t}\n\t_ = length\n\t_ = have\n\t_ = ty\n\treturn ret, nil\n}", "func (_obj *Apichannels) Channels_getAdminLogWithContext(tarsCtx context.Context, params *TLchannels_getAdminLog, _opt ...map[string]string) (ret Channels_AdminLogResults, err error) {\n\n\tvar length int32\n\tvar have bool\n\tvar ty byte\n\t_os := 
codec.NewBuffer()\n\terr = params.WriteBlock(_os, 1)\n\tif err != nil {\n\t\treturn ret, err\n\t}\n\n\tvar _status map[string]string\n\tvar _context map[string]string\n\tif len(_opt) == 1 {\n\t\t_context = _opt[0]\n\t} else if len(_opt) == 2 {\n\t\t_context = _opt[0]\n\t\t_status = _opt[1]\n\t}\n\t_resp := new(requestf.ResponsePacket)\n\n\terr = _obj.s.Tars_invoke(tarsCtx, 0, \"channels_getAdminLog\", _os.ToBytes(), _status, _context, _resp)\n\tif err != nil {\n\t\treturn ret, err\n\t}\n\n\t_is := codec.NewReader(tools.Int8ToByte(_resp.SBuffer))\n\terr = ret.ReadBlock(_is, 0, true)\n\tif err != nil {\n\t\treturn ret, err\n\t}\n\n\tif len(_opt) == 1 {\n\t\tfor k := range _context {\n\t\t\tdelete(_context, k)\n\t\t}\n\t\tfor k, v := range _resp.Context {\n\t\t\t_context[k] = v\n\t\t}\n\t} else if len(_opt) == 2 {\n\t\tfor k := range _context {\n\t\t\tdelete(_context, k)\n\t\t}\n\t\tfor k, v := range _resp.Context {\n\t\t\t_context[k] = v\n\t\t}\n\t\tfor k := range _status {\n\t\t\tdelete(_status, k)\n\t\t}\n\t\tfor k, v := range _resp.Status {\n\t\t\t_status[k] = v\n\t\t}\n\n\t}\n\t_ = length\n\t_ = have\n\t_ = ty\n\treturn ret, nil\n}", "func Context(h http.Handler) http.Handler {\n\treturn http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\t// user service\n\t\tu := user.New(cognitoID, cognitoSecret)\n\t\tcontext.Set(r, \"userService\", u)\n\n\t\t// session helper\n\t\ts := session.New()\n\t\tcontext.Set(r, \"session\", s)\n\n\t\tvar netTransport = &http.Transport{\n\t\t\tDial: (&net.Dialer{\n\t\t\t\tTimeout: 5 * time.Second,\n\t\t\t}).Dial,\n\t\t\tTLSHandshakeTimeout: 5 * time.Second,\n\t\t}\n\n\t\t// support timeout and net transport.\n\t\tc := &http.Client{\n\t\t\tTimeout: time.Second * 10,\n\t\t\tTransport: netTransport,\n\t\t}\n\n\t\t// http client\n\t\tcontext.Set(r, \"client\", c)\n\n\t\tp := post.New(dynamoTablePosts, dynamoEndpoint, nil)\n\t\tcontext.Set(r, \"postService\", p)\n\n\t\tl := like.New(dynamoTableLikes, dynamoEndpoint, 
nil)\n\t\tcontext.Set(r, \"likeService\", l)\n\n\t\th.ServeHTTP(w, r)\n\t})\n}", "func NewContextWith(data map[string]interface{}) *Context {\n\tc := &Context{\n\t\tContext: context.Background(),\n\t\tdata: data,\n\t\touter: nil,\n\t\tmoot: &sync.Mutex{},\n\t}\n\n\tfor k, v := range Helpers.helpers {\n\t\tif !c.Has(k) {\n\t\t\tc.Set(k, v)\n\t\t}\n\t}\n\n\treturn c\n}", "func CrtlfWithContext(ctx context.Context, format string, args ...interface{}) {\n\tif hub := sentry.GetHubFromContext(ctx); hub != nil {\n\t\tcreticaldeps(hub.CaptureMessage, 3, format, args...)\n\t\treturn\n\t}\n\n\tcreticaldeps(sentry.CaptureMessage, 3, format, args...)\n}", "func (cs *ContextualServerStream) Context() context.Context {\n\treturn cs.Ctx\n}", "func CancelWhenClosed(parent context.Context, w http.ResponseWriter) (context.Context, func()) {\n\tctx, cancel := context.WithCancel(parent)\n\n\tclose := w.(http.CloseNotifier).CloseNotify()\n\n\t// listen for the connection to close, trigger cancelation\n\tgo func() {\n\t\tselect {\n\t\tcase <-close:\n\t\t\tcancel()\n\t\tcase <-ctx.Done():\n\t\t\treturn\n\t\t}\n\t}()\n\n\treturn ctx, cancel\n}" ]
[ "0.7478201", "0.59653234", "0.59053546", "0.5853121", "0.5791737", "0.57658553", "0.57110214", "0.5644903", "0.5617057", "0.55500585", "0.54606736", "0.5370619", "0.52994496", "0.5261682", "0.5238762", "0.5207119", "0.52057135", "0.5195372", "0.51947", "0.5191003", "0.5137905", "0.5120517", "0.51183856", "0.51183856", "0.5107839", "0.5091963", "0.5086669", "0.5077432", "0.50702924", "0.5052297", "0.5046513", "0.5024339", "0.5022383", "0.50117266", "0.49957973", "0.49881437", "0.4986711", "0.49835154", "0.49749213", "0.49740964", "0.4971424", "0.4967874", "0.49621794", "0.4952077", "0.4936446", "0.49345484", "0.4930686", "0.49250188", "0.49213982", "0.49011016", "0.4895321", "0.48948592", "0.48907092", "0.48896906", "0.48891193", "0.4868775", "0.4866342", "0.48525837", "0.4848692", "0.48461318", "0.48359478", "0.48349595", "0.48284096", "0.48284096", "0.48269457", "0.48261982", "0.48213896", "0.4821054", "0.48166162", "0.48147142", "0.47924927", "0.47902372", "0.47721875", "0.47642645", "0.47596124", "0.4758727", "0.47571492", "0.47510484", "0.47469887", "0.47348237", "0.47222126", "0.47116843", "0.47116798", "0.46986398", "0.46961066", "0.46952438", "0.4694454", "0.46823046", "0.46797955", "0.46720335", "0.46670365", "0.46628755", "0.46591228", "0.46515846", "0.46474284", "0.4643623", "0.46408328", "0.46376604", "0.46329018", "0.46250698" ]
0.7699851
0
NilCloser returns closer if it's not nil otherwise returns a nop closer
NilCloser возвращает closer, если он не nil, иначе возвращает closer без действий
func NilCloser(r io.Closer) io.Closer { if r == nil { return &nilCloser{} } return r }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (nopCloser) Close() error { return nil }", "func (n *nilCloser) Close() error {\n\t// works even if n is nil\n\treturn nil\n}", "func NopCloser() error { return nil }", "func NoCloser(in io.Reader) io.Reader {\n\tif in == nil {\n\t\treturn in\n\t}\n\t// if in doesn't implement io.Closer, just return it\n\tif _, canClose := in.(io.Closer); !canClose {\n\t\treturn in\n\t}\n\treturn noClose{in: in}\n}", "func NopCloser() io.Closer {\r\n\treturn &nopCloser{}\r\n}", "func NopCloser(bio Biome) BiomeCloser {\n\treturn nopCloser{bio}\n}", "func NopReadSeekerCloser(r io.ReadSeeker) ReadSeekerCloser {\n\treturn readSeekerCloser{r, func() error { return nil }}\n}", "func Wrap(closeFunc func() error) io.Closer {\n\treturn simpleCloser{\n\t\tcloseFunc: closeFunc,\n\t}\n}", "func NopCloser(std agent.ExtendedAgent) Agent {\n\treturn nopCloser{std}\n}", "func NoCloseReader(r io.Reader) io.Reader {\n _, ok := r.(io.Closer)\n if ok {\n return readerWrapper{r}\n }\n return r\n}", "func NopWriteCloser(r io.Writer) io.WriteCloser {\n\treturn nopWriteCloser{r}\n}", "func NopWriteCloser(w io.Writer) io.WriteCloser {\n\treturn &nopWriteCloser{w}\n}", "func NopCloser(r xml.TokenReader) TokenReadCloser {\n\treturn nopCloser{r}\n}", "func NopCloser(rs io.ReadSeeker) io.ReadSeekCloser {\n\treturn exported.NopCloser(rs)\n}", "func NopWriteCloser(w io.Writer) io.WriteCloser {\n\treturn errWriteCloser{Writer: w, CloseErr: nil}\n}", "func (b *bufCloser) Close() error { return nil }", "func (g *Group) popCloser() (closer func() error) {\n\tg.mutex.Lock()\n\tdefer g.mutex.Unlock()\n\ti := len(g.closers) - 1\n\tif i >= 0 {\n\t\tcloser = g.closers[i]\n\t\tg.closers = g.closers[:i]\n\t}\n\treturn\n}", "func closeIgnore(closer io.Closer) {\n\t_ = closer.Close()\n}", "func NewCloser() *Closer {\n\treturn &Closer{\n\t\tm: &sync.Mutex{},\n\t\to: &sync.Once{},\n\t}\n}", "func TestExactReadCloserShort(t *testing.T) {\n\tbuf := bytes.NewBuffer(make([]byte, 5))\n\trc := 
NewExactReadCloser(&readerNilCloser{buf}, 10)\n\tif _, err := rc.Read(make([]byte, 10)); err != nil {\n\t\tt.Fatalf(\"Read expected nil err, got %v\", err)\n\t}\n\tif err := rc.Close(); err != ErrShortRead {\n\t\tt.Fatalf(\"Close expected %v, got %v\", ErrShortRead, err)\n\t}\n}", "func NewCloser(initial int) *Closer {\n\tret := &Closer{}\n\tret.ctx, ret.cancel = context.WithCancel(context.Background())\n\tret.waiting.Add(initial)\n\treturn ret\n}", "func TryClose(maybeClosers ...interface{}) {\n\tfor _, maybeCloser := range maybeClosers {\n\t\tif closer, ok := maybeCloser.(io.Closer); ok {\n\t\t\t_ = closer.Close()\n\t\t}\n\t}\n}", "func mustClose(c io.Closer) {\n\tif err := c.Close(); err != nil {\n\t\tpanic(err)\n\t}\n}", "func SafeClose(c io.Closer) {\n\tif c != nil {\n\t\terr := c.Close()\n\t\tif err != nil {\n\t\t\tlog.Println(err)\n\t\t}\n\t}\n}", "func (*writeCloser) Close() error {\n\treturn nil\n}", "func ReadCloserClose(rc *zip.ReadCloser,) error", "func (mock WriteCloser) fakeZeroClose() error {\n\tvar (\n\t\tr0 error\n\t)\n\treturn r0\n}", "func (v *ReadCloserValue) Freeze() {}", "func (c *carver) newWriterCloser(fp string) (io.WriteCloser, error) {\n\tif c.dryRun {\n\t\treturn noopCloser{w: io.Discard}, nil\n\t}\n\tif c.w != nil {\n\t\treturn noopCloser{w: c.w}, nil\n\t}\n\treturn os.Create(fp)\n}", "func (f *FakeWriteCloser) Close() error {\n\treturn nil\n}", "func (dr *NullReader) Close() error {\n\tif dr.r == nil {\n\t\treturn fmt.Errorf(\"not started\")\n\t}\n\treturn dr.r.Close()\n}", "func ensureReaderClosed(r io.ReadCloser) error {\n\t_, err := io.Copy(ioutil.Discard, r)\n\tif closeErr := r.Close(); closeErr != nil && err == nil {\n\t\terr = closeErr\n\t}\n\treturn err\n}", "func WriteCloserDaisy(inp <-chan io.WriteCloser, tube WriteCloserTube) (out <-chan io.WriteCloser) {\n\tcha := make(chan io.WriteCloser)\n\tgo tube(inp, cha)\n\treturn cha\n}", "func NoCloseRows(r Rows) Rows {\n _, ok := r.(io.Closer)\n if ok {\n return rowsWrapper{r}\n }\n 
return r\n}", "func NewReadSeekerCloser(t mockConstructorTestingTNewReadSeekerCloser) *ReadSeekerCloser {\n\tmock := &ReadSeekerCloser{}\n\tmock.Mock.Test(t)\n\n\tt.Cleanup(func() { mock.AssertExpectations(t) })\n\n\treturn mock\n}", "func (p *IdlePool) Get() io.Closer {\n\tp.Lock()\n\tdefer p.Unlock()\n\tfor i, c := range p.elems {\n\t\tif c == nil {\n\t\t\tcontinue\n\t\t}\n\t\tp.elems[i] = nil\n\t\treturn c\n\t}\n\treturn nil\n}", "func TestExactReadCloserExpectEOF(t *testing.T) {\n\tbuf := bytes.NewBuffer(make([]byte, 10))\n\trc := NewExactReadCloser(&readerNilCloser{buf}, 1)\n\tif _, err := rc.Read(make([]byte, 10)); err != ErrExpectEOF {\n\t\tt.Fatalf(\"expected %v, got %v\", ErrExpectEOF, err)\n\t}\n}", "func (o *ODirectReader) Close() error {\n\tif o.bufp != nil {\n\t\tif o.SmallFile {\n\t\t\tODirectPoolSmall.Put(o.bufp)\n\t\t} else {\n\t\t\tODirectPoolLarge.Put(o.bufp)\n\t\t}\n\t\to.bufp = nil\n\t\to.buf = nil\n\t}\n\to.err = errors.New(\"internal error: ODirectReader Read after Close\")\n\treturn o.File.Close()\n}", "func (s *Stopper) AddCloser(c Closer) {\n\ts.mu.Lock()\n\tdefer s.mu.Unlock()\n\tif s.refuseRLocked() {\n\t\tc.Close()\n\t\treturn\n\t}\n\ts.mu.closers = append(s.mu.closers, c)\n}", "func (s *SeekerWrapper) Close() error { return s.s.Close() }", "func (c *refCountedCloser) Close(ctx context.Context) error {\n\tremaining := c.refCount.Add(-1)\n\n\tif remaining != 0 {\n\t\treturn nil\n\t}\n\n\tif c.closed.Load() {\n\t\tpanic(\"already closed\")\n\t}\n\n\tc.closed.Store(true)\n\n\tvar errors []error\n\n\tfor _, closer := range c.closers {\n\t\terrors = append(errors, closer(ctx))\n\t}\n\n\t//nolint:wrapcheck\n\treturn multierr.Combine(errors...)\n}", "func newNullableTicker(d time.Duration) (<-chan time.Time, func()) {\n\tif d > 0 {\n\t\tt := time.NewTicker(d)\n\t\treturn t.C, t.Stop\n\t}\n\treturn nil, func() {}\n}", "func CloseTheCloser(c io.Closer) {\n\t_ = c.Close()\n}", "func ensureReaderClosed(stream io.ReadCloser) {\n\tif stream == nil 
{\n\t\treturn\n\t}\n\tio.Copy(ioutil.Discard, stream)\n\tstream.Close()\n}", "func (n *NoOP) Close() {}", "func checkClose(c io.Closer, err *error) {\n\tcerr := c.Close()\n\tif *err == nil {\n\t\t*err = cerr\n\t}\n}", "func (closer *Closer) CloseChannel() chan struct{} {\n\treturn closer.channel\n}", "func (hcwc hcWriteCloser) Close() error {\n\treturn nil\n}", "func (_e *ReadSeekerCloser_Expecter) Close() *ReadSeekerCloser_Close_Call {\n\treturn &ReadSeekerCloser_Close_Call{Call: _e.mock.On(\"Close\")}\n}", "func (mock WriteCloser) Close() error {\n\tmethodName := \"Close\" // nolint: goconst\n\tif mock.impl.Close != nil {\n\t\treturn mock.impl.Close()\n\t}\n\tif mock.callbackNotImplemented != nil {\n\t\tmock.callbackNotImplemented(mock.t, mock.name, methodName)\n\t} else {\n\t\tgomic.DefaultCallbackNotImplemented(mock.t, mock.name, methodName)\n\t}\n\treturn mock.fakeZeroClose()\n}", "func WrapCancel(cancel context.CancelFunc) io.Closer {\n\treturn Wrap(func() error {\n\t\tcancel()\n\t\treturn nil\n\t})\n}", "func Close(closer io.Closer, log log.Logger) {\n\tif err := closer.Close(); err != nil {\n\t\tlog.Crit(\"Failed to Close Object: %#v\\n Error: %s \", err.Error())\n\t}\n}", "func ReadEOFCloser(r io.Reader) io.Reader {\n\treturn readEOFCloser{\n\t\tReader: r,\n\t}\n}", "func WithCloser(closer io.Closer) OptionFunc {\n\treturn func(c *Config) error {\n\t\tc.closers = append(c.closers, closer)\n\t\treturn nil\n\t}\n}", "func CloseAndIgnore(c io.Closer) {\n\t_ = c.Close()\n}", "func NopCloserWithSize(r io.Reader) io.ReadCloser {\n\treturn nopCloserWithObjectSize{r}\n}", "func (d *Decoder) IOReadCloser() io.ReadCloser {\n\treturn closeWrapper{d: d}\n}", "func CloseQuietly(v io.Closer) {\n\t_ = v.Close()\n}", "func Close(o io.Closer) {\n\t_ = o.Close()\n}", "func NewWriteCloser(t mockConstructorTestingTNewWriteCloser) *WriteCloser {\n\tmock := &WriteCloser{}\n\tmock.Mock.Test(t)\n\n\tt.Cleanup(func() { mock.AssertExpectations(t) })\n\n\treturn mock\n}", "func 
(closer *Closer) Close() {\n\tclose(closer.channel)\n}", "func WithCloser(f func() error) Option {\n\treturn func(e *environment) {\n\t\tif f == nil {\n\t\t\te.closer = NopCloser\n\t\t} else {\n\t\t\te.closer = f\n\t\t}\n\t}\n}", "func (g *Group) AddCloser(closer func() error) {\n\tg.mutex.Lock()\n\tdefer g.mutex.Unlock()\n\tg.closers = append(g.closers, closer)\n}", "func (d *dht) getCloser(k types.PublicKey) []dhtEntry {\n\tresults := append([]dhtEntry{}, d.sorted...)\n\tsort.SliceStable(results, func(i, j int) bool {\n\t\treturn util.DHTOrdered(d.r.public, results[i].PublicKey(), results[j].PublicKey())\n\t})\n\treturn results\n}", "func New() Closer {\n\treturn &closer{ch: make(chan struct{})}\n}", "func LoggedCloser(closer io.Closer) {\n\tif err := closer.Close(); err != nil {\n\t\tlogger.Error(\"error while closing: %s\", err)\n\t}\n}", "func (s *Stopper) AddCloser(c Closer) {\n\ts.mu.Lock()\n\tdefer s.mu.Unlock()\n\tselect {\n\tcase <-s.stopper:\n\t\t// Close immediately.\n\t\tc.Close()\n\tdefault:\n\t\ts.mu.closers = append(s.mu.closers, c)\n\t}\n}", "func NewCloserInit(route wire.Route, ctx common.Context, socket core.DataSocket) func(machine.WorkerSocket, []interface{}) {\n\tlogger := common.FormatLogger(ctx.Logger(), route)\n\n\treturn func(worker machine.WorkerSocket, args []interface{}) {\n\t\tlogger.Info(\"Closing init\")\n\t\tif err := closeInit(route, ctx, socket.Rx(), socket.Tx()); err != nil {\n\t\t\tlogger.Error(err.Error())\n\t\t\tworker.Fail(err)\n\t\t}\n\n\t\tworker.Terminate()\n\t}\n}", "func (mrc *MockReadCloser) Close() error {\n\tmrc.closed = true\n\treturn nil\n}", "func WriteCloserDaisyChain(inp <-chan io.WriteCloser, tubes ...WriteCloserTube) (out <-chan io.WriteCloser) {\n\tcha := inp\n\tfor i := range tubes {\n\t\tcha = WriteCloserDaisy(cha, tubes[i])\n\t}\n\treturn cha\n}", "func drain(r io.ReadCloser) {\n\tgo func() {\n\t\t// Panicking here does not put data in\n\t\t// an inconsistent state.\n\t\tdefer func() {\n\t\t\t_ = 
recover()\n\t\t}()\n\n\t\t_, _ = io.Copy(io.Discard, r)\n\t\tr.Close()\n\t}()\n}", "func (r *mockedReader) Close() error {\n\tr.c = true\n\treturn nil\n}", "func (c *NOOPConnection) Close() {\n}", "func Closed(c io.Closer) {\n\tif c != nil {\n\t\t_ = c.Close()\n\t}\n}", "func (r *Reader) Close() error {\n\tif closer, ok := r.Reader.(io.Closer); ok {\n\t\treturn closer.Close()\n\t}\n\treturn nil\n}", "func NewMockCloser(ctrl *gomock.Controller) *MockCloser {\n\tmock := &MockCloser{ctrl: ctrl}\n\tmock.recorder = &MockCloserMockRecorder{mock}\n\treturn mock\n}", "func (r *ThrottledReadCloser) Close() error {\n\tr.pool.mu.Lock()\n\tdefer r.pool.mu.Unlock()\n\tdelete(r.pool.connections, r.id)\n\tr.pool.updateBufferSize()\n\treturn r.origReadCloser.Close()\n}", "func (f CloserFn) Close() {\n\tf()\n}", "func (f CloserFn) Close() {\n\tf()\n}", "func (this *reader) Close() (err error) {\n\tif ioCloser, ok := this.ioReader.(io.Closer); ok {\n\t\terr = ioCloser.Close()\n\t}\n\treturn\n}", "func NewWriteCloser(t *testing.T, cb gomic.CallbackNotImplemented) *WriteCloser {\n\treturn &WriteCloser{\n\t\tt: t, name: \"WriteCloser\", callbackNotImplemented: cb}\n}", "func (f *FileBlob) ReadCloser() (io.ReadCloser, error) {\n\tif f.blob == nil {\n\t\treturn nil, fmt.Errorf(\"underlying blob ([]byte) is nil\")\n\t}\n\treturn blob.NewBufferedReadCloser(f.blob), nil\n}", "func (r *Remoter) Close() error {\n\tr.closer = nil\n\tif r.clt != nil {\n\t\treturn r.clt.Close()\n\t}\n\treturn nil\n}", "func NewReadSeekerCloser(r io.ReadSeeker, c CloseFunc) ReadSeekerCloser {\n\treturn readSeekerCloser{r, c}\n}", "func (_e *WriteCloser_Expecter) Close() *WriteCloser_Close_Call {\n\treturn &WriteCloser_Close_Call{Call: _e.mock.On(\"Close\")}\n}", "func (c *Closer) Close() error {\n\tc.CloseAll()\n\treturn nil\n}", "func NewReadCloser(rd io.ReadCloser) io.ReadCloser {\n\tif rd == nil {\n\t\treturn nil\n\t}\n\n\tret, err := NewReadCloserSize(rd, DefaultBuffers, DefaultBufferSize)\n\n\t// Should not 
be possible to trigger from other packages.\n\tif err != nil {\n\t\tpanic(\"unexpected error:\" + err.Error())\n\t}\n\treturn ret\n}", "func Close(closer io.Closer) {\n\tdefer func() {\n\t\tif p := recover(); p != nil {\n\t\t\tlog.Panic(p)\n\t\t}\n\t}()\n\tif err := closer.Close(); err != nil {\n\t\tlog.Error(err)\n\t}\n}", "func (a *reader) Close() (err error) {\n\tselect {\n\tcase <-a.exited:\n\tcase a.exit <- struct{}{}:\n\t\t<-a.exited\n\t}\n\tif a.closer != nil {\n\t\t// Only call once\n\t\tc := a.closer\n\t\ta.closer = nil\n\t\treturn c.Close()\n\t}\n\ta.err = errors.New(\"readahead: read after Close\")\n\treturn nil\n}", "func (er *EventReader[T]) Close() {\n\tif closer, ok := er.reader.(io.Closer); ok {\n\t\tcloser.Close()\n\t}\n}", "func IgnoreClose(cr io.Closer) {\n\terr := cr.Close()\n\tIgnoreError(err)\n}", "func Cl(c ...Arg) *Closer {\n\treturn &Closer{c}\n}", "func Close(obj interface{}) {\n\tif obj == nil {\n\t\treturn\n\t}\n\tif c, ok := obj.(Closer); ok {\n\t\tc.Close()\n\t}\n}", "func (c *Closer) Close() (err error) {\n\tc.o.Do(func() {\n\t\t// Get close funcs\n\t\tc.m.Lock()\n\t\tfs := append([]CloseFunc{}, c.fs...)\n\t\tc.m.Unlock()\n\n\t\t// Loop through closers\n\t\tvar errs []error\n\t\tfor _, f := range fs {\n\t\t\tif errC := f(); errC != nil {\n\t\t\t\terrs = append(errs, errC)\n\t\t\t}\n\t\t}\n\n\t\t// Process errors\n\t\tif len(errs) == 1 {\n\t\t\terr = errs[0]\n\t\t} else if len(errs) > 1 {\n\t\t\terr = astierror.NewMultiple(errs)\n\t\t}\n\t})\n\treturn\n}", "func (r *ThrottledWriteCloser) Close() error {\n\tr.pool.mu.Lock()\n\tdefer r.pool.mu.Unlock()\n\tdelete(r.pool.connections, r.id)\n\tr.pool.updateBufferSize()\n\treturn r.origWriteCloser.Close()\n}", "func MultiCloser(closers ...io.Closer) io.Closer {\n\treturn &multiCloser{\n\t\tclosers: closers,\n\t}\n}", "func (w *RWWrapper) Close() {\n\tif w.gz != nil {\n\t\tw.gz.Close()\n\t}\n}", "func NewReadCloser(r io.Reader, c CloseFunc) io.ReadCloser {\n\treturn readCloser{r, c}\n}", 
"func (f *FakeReadCloser) Close() error {\n\tf.CloseCalled = true\n\treturn f.CloseError\n}", "func newFuncCloser(fn func() error) *funcCloser {\n\treturn &funcCloser{\n\t\tfn: fn,\n\t}\n}" ]
[ "0.7222569", "0.69184256", "0.68131655", "0.65612435", "0.65169245", "0.608959", "0.59361756", "0.59183574", "0.5906258", "0.5872431", "0.57893544", "0.57882726", "0.5740676", "0.5678546", "0.56239897", "0.5589913", "0.55773497", "0.5547098", "0.54516846", "0.54490966", "0.54453087", "0.53478074", "0.53134245", "0.5238412", "0.5234843", "0.5225153", "0.5168599", "0.5156914", "0.5156862", "0.50839907", "0.50511545", "0.501426", "0.4993796", "0.49704328", "0.49326342", "0.4929904", "0.49295527", "0.49192756", "0.4908235", "0.49029765", "0.4890587", "0.48879722", "0.4883035", "0.48813483", "0.48772123", "0.48691502", "0.48554662", "0.48538432", "0.48369205", "0.47948813", "0.4793564", "0.478939", "0.4774067", "0.47676587", "0.47608978", "0.4748314", "0.47325483", "0.4731918", "0.47242805", "0.46782437", "0.465498", "0.46545625", "0.46501246", "0.4629458", "0.46081457", "0.46073243", "0.46062264", "0.45943567", "0.45672476", "0.4563887", "0.4562749", "0.45618916", "0.4560796", "0.45587486", "0.4551951", "0.45517477", "0.4542441", "0.4527389", "0.4527389", "0.45238248", "0.4498847", "0.44986692", "0.44848832", "0.44810283", "0.44794363", "0.44773722", "0.44711778", "0.44709802", "0.44664547", "0.44648725", "0.44577488", "0.44521204", "0.4443298", "0.4436248", "0.44332758", "0.44323808", "0.44323087", "0.4432212", "0.44299203", "0.44264048" ]
0.7534535
0
NopWriteCloser returns a WriteCloser with a noop Close method wrapping the provided Writer w
NopWriteCloser возвращает WriteCloser с методом Close, не выполняющим никаких действий, обертывающим предоставленный Writer w
func NopWriteCloser(r io.Writer) io.WriteCloser { return nopWriteCloser{r} }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func NopWriteCloser(w io.Writer) io.WriteCloser {\n\treturn &nopWriteCloser{w}\n}", "func NopWriteCloser(w io.Writer) io.WriteCloser {\n\treturn errWriteCloser{Writer: w, CloseErr: nil}\n}", "func NopCloser() error { return nil }", "func NopCloser() io.Closer {\r\n\treturn &nopCloser{}\r\n}", "func NopFlusher(w Writer) WriteFlusher {\n\treturn nopFlusher{w}\n}", "func NopCloser(std agent.ExtendedAgent) Agent {\n\treturn nopCloser{std}\n}", "func NewWriteCloser(t *testing.T, cb gomic.CallbackNotImplemented) *WriteCloser {\n\treturn &WriteCloser{\n\t\tt: t, name: \"WriteCloser\", callbackNotImplemented: cb}\n}", "func NopCloser(r xml.TokenReader) TokenReadCloser {\n\treturn nopCloser{r}\n}", "func NewWriteCloser(t mockConstructorTestingTNewWriteCloser) *WriteCloser {\n\tmock := &WriteCloser{}\n\tmock.Mock.Test(t)\n\n\tt.Cleanup(func() { mock.AssertExpectations(t) })\n\n\treturn mock\n}", "func NopCloser(bio Biome) BiomeCloser {\n\treturn nopCloser{bio}\n}", "func NopCloser(rs io.ReadSeeker) io.ReadSeekCloser {\n\treturn exported.NopCloser(rs)\n}", "func NewMockWriteCloser(t *testing.T) *MockWriteCloser {\n\treturn &MockWriteCloser{\n\t\tb: bytes.Buffer{},\n\t\tclosed: false,\n\t\tt: t,\n\t}\n}", "func wrapWriter(w http.ResponseWriter) writerProxy {\n\tvar _, cn = w.(http.CloseNotifier) // nolint\n\tvar _, fl = w.(http.Flusher)\n\tvar _, hj = w.(http.Hijacker)\n\tvar _, rf = w.(io.ReaderFrom)\n\n\tvar bw = basicWriter{ResponseWriter: w}\n\tif cn && fl && hj && rf {\n\t\treturn &fancyWriter{&bw}\n\t}\n\tif fl {\n\t\treturn &flushWriter{&bw}\n\t}\n\treturn &bw\n}", "func (c *carver) newWriterCloser(fp string) (io.WriteCloser, error) {\n\tif c.dryRun {\n\t\treturn noopCloser{w: io.Discard}, nil\n\t}\n\tif c.w != nil {\n\t\treturn noopCloser{w: c.w}, nil\n\t}\n\treturn os.Create(fp)\n}", "func NopReadSeekerCloser(r io.ReadSeeker) ReadSeekerCloser {\n\treturn readSeekerCloser{r, func() error { return nil }}\n}", "func (nopCloser) Close() error { return nil }", "func (f 
*FakeWriteCloser) Close() error {\n\treturn nil\n}", "func newFlushWriter(w io.Writer) flushWriter {\n\tfw := flushWriter{writer: w}\n\tif f, ok := w.(http.Flusher); ok {\n\t\tfw.flusher = f\n\t}\n\n\treturn fw\n}", "func NoCloser(in io.Reader) io.Reader {\n\tif in == nil {\n\t\treturn in\n\t}\n\t// if in doesn't implement io.Closer, just return it\n\tif _, canClose := in.(io.Closer); !canClose {\n\t\treturn in\n\t}\n\treturn noClose{in: in}\n}", "func NewWrappedWriter(writer store.Writer, onFinalize FinalizeFunc) *WrappedWriter {\n\treturn &WrappedWriter{writer: writer, onFinalize: onFinalize}\n}", "func (*writeCloser) Close() error {\n\treturn nil\n}", "func newLockingWriteCloser(wc io.WriteCloser) io.WriteCloser {\n\treturn &lockingWriteCloser{WriteCloser: wc}\n}", "func NewWrappedWriter(w io.Writer) (*WrappedWriter, error) {\n\treturn &WrappedWriter{wrapped: w, bw: bufio.NewWriterSize(w, 1024*1024)}, nil\n}", "func Wrap(closeFunc func() error) io.Closer {\n\treturn simpleCloser{\n\t\tcloseFunc: closeFunc,\n\t}\n}", "func (rwc *noPIReadWriteCloser) Write(p []byte) (n int, err error) {\n\tcopy(rwc.wBuffer[4:], p)\n\tn, err = rwc.ReadWriteCloser.Write(rwc.wBuffer[:len(p)+4])\n\treturn n - 4, err\n}", "func (w *Writer) Close() error {}", "func ToWriteCloser(w io.WriteCloser) Dest {\n\treturn func() (io.WriteCloser, error) {\n\t\treturn w, nil\n\t}\n}", "func NoCloseReader(r io.Reader) io.Reader {\n _, ok := r.(io.Closer)\n if ok {\n return readerWrapper{r}\n }\n return r\n}", "func (w *Writer) Bypass() io.Writer {\n\treturn &bypass{writer: w}\n}", "func NewLimitedWriter(w io.WriteCloser, options ...LimitedWriterOption) io.WriteCloser {\n\tfor _, o := range options {\n\t\tw = o(w)\n\t}\n\n\treturn NewSyncedWriteCloser(w)\n}", "func wrapWriter(w http.ResponseWriter) writerProxy {\n\tbw := basicWriter{ResponseWriter: w}\n\treturn &bw\n}", "func (nw noopWriter) Write(b []byte) (int, error) {\n\treturn len(b), nil\n}", "func NopCloserWithSize(r io.Reader) io.ReadCloser 
{\n\treturn nopCloserWithObjectSize{r}\n}", "func NewWriter(w io.Writer) io.WriteCloser {\n\treturn NewWriterSizeLevel(w, -1, DefaultCompression)\n}", "func (w *DiscardWriter) Close() error {\n\tw.Writer = nil\n\treturn nil\n}", "func NewWriteCloser(store Store, key string, ttl int) io.WriteCloser {\n\treturn &writerCloser{\n\t\tstore: store,\n\t\tkey: key,\n\t\tttl: ttl,\n\t}\n}", "func (hcwc hcWriteCloser) Close() error {\n\treturn nil\n}", "func NewChunkedWriter(w io.Writer) io.WriteCloser", "func Base(w io.Writer) io.Writer {\n\tif d, ok := w.(decorator); ok {\n\t\treturn coalesceWriters(d.Base(), w)\n\t}\n\treturn w\n}", "func newResponseWriterNoBody(w http.ResponseWriter) *responseWriterNoBody {\n\treturn &responseWriterNoBody{w}\n}", "func Must(w io.WriteCloser, err error) io.WriteCloser {\n\tif err != nil {\n\t\tpanic(fmt.Errorf(\"could not create revolving log writer, %v\", err))\n\t}\n\treturn w\n}", "func NilCloser(r io.Closer) io.Closer {\n\tif r == nil {\n\t\treturn &nilCloser{}\n\t}\n\treturn r\n}", "func Underlying(w io.Writer) io.Writer {\n\tif u, ok := w.(WrappedWriter); ok {\n\t\treturn Underlying(u.Underlying())\n\t}\n\treturn w\n}", "func (log *Logger) Wrap(pipe func(wc io.WriteCloser) io.WriteCloser) {\n\tlog.Lock()\n\tdefer log.Unlock()\n\twc := log.output\n\tif log.fallback == nil {\n\t\tlog.fallback = wc\n\t\twc = writeNoCloser{wc}\n\t}\n\tlog.output = pipe(wc)\n}", "func (w *writerWrapper) Unwrap() http.ResponseWriter {\n\treturn w.ResponseWriter\n}", "func NewWriter(w io.Writer) *Writer {\n\tvar bw Writer\n\tbw.Reset(w)\n\treturn &bw\n}", "func WriterClose(w *zip.Writer,) error", "func NopOutput() io.Writer {\r\n\treturn &nopOutput{}\r\n}", "func (s *Status) MaybeWrapWriter(w io.Writer) io.Writer {\n\tif IsTerminal(s.writer) && IsTerminal(w) {\n\t\treturn s.WrapWriter(w)\n\t}\n\treturn w\n}", "func NewWriter(base io.Writer, level int) (io.WriteCloser, error) {\n\tw, err := gzip.NewWriterLevel(base, level)\n\tif err != nil {\n\t\treturn 
nil, err\n\t}\n\treturn streamstore.NewIOCoppler(w, base), nil\n}", "func NewReadWriteCloser(r stdio.ReadCloser, w stdio.WriteCloser) stdio.ReadWriteCloser {\n\treturn &rwc{r, w}\n}", "func (b *basicWriter) Unwrap() http.ResponseWriter {\n\treturn b.ResponseWriter\n}", "func (mock WriteCloser) Write(p []byte) (n int, err error) {\n\tmethodName := \"Write\" // nolint: goconst\n\tif mock.impl.Write != nil {\n\t\treturn mock.impl.Write(p)\n\t}\n\tif mock.callbackNotImplemented != nil {\n\t\tmock.callbackNotImplemented(mock.t, mock.name, methodName)\n\t} else {\n\t\tgomic.DefaultCallbackNotImplemented(mock.t, mock.name, methodName)\n\t}\n\treturn mock.fakeZeroWrite(p)\n}", "func NewFailingWriteCloser(wc io.WriteCloser) *FailingWriteCloser {\n\treturn &FailingWriteCloser{WriteCloser: wc}\n}", "func NewDiscardWriter() *DiscardWriter {\n\treturn &DiscardWriter{Writer: ioutil.Discard}\n}", "func (mock WriteCloser) fakeZeroClose() error {\n\tvar (\n\t\tr0 error\n\t)\n\treturn r0\n}", "func (n *NoOP) Close() {}", "func New(prefix string) (io.WriteCloser, error) {\n\treturn nil, fmt.Errorf(\"not implemented on windows\")\n}", "func NewWriter(t mockConstructorTestingTNewWriter) *Writer {\n\tmock := &Writer{}\n\tmock.Mock.Test(t)\n\n\tt.Cleanup(func() { mock.AssertExpectations(t) })\n\n\treturn mock\n}", "func (b *Writer) Reset(w io.Writer)", "func NewCountedWriter(w io.WriteCloser) *CountedWriter {\n\treturn &CountedWriter{w: w}\n}", "func (w *WrappedWriter) Write(data []byte) error {\n\tif len(data) == 0 {\n\t\treturn nil\n\t}\n\n\t_, err := w.bw.Write(data)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tn := len(data)\n\t// Increment file position pointer\n\tw.n += int64(n)\n\n\treturn nil\n}", "func New(w io.Writer) Writer {\n\treturn &writer{w}\n}", "func WrapWriter(w http.ResponseWriter) ResponseWriterWrapper {\n\treturn &writerWrapper{ResponseWriter: w}\n}", "func NewWriter(w io.Writer) *zip.Writer", "func NoCloseStream(s Stream) Stream {\n return noCloseStream{s}\n}", 
"func closeIgnore(closer io.Closer) {\n\t_ = closer.Close()\n}", "func NewDummy() *Writer {\n\treturn &Writer{\n\t\tfp: nil,\n\t\tw: newDummyWriter(),\n\t}\n}", "func (n NoOp) Apply(io.Reader, io.Writer) error {\n\treturn nil\n}", "func (mwc *MockWriteCloser) Close() error {\n\tmwc.closed = true\n\treturn nil\n}", "func (pb *Bar) NewProxyWriter(r io.Writer) *Writer {\n\tpb.Set(Bytes, true)\n\treturn &Writer{r, pb}\n}", "func (mock WriteCloser) Close() error {\n\tmethodName := \"Close\" // nolint: goconst\n\tif mock.impl.Close != nil {\n\t\treturn mock.impl.Close()\n\t}\n\tif mock.callbackNotImplemented != nil {\n\t\tmock.callbackNotImplemented(mock.t, mock.name, methodName)\n\t} else {\n\t\tgomic.DefaultCallbackNotImplemented(mock.t, mock.name, methodName)\n\t}\n\treturn mock.fakeZeroClose()\n}", "func (n *nilCloser) Close() error {\n\t// works even if n is nil\n\treturn nil\n}", "func dummyBytesReader(p []byte) io.Reader {\n\treturn ioutil.NopCloser(bytes.NewReader(p))\n}", "func (p *ioThrottlerPool) NewThrottledWriteCloser(writer io.WriteCloser, r rate.Limit, b int, id string) *ThrottledWriteCloser {\n\tp.mu.Lock()\n\tdefer p.mu.Unlock()\n\tthrottler := ioThrottler{\n\t\tlimiter: rate.NewLimiter(r, b),\n\t}\n\tp.connections[id] = &throttler\n\tp.updateBufferSize()\n\treturn &ThrottledWriteCloser{\n\t\torigWriteCloser: writer,\n\t\tid: id,\n\t\tpool: p,\n\t}\n\n}", "func IsNop(w io.Writer) bool {\r\n\tif isN, ok := w.(interface {\r\n\t\tIsNop() bool\r\n\t}); ok {\r\n\t\treturn isN.IsNop()\r\n\t}\r\n\treturn false\r\n}", "func WriteCloserDaisy(inp <-chan io.WriteCloser, tube WriteCloserTube) (out <-chan io.WriteCloser) {\n\tcha := make(chan io.WriteCloser)\n\tgo tube(inp, cha)\n\treturn cha\n}", "func (t *trackingResponseWriter) wrappedResponseWriter() http.ResponseWriter {\n\tvar (\n\t\thj, i0 = t.writer.(http.Hijacker)\n\t\tcn, i1 = t.writer.(http.CloseNotifier)\n\t\tpu, i2 = t.writer.(http.Pusher)\n\t\tfl, i3 = t.writer.(http.Flusher)\n\t\trf, i4 = 
t.writer.(io.ReaderFrom)\n\t)\n\n\tswitch {\n\tcase !i0 && !i1 && !i2 && !i3 && !i4:\n\t\treturn struct {\n\t\t\thttp.ResponseWriter\n\t\t}{t}\n\tcase !i0 && !i1 && !i2 && !i3 && i4:\n\t\treturn struct {\n\t\t\thttp.ResponseWriter\n\t\t\tio.ReaderFrom\n\t\t}{t, rf}\n\tcase !i0 && !i1 && !i2 && i3 && !i4:\n\t\treturn struct {\n\t\t\thttp.ResponseWriter\n\t\t\thttp.Flusher\n\t\t}{t, fl}\n\tcase !i0 && !i1 && !i2 && i3 && i4:\n\t\treturn struct {\n\t\t\thttp.ResponseWriter\n\t\t\thttp.Flusher\n\t\t\tio.ReaderFrom\n\t\t}{t, fl, rf}\n\tcase !i0 && !i1 && i2 && !i3 && !i4:\n\t\treturn struct {\n\t\t\thttp.ResponseWriter\n\t\t\thttp.Pusher\n\t\t}{t, pu}\n\tcase !i0 && !i1 && i2 && !i3 && i4:\n\t\treturn struct {\n\t\t\thttp.ResponseWriter\n\t\t\thttp.Pusher\n\t\t\tio.ReaderFrom\n\t\t}{t, pu, rf}\n\tcase !i0 && !i1 && i2 && i3 && !i4:\n\t\treturn struct {\n\t\t\thttp.ResponseWriter\n\t\t\thttp.Pusher\n\t\t\thttp.Flusher\n\t\t}{t, pu, fl}\n\tcase !i0 && !i1 && i2 && i3 && i4:\n\t\treturn struct {\n\t\t\thttp.ResponseWriter\n\t\t\thttp.Pusher\n\t\t\thttp.Flusher\n\t\t\tio.ReaderFrom\n\t\t}{t, pu, fl, rf}\n\tcase !i0 && i1 && !i2 && !i3 && !i4:\n\t\treturn struct {\n\t\t\thttp.ResponseWriter\n\t\t\thttp.CloseNotifier\n\t\t}{t, cn}\n\tcase !i0 && i1 && !i2 && !i3 && i4:\n\t\treturn struct {\n\t\t\thttp.ResponseWriter\n\t\t\thttp.CloseNotifier\n\t\t\tio.ReaderFrom\n\t\t}{t, cn, rf}\n\tcase !i0 && i1 && !i2 && i3 && !i4:\n\t\treturn struct {\n\t\t\thttp.ResponseWriter\n\t\t\thttp.CloseNotifier\n\t\t\thttp.Flusher\n\t\t}{t, cn, fl}\n\tcase !i0 && i1 && !i2 && i3 && i4:\n\t\treturn struct {\n\t\t\thttp.ResponseWriter\n\t\t\thttp.CloseNotifier\n\t\t\thttp.Flusher\n\t\t\tio.ReaderFrom\n\t\t}{t, cn, fl, rf}\n\tcase !i0 && i1 && i2 && !i3 && !i4:\n\t\treturn struct {\n\t\t\thttp.ResponseWriter\n\t\t\thttp.CloseNotifier\n\t\t\thttp.Pusher\n\t\t}{t, cn, pu}\n\tcase !i0 && i1 && i2 && !i3 && i4:\n\t\treturn struct 
{\n\t\t\thttp.ResponseWriter\n\t\t\thttp.CloseNotifier\n\t\t\thttp.Pusher\n\t\t\tio.ReaderFrom\n\t\t}{t, cn, pu, rf}\n\tcase !i0 && i1 && i2 && i3 && !i4:\n\t\treturn struct {\n\t\t\thttp.ResponseWriter\n\t\t\thttp.CloseNotifier\n\t\t\thttp.Pusher\n\t\t\thttp.Flusher\n\t\t}{t, cn, pu, fl}\n\tcase !i0 && i1 && i2 && i3 && i4:\n\t\treturn struct {\n\t\t\thttp.ResponseWriter\n\t\t\thttp.CloseNotifier\n\t\t\thttp.Pusher\n\t\t\thttp.Flusher\n\t\t\tio.ReaderFrom\n\t\t}{t, cn, pu, fl, rf}\n\tcase i0 && !i1 && !i2 && !i3 && !i4:\n\t\treturn struct {\n\t\t\thttp.ResponseWriter\n\t\t\thttp.Hijacker\n\t\t}{t, hj}\n\tcase i0 && !i1 && !i2 && !i3 && i4:\n\t\treturn struct {\n\t\t\thttp.ResponseWriter\n\t\t\thttp.Hijacker\n\t\t\tio.ReaderFrom\n\t\t}{t, hj, rf}\n\tcase i0 && !i1 && !i2 && i3 && !i4:\n\t\treturn struct {\n\t\t\thttp.ResponseWriter\n\t\t\thttp.Hijacker\n\t\t\thttp.Flusher\n\t\t}{t, hj, fl}\n\tcase i0 && !i1 && !i2 && i3 && i4:\n\t\treturn struct {\n\t\t\thttp.ResponseWriter\n\t\t\thttp.Hijacker\n\t\t\thttp.Flusher\n\t\t\tio.ReaderFrom\n\t\t}{t, hj, fl, rf}\n\tcase i0 && !i1 && i2 && !i3 && !i4:\n\t\treturn struct {\n\t\t\thttp.ResponseWriter\n\t\t\thttp.Hijacker\n\t\t\thttp.Pusher\n\t\t}{t, hj, pu}\n\tcase i0 && !i1 && i2 && !i3 && i4:\n\t\treturn struct {\n\t\t\thttp.ResponseWriter\n\t\t\thttp.Hijacker\n\t\t\thttp.Pusher\n\t\t\tio.ReaderFrom\n\t\t}{t, hj, pu, rf}\n\tcase i0 && !i1 && i2 && i3 && !i4:\n\t\treturn struct {\n\t\t\thttp.ResponseWriter\n\t\t\thttp.Hijacker\n\t\t\thttp.Pusher\n\t\t\thttp.Flusher\n\t\t}{t, hj, pu, fl}\n\tcase i0 && !i1 && i2 && i3 && i4:\n\t\treturn struct {\n\t\t\thttp.ResponseWriter\n\t\t\thttp.Hijacker\n\t\t\thttp.Pusher\n\t\t\thttp.Flusher\n\t\t\tio.ReaderFrom\n\t\t}{t, hj, pu, fl, rf}\n\tcase i0 && i1 && !i2 && !i3 && !i4:\n\t\treturn struct {\n\t\t\thttp.ResponseWriter\n\t\t\thttp.Hijacker\n\t\t\thttp.CloseNotifier\n\t\t}{t, hj, cn}\n\tcase i0 && i1 && !i2 && !i3 && i4:\n\t\treturn struct 
{\n\t\t\thttp.ResponseWriter\n\t\t\thttp.Hijacker\n\t\t\thttp.CloseNotifier\n\t\t\tio.ReaderFrom\n\t\t}{t, hj, cn, rf}\n\tcase i0 && i1 && !i2 && i3 && !i4:\n\t\treturn struct {\n\t\t\thttp.ResponseWriter\n\t\t\thttp.Hijacker\n\t\t\thttp.CloseNotifier\n\t\t\thttp.Flusher\n\t\t}{t, hj, cn, fl}\n\tcase i0 && i1 && !i2 && i3 && i4:\n\t\treturn struct {\n\t\t\thttp.ResponseWriter\n\t\t\thttp.Hijacker\n\t\t\thttp.CloseNotifier\n\t\t\thttp.Flusher\n\t\t\tio.ReaderFrom\n\t\t}{t, hj, cn, fl, rf}\n\tcase i0 && i1 && i2 && !i3 && !i4:\n\t\treturn struct {\n\t\t\thttp.ResponseWriter\n\t\t\thttp.Hijacker\n\t\t\thttp.CloseNotifier\n\t\t\thttp.Pusher\n\t\t}{t, hj, cn, pu}\n\tcase i0 && i1 && i2 && !i3 && i4:\n\t\treturn struct {\n\t\t\thttp.ResponseWriter\n\t\t\thttp.Hijacker\n\t\t\thttp.CloseNotifier\n\t\t\thttp.Pusher\n\t\t\tio.ReaderFrom\n\t\t}{t, hj, cn, pu, rf}\n\tcase i0 && i1 && i2 && i3 && !i4:\n\t\treturn struct {\n\t\t\thttp.ResponseWriter\n\t\t\thttp.Hijacker\n\t\t\thttp.CloseNotifier\n\t\t\thttp.Pusher\n\t\t\thttp.Flusher\n\t\t}{t, hj, cn, pu, fl}\n\tcase i0 && i1 && i2 && i3 && i4:\n\t\treturn struct {\n\t\t\thttp.ResponseWriter\n\t\t\thttp.Hijacker\n\t\t\thttp.CloseNotifier\n\t\t\thttp.Pusher\n\t\t\thttp.Flusher\n\t\t\tio.ReaderFrom\n\t\t}{t, hj, cn, pu, fl, rf}\n\tdefault:\n\t\treturn struct {\n\t\t\thttp.ResponseWriter\n\t\t}{t}\n\t}\n}", "func WriterFlush(w *zip.Writer,) error", "func NewWriter(bw BitWriteCloser, eof Symbol, weights []int) SymbolWriteCloser {\n\treturn newWriter(bw, eof, weights)\n}", "func (w *Writer) Close() error {\n\treturn nil\n}", "func (w *writer) Close() error {\n\tif !w.upload {\n\t\tif w.pr != nil {\n\t\t\tdefer w.pr.Close()\n\t\t}\n\t\tif w.pw == nil {\n\t\t\t// We never got any bytes written. 
We'll write an http.NoBody.\n\t\t\tw.open(nil, false)\n\t\t} else if err := w.pw.Close(); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\t<-w.donec\n\treturn w.err\n}", "func (w *writer) Close() error {\n\tif !w.upload {\n\t\tif w.pr != nil {\n\t\t\tdefer w.pr.Close()\n\t\t}\n\t\tif w.pw == nil {\n\t\t\t// We never got any bytes written. We'll write an http.NoBody.\n\t\t\tw.open(nil, false)\n\t\t} else if err := w.pw.Close(); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\t<-w.donec\n\treturn w.err\n}", "func New(w io.Writer) *Writer {\n\treturn &Writer{\n\t\tout: w,\n\t}\n}", "func (_e *WriteCloser_Expecter) Close() *WriteCloser_Close_Call {\n\treturn &WriteCloser_Close_Call{Call: _e.mock.On(\"Close\")}\n}", "func (rw *responseWriter) Unwrap() http.ResponseWriter {\n\treturn rw.ResponseWriter\n}", "func New(w io.Writer) *Writer {\n\treturn &Writer{\n\t\tw: w,\n\t}\n}", "func (mock WriteCloser) fakeZeroWrite(p []byte) (n int, err error) {\n\treturn n, err\n}", "func (m *Nitro) NewWriter() *Writer {\n\tw := m.newWriter()\n\tw.next = m.wlist\n\tm.wlist = w\n\tw.dwrCtx.Init()\n\n\tm.shutdownWg1.Add(1)\n\tgo m.collectionWorker(w)\n\tif m.useMemoryMgmt {\n\t\tm.shutdownWg2.Add(1)\n\t\tgo m.freeWorker(w)\n\t}\n\n\treturn w\n}", "func NewWriter() *Writer {\n\treturn &Writer{buf: bytes.NewBuffer(nil)}\n}", "func (w *ChunkWriter) Close() error {\n\tif w.buffer == nil {\n\t\treturn nil\n\t}\n\n\tw.c = NewChunk(w.buffer.Bytes())\n\tw.buffer = nil\n\treturn nil\n}", "func NewWriter(limit int) *Wrap {\n\treturn &Wrap{\n\t\tLimit: limit,\n\t\tNewline: defaultNewline,\n\t\tKeepNewlines: true,\n\t\t// Keep whitespaces following a forceful line break. 
If disabled,\n\t\t// leading whitespaces in a line are only kept if the line break\n\t\t// was not forceful, meaning a line break that was already present\n\t\t// in the input\n\t\tPreserveSpace: false,\n\t\tTabWidth: defaultTabWidth,\n\n\t\tbuf: &bytes.Buffer{},\n\t}\n}", "func New() *Writer {\n\ttermWidth, _ = getTermSize()\n\tif termWidth != 0 {\n\t\toverFlowHandled = true\n\t}\n\n\treturn &Writer{\n\t\tOut: Out,\n\t\tRefreshInterval: RefreshInterval,\n\n\t\tmtx: &sync.Mutex{},\n\t}\n}", "func (w responseWriterNoBody) Write(data []byte) (int, error) {\n\treturn 0, nil\n}", "func NewWrappedResponseWriter(w http.ResponseWriter) *WrappedResponseWriter {\n\tgw := gzip.NewWriter(w)\n\treturn &WrappedResponseWriter{w, gw}\n}", "func (w *Writer) Close() error {\n\tif w.w == nil {\n\t\treturn nil\n\t}\n\n\tvar (\n\t\terrz error\n\t\terrc error\n\t)\n\n\terrz = w.wz.Close()\n\tif w.wc != nil {\n\t\twc := w.wc\n\t\tw.wc = nil\n\t\terrc = wc.Close()\n\t}\n\n\tw.w = nil\n\tw.wz = nil\n\n\tif errz != nil {\n\t\treturn fmt.Errorf(\"npz: could not close npz archive: %w\", errz)\n\t}\n\n\tif errc != nil {\n\t\treturn fmt.Errorf(\"npz: could not close npz file: %w\", errc)\n\t}\n\n\treturn nil\n}", "func (_e *WriteCloser_Expecter) Write(p interface{}) *WriteCloser_Write_Call {\n\treturn &WriteCloser_Write_Call{Call: _e.mock.On(\"Write\", p)}\n}", "func NewWriter(w io.Writer, opts WriterOpts) (deprecated.LegacyPackedWriter, error) {\n\twr := &writer{opts: opts}\n\tsubopts := deprecated.LegacyPackedWriterOpts{\n\t\tMarshal: deprecated.MarshalFunc(opts.Marshal),\n\t\tIndex: opts.Index,\n\t\tMaxItems: opts.MaxItems,\n\t\tMaxBytes: opts.MaxBytes,\n\t\tFlushed: opts.Flushed,\n\t}\n\n\tcompress := false\n\tswitch opts.FlateLevel {\n\tcase flate.BestSpeed,\n\t\tflate.BestCompression,\n\t\tflate.DefaultCompression,\n\t\tflate.HuffmanOnly:\n\t\tcompress = true\n\t}\n\n\tif compress {\n\t\twr.compressor = NewFlateTransform(opts.FlateLevel)\n\t\tsubopts.Transform = 
wr.compressor.CompressTransform\n\t}\n\twr.LegacyPackedWriter = deprecated.NewLegacyPackedWriter(w, subopts)\n\treturn wr, nil\n}", "func (w *Writer) WriteNull() {\n\tif w.err != nil {\n\t\treturn\n\t}\n\tw.b = AppendNull(w.b)\n}", "func newBufferedWriter(w io.Writer) *snappy.Writer {\n\trawBufWriter := bufWriterPool.Get()\n\tif rawBufWriter == nil {\n\t\treturn snappy.NewBufferedWriter(w)\n\t}\n\tbufW, ok := rawBufWriter.(*snappy.Writer)\n\tif !ok {\n\t\treturn snappy.NewBufferedWriter(w)\n\t}\n\tbufW.Reset(w)\n\treturn bufW\n}" ]
[ "0.83569413", "0.8011272", "0.71590877", "0.6870779", "0.67140406", "0.67096657", "0.6687032", "0.6371078", "0.6286475", "0.6217874", "0.6208649", "0.61860895", "0.61359453", "0.61247253", "0.607347", "0.6037165", "0.5976911", "0.5891818", "0.58669317", "0.57316923", "0.57210654", "0.5682104", "0.563397", "0.5629736", "0.5625744", "0.557823", "0.557412", "0.55650455", "0.5563851", "0.5560972", "0.55571365", "0.551571", "0.5502931", "0.5491729", "0.5465291", "0.5447252", "0.54405767", "0.5438453", "0.5425513", "0.53875154", "0.5362165", "0.53545207", "0.53502595", "0.534587", "0.5317975", "0.53112465", "0.52884346", "0.52672035", "0.5255381", "0.5211145", "0.52089447", "0.520102", "0.5185364", "0.5164889", "0.51586646", "0.51447415", "0.51335126", "0.51251715", "0.50978523", "0.509738", "0.50793195", "0.50586474", "0.50542516", "0.50520915", "0.5035403", "0.5029016", "0.5024436", "0.5006709", "0.50047827", "0.5000567", "0.49806482", "0.49799782", "0.4976809", "0.49650797", "0.4964724", "0.49459618", "0.49390182", "0.4932785", "0.49304035", "0.49073264", "0.48975888", "0.48895088", "0.48895088", "0.48813412", "0.48811096", "0.48781016", "0.4876857", "0.48465377", "0.48442262", "0.48441097", "0.48290163", "0.4818307", "0.48135456", "0.48111305", "0.4806843", "0.4800899", "0.47868156", "0.4776983", "0.47737506", "0.4772327" ]
0.82828856
1
NewTracer returns a new tracer
NewTracer возвращает новый трассер
func NewTracer(description string) *Tracer { return &Tracer{Started: time.Now().UTC(), Description: description} }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func NewTracer(name string) *Tracer {\n\tname = fmt.Sprintf(namePattern, name)\n\treturn &Tracer{\n\t\tname: name,\n\t}\n}", "func NewTracer(parent *Logger, prefix string) *Logger {\n\treturn &Logger{parent: parent, prefix: prefix, tracer: true}\n}", "func NewTracer(name string, w io.Writer, m Memory) Memory {\n\treturn &tracer{m: m, w: w, s: name}\n}", "func NewTracer(cli CLI) (*Tracer, error) {\n\ttracer := &Tracer{\n\t\tcli: cli,\n\t\tnumPackets: defaultNumPackets,\n\t}\n\treturn tracer, nil\n}", "func NewTracer(_ *config.Config) (*Tracer, error) {\n\treturn nil, ebpf.ErrNotImplemented\n}", "func NewTracer(cfg TracerConfig) opentracing.Tracer {\n\tvar tracer opentracing.Tracer\n\tswitch cfg.Provider {\n\tcase Zipkin:\n\t\tlogrus.Error(\"No implements yet.\")\n\t\t// fmt.Sprintf(\"http://%s:%s/api/v1/spans\",cfg.Host, cfg.Port)\n\t\tbreak\n\tcase Jaeger:\n\t\ttracer = newJaegerTracer(cfg)\n\t\tbreak\n\tdefault:\n\t\tlogrus.Errorf(\"unsported provider %s, use opentracing.GlobalTracer()\", cfg.Provider)\n\t\ttracer = opentracing.GlobalTracer()\n\t}\n\treturn tracer\n}", "func New(w io.Writer) Tracer{\n\treturn &tracer{out:w}\n}", "func NewTracer(\n\tserviceName string,\n\tdispatcher Dispatcher,\n\toptions ...TracerOption,\n) (opentracing.Tracer, io.Closer) {\n\ttracer := &Tracer{\n\t\tserviceName: serviceName,\n\t\tdispatcher: dispatcher,\n\t\tuseDualSpanMode: false,\n\t}\n\ttracer.propagators = make(map[interface{}]Propagator)\n\ttracer.propagators[opentracing.TextMap] = NewDefaultTextMapPropagator()\n\ttracer.propagators[opentracing.HTTPHeaders] = NewTextMapPropagator(PropagatorOpts{}, URLCodex{})\n\tfor _, option := range options {\n\t\toption(tracer)\n\t}\n\n\tif tracer.timeNow == nil {\n\t\ttracer.timeNow = time.Now\n\t}\n\n\tif tracer.logger == nil {\n\t\ttracer.logger = NullLogger{}\n\t}\n\n\tif tracer.idGenerator == nil {\n\t\ttracer.idGenerator = func() string {\n\t\t\t_uuid, err := uuid.NewUUID()\n\t\t\tif err != nil 
{\n\t\t\t\tpanic(err)\n\t\t\t}\n\t\t\treturn _uuid.String()\n\t\t}\n\t}\n\n\tdispatcher.SetLogger(tracer.logger)\n\treturn tracer, tracer\n}", "func New(w io.Writer) Tracer {\n\treturn &tracer{out: w}\n}", "func New(w io.Writer) Tracer {\n\treturn &tracer{out: w}\n}", "func New(w io.Writer) Tracer {\n\treturn &tracer{out: w}\n}", "func NewTracer() Tracer {\n\treturn &nullTracer{}\n}", "func NewTracer(\n\tserviceName string,\n\tsampler Sampler,\n\treporter Reporter,\n\toptions ...TracerOption,\n) (opentracing.Tracer, io.Closer) {\n\tt := &Tracer{\n\t\tserviceName: serviceName,\n\t\tsampler: samplerV1toV2(sampler),\n\t\treporter: reporter,\n\t\tinjectors: make(map[interface{}]Injector),\n\t\textractors: make(map[interface{}]Extractor),\n\t\tmetrics: *NewNullMetrics(),\n\t\tspanAllocator: simpleSpanAllocator{},\n\t}\n\n\tfor _, option := range options {\n\t\toption(t)\n\t}\n\n\t// register default injectors/extractors unless they are already provided via options\n\ttextPropagator := NewTextMapPropagator(getDefaultHeadersConfig(), t.metrics)\n\tt.addCodec(opentracing.TextMap, textPropagator, textPropagator)\n\n\thttpHeaderPropagator := NewHTTPHeaderPropagator(getDefaultHeadersConfig(), t.metrics)\n\tt.addCodec(opentracing.HTTPHeaders, httpHeaderPropagator, httpHeaderPropagator)\n\n\tbinaryPropagator := NewBinaryPropagator(t)\n\tt.addCodec(opentracing.Binary, binaryPropagator, binaryPropagator)\n\n\t// TODO remove after TChannel supports OpenTracing\n\tinteropPropagator := &jaegerTraceContextPropagator{tracer: t}\n\tt.addCodec(SpanContextFormat, interopPropagator, interopPropagator)\n\n\tzipkinPropagator := &zipkinPropagator{tracer: t}\n\tt.addCodec(ZipkinSpanFormat, zipkinPropagator, zipkinPropagator)\n\n\tif t.baggageRestrictionManager != nil {\n\t\tt.baggageSetter = newBaggageSetter(t.baggageRestrictionManager, &t.metrics)\n\t} else {\n\t\tt.baggageSetter = newBaggageSetter(baggage.NewDefaultRestrictionManager(0), &t.metrics)\n\t}\n\tif t.debugThrottler == nil 
{\n\t\tt.debugThrottler = throttler.DefaultThrottler{}\n\t}\n\n\tif t.randomNumber == nil {\n\t\tseedGenerator := utils.NewRand(time.Now().UnixNano())\n\t\tpool := sync.Pool{\n\t\t\tNew: func() interface{} {\n\t\t\t\treturn rand.NewSource(seedGenerator.Int63())\n\t\t\t},\n\t\t}\n\n\t\tt.randomNumber = func() uint64 {\n\t\t\tgenerator := pool.Get().(rand.Source)\n\t\t\tnumber := uint64(generator.Int63())\n\t\t\tpool.Put(generator)\n\t\t\treturn number\n\t\t}\n\t}\n\tif t.timeNow == nil {\n\t\tt.timeNow = time.Now\n\t}\n\tif t.logger == nil {\n\t\tt.logger = log.NullLogger\n\t}\n\t// Set tracer-level tags\n\tt.tags = append(t.tags, Tag{key: JaegerClientVersionTagKey, value: JaegerClientVersion})\n\tif hostname, err := os.Hostname(); err == nil {\n\t\tt.tags = append(t.tags, Tag{key: TracerHostnameTagKey, value: hostname})\n\t}\n\tif ipval, ok := t.getTag(TracerIPTagKey); ok {\n\t\tipv4, err := utils.ParseIPToUint32(ipval.(string))\n\t\tif err != nil {\n\t\t\tt.hostIPv4 = 0\n\t\t\tt.logger.Error(\"Unable to convert the externally provided ip to uint32: \" + err.Error())\n\t\t} else {\n\t\t\tt.hostIPv4 = ipv4\n\t\t}\n\t} else if ip, err := utils.HostIP(); err == nil {\n\t\tt.tags = append(t.tags, Tag{key: TracerIPTagKey, value: ip.String()})\n\t\tt.hostIPv4 = utils.PackIPAsUint32(ip)\n\t} else {\n\t\tt.logger.Error(\"Unable to determine this host's IP address: \" + err.Error())\n\t}\n\n\tif t.options.gen128Bit {\n\t\tif t.options.highTraceIDGenerator == nil {\n\t\t\tt.options.highTraceIDGenerator = t.randomNumber\n\t\t}\n\t} else if t.options.highTraceIDGenerator != nil {\n\t\tt.logger.Error(\"Overriding high trace ID generator but not generating \" +\n\t\t\t\"128 bit trace IDs, consider enabling the \\\"Gen128Bit\\\" option\")\n\t}\n\tif t.options.maxTagValueLength == 0 {\n\t\tt.options.maxTagValueLength = DefaultMaxTagValueLength\n\t}\n\tt.process = Process{\n\t\tService: serviceName,\n\t\tUUID: strconv.FormatUint(t.randomNumber(), 16),\n\t\tTags: t.tags,\n\t}\n\tif 
throttler, ok := t.debugThrottler.(ProcessSetter); ok {\n\t\tthrottler.SetProcess(t.process)\n\t}\n\n\treturn t, t\n}", "func New(recorders []basictracer.SpanRecorder) opentracing.Tracer {\n\treturn basictracer.New(NewRecorder(recorders))\n}", "func NewTracer(ctx context.Context, yamlConfig []byte) (opentracing.Tracer, io.Closer, error) {\n\tconfig := Config{}\n\tif err := yaml.Unmarshal(yamlConfig, &config); err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\toptions := lightstep.Options{\n\t\tAccessToken: config.AccessToken,\n\t\tCollector: config.Collector,\n\t}\n\tlighstepTracer, err := lightstep.CreateTracer(options)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tt := &Tracer{\n\t\tlighstepTracer,\n\t\tctx,\n\t}\n\treturn t, t, nil\n}", "func NewTracer(serviceName, host string) (opentracing.Tracer, io.Closer, error) {\n\tjcfg := jaegerconfig.Configuration{\n\t\tSampler: &jaegerconfig.SamplerConfig{\n\t\t\tType: \"const\",\n\t\t\tParam: 1,\n\t\t},\n\t\tReporter: &jaegerconfig.ReporterConfig{\n\t\t\tLogSpans: false,\n\t\t\tBufferFlushInterval: 1 * time.Second,\n\t\t\tLocalAgentHostPort: host,\n\t\t},\n\t\tServiceName: serviceName,\n\t}\n\n\ttracer, closer, err := jcfg.NewTracer()\n\tif err != nil {\n\t\treturn nil, nil, fmt.Errorf(\"new tracer error: %v\", err)\n\t}\n\n\treturn tracer, closer, nil\n}", "func NewTracer(cfg Config) (otrace.Tracer, io.Closer, error) {\n\tconf := cfg.New()\n\ttracer, closer, err := conf.New(\n\t\tcfg.App,\n\t\tconfig.Logger(jaeger.StdLogger),\n\t)\n\n\tif err != nil {\n\t\treturn nil, nil, errCreateTracer\n\t}\n\n\treturn tracer, closer, nil\n}", "func New() graphql.Tracer {\n\treturn tracer{Tracer: gqlopencensus.New()}\n}", "func New() graphql.Tracer {\n\treturn tracer{Tracer: gqlopencensus.New()}\n}", "func NewTracer(serviceName, zipkinURL string) (io.Closer, error) {\n\t// Send the tracing in Zipkin format (even if we are using Jaeger as backend).\n\ttransport, err := zipkin.NewHTTPTransport(\"http://\" + zipkinURL + 
\":9411/api/v1/spans\")\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"could not init Jaeger Zipkin HTTP transport: %w\", err)\n\t}\n\n\t// Zipkin shares span ID between client and server spans; it must be enabled via the following option.\n\tzipkinPropagator := zk.NewZipkinB3HTTPHeaderPropagator()\n\n\ttracer, closer := jaeger.NewTracer(\n\t\tserviceName,\n\t\tjaeger.NewConstSampler(true), // Trace everything for now.\n\t\tjaeger.NewRemoteReporter(transport),\n\t\tjaeger.TracerOptions.Injector(opentracing.HTTPHeaders, zipkinPropagator),\n\t\tjaeger.TracerOptions.Extractor(opentracing.HTTPHeaders, zipkinPropagator),\n\t\tjaeger.TracerOptions.ZipkinSharedRPCSpan(true),\n\t\tjaeger.TracerOptions.Gen128Bit(true),\n\t)\n\topentracing.SetGlobalTracer(tracer)\n\n\treturn closer, nil\n}", "func NewTracer(opts Options) ot.Tracer {\n\toptions := basictracer.DefaultOptions()\n\toptions.ShouldSample = func(_ uint64) bool { return true }\n\n\tif opts.UseGRPC {\n\t\tr := NewRecorder(opts)\n\t\tif r == nil {\n\t\t\treturn ot.NoopTracer{}\n\t\t}\n\t\toptions.Recorder = r\n\t} else {\n\t\topts.setDefaults()\n\t\t// convert opts to thrift_rpc.Options\n\t\tthriftOpts := thrift_rpc.Options{\n\t\t\tAccessToken: opts.AccessToken,\n\t\t\tCollector: thrift_rpc.Endpoint{opts.Collector.Host, opts.Collector.Port, opts.Collector.Plaintext},\n\t\t\tTags: opts.Tags,\n\t\t\tLightStepAPI: thrift_rpc.Endpoint{opts.LightStepAPI.Host, opts.LightStepAPI.Port, opts.LightStepAPI.Plaintext},\n\t\t\tMaxBufferedSpans: opts.MaxBufferedSpans,\n\t\t\tReportingPeriod: opts.ReportingPeriod,\n\t\t\tReportTimeout: opts.ReportTimeout,\n\t\t\tDropSpanLogs: opts.DropSpanLogs,\n\t\t\tMaxLogsPerSpan: opts.MaxLogsPerSpan,\n\t\t\tVerbose: opts.Verbose,\n\t\t\tMaxLogMessageLen: opts.MaxLogValueLen,\n\t\t}\n\t\tr := thrift_rpc.NewRecorder(thriftOpts)\n\t\tif r == nil {\n\t\t\treturn ot.NoopTracer{}\n\t\t}\n\t\toptions.Recorder = r\n\t}\n\toptions.DropAllLogs = opts.DropSpanLogs\n\toptions.MaxLogsPerSpan = 
opts.MaxLogsPerSpan\n\treturn basictracer.NewWithOptions(options)\n}", "func NewTracer(task concurrency.Task, enable bool, msg ...interface{}) Tracer {\n\tt := tracer{\n\t\tenabled: enable,\n\t}\n\tif task != nil {\n\t\tt.taskSig = task.Signature()\n\t}\n\n\tmessage := strprocess.FormatStrings(msg...)\n\tif message == \"\" {\n\t\tmessage = \"()\"\n\t}\n\tt.callerParams = strings.TrimSpace(message)\n\n\t// Build the message to trace\n\t// VPL: my version\n\t// if pc, file, line, ok := runtime.Caller(1); ok {\n\t//\tif f := runtime.FuncForPC(pc); f != nil {\n\t//\t\tt.funcName = f.Name()\n\t//\t\tfilename := strings.Replace(file, debug.sourceFilePartToRemove(), \"\", 1)\n\t//\t\tt.inOutMessage = fmt.Sprintf(\"%s %s%s [%s:%d]\", t.taskSig, filepath.Base(t.funcName), message, filename, line)\n\t//\t}\n\t// }\n\t// VPL: la version d'Oscar\n\tif pc, file, _, ok := runtime.Caller(1); ok {\n\t\tt.fileName = callstack.SourceFilePathUpdater()(file)\n\t\tif f := runtime.FuncForPC(pc); f != nil {\n\t\t\tt.funcName = filepath.Base(f.Name())\n\t\t}\n\t}\n\tif t.funcName == \"\" {\n\t\tt.funcName = unknownFunction\n\t}\n\tif t.fileName == \"\" {\n\t\tt.funcName = unknownFile\n\t}\n\n\treturn &t\n}", "func New(opts ...opentelemetry.Option) (opentracing.Tracer, io.Closer, error) {\n\toptions := opentelemetry.DefaultOptions()\n\tfor _, o := range opts {\n\t\to(&options)\n\t}\n\n\tlogger.Debug(\"Creating a new Jaeger tracer\")\n\n\t// Prepare a Jaeger config using our options:\n\tjaegerConfig := config.Configuration{\n\t\tServiceName: options.ServiceName,\n\t\tSampler: &config.SamplerConfig{\n\t\t\tType: \"const\", // No adaptive sampling or external lookups\n\t\t\tParam: options.SamplingRate,\n\t\t},\n\t\tReporter: &config.ReporterConfig{\n\t\t\tLocalAgentHostPort: options.TraceReporterAddress,\n\t\t},\n\t}\n\n\t// Prepare a new Jaeger tracer from this config:\n\ttracer, closer, err := jaegerConfig.NewTracer()\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\treturn tracer, 
closer, nil\n}", "func New(conf Config) *AppTracer {\n\treturn &AppTracer{\n\t\tConfig: conf,\n\t}\n}", "func (t *Tracer) newSpan() *Span {\n\treturn t.spanAllocator.Get()\n}", "func NewTrace(name string) *Trace {\n\treturn &Trace{\n\t\tTraceID: uuid.New(),\n\t\tSpanID: rand.Int63(),\n\t\tSpanName: name,\n\t}\n}", "func NewMockTracer(ctrl *gomock.Controller) *MockTracer {\n\tmock := &MockTracer{ctrl: ctrl}\n\tmock.recorder = &MockTracerMockRecorder{mock}\n\treturn mock\n}", "func NewMockTracer(ctrl *gomock.Controller) *MockTracer {\n\tmock := &MockTracer{ctrl: ctrl}\n\tmock.recorder = &MockTracerMockRecorder{mock}\n\treturn mock\n}", "func NewMockTracer(ctrl *gomock.Controller) *MockTracer {\n\tmock := &MockTracer{ctrl: ctrl}\n\tmock.recorder = &MockTracerMockRecorder{mock}\n\treturn mock\n}", "func NewTrace(localAddr string, logPath string) *Trace {\n\tt := &Trace{\n\t\tstopCh: make(chan struct{}),\n\t\tmsgCh: make(chan []byte, 1000),\n\t\tlocalAddr: localAddr,\n\t\tlogPath: logPath,\n\t\tforceLog: false,\n\t}\n\treturn t\n}", "func AddNewTracer(name string) *Tracer {\n\tsrc := NewTracer(name)\n\tif err := gwr.AddGenericDataSource(src); err != nil {\n\t\tpanic(err.Error())\n\t}\n\treturn src\n}", "func New(conf Config, opts ...func(Type)) (Type, error) {\n\tif c, ok := Constructors[conf.Type]; ok {\n\t\treturn c.constructor(conf, opts...)\n\t}\n\treturn nil, ErrInvalidTracerType\n}", "func newTraceV4() httptracer.HTTPTracer {\n\treturn traceV4{}\n}", "func newTraceV2() httptracer.HTTPTracer {\n\treturn traceV2{}\n}", "func NewSpan(tracer *Tracing, name string) Span {\n\treturn newSpanWithStart(tracer, name, time.Now())\n}", "func (i *DI) MakeTracer() opentracing.Tracer {\n\tif cacheTracer != nil {\n\t\treturn *cacheTracer\n\t}\n\t// cacheTracer global is set in MakeTracerCloser\n\t_, _ = i.MakeTracerCloser()\n\treturn *cacheTracer\n}", "func newOpentelemetryTracerProvider(address string, customAttributes ...attribute.KeyValue) (*tracesdk.TracerProvider, error) 
{\n\t// Same as Grafana core\n\tclient := otlptracegrpc.NewClient(otlptracegrpc.WithEndpoint(address), otlptracegrpc.WithInsecure())\n\texp, err := otlptrace.New(context.Background(), client)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn initTracerProvider(exp, customAttributes...)\n}", "func (t *Tracer) Start() *Tracer {\n\tlog.Debugf(\"Tracer started %v.\", t.Description)\n\treturn t\n}", "func (t *HadeTraceService) NewTrace() *contract.TraceContext {\n\tvar traceID, spanID string\n\tif t.traceIDGenerator != nil {\n\t\ttraceID = t.traceIDGenerator.NewID()\n\t} else {\n\t\ttraceID = t.idService.NewID()\n\t}\n\n\tif t.spanIDGenerator != nil {\n\t\tspanID = t.spanIDGenerator.NewID()\n\t} else {\n\t\tspanID = t.idService.NewID()\n\t}\n\ttc := &contract.TraceContext{\n\t\tTraceID: traceID,\n\t\tParentID: \"\",\n\t\tSpanID: spanID,\n\t\tCspanID: \"\",\n\t\tAnnotation: map[string]string{},\n\t}\n\treturn tc\n}", "func New(name string, impl []Wrapper, opts ...TraceOption) *Trace {\n\tt := &Trace{impl: impl, event: newSpanEvent(name)}\n\tfor _, opt := range opts {\n\t\topt(t)\n\t}\n\tfor _, i := range t.impl {\n\t\ti.Setup(name)\n\t}\n\treturn t\n}", "func (p *Provider) Tracer(instrumentationName string, opts ...oteltrace.TracerOption) oteltrace.Tracer {\n\topts = append(opts, oteltrace.WithInstrumentationVersion(teleport.Version))\n\n\treturn p.provider.Tracer(instrumentationName, opts...)\n}", "func NewWithTracer(tracer opentracing.Tracer) tracing.StartSpan {\n\treturn func(serviceId string, operationId string, protocol tracing.WireProtocol, r *http.Request) (context.Context, tracing.ServerSpan) {\n\t\tspanName := serviceId + OperationDelimiter + operationId\n\t\twireContext, err := tracer.Extract(\n\t\t\topentracing.HTTPHeaders,\n\t\t\topentracing.HTTPHeadersCarrier(r.Header))\n\t\tif err != nil {\n\t\t\tlog.Debug(\"Failed to extract opentracing headers\")\n\t\t}\n\n\t\t// Create the span referring to the RPC client if available.\n\t\t// If wireContext == 
nil, a root span will be created.\n\t\t// ext.RPCServerOption() sets tag span.kind=server\n\t\tserverSpan := tracer.StartSpan(\n\t\t\tspanName,\n\t\t\text.RPCServerOption(wireContext))\n\n\t\t//Set tag for the component\n\t\tserverSpan.SetTag(\"component\", ComponentTag)\n\n\t\t//Set headers, if header is not present the tag will be set to \"\"\n\t\tserverSpan.SetTag(\"http.user_agent\", r.Header.Get(\"User-Agent\"))\n\t\tserverSpan.SetTag(\"peer.address\", peerAddress(r))\n\t\tserverSpan.SetTag(\"wire.protocol\", protocol)\n\t\tserverSpan.SetTag(\"is_internal\", isInternal(r))\n\n\t\tspan := NewServerSpan(serverSpan)\n\n\t\tctx := opentracing.ContextWithSpan(r.Context(), serverSpan)\n\n\t\treturn ctx, span\n\t}\n}", "func NewPrometheusTracer(name string, rate int, vec *prometheus.CounterVec) *Tracer {\n\tconst (\n\t\tnewconn = \"new\"\n\t\treusedconn = \"reused\"\n\t)\n\n\thooks := &httptrace.ClientTrace{\n\t\tGotConn: func(con httptrace.GotConnInfo) {\n\t\t\tif con.Reused {\n\t\t\t\tvec.WithLabelValues(name, reusedconn).Inc()\n\t\t\t} else {\n\t\t\t\tvec.WithLabelValues(name, newconn).Inc()\n\t\t\t}\n\t\t},\n\t}\n\n\treturn &Tracer{SampleRate: rate, trace: hooks}\n}", "func NewCompositeTracer(references cref.IReferences) *CompositeTracer {\n\tc := &CompositeTracer{}\n\tif references != nil {\n\t\tc.SetReferences(references)\n\t}\n\treturn c\n}", "func NewPTracer(store Store) PTracer {\n\tt := PTracer{\n\t\tops: make(chan func()),\n\t\tstopped: make(chan stopped),\n\t\tquit: make(chan struct{}),\n\t\tchildAttached: make(chan struct{}),\n\n\t\tthreads: make(map[int]*thread),\n\t\tprocesses: make(map[int]*process),\n\t\tstore: store,\n\t}\n\tgo t.waitLoop()\n\tgo t.loop()\n\treturn t\n}", "func NewContext(ctx context.Context, t *Tracer) context.Context {\n\treturn context.WithValue(ctx, tracerKey, t)\n}", "func NewChromeTracer(chrome *godet.RemoteDebugger, size *ScreenSize, screenshotsStoragePath string) *ChromeTracer {\n\treturn &ChromeTracer{\n\t\tinstance: 
chrome,\n\t\tsize: size,\n\t\tscreenshotsStoragePath: screenshotsStoragePath,\n\t}\n}", "func NewSpan(tracer opentracing.Tracer, operationName string, opts ...opentracing.StartSpanOption) gin.HandlerFunc {\n\treturn func(ctx *gin.Context) {\n\t\tspan := tracer.StartSpan(operationName, opts...)\n\t\tctx.Set(spanContextKey, span)\n\t\tdefer span.Finish()\n\n\t\tctx.Next()\n\t}\n}", "func NewServiceTracer(serviceName string, agentAddress string, metricsType MetricsType) *ServiceTracer {\n\tzlogger, _ := zap.NewDevelopment(zap.AddStacktrace(zapcore.FatalLevel))\n\tzapLogger := zlogger.With(zap.String(\"service\", serviceName))\n\tlogger := NewLogFactory(zapLogger)\n\tmetricsFactory := NewMetrics(metricsType)\n\ttracer := NewTracer(serviceName, agentAddress, metricsFactory, logger)\n\topentracing.SetGlobalTracer(tracer)\n\treturn &ServiceTracer{tracer: tracer, logger: logger, serviceName: serviceName}\n}", "func NewDasTracer() DasTracer {\n\treturn DasTracer{\n\t\tURI: DefaultDasURI,\n\t}\n}", "func NewRecordingTracer() *RecordingTracer {\n\tvar result RecordingTracer\n\ttracer, err := apm.NewTracerOptions(apm.TracerOptions{\n\t\tTransport: &result.RecorderTransport,\n\t})\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tresult.Tracer = tracer\n\treturn &result\n}", "func New(x trace.Trace, y *log.Logger) *Object {\n\tceph = cephInterface.New(x, y)\n\tt = x\n\treturn &Object{ y }\n}", "func New(ctx context.Context, method string, kvps ...interface{}) (*SpanLogger, context.Context) {\n\treturn spanlogger.New(ctx, util_log.Logger, method, resolver, kvps...)\n}", "func (m *SpanManager) NewTrace(span_name string) *Span {\n\tm.mtx.Lock()\n\ttrace_fraction := m.trace_fraction\n\ttrace_debug := m.trace_debug\n\tm.mtx.Unlock()\n\tif Rng.Float64() >= trace_fraction {\n\t\treturn NewDisabledTrace()\n\t}\n\treturn m.NewSampledTrace(span_name, trace_debug)\n}", "func NewTrace(s *logic.S, ws ...z.Lit) *Trace {\n\treturn NewTraceLen(s, s.Len(), ws...)\n}", "func newTraceExporter(logger 
*zap.Logger, cfg configmodels.Exporter) (component.TracesExporter, error) {\n\n\tl := &logServiceTraceSender{\n\t\tlogger: logger,\n\t}\n\n\tvar err error\n\tif l.client, err = NewLogServiceClient(cfg.(*Config), logger); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn exporterhelper.NewTraceExporter(\n\t\tcfg,\n\t\tlogger,\n\t\tl.pushTraceData)\n}", "func (t *Trace) NewSpan(name string) platform.Span {\n\ts := NewSpan(name).(*Span)\n\ts.logger = t.logger\n\treturn s\n}", "func newOpenTelemetryWrapper(\n\tctx context.Context,\n\tspanName string,\n) (openTelemetryWrapper, error) {\n\tif spanName == \"\" {\n\t\tspanName = defaultSpanName\n\t}\n\n\tot := openTelemetryWrapper{\n\t\tspanName: spanName,\n\t}\n\n\tversion, _ := caddy.Version()\n\tres, err := ot.newResource(webEngineName, version)\n\tif err != nil {\n\t\treturn ot, fmt.Errorf(\"creating resource error: %w\", err)\n\t}\n\n\ttraceExporter, err := otlptracegrpc.New(ctx)\n\tif err != nil {\n\t\treturn ot, fmt.Errorf(\"creating trace exporter error: %w\", err)\n\t}\n\n\tot.propagators = autoprop.NewTextMapPropagator()\n\n\ttracerProvider := globalTracerProvider.getTracerProvider(\n\t\tsdktrace.WithBatcher(traceExporter),\n\t\tsdktrace.WithResource(res),\n\t)\n\n\tot.handler = otelhttp.NewHandler(http.HandlerFunc(ot.serveHTTP),\n\t\tot.spanName,\n\t\totelhttp.WithTracerProvider(tracerProvider),\n\t\totelhttp.WithPropagators(ot.propagators),\n\t\totelhttp.WithSpanNameFormatter(ot.spanNameFormatter),\n\t)\n\n\treturn ot, nil\n}", "func (tr *tracer) newSpan(ctx context.Context, name string, config *trace.SpanConfig) trace.Span {\n\t// If told explicitly to make this a new root use a zero value SpanContext\n\t// as a parent which contains an invalid trace ID and is not remote.\n\tvar psc trace.SpanContext\n\tif config.NewRoot() {\n\t\tctx = trace.ContextWithSpanContext(ctx, psc)\n\t} else {\n\t\tpsc = trace.SpanContextFromContext(ctx)\n\t}\n\n\t// If there is a valid parent trace ID, use it to ensure the 
continuity of\n\t// the trace. Always generate a new span ID so other components can rely\n\t// on a unique span ID, even if the Span is non-recording.\n\tvar tid trace.TraceID\n\tvar sid trace.SpanID\n\tif !psc.TraceID().IsValid() {\n\t\ttid, sid = tr.provider.idGenerator.NewIDs(ctx)\n\t} else {\n\t\ttid = psc.TraceID()\n\t\tsid = tr.provider.idGenerator.NewSpanID(ctx, tid)\n\t}\n\n\tsamplingResult := tr.provider.sampler.ShouldSample(SamplingParameters{\n\t\tParentContext: ctx,\n\t\tTraceID: tid,\n\t\tName: name,\n\t\tKind: config.SpanKind(),\n\t\tAttributes: config.Attributes(),\n\t\tLinks: config.Links(),\n\t})\n\n\tscc := trace.SpanContextConfig{\n\t\tTraceID: tid,\n\t\tSpanID: sid,\n\t\tTraceState: samplingResult.Tracestate,\n\t}\n\tif isSampled(samplingResult) {\n\t\tscc.TraceFlags = psc.TraceFlags() | trace.FlagsSampled\n\t} else {\n\t\tscc.TraceFlags = psc.TraceFlags() &^ trace.FlagsSampled\n\t}\n\tsc := trace.NewSpanContext(scc)\n\n\tif !isRecording(samplingResult) {\n\t\treturn tr.newNonRecordingSpan(sc)\n\t}\n\treturn tr.newRecordingSpan(psc, sc, name, samplingResult, config)\n}", "func NewRayTracer(view *Camera, options *RayTracerOptions) *RayTracer {\n\n\t// compute half-dimensions and angles:\n\thalfWidth, halfHeight := entry(view.width)/TWO, entry(view.height)/TWO\n\ttanY := tan(view.fovY / TWO)\n\ttanX := tanY * (halfWidth / halfHeight)\n\n\t// compute eye-basis vectors:\n\tbW := view.pos.minus(&view.lookAt).direction()\n\tbU := view.up.cross(bW).direction()\n\tbV := bW.cross(bU)\n\n\treturn &RayTracer{\n\t\tview.width, view.height,\n\t\thalfWidth, halfHeight, tanX, tanY,\n\t\t*bU, *bV, *bW, view.pos,\n\t\toptions,\n\t}\n}", "func New(msg string) error {\n return &withTrace{\n msg: msg,\n stack: trace(),\n }\n}", "func (i *DI) MakeTracerCloser() (opentracing.Tracer, io.Closer) {\n\tif cacheTracer != nil {\n\t\treturn *cacheTracer, cacheTraceCloser\n\t}\n\tcfg := i.MakeTraceConfig()\n\tjLogger := jaegerlog.StdLogger\n\tjMetricsFactory := 
metrics.NullFactory\n\n\t// Initialize tracer with a logger and a metrics factory\n\ttracer, closer, err := cfg.NewTracer(\n\t\tjaegercfg.Logger(jLogger),\n\t\tjaegercfg.Metrics(jMetricsFactory),\n\t)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tcacheTracer := &tracer\n\tcacheTraceCloser := closer\n\treturn *cacheTracer, cacheTraceCloser\n}", "func NewTraces(consume ConsumeTracesFunc, options ...Option) (Traces, error) {\n\tif consume == nil {\n\t\treturn nil, errNilFunc\n\t}\n\treturn &baseTraces{\n\t\tbaseImpl: newBaseImpl(options...),\n\t\tConsumeTracesFunc: consume,\n\t}, nil\n}", "func NewTrace(lfn string, site string, ts int64, jobtype string, wnname string) Trace {\n\ttrc := Trace{}\n\ttrc.Account = \"fwjr\"\n\ttrc.ClientState = \"DONE\"\n\ttrc.Filename = lfn\n\ttrc.DID = \"cms:\" + fmt.Sprintf(\"%v\", trc.Filename)\n\ttrc.EventType = \"get\"\n\ttrc.EventVersion = \"API_1.21.6\"\n\ttrc.FileReadts = ts\n\ttrc.Jobtype = jobtype\n\ttrc.RemoteSite = site\n\ttrc.Scope = \"cms\"\n\ttrc.Timestamp = trc.FileReadts\n\ttrc.TraceTimeentryUnix = trc.FileReadts\n\ttrc.Usrdn = \"/DC=ch/DC=cern/OU=Organic Units/OU=Users/CN=yuyi/CN=639751/CN=Yuyi Guo/CN=706639693\"\n\ttrc.Wnname = wnname\n\treturn trc\n}", "func (st *ServiceTracer) OpenTracer() opentracing.Tracer {\n\treturn st.tracer\n}", "func NewTraceProvider(ctx context.Context, cfg Config) (*Provider, error) {\n\tif err := cfg.CheckAndSetDefaults(); err != nil {\n\t\treturn nil, trace.Wrap(err)\n\t}\n\n\texporter, err := NewExporter(ctx, cfg)\n\tif err != nil {\n\t\treturn nil, trace.Wrap(err)\n\t}\n\n\tattrs := []attribute.KeyValue{\n\t\t// the service name used to display traces in backends\n\t\tsemconv.ServiceNameKey.String(cfg.Service),\n\t\tattribute.String(VersionKey, teleport.Version),\n\t}\n\tattrs = append(attrs, cfg.Attributes...)\n\n\tres, err := 
resource.New(ctx,\n\t\tresource.WithFromEnv(),\n\t\tresource.WithProcessPID(),\n\t\tresource.WithProcessExecutableName(),\n\t\tresource.WithProcessExecutablePath(),\n\t\tresource.WithProcessRuntimeName(),\n\t\tresource.WithProcessRuntimeVersion(),\n\t\tresource.WithProcessRuntimeDescription(),\n\t\tresource.WithTelemetrySDK(),\n\t\tresource.WithHost(),\n\t\tresource.WithAttributes(attrs...),\n\t)\n\tif err != nil {\n\t\treturn nil, trace.Wrap(err)\n\t}\n\n\t// set global propagator, the default is no-op.\n\totel.SetTextMapPropagator(propagation.NewCompositeTextMapPropagator(propagation.TraceContext{}, propagation.Baggage{}))\n\n\t// override the global logging handled with one that uses the\n\t// configured logger instead\n\totel.SetErrorHandler(otel.ErrorHandlerFunc(func(err error) {\n\t\tcfg.Logger.WithError(err).Warnf(\"failed to export traces.\")\n\t}))\n\n\t// set global provider to our provider wrapper to have all tracers use the common TracerOptions\n\tprovider := &Provider{\n\t\tprovider: sdktrace.NewTracerProvider(\n\t\t\tsdktrace.WithSampler(sdktrace.ParentBased(sdktrace.TraceIDRatioBased(cfg.SamplingRate))),\n\t\t\tsdktrace.WithResource(res),\n\t\t\tsdktrace.WithSpanProcessor(sdktrace.NewBatchSpanProcessor(exporter)),\n\t\t),\n\t}\n\totel.SetTracerProvider(provider)\n\n\treturn provider, nil\n}", "func (t *TraceWrapper) newSpan(name string) *SpanWrapper {\n\tctx := t.ctx\n\tif ctx == nil {\n\t\tctx = context.Background()\n\t}\n\n\ts, ok := ctx.Value(spanKey{}).(*SpanWrapper)\n\tif !ok {\n\t\ts = t.generateSpan(name)\n\t\tctx = context.WithValue(ctx, spanKey{}, s)\n\t\ts.ctx, t.ctx = ctx, ctx\n\t}\n\treturn s\n}", "func GetNewTraceTransport(trace HTTPTracer, transport http.RoundTripper) RoundTripTrace {\n\treturn RoundTripTrace{Trace: trace,\n\t\tTransport: transport}\n}", "func newTraceExporter(config *Config, transportChannel transportChannel, logger *zap.Logger) (exporter.TraceExporter, error) {\n\n\texporter := &traceExporter{\n\t\tconfig: 
config,\n\t\ttransportChannel: transportChannel,\n\t\tlogger: logger,\n\t}\n\n\texp, err := exporterhelper.NewTraceExporter(\n\t\tconfig,\n\t\texporter.pushTraceData,\n\t\texporterhelper.WithTracing(true),\n\t\texporterhelper.WithMetrics(true))\n\n\treturn exp, err\n}", "func (ts *TeeSpan) Tracer() opentracing.Tracer {\n\treturn ts.tracer\n}", "func New(ctx context.Context) *Manager {\n\tout := &Manager{\n\t\tsync.Mutex{},\n\t\tmake(map[id.ID]tracer.Tracer),\n\t}\n\tbind.GetRegistry(ctx).Listen(bind.NewDeviceListener(out.createTracer, out.destroyTracer))\n\treturn out\n}", "func (b *Builder) Build() (*Tracer, error) {\n\tif err := b.err; err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar options []Option\n\tif b.loggers != nil {\n\t\tswitch len(b.loggers) {\n\t\tcase 0:\n\t\t\t// intentionally left blank\n\t\tcase 1:\n\t\t\toptions = append(options, WithLogger(b.loggers[0]))\n\t\tdefault:\n\t\t\toptions = append(options, WithLogger(MultiLogger(b.loggers...)))\n\t\t}\n\t}\n\tif b.traceClient != nil {\n\t\toptions = append(options, WithTraceClient(b.traceClient))\n\t}\n\tif b.errorClient != nil {\n\t\toptions = append(options, WithErrorClient(b.errorClient))\n\t}\n\tif b.baggage != nil {\n\t\toptions = append(options, WithBaggage(b.baggage))\n\t}\n\n\treturn New(options...), nil\n}", "func T() tracing.Trace {\n\treturn gtrace.EngineTracer\n}", "func NewSpan() Span {\n\treturn newSpan(&otlptrace.Span{})\n}", "func initTracer(jaegerURL string) (func(), error) {\n\tif jaegerURL == \"\" {\n\t\treturn func() {\n\t\t\ttrace.NewNoopTracerProvider()\n\t\t}, nil\n\t}\n\n\t// create and install Jaeger export pipeline\n\treturn jaeger.InstallNewPipeline(\n\t\tjaeger.WithCollectorEndpoint(jaegerURL),\n\t\tjaeger.WithProcess(jaeger.Process{\n\t\t\tServiceName: \"kms\",\n\t\t}),\n\t\tjaeger.WithSDK(&sdktrace.Config{DefaultSampler: sdktrace.AlwaysSample()}),\n\t)\n}", "func NewContext(ctx context.Context, tr Tracer) context.Context {\n\treturn trace.NewContext(ctx, tr)\n}", "func 
buildTracerProvider(ctx context.Context, tracingCfg config.TracingConfig) (trace.TracerProvider, func() error, error) {\n\tclient, err := getClient(tracingCfg)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\texp, err := otlptrace.New(ctx, client)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\t// Create a resource describing the service and the runtime.\n\tres, err := resource.New(\n\t\tctx,\n\t\tresource.WithSchemaURL(semconv.SchemaURL),\n\t\tresource.WithAttributes(\n\t\t\tsemconv.ServiceNameKey.String(serviceName),\n\t\t\tsemconv.ServiceVersionKey.String(version.Version),\n\t\t),\n\t\tresource.WithProcessRuntimeDescription(),\n\t\tresource.WithTelemetrySDK(),\n\t)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\ttp := tracesdk.NewTracerProvider(\n\t\ttracesdk.WithBatcher(exp),\n\t\ttracesdk.WithSampler(tracesdk.ParentBased(\n\t\t\ttracesdk.TraceIDRatioBased(tracingCfg.SamplingFraction),\n\t\t)),\n\t\ttracesdk.WithResource(res),\n\t)\n\n\treturn tp, func() error {\n\t\tctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)\n\t\tdefer cancel()\n\t\terr := tp.Shutdown(ctx)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\treturn nil\n\t}, nil\n}", "func New(config *Config, log *zap.Logger) (exporter.TraceExporter, error) {\n\thttpClient := &http.Client{}\n\toptions := []elastic.ClientOptionFunc{\n\t\telastic.SetURL(config.Servers...),\n\t\telastic.SetBasicAuth(config.Username, config.Password),\n\t\telastic.SetSniff(config.Sniffer),\n\t\telastic.SetHttpClient(httpClient),\n\t}\n\tif config.TokenFile != \"\" {\n\t\ttoken, err := loadToken(config.TokenFile)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\thttpClient.Transport = &tokenAuthTransport{\n\t\t\ttoken: token,\n\t\t\twrapped: &http.Transport{},\n\t\t}\n\t}\n\n\tesRawClient, err := elastic.NewClient(options...)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to create Elasticsearch client for %s, %v\", config.Servers, err)\n\t}\n\tbulk, err := 
esRawClient.BulkProcessor().\n\t\tBulkActions(config.bulkActions).\n\t\tBulkSize(config.bulkSize).\n\t\tWorkers(config.bulkWorkers).\n\t\tFlushInterval(config.bulkFlushInterval).\n\t\tDo(context.Background())\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tversion := config.Version\n\tif version == 0 {\n\t\tversion, err = getVersion(esRawClient, config.Servers[0])\n\t}\n\tvar tags []string\n\tif config.TagsAsFields.AllAsFields && config.TagsAsFields.File != \"\" {\n\t\ttags, err = loadTagsFromFile(config.TagsAsFields.File)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"failed to load tags file: %v\", err)\n\t\t}\n\t}\n\n\tw := esSpanStore.NewSpanWriter(esSpanStore.SpanWriterParams{\n\t\tLogger: log,\n\t\tMetricsFactory: metrics.NullFactory,\n\t\tClient: eswrapper.WrapESClient(esRawClient, bulk, version),\n\t\tIndexPrefix: config.IndexPrefix,\n\t\tUseReadWriteAliases: config.UseWriteAlias,\n\t\tAllTagsAsFields: config.TagsAsFields.AllAsFields,\n\t\tTagKeysAsFields: tags,\n\t\tTagDotReplacement: config.TagsAsFields.DotReplacement,\n\t})\n\n\tif config.CreateTemplates {\n\t\tspanMapping, serviceMapping := es.GetMappings(int64(config.Shards), int64(config.Shards), version)\n\t\terr := w.CreateTemplates(spanMapping, serviceMapping)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\tstorage := jexporter.Storage{\n\t\tWriter: w,\n\t}\n\treturn exporterhelper.NewTraceExporter(\n\t\tconfig,\n\t\tstorage.Store,\n\t\texporterhelper.WithShutdown(func() error {\n\t\t\treturn w.Close()\n\t\t}))\n}", "func (s *Service) Tracer() opentracing.Tracer {\n\treturn s.tracer\n}", "func initTracer(serviceName string, agentUrl string, collectorUrl string) (io.Closer, error) {\n\tvar cfg jaegercfg.Configuration\n\tif agentUrl != \"\" {\n\t\tcfg = jaegercfg.Configuration{\n\t\t\tSampler: &jaegercfg.SamplerConfig{\n\t\t\t\tType: jaeger.SamplerTypeConst,\n\t\t\t\tParam: 1,\n\t\t\t},\n\t\t\tReporter: &jaegercfg.ReporterConfig{\n\t\t\t\tLocalAgentHostPort: 
agentUrl,\n\t\t\t\tLogSpans: true,\n\t\t\t},\n\t\t}\n\t} else if collectorUrl != \"\" {\n\t\tcfg = jaegercfg.Configuration{\n\t\t\tSampler: &jaegercfg.SamplerConfig{\n\t\t\t\tType: jaeger.SamplerTypeConst,\n\t\t\t\tParam: 1,\n\t\t\t},\n\t\t\tReporter: &jaegercfg.ReporterConfig{\n\t\t\t\tCollectorEndpoint: collectorUrl,\n\t\t\t\tLogSpans: true,\n\t\t\t},\n\t\t}\n\t} else {\n\t\tcfg = jaegercfg.Configuration{}\n\t}\n\n\tcloser, err := cfg.InitGlobalTracer(\n\t\tserviceName,\n\t\tjaegercfg.Logger(jaegerlog.StdLogger),\n\t\tjaegercfg.Metrics(jaegermetrics.NullFactory),\n\t)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn closer, nil\n}", "func initTracer(serviceName string, agentUrl string, collectorUrl string) (io.Closer, error) {\n\tvar cfg jaegercfg.Configuration\n\tif agentUrl != \"\" {\n\t\tcfg = jaegercfg.Configuration{\n\t\t\tSampler: &jaegercfg.SamplerConfig{\n\t\t\t\tType: jaeger.SamplerTypeConst,\n\t\t\t\tParam: 1,\n\t\t\t},\n\t\t\tReporter: &jaegercfg.ReporterConfig{\n\t\t\t\tLocalAgentHostPort: agentUrl,\n\t\t\t\tLogSpans: true,\n\t\t\t},\n\t\t}\n\t} else if collectorUrl != \"\" {\n\t\tcfg = jaegercfg.Configuration{\n\t\t\tSampler: &jaegercfg.SamplerConfig{\n\t\t\t\tType: jaeger.SamplerTypeConst,\n\t\t\t\tParam: 1,\n\t\t\t},\n\t\t\tReporter: &jaegercfg.ReporterConfig{\n\t\t\t\tCollectorEndpoint: collectorUrl,\n\t\t\t\tLogSpans: true,\n\t\t\t},\n\t\t}\n\t} else {\n\t\tcfg = jaegercfg.Configuration{}\n\t}\n\n\tcloser, err := cfg.InitGlobalTracer(\n\t\tserviceName,\n\t\tjaegercfg.Logger(jaegerlog.StdLogger),\n\t\tjaegercfg.Metrics(jaegermetrics.NullFactory),\n\t)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn closer, nil\n}", "func newOpentelemetryTracerProviderStore() *opentelemetryTracerProviderStore {\n\texps := []sdktrace.SpanExporter{}\n\treturn &opentelemetryTracerProviderStore{exps, nil, nil}\n}", "func newNoOpTracerProvider() TracerProvider {\n\treturn &noopTracerProvider{TracerProvider: trace.NewNoopTracerProvider()}\n}", "func 
newTracesExporter(params exporter.CreateSettings, cfg component.Config) (*traceExporterImp, error) {\n\texporterFactory := otlpexporter.NewFactory()\n\n\tlb, err := newLoadBalancer(params, cfg, func(ctx context.Context, endpoint string) (component.Component, error) {\n\t\toCfg := buildExporterConfig(cfg.(*Config), endpoint)\n\t\treturn exporterFactory.CreateTracesExporter(ctx, params, &oCfg)\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\ttraceExporter := traceExporterImp{loadBalancer: lb, routingKey: traceIDRouting}\n\n\tswitch cfg.(*Config).RoutingKey {\n\tcase \"service\":\n\t\ttraceExporter.routingKey = svcRouting\n\tcase \"traceID\", \"\":\n\tdefault:\n\t\treturn nil, fmt.Errorf(\"unsupported routing_key: %s\", cfg.(*Config).RoutingKey)\n\t}\n\treturn &traceExporter, nil\n}", "func TravelNew(id ID) *Travel {\n\ta := IntransitiveActivityNew(id, TravelType)\n\to := Travel(*a)\n\treturn &o\n}", "func (sb ServiceBase) Tracer() trace.Tracer {\n\treturn sb.options.Tracer\n}", "func NewTrace(err error, skip int) error {\n\tswitch err.(type) {\n\tcase Restackable:\n\t\treturn err.(Restackable).NewTrace(skip + 1)\n\t}\n\treturn &detailedError{\n\t\ts: err.Error(),\n\t\tstack: stackTrace(skip + 1),\n\t}\n}", "func initTracing(cfg domain.TracingConfig) (opentracing.Tracer, io.Closer) {\n\ttracingCfg := jaegerconf.Configuration{\n\t\tServiceName: cfg.ServiceName,\n\t\tSampler: &jaegerconf.SamplerConfig{\n\t\t\tType: jaeger.SamplerTypeConst,\n\t\t\tParam: 1,\n\t\t},\n\t\tReporter: &jaegerconf.ReporterConfig{\n\t\t\tLogSpans: true,\n\t\t},\n\t}\n\ttracer, closer, err := tracingCfg.NewTracer()\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\topentracing.SetGlobalTracer(tracer)\n\treturn tracer, closer\n}", "func NewTraceExporter(logger *zap.Logger) exporter.TraceExporter {\n\treturn &loggingExporter{logger: logger}\n}", "func newFakeTracerProviderStore() *fakeTracerProviderStore {\n\texps := []sdktrace.SpanExporter{}\n\treturn &fakeTracerProviderStore{exps, nil, 
nil}\n}", "func initTracer() func() {\n\t// Create and install Jaeger export pipeline.\n\tflush, err := jaeger.InstallNewPipeline(\n\t\tjaeger.WithCollectorEndpoint(\"http://localhost:14268/api/traces\"),\n\t\tjaeger.WithProcess(jaeger.Process{\n\t\t\tServiceName: \"server\",\n\t\t\tTags: []label.KeyValue{\n\t\t\t\tlabel.String(\"exporter\", \"jaeger\"),\n\t\t\t},\n\t\t}),\n\t\tjaeger.WithSDK(&sdktrace.Config{DefaultSampler: sdktrace.AlwaysSample()}),\n\t)\n\totel.SetTextMapPropagator(propagation.NewCompositeTextMapPropagator(propagation.TraceContext{}, propagation.Baggage{}))\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\treturn flush\n}", "func newExporter(w io.Writer) (trace.SpanExporter, error) {\n\treturn stdouttrace.New(\n\t\tstdouttrace.WithWriter(w),\n\t\t// Use human-readable output.\n\t\tstdouttrace.WithPrettyPrint(),\n\t\t// Do not print timestamps for the demo.\n\t\tstdouttrace.WithoutTimestamps(),\n\t)\n}", "func New(svc service.TictacService, logger log.Logger, otTracer stdopentracing.Tracer, zipkinTracer *stdzipkin.Tracer) (ep Endpoints) {\n\tvar ticEndpoint endpoint.Endpoint\n\t{\n\t\tmethod := \"tic\"\n\t\tticEndpoint = MakeTicEndpoint(svc)\n\t\tticEndpoint = opentracing.TraceServer(otTracer, method)(ticEndpoint)\n\t\tif zipkinTracer != nil {\n\t\t\tticEndpoint = zipkin.TraceEndpoint(zipkinTracer, method)(ticEndpoint)\n\t\t}\n\t\tticEndpoint = LoggingMiddleware(log.With(logger, \"method\", method))(ticEndpoint)\n\t\tep.TicEndpoint = ticEndpoint\n\t}\n\n\tvar tacEndpoint endpoint.Endpoint\n\t{\n\t\tmethod := \"tac\"\n\t\ttacEndpoint = MakeTacEndpoint(svc)\n\t\ttacEndpoint = opentracing.TraceServer(otTracer, method)(tacEndpoint)\n\t\tif zipkinTracer != nil {\n\t\t\ttacEndpoint = zipkin.TraceEndpoint(zipkinTracer, method)(tacEndpoint)\n\t\t}\n\t\ttacEndpoint = LoggingMiddleware(log.With(logger, \"method\", method))(tacEndpoint)\n\t\tep.TacEndpoint = tacEndpoint\n\t}\n\n\treturn ep\n}", "func NewDefault() tracing.StartSpan {\n\treturn 
NewWithTracer(opentracing.GlobalTracer())\n}", "func NewTraceClient(c HTTPClient, l Level) *TraceClient {\n\treturn &TraceClient{c: c, l: l}\n}", "func NewTracingService(s Service) Service {\n\treturn &tracingService{service: s}\n}", "func InitTracer(tracing env.Tracing, id string) (io.Closer, error) {\n\ttracer, trCloser, err := tracing.NewTracer(id)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\topentracing.SetGlobalTracer(tracer)\n\treturn trCloser, nil\n}", "func (e *detailedError) NewTrace(skip int) error {\n\tcp := new(detailedError)\n\t*cp = *e\n\tcp.stack = stackTrace(skip + 1)\n\tcp.original = e.Original()\n\treturn cp\n}", "func newTracingMiddleware(tracer opentracing.Tracer) linkManagerMiddleware {\n\treturn func(next om.LinkManager) om.LinkManager {\n\t\treturn tracingMiddleware{next, tracer}\n\t}\n}", "func NewTransport(base *http.Transport) http.RoundTripper {\n\tif tracer != nil {\n\t\treturn tracer.NewTransport(base)\n\t}\n\treturn nil\n}" ]
[ "0.8209218", "0.8077655", "0.79836166", "0.79239917", "0.7877002", "0.78683406", "0.7835157", "0.779283", "0.7783801", "0.7783801", "0.7783801", "0.7775605", "0.7605881", "0.76013285", "0.7595682", "0.7517959", "0.7459979", "0.7405851", "0.7405851", "0.7370349", "0.7234734", "0.7110442", "0.6861783", "0.68284714", "0.6741338", "0.66998273", "0.6696342", "0.6696342", "0.6696342", "0.6627697", "0.66244566", "0.65541816", "0.64701295", "0.64561486", "0.64405733", "0.6439176", "0.64130574", "0.6285231", "0.6277894", "0.622787", "0.62226814", "0.6173185", "0.61520845", "0.6120611", "0.61195815", "0.6115027", "0.6111991", "0.6091892", "0.6086266", "0.60798055", "0.60447294", "0.6025822", "0.6023593", "0.60011387", "0.5974719", "0.59408116", "0.59400487", "0.5924038", "0.58784264", "0.585358", "0.5846802", "0.5815383", "0.5804058", "0.580377", "0.5766544", "0.57646626", "0.57630575", "0.5748743", "0.5743974", "0.57421386", "0.57421064", "0.57368267", "0.57320774", "0.57276297", "0.5659969", "0.5625857", "0.56240165", "0.56091636", "0.56091166", "0.5596877", "0.5596877", "0.5594606", "0.5590107", "0.5583315", "0.5582584", "0.5559158", "0.55540293", "0.5533872", "0.5532233", "0.5489945", "0.5476027", "0.5465838", "0.54539937", "0.5452661", "0.54479384", "0.5439142", "0.5426805", "0.54265904", "0.54242617", "0.542331" ]
0.8393048
0
ThisFunction returns calling function name
Эта функция возвращает имя вызывающей функции
func ThisFunction() string { var pc [32]uintptr runtime.Callers(2, pc[:]) return runtime.FuncForPC(pc[0]).Name() }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func CurrentFunctionName() string {\n\tpc, _, _, _ := runtime.Caller(1)\n\tfuncname := path.Base(runtime.FuncForPC(pc).Name())\n\treturn funcname\n}", "func funcName() string {\n\tpc, _, _, _ := runtime.Caller(1)\n\treturn runtime.FuncForPC(pc).Name()\n}", "func ThisFunc() *runtime.Func {\n\tpc, _, _, _ := runtime.Caller(1)\n\treturn runtime.FuncForPC(pc)\n}", "func myCaller() string {\n\t// Skip GetCallerFunctionName and the function to get the caller of\n\treturn getFrame(2).Function\n}", "func funcName() string {\n\tpc, _, _, _ := runtime.Caller(1)\n\tcompleteFuncname := runtime.FuncForPC(pc).Name()\n\treturn strings.Split(completeFuncname, \".\")[len(strings.Split(completeFuncname, \".\"))-1]\n}", "func callerName() (caller string) {\n\tpc, _, _, ok := runtime.Caller(2) // 0: function-self, 1: parent function caller\n\tif !ok {\n\t\tcaller = \"#\"\n\t} else {\n\t\tpath := runtime.FuncForPC(pc).Name()\n\t\titems := strings.Split(path, \".\")\n\t\tcaller = items[len(items)-1]\n\t\tif len(caller) == 0 {\n\t\t\tcaller = path\n\t\t}\n\t}\n\treturn caller\n}", "func (logProxy *loggerProxy)getCallerName() string{\n pc := make([]uintptr, 1)\n //Skipping the functions that are part of loggerProxy to get right caller.\t\n runtime.Callers(4, pc)\n f := runtime.FuncForPC(pc[0])\n return f.Name()\n}", "func getCallerFuncName() (callerFuncName string) {\n\tpc, _, _, _ := runtime.Caller(2)\n\tdetails := runtime.FuncForPC(pc)\n\treturn details.Name()\n}", "func (m Function) Name() string {\n\treturn m.name\n}", "func GetMyCaller() string {\n\t_, filename, _, _ := runtime.Caller(1)\n\treturn filename\n}", "func GetCallingFunction() string {\n\tfpcs := make([]uintptr, 1)\n\n\tn := runtime.Callers(3, fpcs)\n\tif n == 0 {\n\t\treturn \"n/a\"\n\t}\n\n\tfun := runtime.FuncForPC(fpcs[0] - 1)\n\tif fun == nil {\n\t\treturn \"n/a\"\n\t}\n\n\tnameParts := strings.Split(fun.Name(), \".\")\n\n\treturn nameParts[len(nameParts)-1]\n}", "func GetFuncName() string {\n\tpc, _, _, _ := 
runtime.Caller(1)\n\treturn runtime.FuncForPC(pc).Name()\n}", "func GetFunctionName() string {\n\tpc, _, _, _ := runtime.Caller(1)\n\tfullName := runtime.FuncForPC(pc).Name()\n\tparts := strings.Split(fullName, \".\")\n\treturn parts[len(parts)-1]\n}", "func funcName() string {\n\t// Skip 2 levels to get the caller.\n\tpc, _, _, ok := runtime.Caller(depth)\n\tif !ok {\n\t\tfmt.Println(\"MSG: NO CALLER\")\n\t\treturn \"\"\n\t}\n\n\t// get the function caller.\n\tcaller := runtime.FuncForPC(pc)\n\tif caller == nil {\n\t\tfmt.Println(\"MSG CALLER WAS NIL\")\n\t}\n\n\t// remove extra file path characters.\n\tr := regexp.MustCompile(`[^\\/]+$`)\n\treturn fmt.Sprintf(\"%s\", r.FindString(caller.Name()))\n}", "func getFunctionName(fn interface{}) string {\n\tname := runtime.FuncForPC(reflect.ValueOf(fn).Pointer()).Name()\n\n\t// Method above returns functions in the form : main.foo\n\tparts := strings.Split(name, \".\")\n\treturn parts[len(parts)-1]\n}", "func callerSource() string {\n\tpc, file, line, success := runtime.Caller(2)\n\tif !success {\n\t\tfile = \"<unknown>\"\n\t\tline = 0\n\t}\n\tfile = path.Base(file)\n\tname := runtime.FuncForPC(pc).Name()\n\tname = strings.TrimPrefix(name, \"github.com/minio/minio/cmd.\")\n\treturn fmt.Sprintf(\"[%s:%d:%s()]\", file, line, name)\n}", "func _getFuncName() string {\n var ptrVal = _getFuncPtrVal()\n\n // Look up the function object\n var fun = runtime.FuncForPC(ptrVal)\n\n // returns: _/home/user/src/gohavenet/src.TestProcMgr_False\n var funcNamePath = fun.Name()\n\n // Split on the slash and return just the func name\n var pathElems = strings.Split(funcNamePath, \"/\")\n var index = len(pathElems) - 1\n if index < 0 {\n index = 0\n }\n\n return pathElems[index]\n}", "func getFunctionName(i interface{}) string {\n\treturn runtime.FuncForPC(reflect.ValueOf(i).Pointer()).Name()\n}", "func This() string {\n\treturn this\n}", "func _caller(n int) string {\n\tif pc, _, _, ok := runtime.Caller(n); ok {\n\t\tfns := 
strings.Split(runtime.FuncForPC(pc).Name(), \"/\")\n\t\treturn fns[len(fns)-1]\n\t}\n\n\treturn \"unknown\"\n}", "func (p Person) FirstNameCaller() string {\n\treturn p.firstName\n}", "func funcName(fn interface{}) string {\n\tfnV := reflect.ValueOf(fn)\n\tif fnV.Kind() != reflect.Func {\n\t\treturn \"n/a\"\n\t}\n\n\tfunction := runtime.FuncForPC(fnV.Pointer()).Name()\n\treturn fmt.Sprintf(\"%s()\", function)\n}", "func funcName(f interface{}) string {\n\tfi := ess.GetFunctionInfo(f)\n\treturn fi.Name\n}", "func FuncName() string {\n\tp := make([]uintptr, 1)\n\truntime.Callers(2, p)\n\tfullname := runtime.FuncForPC(p[0]).Name()\n\n\t_, name := path.Split(fullname)\n\treturn name\n}", "func (s Stack) FirstFunction() string {\n\treturn s.firstFunction\n}", "func (f *Function) Name() string {\n\treturn f.name\n}", "func getFunctionName(fn interface{}) string {\n\treturn runtime.FuncForPC(reflect.ValueOf((fn)).Pointer()).Name()\n}", "func CallerName() string {\n\tvar callerName string\n\tpc, fileName, line, _ := runtime.Caller(1)\n\n\t// get function name\n\tfuncNameFull := runtime.FuncForPC(pc).Name()\n\tfuncNameEnd := filepath.Ext(funcNameFull)\n\tfuncName := strings.TrimPrefix(funcNameEnd, \".\")\n\n\t// get file name\n\tsuffix := \".go\"\n\t_, nf := filepath.Split(fileName)\n\tif strings.HasSuffix(nf, \".go\") {\n\t\tfileName = strings.TrimSuffix(nf, suffix)\n\t\tcallerName = fileName + suffix + \":\" + strconv.Itoa(line) + \" \" + funcName\n\t}\n\treturn callerName\n}", "func Name(ctx context.Context) string {\n\tf, ok := ctx.Value(stateKey).(*Func)\n\tif !ok {\n\t\treturn \"<Undefined>\"\n\t}\n\tname := runtime.FuncForPC(reflect.ValueOf(*f).Pointer()).Name()\n\treturn strings.TrimRight(nameRe.FindStringSubmatch(name)[1], \")\")\n}", "func SprintFnThis(js string) string {\n\treturn fmt.Sprintf(`function() { return (%s).apply(this, arguments) }`, js)\n}", "func (f *Function) Name() string {\n\treturn \"\"\n}", "func funcName(f interface{}) string {\n\tname := 
gort.FuncForPC(reflect.ValueOf(f).Pointer()).Name()\n\ti := strings.LastIndex(name, \".\")\n\treturn name[i+1:]\n}", "func GetFunctionName(i interface{}) string {\n\treturn runtime.FuncForPC(reflect.ValueOf(i).Pointer()).Name()\n}", "func (oe *OraErr) FunName() string { return oe.funName }", "func (l *littr) GetFuncName(i int) string {\n\treturn l.code[i+5 : i+strings.Index(l.code[i:], \"(\")]\n}", "func functionName(i func(int, int) (int, error)) string {\n\treturn runtime.FuncForPC(reflect.ValueOf(i).Pointer()).Name()\n}", "func Callername() string {\n\tpc := make([]uintptr, 10) // at least 1 entry needed\n\truntime.Callers(3, pc)\n\tf := runtime.FuncForPC(pc[0])\n\treturn shortFuncname(f.Name())\n}", "func (fn *Function) BaseName() string {\n\tinst := fn.instRange()\n\tif i := strings.LastIndex(fn.Name[inst[1]:], \".\"); i != -1 {\n\t\treturn fn.Name[inst[1]+i+1:]\n\t} else if i := strings.LastIndex(fn.Name[:inst[0]], \".\"); i != -1 {\n\t\treturn fn.Name[i+1:]\n\t}\n\treturn fn.Name\n}", "func Funcname() string {\n\tpc := make([]uintptr, 10) // at least 1 entry needed\n\truntime.Callers(2, pc)\n\tf := runtime.FuncForPC(pc[0])\n\treturn shortFuncname(f.Name())\n}", "func GetCallersName(depth int, includeLine bool) string {\n\tpc, file, line, ok := runtime.Caller(depth + 1)\n\tif !ok {\n\t\treturn \"???\"\n\t}\n\n\tfnname := \"\"\n\tif fn := runtime.FuncForPC(pc); fn != nil {\n\t\tfnname = lastComponent(fn.Name())\n\t}\n\n\tif !includeLine {\n\t\treturn fnname\n\t}\n\n\treturn fmt.Sprintf(\"%s() at %s:%d\", fnname, lastComponent(file), line)\n}", "func (f frame) name() string {\n\tfn := runtime.FuncForPC(f.pc())\n\tif fn == nil {\n\t\treturn \"unknown\"\n\t}\n\treturn fn.Name()\n}", "func nameOfFunction(f interface{}) string {\n\tfun := runtime.FuncForPC(reflect.ValueOf(f).Pointer())\n\ttokenized := strings.Split(fun.Name(), \".\")\n\tlast := tokenized[len(tokenized)-1]\n\tlast = strings.TrimSuffix(last, \")·fm\") // < Go 1.5\n\tlast = strings.TrimSuffix(last, 
\")-fm\") // Go 1.5\n\tlast = strings.TrimSuffix(last, \"·fm\") // < Go 1.5\n\tlast = strings.TrimSuffix(last, \"-fm\") // Go 1.5\n\tif last == \"func1\" { // this could mean conflicts in API docs\n\t\tval := atomic.AddInt32(&anonymousFuncCount, 1)\n\t\tlast = \"func\" + fmt.Sprintf(\"%d\", val)\n\t\tatomic.StoreInt32(&anonymousFuncCount, val)\n\t}\n\treturn last\n}", "func MyCallerFileLine() string {\n\n\t// we get the callers as uintptrs - but we just need 1\n\tfpcs := make([]uintptr, 1)\n\n\t// skip 3 levels to get to the caller of whoever called Caller()\n\tn := runtime.Callers(3, fpcs)\n\tif n == 0 {\n\t\treturn \"n/a\" // proper error her would be better\n\t}\n\n\t// get the info of the actual function that's in the pointer\n\tfun := runtime.FuncForPC(fpcs[0] - 1)\n\tif fun == nil {\n\t\treturn \"n/a\"\n\t}\n\n\t// return its name\n\tfilename, line := fun.FileLine(fpcs[0] - 1)\n\tfilename = filepath.Base(filename)\n\treturn fmt.Sprintf(\"%v:%v\", filename, line)\n}", "func (f *Function) Name() string {\n\tcstr := C.EnvGetDeffunctionName(f.env.env, f.fptr)\n\treturn C.GoString(cstr)\n}", "func funcName(skip int) (name string) {\n\tif pc, _, lineNo, ok := runtime.Caller(skip); ok {\n\t\tif v, ok := fm.Load(pc); ok {\n\t\t\tname = v.(string)\n\t\t} else {\n\t\t\tname = runtime.FuncForPC(pc).Name() + \":\" + strconv.FormatInt(int64(lineNo), 10)\n\t\t\tfm.Store(pc, name)\n\t\t}\n\t}\n\treturn\n}", "func fn(labels ...string) string {\n\tfunction, _, _, _ := runtime.Caller(1)\n\n\tlongname := runtime.FuncForPC(function).Name()\n\n\tnameparts := strings.Split(longname, \".\")\n\tshortname := nameparts[len(nameparts)-1]\n\n\tif labels == nil {\n\t\treturn fmt.Sprintf(\"[%s()]\", shortname)\n\t}\n\n\treturn fmt.Sprintf(\"[%s():%s]\", shortname, strings.Join(labels, \":\"))\n}", "func (l *Lifecycle) RunningHookCaller() string {\n\tl.mu.Lock()\n\tdefer l.mu.Unlock()\n\treturn l.runningHook.callerFrame.Function\n}", "func GetCallersName(depth int) string {\n\tpc, file, 
line, ok := runtime.Caller(depth + 1)\n\tif !ok {\n\t\treturn \"???\"\n\t}\n\n\tfnname := \"\"\n\tif fn := runtime.FuncForPC(pc); fn != nil {\n\t\tfnname = fn.Name()\n\t}\n\n\treturn fmt.Sprintf(\"%s() at %s:%d\", lastComponent(fnname), lastComponent(file), line)\n}", "func CallerFuncNameString() string {\n\tfuncName, err := CallerFuncName()\n\tif err != nil {\n\t\treturn \"\"\n\t}\n\treturn funcName\n}", "func (m Method) Name() string {\n\treturn m.function.name\n}", "func getCaller(skip int, shortFileName bool) string {\n\tvar b strings.Builder\n\t_, file, no, ok := runtime.Caller(skip)\n\tif ok {\n\t\tif shortFileName {\n\t\t\tif lastSlashIndex := strings.LastIndex(file, \"/\"); lastSlashIndex != -1 {\n\t\t\t\tfile = file[lastSlashIndex+1:]\n\t\t\t}\n\t\t}\n\t\tfmt.Fprintf(&b, \"%s:%d\", file, no)\n\t}\n\treturn b.String()\n}", "func getCallerSourceLocation() string {\n\t_, file, line, ok := runtime.Caller(2)\n\tresult := \"unknown:unknown\"\n\tif ok {\n\t\tresult = fmt.Sprintf(\"%s:%d\", file, line)\n\t}\n\treturn result\n}", "func CallerFuncName() (string, error) {\n\tfpcs := make([]uintptr, 1)\n\tn := runtime.Callers(3, fpcs)\n\tif n == 0 {\n\t\treturn \"\", errors.New(\"Error after runtime.Callers(), n == 0\")\n\t}\n\tf := runtime.FuncForPC(fpcs[0] - 1)\n\tif f == nil {\n\t\treturn \"\", errors.New(\"Error after runtime.FuncForPC(): fun == nil\")\n\t}\n\treturn f.Name(), nil\n}", "func this() *runtime.Func {\n pc := make([]uintptr, 10) // at least 1 entry needed\n runtime.Callers(2, pc)\n f:= runtime.FuncForPC(pc[1])\n return f\n}", "func (p *FuncInfo) Name() string {\n\treturn p.name\n}", "func (f Frame) name() string {\n\tfn := runtime.FuncForPC(f.pc())\n\tif fn == nil {\n\t\treturn \"unknown\"\n\t}\n\treturn fn.Name()\n}", "func procName(shortName bool, level int) (name string, line int) {\n\tpc, _, line, _ := runtime.Caller(level)\n\tname = runtime.FuncForPC(pc).Name()\n\tif shortName {\n\t\tname = name[strings.Index(name, \".\")+1:]\n\t}\n\treturn name, 
line\n}", "func (fn *Function) Inspect() string {\n\treturn fn.Name\n}", "func (f Function) GetName() string {\n\treturn f.ident.String()\n}", "func LogFunctionName() {\n\tpc, _, _, _ := runtime.Caller(1)\n\tfuncname := path.Base(runtime.FuncForPC(pc).Name())\n\tlogRaw(LevelDebug, 2, \"Function %s.\", funcname)\n}", "func FuncName(frame *govulncheck.StackFrame) string {\n\tswitch {\n\tcase frame.Receiver != \"\":\n\t\treturn fmt.Sprintf(\"%s.%s\", strings.TrimPrefix(frame.Receiver, \"*\"), frame.Function)\n\tcase frame.Package != \"\":\n\t\treturn fmt.Sprintf(\"%s.%s\", frame.Package, frame.Function)\n\tdefault:\n\t\treturn frame.Function\n\t}\n}", "func FuncName(i interface{}) string {\n\treturn runtime.FuncForPC(reflect.ValueOf(i).Pointer()).Name()\n}", "func (o FunctionOutput) Name() pulumi.StringOutput {\n\treturn o.ApplyT(func(v *Function) pulumi.StringOutput { return v.Name }).(pulumi.StringOutput)\n}", "func getFullyQualifiedFunctionName(fn interface{}) string {\n\treturn runtime.FuncForPC(reflect.ValueOf(fn).Pointer()).Name()\n}", "func (f Frame) Func() string {\n\treturn f.tr.getStringDefault(f.fn)\n}", "func fullCaller(skip int) (file string, line int, fnc string, ok bool) {\n\tvar pc uintptr\n\tpc, file, line, ok = runtime.Caller(skip + 1)\n\tif !ok {\n\t\treturn\n\t}\n\n\tf := runtime.FuncForPC(pc)\n\tfnc = f.Name()\n\treturn\n}", "func CallerFuncNameWithOffset(i int) (string, error) {\n\tfpcs := make([]uintptr, 1)\n\tn := runtime.Callers(3+i, fpcs)\n\tif n == 0 {\n\t\treturn \"\", errors.New(\"Error after runtime.Callers(), n == 0\")\n\t}\n\tf := runtime.FuncForPC(fpcs[0] - 1)\n\tif f == nil {\n\t\treturn \"\", errors.New(\"Error after runtime.FuncForPC(): fun == nil\")\n\t}\n\treturn f.Name(), nil\n}", "func me() string {\n\t_, file, line, ok := runtime.Caller(1)\n\tif !ok {\n\t\treturn \"???\"\n\t}\n\n\treturn path.Base(file) + \":\" + strconv.Itoa(line)\n}", "func GetFuncName(depth int, a ...interface{}) string {\n\tpc := make([]uintptr, 10)\n\tn := 
runtime.Callers(2, pc)\n\tframes := runtime.CallersFrames(pc[:n])\n\tframe, more := frames.Next()\n\tfor more && depth > 0 {\n\t\tframe, more = frames.Next()\n\t\tdepth--\n\t}\n\tname := strings.TrimPrefix(frame.Function, \"main.\")\n\t// Using a switch to prevent calling strings.Join for small (common) use cases. Saves a little mem and processing.\n\tswitch len(a) {\n\tcase 0:\n\t\t// do nothing\n\tcase 1:\n\t\tname += fmt.Sprintf(\": %v\", a[0])\n\tcase 2:\n\t\tname += fmt.Sprintf(\": %v, %v\", a[0], a[1])\n\tcase 3:\n\t\tname += fmt.Sprintf(\": %v, %v, %v\", a[0], a[1], a[2])\n\tdefault:\n\t\targs := make([]string, len(a))\n\t\tfor i, arg := range a {\n\t\t\targs[i] = fmt.Sprintf(\"%v\", arg)\n\t\t}\n\t\tname += fmt.Sprintf(\": %s\", strings.Join(args, \", \"))\n\t}\n\treturn name\n}", "func CallerName(skip int) string {\n\tif pc, _, _, ok := runtime.Caller(skip); ok {\n\t\tsplit := strings.Split(runtime.FuncForPC(pc).Name(), \".\")\n\t\treturn split[len(split)-1]\n\t}\n\treturn \"\"\n}", "func caller(depth int) (string, string, int) {\n\tpc, src, line, ok := runtime.Caller(depth + 1)\n\tif !ok {\n\t\treturn \"\", \"\", 0\n\t}\n\treturn runtime.FuncForPC(pc).Name(), src, line\n}", "func (pc *HTTPProxyClient) GetFunctionName(r *http.Request) string {\n\tvars := mux.Vars(r)\n\treturn vars[\"name\"]\n}", "func (o AzureFunctionOutputDataSourceOutput) FunctionName() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v AzureFunctionOutputDataSource) *string { return v.FunctionName }).(pulumi.StringPtrOutput)\n}", "func (f LetFunction) Name() string {\n\treturn f.name\n}", "func (fn *Func) FuncName() string {\n\treturn fn.fnName\n}", "func (s *Instruction) FuncName() string {\n\tif name, ok := protoNameToFuncName[s.Protobuf.TypeName]; ok {\n\t\treturn name\n\t}\n\treturn \"?\"\n}", "func (o FunctionOutput) FunctionId() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v *Function) pulumi.StringPtrOutput { return v.FunctionId }).(pulumi.StringPtrOutput)\n}", "func (t 
*Test) Name() string {\n\treturn t.callable.Name()\n}", "func GetCaller(skipped int, long bool) string {\n\tif _, file, line, ok := runtime.Caller(skipped + KnownCallerDepth); ok {\n\t\tif base := filepath.Base(file); long {\n\t\t\t// Only the parent directory is added.\n\t\t\treturn filepath.Join(filepath.Base(filepath.Dir(file)), base) + \":\" + strconv.Itoa(line)\n\t\t} else {\n\t\t\treturn base + \":\" + strconv.Itoa(line)\n\t\t}\n\t}\n\treturn \"???:0\"\n}", "func callerName(skip int) (pkgPath, funcName string, ok bool) {\n\tvar pc [1]uintptr\n\tn := runtime.Callers(skip+1, pc[:])\n\tif n != 1 {\n\t\treturn \"\", \"\", false\n\t}\n\n\tf := runtime.FuncForPC(pc[0]).Name()\n\ts := pkgPathRe.FindStringSubmatch(f)\n\tif len(s) < 3 {\n\t\tpanic(fmt.Errorf(\"failed to extract package path and function name from %q\", f))\n\t}\n\n\treturn s[1], s[2], true\n}", "func (v *Function) GetName() (o string) {\n\tif v != nil {\n\t\to = v.Name\n\t}\n\treturn\n}", "func (s UserSet) Function() string {\n\tres, _ := s.RecordCollection.Get(models.NewFieldName(\"Function\", \"function\")).(string)\n\treturn res\n}", "func getFuncName(e *ast.CallExpr) (string, error) {\n\tfID, ok := e.Fun.(*ast.Ident)\n\tif !ok {\n\t\treturn \"\", fmt.Errorf(\"Syntax error: unexpected call type: %T\", e.Fun)\n\t}\n\treturn fID.Name, nil\n}", "func GetFuncName(f interface{}) string {\n\treturn runtime.FuncForPC(reflect.ValueOf(f).Pointer()).Name()\n}", "func (o AzureFunctionOutputDataSourceResponseOutput) FunctionName() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v AzureFunctionOutputDataSourceResponse) *string { return v.FunctionName }).(pulumi.StringPtrOutput)\n}", "func (rl *RotateLogs) CurrentFileName() string {\n\trl.mutex.RLock()\n\tdefer rl.mutex.RUnlock()\n\treturn rl.curFn\n}", "func (b *MainRoutineBuilder) Function(functionName string) string {\n\treturn b.functions[functionName]\n}", "func (cx *CurCtx) FuncName() string {\n\tif nm, isMeth := cx.FuncDeclName(); !isMeth {\n\t\treturn 
nm\n\t}\n\treturn \"\"\n}", "func getCallingStack() string {\n\tvar fileAndLine string\n\t_, file, line, ok := runtime.Caller(3)\n\tif ok {\n\t\tfiles := strings.Split(file, \"/\")\n\t\tfile = files[len(files)-1]\n\t\tfileAndLine = fmt.Sprintf(\"%s:%d :\", file, line)\n\t\treturn fileAndLine\n\t}\n\treturn \"\"\n}", "func GetCaller(offset int) (file string, line int) {\n\tfpcs := make([]uintptr, 1)\n\n\tn := runtime.Callers(offset, fpcs)\n\tif n == 0 {\n\t\treturn \"n/a\", -1\n\t}\n\n\tfun := runtime.FuncForPC(fpcs[0] - 1)\n\tif fun == nil {\n\t\treturn \"n/a\", -1\n\t}\n\n\treturn fun.FileLine(fpcs[0] - 1)\n}", "func FuncName(f interface{}) string {\n\tsplitFuncName := strings.Split(FuncPathAndName(f), \".\")\n\treturn splitFuncName[len(splitFuncName)-1]\n}", "func (p *PropertyGenerator) getFnName(i int) string {\n\tif len(p.Kinds) == 1 {\n\t\treturn getMethod\n\t}\n\treturn fmt.Sprintf(\"%s%s\", getMethod, p.kindCamelName(i))\n}", "func (t *LineTable) funcName(off uint32) string {\n\tif s, ok := t.funcNames[off]; ok {\n\t\treturn s\n\t}\n\ti := bytes.IndexByte(t.funcnametab[off:], 0)\n\ts := string(t.funcnametab[off : off+uint32(i)])\n\tt.funcNames[off] = s\n\treturn s\n}", "func getCallerPosition() string {\n\t_, file, line, ok := runtime.Caller(2)\n\tif !ok {\n\t\treturn \"\"\n\t}\n\tfile = path.Base(file)\n\treturn fmt.Sprintf(\"%s%s:%d%s\", colorWhere, file, line, colorClear)\n}", "func FILE() string {\n\t_, file, _, _ := runtime.Caller(1)\n\treturn file\n}", "func (f nullFunc) name() name {\n\treturn null\n}", "func callerInfo(skip int) string {\n\t_, file, line, _ := runtime.Caller(skip)\n\treturn fmt.Sprintf(\"%v:%v\", file, line)\n}", "func GetFunctionName(function interface{}) (string, error) {\n\tvalue := reflect.ValueOf(function)\n\tkind := value.Kind()\n\tif kind != reflect.Func {\n\t\treturn \"\", fmt.Errorf(\"Kind is not a func: %v\", kind)\n\t}\n\n\tf := runtime.FuncForPC(value.Pointer())\n\tif f == nil {\n\t\treturn \"\", fmt.Errorf(\"Pointer to 
func is nil\")\n\t}\n\n\tfName := regexp.MustCompile(`^.*[/\\\\]`).ReplaceAllString(f.Name(), \"\")\n\n\treturn fName, nil\n}", "func Self() string {\n\treturn naiveSelf()\n}", "func getFuncName(p uintptr) string {\n\tfnc := runtime.FuncForPC(p)\n\tif fnc == nil {\n\t\treturn \"<unknown>\"\n\t}\n\tname := fnc.Name() // E.g., \"long/path/name/mypkg.(mytype).(long/path/name/mypkg.myfunc)-fm\"\n\tif strings.HasSuffix(name, \")-fm\") || strings.HasSuffix(name, \")·fm\") {\n\t\t// Strip the package name from method name.\n\t\tname = strings.TrimSuffix(name, \")-fm\")\n\t\tname = strings.TrimSuffix(name, \")·fm\")\n\t\tif i := strings.LastIndexByte(name, '('); i >= 0 {\n\t\t\tmethodName := name[i+1:] // E.g., \"long/path/name/mypkg.myfunc\"\n\t\t\tif j := strings.LastIndexByte(methodName, '.'); j >= 0 {\n\t\t\t\tmethodName = methodName[j+1:] // E.g., \"myfunc\"\n\t\t\t}\n\t\t\tname = name[:i] + methodName // E.g., \"long/path/name/mypkg.(mytype).\" + \"myfunc\"\n\t\t}\n\t}\n\tif i := strings.LastIndexByte(name, '/'); i >= 0 {\n\t\t// Strip the package name.\n\t\tname = name[i+1:] // E.g., \"mypkg.(mytype).myfunc\"\n\t}\n\treturn name\n}" ]
[ "0.7722278", "0.766978", "0.76039034", "0.7582178", "0.7449456", "0.7403062", "0.70837003", "0.7075239", "0.6886995", "0.68866515", "0.68251276", "0.68018013", "0.6781213", "0.6740152", "0.67332137", "0.66841245", "0.66416866", "0.66173244", "0.6587148", "0.65851456", "0.65746367", "0.655157", "0.6547077", "0.654094", "0.6538025", "0.6535191", "0.65030086", "0.650037", "0.64811057", "0.64791167", "0.6466215", "0.64526254", "0.64453894", "0.642857", "0.6424168", "0.64218324", "0.6420794", "0.6403818", "0.64018", "0.63949794", "0.6378275", "0.63375944", "0.6336376", "0.6335996", "0.6320839", "0.6319601", "0.63175285", "0.6315676", "0.63125503", "0.62573636", "0.6246019", "0.6228357", "0.62165916", "0.620909", "0.62025785", "0.61857206", "0.6171201", "0.6166255", "0.6162876", "0.61572444", "0.6144164", "0.6137589", "0.61373246", "0.61144495", "0.6093531", "0.6091013", "0.60780674", "0.60756326", "0.6069065", "0.60644674", "0.60640156", "0.6049926", "0.60498554", "0.60364896", "0.6032044", "0.59832954", "0.59667856", "0.59665155", "0.59608686", "0.5954241", "0.5941301", "0.5936304", "0.5928551", "0.5920028", "0.5910365", "0.5899655", "0.58984053", "0.58970374", "0.5876163", "0.584755", "0.5839996", "0.5833327", "0.58190584", "0.5801952", "0.57942253", "0.5794094", "0.57742673", "0.57723045", "0.57662433", "0.57641876" ]
0.84559995
0
Value returns value of the string
Value возвращает значение строки
func (s *SyncString) Value() string { s.Lock() defer s.Unlock() return s.string }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (s *String) GetValue() string {\n\treturn s.value\n}", "func (sval *ScalarValue) Value() string {\n\tswitch {\n\tcase strings.HasPrefix(sval.Raw, `\"\"\"`):\n\t\treturn parseBlockString(sval.Raw)\n\tcase strings.HasPrefix(sval.Raw, `\"`):\n\t\treturn parseString(sval.Raw)\n\tdefault:\n\t\treturn sval.Raw\n\t}\n}", "func (t Type) Value() string {\n\tstr := string(t)\n\tv, ok := builtin[str]\n\tif !ok {\n\t\treturn gocase.To(strcase.ToCamel(str))\n\t}\n\n\treturn v\n}", "func (s DnaString) GetValue() string {\n\treturn s.Value\n}", "func (s *StringChecksum) Value() string {\n\treturn s.value\n}", "func (p stringProperty) Value() (string, error) {\n\treturn p.value, nil\n}", "func (c *StringChanger) Value() (string, error) {\n\tif c.err != nil {\n\t\treturn \"\", c.err\n\t}\n\tif c.node.content.value() == nil {\n\t\treturn \"\", nil\n\t}\n\treturn c.node.content.value().(string), nil\n}", "func (s String) Value() (driver.Value, error) {\n\tif s == \"\" {\n\t\treturn nil, nil\n\t}\n\treturn string(s), nil\n}", "func (s Stash) Value() string {\n\tvals := utils.MapKeys(s.payload)\n\tif len(vals) < 1 {\n\t\treturn \"\"\n\t}\n\n\treturn expand(fmt.Sprintf(\"%v\", vals[0]))\n}", "func (t *Token) Value() string {\n\treturn t.strBuilder.String()\n}", "func (this *NowStr) Value() value.Value {\n\treturn nil\n}", "func (b *baseSemanticUTF8String) Value() interface{} {\n\treturn b.V\n}", "func (f Formal) Value() string {\n\treturn string(f)\n}", "func (self Param) Value() string { return self.value }", "func (d *Description) Value() string {\n\tif d == nil {\n\t\treturn \"\"\n\t}\n\tif strings.HasPrefix(d.Raw, `\"\"\"`) {\n\t\treturn parseBlockString(d.Raw)\n\t}\n\treturn parseString(d.Raw)\n}", "func StringValue(s string) Value { return Value{Typ: '$', Str: []byte(s)} }", "func (l *LangPackString) GetValue() (value string) {\n\tif l == nil {\n\t\treturn\n\t}\n\treturn l.Value\n}", "func (o unicodeVersion) GetValue() interface{} {\n\treturn string(o)\n}", "func (s 
*StringSymbol) GetValue() string {\n\treturn s.StringData.GetValue()\n}", "func (p *Property) ValueString() string {\n\treturn p.vstr\n}", "func (m Model) Value() string {\n\treturn string(m.value)\n}", "func (s *StringSetting) Value() interface{} {\n\treturn *s.StringValue\n}", "func (f *Title) Value() string {\n\ts := decode.UTF16(f.data)\n\treturn trim.Nil(s)\n}", "func (x StrState) Value() (driver.Value, error) {\n\treturn x.String(), nil\n}", "func (code Code) Value() (value driver.Value, err error) {\n\tif code == \"\" {\n\t\treturn \"\", nil\n\t}\n\n\tif err = code.Validate(); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn code.String(), nil\n}", "func (o GetTxtRecordRecordOutput) Value() pulumi.StringOutput {\n\treturn o.ApplyT(func(v GetTxtRecordRecord) string { return v.Value }).(pulumi.StringOutput)\n}", "func (o GetTxtRecordRecordOutput) Value() pulumi.StringOutput {\n\treturn o.ApplyT(func(v GetTxtRecordRecord) string { return v.Value }).(pulumi.StringOutput)\n}", "func (o HealthCheckTagOutput) Value() pulumi.StringOutput {\n\treturn o.ApplyT(func(v HealthCheckTag) string { return v.Value }).(pulumi.StringOutput)\n}", "func (stringEntry *String) GetValue() interface{} {\n\treturn stringEntry.trueValue\n}", "func (o DomainTagOutput) Value() pulumi.StringOutput {\n\treturn o.ApplyT(func(v DomainTag) string { return v.Value }).(pulumi.StringOutput)\n}", "func (o ThingTypeTagOutput) Value() pulumi.StringOutput {\n\treturn o.ApplyT(func(v ThingTypeTag) string { return v.Value }).(pulumi.StringOutput)\n}", "func (akv StringKeyValue) Value() string {\n\treturn akv.orig.Value\n}", "func (nt NullString) Value() (driver.Value, error) {\n\tif !nt.Valid {\n\t\treturn nil, nil\n\t}\n\treturn nt.String, nil\n}", "func (i *StringIterator) Value() Object {\n\treturn &Char{Value: i.v[i.i-1]}\n}", "func (id PlannerID) Value() string { return id.value }", "func (o MetadataFilterLabelMatchOutput) Value() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v 
MetadataFilterLabelMatch) *string { return v.Value }).(pulumi.StringPtrOutput)\n}", "func (w *Word) Val() string {\n\treturn w.origStr[w.cptr.begin : w.cptr.begin+w.cptr.length]\n}", "func (r *RegexpObject) Value() interface{} {\n\treturn r.regexp.String()\n}", "func (o AppSecretOutput) Value() pulumi.StringOutput {\n\treturn o.ApplyT(func(v AppSecret) string { return v.Value }).(pulumi.StringOutput)\n}", "func (o GetAppSecretOutput) Value() pulumi.StringOutput {\n\treturn o.ApplyT(func(v GetAppSecret) string { return v.Value }).(pulumi.StringOutput)\n}", "func (l *Label) Value() string {\n\treturn l.value\n}", "func (o EnvironmentDaprComponentSecretOutput) Value() pulumi.StringOutput {\n\treturn o.ApplyT(func(v EnvironmentDaprComponentSecret) string { return v.Value }).(pulumi.StringOutput)\n}", "func (n NullString) Value() (driver.Value, error) {\n\tif n == \"\" {\n\t\treturn nil, nil\n\t}\n\treturn string(n), nil\n}", "func (node *SimpleNode) Value() string {\n\tif node == nil {\n\t\treturn \"\"\n\t}\n\n\treturn node.value\n}", "func (u UnsafeString) Value() (driver.Value, error) {\n\tpanic(\"UnsafeStrings and its constants NOW, DEFAULT ... 
are disabled when EnableInterpolation==false\")\n}", "func (o EnvironmentDaprComponentMetadataOutput) Value() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v EnvironmentDaprComponentMetadata) *string { return v.Value }).(pulumi.StringPtrOutput)\n}", "func (o TaintOutput) Value() pulumi.StringOutput {\n\treturn o.ApplyT(func(v Taint) string { return v.Value }).(pulumi.StringOutput)\n}", "func (o SignalingChannelTagOutput) Value() pulumi.StringOutput {\n\treturn o.ApplyT(func(v SignalingChannelTag) string { return v.Value }).(pulumi.StringOutput)\n}", "func (s *Scalar) String() string { return s.Value }", "func (s State) Value() (driver.Value, error) {\n\treturn string(s), nil\n}", "func (o StreamTagOutput) Value() pulumi.StringOutput {\n\treturn o.ApplyT(func(v StreamTag) string { return v.Value }).(pulumi.StringOutput)\n}", "func (this *ClockStr) Value() value.Value {\n\treturn nil\n}", "func Value(value string) *SimpleElement { return newSEString(\"value\", value) }", "func (v *Value) String() string {\n\treturn v.val\n}", "func (o DnsRecordOutput) Value() pulumi.StringOutput {\n\treturn o.ApplyT(func(v *DnsRecord) pulumi.StringOutput { return v.Value }).(pulumi.StringOutput)\n}", "func (b baseValue) String() string {\n\treturn b.value\n}", "func (cfg *Config) Value(name string) string {\n\tv, _ := cfg.findLast(name)\n\treturn string(v)\n}", "func (n *ResourceName) Value() (driver.Value, error) {\n\tif n == nil {\n\t\treturn nil, nil\n\t}\n\n\treturn n.String(), nil\n}", "func (ns NullString) Value() (driver.Value, error) {\n\tif !ns.Valid {\n\t\treturn nil, nil\n\t}\n\treturn ns.String, nil\n}", "func (ns NullString) Value() (driver.Value, error) {\n\tif !ns.Valid {\n\t\treturn nil, nil\n\t}\n\treturn ns.String, nil\n}", "func (ns NullString) Value() (driver.Value, error) {\n\tif !ns.Valid {\n\t\treturn nil, nil\n\t}\n\n\treturn ns.String, nil\n}", "func (s *String) Inspect() string { return s.Value }", "func (o MetadataFilterLabelMatchResponseOutput) Value() 
pulumi.StringOutput {\n\treturn o.ApplyT(func(v MetadataFilterLabelMatchResponse) string { return v.Value }).(pulumi.StringOutput)\n}", "func (m *StringMapStringSetting) Value() interface{} {\n\treturn *m.StringMapStringValue\n}", "func (o CaaRecordRecordOutput) Value() pulumi.StringOutput {\n\treturn o.ApplyT(func(v CaaRecordRecord) string { return v.Value }).(pulumi.StringOutput)\n}", "func (o LookupGroupVariableResultOutput) Value() pulumi.StringOutput {\n\treturn o.ApplyT(func(v LookupGroupVariableResult) string { return v.Value }).(pulumi.StringOutput)\n}", "func (x NullStrState) Value() (driver.Value, error) {\n\tif !x.Valid {\n\t\treturn nil, nil\n\t}\n\treturn x.StrState.String(), nil\n}", "func (c Cycle) Value() (driver.Value, error) {\n\ts := c.String()\n\tif s == \"\" {\n\t\treturn nil, nil\n\t}\n\n\treturn s, nil\n}", "func (c Cryptstring) Value() (driver.Value, error) {\n\tok, err := IsBcrypt(string(c))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif ok {\n\t\treturn string(c), nil\n\t}\n\thash, err := bcrypt.GenerateFromPassword([]byte(c), bcrypt.DefaultCost)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn string(hash), nil\n}", "func (d Driver) ValueString(c int) string {\n\treturn `$` + strconv.Itoa(c)\n}", "func (s Version) Value() (driver.Value, error) {\n\treturn s.String(), nil\n}", "func (fa formulaArg) Value() (value string) {\n\tswitch fa.Type {\n\tcase ArgNumber:\n\t\tif fa.Boolean {\n\t\t\tif fa.Number == 0 {\n\t\t\t\treturn \"FALSE\"\n\t\t\t}\n\t\t\treturn \"TRUE\"\n\t\t}\n\t\treturn fmt.Sprintf(\"%g\", fa.Number)\n\tcase ArgString:\n\t\treturn fa.String\n\tcase ArgError:\n\t\treturn fa.Error\n\t}\n\treturn\n}", "func (t Token) Value() (driver.Value, error) {\n\treturn string(t.Bytes()), nil\n}", "func (d *Downloader) getValue(line string) string {\n\tsplitLine := strings.Split(line, \" = \")\n\treturn (splitLine[len(splitLine)-1])\n}", "func (ip IPv4) Value() string {\n\treturn ip.value\n}", "func (o PatientIdResponseOutput) 
Value() pulumi.StringOutput {\n\treturn o.ApplyT(func(v PatientIdResponse) string { return v.Value }).(pulumi.StringOutput)\n}", "func (c Casing) Value() string {\n\treturn c.getCasingValue()\n}", "func (o GetCAARecordRecordOutput) Value() pulumi.StringOutput {\n\treturn o.ApplyT(func(v GetCAARecordRecord) string { return v.Value }).(pulumi.StringOutput)\n}", "func (o DiagnosticBackendRequestDataMaskingHeaderOutput) Value() pulumi.StringOutput {\n\treturn o.ApplyT(func(v DiagnosticBackendRequestDataMaskingHeader) string { return v.Value }).(pulumi.StringOutput)\n}", "func (o PatientIdOutput) Value() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v PatientId) *string { return v.Value }).(pulumi.StringPtrOutput)\n}", "func (l *Label) Value(g *Graph) string {\n Assert(nilLabel, l != nil)\n Assert(nilGraph, g != nil)\n Assert(nilTextStore, g.textStore != nil)\n \n t, _ := g.textStore.find(l.value) // TODO don't ignore error\n return t.Value()\n}", "func (n Number) Value() (driver.Value, error) {\n\treturn string(n), nil\n}", "func (o TokenPasswordPassword1Output) Value() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v TokenPasswordPassword1) *string { return v.Value }).(pulumi.StringPtrOutput)\n}", "func (s *String) Get() string {\n\treturn string(s.Value)\n}", "func (number Number) Value() (value driver.Value, err error) {\n\tif number == \"\" {\n\t\treturn \"\", nil\n\t}\n\n\tif err = number.Validate(); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn number.String(), nil\n}", "func (o *Output) GetValue() string {\n\tif !o.ShowValue || o.Value == nil {\n\t\treturn \"\"\n\t}\n\tmarshaled, err := json.MarshalIndent(o.Value, \"\", \" \")\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tvalue := string(marshaled)\n\tif value == `null` {\n\t\treturn \"\" // types.Nil\n\t}\n\treturn value // everything else\n}", "func (o ApplicationPasswordOutput) Value() pulumi.StringOutput {\n\treturn o.ApplyT(func(v *ApplicationPassword) pulumi.StringOutput { return v.Value 
}).(pulumi.StringOutput)\n}", "func (cv PipVersion) Value() string {\n\treturn cv.value\n}", "func (c *KeyStringValueChanger) Value() (string, error) {\n\tif c.err != nil {\n\t\treturn \"\", c.err\n\t}\n\tif c.node.content.value() == nil {\n\t\treturn \"\", nil\n\t}\n\treturn c.node.content.value().(string), nil\n}", "func getValue(valueField string, as *args.Store) (result *string) {\n\t// No input? No result!\n\tif !utils.IsSet(valueField) {\n\t\treturn nil\n\t}\n\n\t// check whether a parameter reference was provided, i.e. something like \"param:<name>\"\n\tparamName := regexParamValue.FindStringSubmatch(valueField)\n\tif len(paramName) > 0 {\n\t\tutils.Assert(len(paramName) == 2, \"Should contain the matching text plus a single capturing group\")\n\n\t\targValue, exists := as.Get(paramName[1])\n\t\tif exists {\n\t\t\treturn &argValue\n\t\t}\n\t\treturn nil\n\t}\n\n\t// else assume that provided value was a static text\n\treturn &valueField\n}", "func (options *Options) ValueStr(name string) string {\n\treturn Str(options.Value(name))\n}", "func (p Policy) Value() (string, error) {\n\tif data, err := json.Marshal(p); err != nil {\n\t\treturn \"\", err\n\t} else {\n\t\treturn string(data), nil\n\t}\n}", "func (o DiagnosticBackendResponseDataMaskingHeaderOutput) Value() pulumi.StringOutput {\n\treturn o.ApplyT(func(v DiagnosticBackendResponseDataMaskingHeader) string { return v.Value }).(pulumi.StringOutput)\n}", "func (args *Args) Value(label string) string {\n if val, ok := (*args)[label]; ok {\n return val\n }\n return \"\"\n}", "func (obj *Value) GetString() string {\n\treturn obj.Candy().Guify(\"g_value_get_string\", obj).String()\n}", "func (d Driver) ValueString(i int) string {\n\treturn `@p` + strconv.Itoa(i)\n}", "func (o GoogleCloudRetailV2alphaConditionQueryTermOutput) Value() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v GoogleCloudRetailV2alphaConditionQueryTerm) *string { return v.Value }).(pulumi.StringPtrOutput)\n}", "func (f *TagField) 
Value() string {\n\treturn f.value\n}", "func (me *WsPacket) StrValue() string {\n\n\tswitch me.Type {\n\tcase \"string\":\n\t\treturn me.RawValue.(string)\n\n\tcase \"double\":\n\t\treturn fmt.Sprintf(\"%.f\", me.RawValue)\n\n\tcase \"float\":\n\t\treturn strconv.FormatFloat(me.RawValue.(float64), 'f', 20, 64)\n\t}\n\n\treturn \"#### OOOPS ##########\"\n}", "func (ns NullString) Value() (driver.Value, error) {\n\tif ns.IsNull() {\n\t\treturn nil, nil\n\t}\n\treturn ns.Text, nil\n}" ]
[ "0.75110775", "0.74706155", "0.74118876", "0.73986423", "0.73799694", "0.7359382", "0.72732645", "0.72452945", "0.7236553", "0.7220005", "0.72173685", "0.71555114", "0.71239245", "0.71044296", "0.7084027", "0.70409364", "0.70191246", "0.70084894", "0.6993719", "0.6992071", "0.69840467", "0.6920377", "0.69069105", "0.69067216", "0.69066304", "0.68772084", "0.68772084", "0.68441606", "0.68291074", "0.682101", "0.68174046", "0.6817233", "0.68052727", "0.67977595", "0.67834234", "0.6782086", "0.6777485", "0.67674136", "0.67642134", "0.6761256", "0.6758575", "0.67584395", "0.6730781", "0.67291766", "0.6726927", "0.67194694", "0.6716395", "0.67154795", "0.67026234", "0.66989714", "0.6687285", "0.66705215", "0.66695905", "0.66684335", "0.66615105", "0.6660145", "0.6649287", "0.6648158", "0.6646867", "0.6646867", "0.66453725", "0.664089", "0.66389096", "0.66181207", "0.6608312", "0.6604247", "0.6603157", "0.659913", "0.6588552", "0.6585355", "0.65736717", "0.65716964", "0.65711004", "0.6567717", "0.6559617", "0.65582526", "0.6551005", "0.65482885", "0.6544317", "0.6538258", "0.653616", "0.6534154", "0.65325195", "0.65318674", "0.6531563", "0.6525541", "0.65237063", "0.65223706", "0.6521619", "0.65211076", "0.6521076", "0.65146524", "0.6512948", "0.6507393", "0.65003383", "0.6499528", "0.6487254", "0.64858425", "0.64815754", "0.6480747" ]
0.75706613
0
ClickableURL fixes address in url to make sure it's clickable, e.g. it replaces "undefined" address like 0.0.0.0 used in network listeners format with loopback 127.0.0.1
ClickableURL исправляет адрес в URL, чтобы убедиться, что он кликабельный, например, заменяет адрес "undefined" вроде 0.0.0.0, используемый в формате сетевых слушателей, на лупбэк 127.0.0.1
func ClickableURL(in string) string { out, err := url.Parse(in) if err != nil { return in } host, port, err := net.SplitHostPort(out.Host) if err != nil { return in } ip := net.ParseIP(host) // if address is not an IP, unspecified, e.g. all interfaces 0.0.0.0 or multicast, // replace with localhost that is clickable if len(ip) == 0 || ip.IsUnspecified() || ip.IsMulticast() { out.Host = fmt.Sprintf("127.0.0.1:%v", port) return out.String() } return out.String() }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func SanitizeURL(in string) string {\n\treturn sanitizeURLWithFlags(in, purell.FlagsSafe|purell.FlagRemoveTrailingSlash|purell.FlagRemoveDotSegments|purell.FlagRemoveDuplicateSlashes|purell.FlagRemoveUnnecessaryHostDots|purell.FlagRemoveEmptyPortSeparator)\n}", "func fixImgurLink(link string) string {\n\toriginalurl, err := url.Parse(link)\n\n\tif err != nil || originalurl.Host != \"imgur.com\" {\n\t\treturn link\n\t}\n\n\treturn fmt.Sprintf(\"http://i.imgur.com%s.gif\", originalurl.Path)\n}", "func sanitizeUrl(href string, domain string) (url.URL, bool){\n\tif strings.Trim(href, \" \") == \"\"{\n\t\treturn url.URL{}, false\n\t}\n\n\tu, err := url.Parse(href)\n\tif err != nil {\n\t\tlog.Println(err)\n\t\treturn url.URL{}, false\n\t}\n\n\tif u.Host == \"\"{\n\t\tu.Host = domain\n\t} else if u.Host != domain || u.Path == \"/\" || u.Path == \"\"{\n\t\treturn url.URL{}, false\n\t}\n\n\tif u.Scheme == \"\"{\n\t\tu.Scheme = \"https\"\n\t}\n\n\t// Ignore alien schemas [ mailto, ftp, etc ]\n\tif !strings.Contains(u.Scheme, \"http\") {\n\t\treturn url.URL{}, false\n\t}\n\n\t// TODO: Check URL is accessible\n\n\treturn *u, true\n}", "func (bot *Bot) handleURLsListener(message events.EventMessage) {\n\n\t// Find all URLs in the message.\n\tlinks := xurls.Strict().FindAllString(message.Message, -1)\n\t// Remove multiple same links from one message.\n\tlinks = utils.RemoveDuplicates(links)\n\tfor i := range links {\n\t\t// Validate the url.\n\t\tbot.Log.Infof(\"Got link %s\", links[i])\n\t\tlink := utils.StandardizeURL(links[i])\n\t\tbot.Log.Debugf(\"Standardized to: %s\", link)\n\n\t\t// Try to get the body of the page.\n\t\terr, finalLink, body := bot.GetPageBody(link, map[string]string{})\n\t\tif err != nil {\n\t\t\tbot.Log.Warningf(\"Could't fetch the body: %s\", err)\n\t\t}\n\n\t\t// Update link if needed.\n\t\tif finalLink != \"\" {\n\t\t\tlink = finalLink\n\t\t}\n\n\t\t// Iterate over meta tags to get the description\n\t\tdescription := \"\"\n\t\tmetas := 
metaRe.FindAllStringSubmatch(string(body), -1)\n\t\tfor i := range metas {\n\t\t\tif len(metas[i]) > 1 {\n\t\t\t\tisDesc := descRe.FindString(metas[i][0])\n\t\t\t\tif isDesc != \"\" && (len(metas[i][1]) > len(description)) {\n\t\t\t\t\tdescription = utils.CleanString(metas[i][1], true)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\t// Get the title\n\t\ttitle := \"\"\n\t\tmatch := titleRe.FindStringSubmatch(string(body))\n\t\tif len(match) > 1 {\n\t\t\ttitle = utils.CleanString(match[1], true)\n\t\t}\n\n\t\t// Insert URL into the db.\n\t\tbot.Log.Debugf(\"Storing URL info for: %s\", link)\n\t\tif _, err := bot.Db.Exec(`INSERT INTO urls(transport, channel, nick, link, quote, title) VALUES(?, ?, ?, ?, ?, ?)`,\n\t\t\tmessage.TransportName, message.Channel, message.Nick, link, message.Message, title); err != nil {\n\t\t\tbot.Log.Warningf(\"Can't add url to database: %s\", err)\n\t\t}\n\n\t\t// Trigger url found message.\n\t\tbot.EventDispatcher.Trigger(events.EventMessage{\n\t\t\tmessage.TransportName,\n\t\t\tmessage.TransportFormatting,\n\t\t\tevents.EventURLFound,\n\t\t\tmessage.Nick,\n\t\t\tmessage.UserId,\n\t\t\tmessage.Channel,\n\t\t\tlink,\n\t\t\tmessage.Context,\n\t\t\tmessage.AtBot,\n\t\t})\n\n\t\tlinkKey := link + message.Channel\n\t\t// If we can't announce yet, skip this link.\n\t\tif time.Since(bot.lastURLAnnouncedTime[linkKey]) < bot.Config.UrlAnnounceIntervalMinutes*time.Minute {\n\t\t\tcontinue\n\t\t}\n\t\tif lines, exists := bot.lastURLAnnouncedLinesPassed[linkKey]; exists && lines < bot.Config.UrlAnnounceIntervalLines {\n\t\t\tcontinue\n\t\t}\n\n\t\t// On mattermost we can skip all link info display.\n\t\tif message.TransportName == \"mattermost\" {\n\t\t\treturn\n\t\t}\n\n\t\t// Announce the title, save the description.\n\t\tif title != \"\" {\n\t\t\tif description != \"\" {\n\t\t\t\tbot.SendNotice(&message, title+\" …\")\n\t\t\t} else {\n\t\t\t\tbot.SendNotice(&message, title)\n\t\t\t}\n\t\t\tbot.lastURLAnnouncedTime[linkKey] = 
time.Now()\n\t\t\tbot.lastURLAnnouncedLinesPassed[linkKey] = 0\n\t\t\t// Keep the long info for later.\n\t\t\tbot.AddMoreInfo(message.TransportName, message.Channel, description)\n\t\t}\n\t}\n}", "func ToURL(s string) string {\n\ts = strings.Trim(s, \" \")\n\ts = strings.ReplaceAll(s, \" \", \"%20\")\n\treturn s\n}", "func (s *sanitizer) sanitizeLink(l string) string {\n\tvar p *url.URL\n\tvar err error\n\tif strings.TrimSpace(l) == \"\" {\n\t\treturn \"\"\n\t}\n\tif isInternalLink(l) {\n\t\treturn l\n\t}\n\tif s.ForceHrefLink {\n\t\treturn s.forceHttpScheme(l)\n\t}\n\tp, err = url.Parse(l)\n\tif err != nil {\n\t\treturn \"\"\n\t}\n\tif s.uriSchemesMap[p.Scheme] {\n\t\treturn \"\"\n\t}\n\treturn p.String()\n}", "func fixURL(href, base string) string {\n\turi, err := url.Parse(href)\n\tif err != nil {\n\t\treturn \"\"\n\t}\n\tbaseURL, err := url.Parse(base)\n\tif err != nil {\n\t\treturn \"\"\n\t}\n\turi = baseURL.ResolveReference(uri)\n\treturn uri.String()\n}", "func Link(url, text string) string {\n\treturn Osc + \"8;;\" + url + Bel + text + Osc + \"8;;\" + Bel\n}", "func canonicalAddress(url *url.URL) string {\n\thost := url.Hostname()\n\tport := url.Port()\n\tif port == \"\" {\n\t\tport = defaultPorts[url.Scheme]\n\t}\n\treturn fmt.Sprintf(\"%s:%s\", host, port)\n}", "func makeAbsoluteHref(baseURL string, href string) string {\n\tif strings.HasPrefix(href, \"http\") {\n\t\treturn href\n\t} else {\n\t\treturn baseURL + href\n\t}\n}", "func URL(e *Context) error {\n\ttarget := extractBaseTarget(e.DOM.HeadNode)\n\n\tfor n := e.DOM.RootNode; n != nil; n = htmlnode.Next(n) {\n\t\t// Skip text nodes and anything inside mustache templates\n\t\tif n.Type == html.TextNode || htmlnode.IsDescendantOf(n, atom.Template) {\n\t\t\tcontinue\n\t\t}\n\n\t\t// TODO(b/112417267): Handle amp-img rewriting.\n\t\tif strings.EqualFold(n.Data, \"amp-img\") {\n\t\t\tcontinue\n\t\t}\n\n\t\t// Make attributes with URLs portable on any tag\n\t\trewritePortableURLs(n, e.BaseURL, 
anyTagAttrs)\n\n\t\tswitch n.DataAtom {\n\t\tcase atom.Form:\n\t\t\t// Make attributes with URLs absolute on <form> tag.\n\t\t\trewriteAbsoluteURLs(n, e.BaseURL, formTagAttrs)\n\t\tcase atom.Img:\n\t\t\t// Make attributes with URLs portable on <img> tag.\n\t\t\trewritePortableURLs(n, e.BaseURL, imgTagAttrs)\n\t\tdefault:\n\t\t\tswitch n.Data {\n\t\t\tcase \"amp-install-serviceworker\":\n\t\t\t\t// Make attributes with URLs portable on <amp-install-serviceworker> tag.\n\t\t\t\trewritePortableURLs(n, e.BaseURL, ampInstallServiceWorkerTagAttrs)\n\t\t\tcase amphtml.AMPStory:\n\t\t\t\t// Make attributes with URLs portable on <amp-story> tag.\n\t\t\t\trewritePortableURLs(n, e.BaseURL, ampStoryTagAttrs)\n\t\t\tcase \"amp-story-page\":\n\t\t\t\t// Make attributes with URLs portable on <amp-story-page> tag.\n\t\t\t\trewritePortableURLs(n, e.BaseURL, ampStoryPageTagAttrs)\n\t\t\t}\n\t\t}\n\n\t\t// Tags with href attribute.\n\t\tif href, ok := htmlnode.FindAttribute(n, \"\", \"href\"); ok {\n\t\t\t// Remove the base tag href with the following rationale:\n\t\t\t//\n\t\t\t// 1) The <base href> can be harmful. When handling things like image\n\t\t\t// source sets which are re-hosted and served from\n\t\t\t// https://cdn.ampproject.org, paths starting with \"/\" are rewritten\n\t\t\t// into the stored html document with the intent that \"/\" is relative\n\t\t\t// to the root of cdn.ampproject.org. If a base href were present, it\n\t\t\t// would change the meaning of the relative links.\n\t\t\t//\n\t\t\t// 2) Other hrefs are absolutified in the document relative to the base\n\t\t\t// href. 
Thus, it is not necessary to maintain the base href for\n\t\t\t// browser URL resolution.\n\t\t\tswitch n.DataAtom {\n\t\t\tcase atom.Base:\n\t\t\t\thtmlnode.RemoveAttribute(n, href)\n\t\t\t\tif len(n.Attr) == 0 {\n\t\t\t\t\thtmlnode.RemoveNode(&n)\n\t\t\t\t}\n\t\t\tcase atom.Link:\n\t\t\t\tif v, ok := htmlnode.GetAttributeVal(n, \"rel\"); ok && v == \"canonical\" {\n\t\t\t\t\t// If the origin doc is self-canonical, it should be an absolute URL\n\t\t\t\t\t// and not portable (which would result in canonical = \"#\").\n\t\t\t\t\t// Maintain the original canonical, and absolutify it. See b/36102624\n\t\t\t\t\thtmlnode.SetAttribute(n, \"\", \"href\", amphtml.RewriteAbsoluteURL(e.BaseURL, href.Val))\n\t\t\t\t} else {\n\t\t\t\t\thtmlnode.SetAttribute(n, \"\", \"href\", amphtml.RewritePortableURL(e.BaseURL, href.Val))\n\t\t\t\t}\n\t\t\tcase atom.A:\n\t\t\t\tportableHref := amphtml.RewritePortableURL(e.BaseURL, href.Val)\n\t\t\t\t// Set a default target\n\t\t\t\t// 1. If the href is not a fragment AND\n\t\t\t\t// 2. 
If there is no target OR If there is a target and it is not an allowed target\n\t\t\t\tif !strings.HasPrefix(portableHref, \"#\") {\n\t\t\t\t\tif v, ok := htmlnode.GetAttributeVal(n, \"target\"); !ok || (ok && !isAllowedTarget(v)) {\n\t\t\t\t\t\thtmlnode.SetAttribute(n, \"\", \"target\", target)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\thtmlnode.SetAttribute(n, \"\", \"href\", portableHref)\n\t\t\tdefault:\n\t\t\t\t// Make a PortableUrl for any remaining tags with href.\n\t\t\t\thtmlnode.SetAttribute(n, \"\", \"href\", amphtml.RewritePortableURL(e.BaseURL, href.Val))\n\t\t\t}\n\t\t}\n\t}\n\treturn nil\n}", "func (s *htmlState) checkURL(raw string) {\n\tif s.ignore&issueURL != 0 {\n\t\treturn\n\t}\n\n\tif strings.HasPrefix(raw, \"mailto:\") {\n\t\tif strings.Index(raw, \"@\") == -1 {\n\t\t\ts.err(fmt.Errorf(\"not an email address\"))\n\t\t}\n\t\treturn\n\t}\n\n\tu, err := url.Parse(raw)\n\tif err != nil {\n\t\ts.err(fmt.Errorf(\"bad URL '%s': %s\", raw, err.Error()))\n\t\treturn\n\t}\n\tif u.Opaque != \"\" {\n\t\ts.err(fmt.Errorf(\"bad URL part '%s'\", u.Opaque))\n\t\treturn\n\t}\n\n\tif strings.Index(raw, \" \") != -1 {\n\t\ts.err(fmt.Errorf(\"unencoded space in URL\"))\n\t}\n}", "func fixUrl(url string) string {\n\turlParts := strings.SplitN(url, \"/\", 2)\n\tif len(urlParts) < 2 {\n\t\treturn \"\"\n\t}\n\n\treturn urlParts[0] + \":\" + urlParts[1]\n}", "func (t *TestRuntime) AddrToURL(addr string) (string, error) {\n\tif strings.HasPrefix(addr, \":\") {\n\t\taddr = \"localhost\" + addr\n\t}\n\n\tif !strings.Contains(addr, \"://\") {\n\t\tscheme := \"http://\"\n\t\tif t.Params.Certificate != nil {\n\t\t\tscheme = \"https://\"\n\t\t}\n\t\taddr = scheme + addr\n\t}\n\n\tparsed, err := url.Parse(addr)\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"failed to parse listening address of server: %s\", err)\n\t}\n\n\treturn parsed.String(), nil\n}", "func MakeURL(addr string) string {\n\tprotocol := \"http\"\n\tif config.Config.Env.IsProduction() {\n\t\tprotocol = 
\"https\"\n\t}\n\n\tif strings.HasPrefix(addr, \":\") {\n\t\treturn fmt.Sprintf(\"%s://127.0.0.1%s\", protocol, addr)\n\t}\n\n\tif !strings.HasPrefix(addr, \"http://\") && !strings.HasPrefix(addr, \"https://\") {\n\t\treturn fmt.Sprintf(\"%s://%s\", protocol, addr)\n\t}\n\n\treturn addr\n}", "func formatUrl(source string) string {\n\tif match, _ := regexp.MatchString(\"https?:\\\\/\\\\/\", source); match {\n\t\treturn source\n\t}\n\n\treturn \"https://\" + source\n}", "func HTMLURL(v string) predicate.User {\n\treturn predicate.User(sql.FieldEQ(FieldHTMLURL, v))\n}", "func URL(route string, opts ...string) (s string) {\n\tsize := len(opts)\n\tif size >= 1 {\n\t\tif strings.Contains(route, \"$1\") {\n\t\t\troute = strings.Replace(route, \"$1\", opts[0], 1)\n\t\t}\n\t\tif size >= 2 && strings.Contains(route, \"$2\") {\n\t\t\troute = strings.Replace(route, \"$2\", opts[1], 1)\n\t\t}\n\t}\n\ts = fmt.Sprintf(\"%s%s\", Host, route)\n\treturn\n}", "func rawUrl(htmlUrl string) string {\n\tdomain := strings.Replace(htmlUrl, \"https://github.com/\", \"https://raw.githubusercontent.com/\", -1)\n\treturn strings.Replace(domain, \"/blob/\", \"/\", -1)\n}", "func SimpleURLChecks(t *testing.T, scheme string, host string, port uint16) mapval.Validator {\n\n\thostPort := host\n\tif port != 0 {\n\t\thostPort = fmt.Sprintf(\"%s:%d\", host, port)\n\t}\n\n\tu, err := url.Parse(fmt.Sprintf(\"%s://%s\", scheme, hostPort))\n\trequire.NoError(t, err)\n\n\treturn mapval.MustCompile(mapval.Map{\n\t\t\"url\": wrappers.URLFields(u),\n\t})\n}", "func Link(t string, u string, a ...string) got.HTML {\n\tattributes := \"\"\n\tif len(a) > 0 {\n\t\tattributes = strings.Join(a, \" \")\n\t}\n\treturn got.HTML(fmt.Sprintf(\"<a href=\\\"%s\\\" %s>%s</a>\", Escape(u), Escape(attributes), Escape(t)))\n}", "func fixURL(href, base string) (string, error) {\n\turi, err := url.Parse(href)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tbaseURL, err := url.Parse(base)\n\tif err != nil {\n\t\treturn \"\", 
err\n\t}\n\n\turi = baseURL.ResolveReference(uri)\n\n\treturn uri.String(), err\n}", "func filterAddress(link, domain string) string {\n\tresolved := resolveReference(link)\n\tif strings.HasPrefix(link, \"/\") {\n\t\tresolved = \"http://\" + domain + resolved\n\t\treturn resolved\n\t}\n\treturn resolved\n}", "func linkify(s string) template.HTML {\n\toutput := \"\"\n\ti := 0\n\tmatches := urlregexp.FindAllStringIndex(s, -1)\n\tfor _, idxs := range matches {\n\t\tstart, end := idxs[0], idxs[1]\n\t\toutput += html.EscapeString(s[i:start])\n\t\toutput += linkreplace(s[start:end])\n\t\ti = end\n\t}\n\toutput += html.EscapeString(s[i:])\n\treturn template.HTML(output)\n}", "func HTMLURL(v string) predicate.GithubRelease {\n\treturn predicate.GithubRelease(sql.FieldEQ(FieldHTMLURL, v))\n}", "func MakeAnchorRequestUrl(api_parts ...string) (string, error) {\n\tvar full_url bytes.Buffer\n\n\tanchorIP := os.Getenv(\"ANCHOR_ADDRESS\")\n\tif len(anchorIP) == 0 {\n\t\tlogger.Logging(logger.ERROR, \"No anchor address environment\")\n\t\treturn \"\", errors.NotFound{\"No anchor address environment\"}\n\t}\n\n\tipTest := net.ParseIP(anchorIP)\n\tif ipTest == nil {\n\t\tlogger.Logging(logger.ERROR, \"Anchor address's validation check failed\")\n\t\treturn \"\", errors.InvalidParam{\"Anchor address's validation check failed\"}\n\t}\n\n\tanchorProxy := os.Getenv(\"ANCHOR_REVERSE_PROXY\")\n\tif len(anchorProxy) == 0 || anchorProxy == \"false\" {\n\t\tfull_url.WriteString(\"http://\" + anchorIP + \":\" + DEFAULT_ANCHOR_PORT + url.Base())\n\t} else if anchorProxy == \"true\" {\n\t\tfull_url.WriteString(\"http://\" + anchorIP + \":\" + UNSECURED_ANCHOR_PORT_WITH_REVERSE_PROXY + url.PharosAnchor() + url.Base())\n\t} else {\n\t\tlogger.Logging(logger.ERROR, \"Invalid value for ANCHOR_REVERSE_PROXY\")\n\t\treturn \"\", errors.InvalidParam{\"Invalid value for ANCHOR_REVERSE_PROXY\"}\n\t}\n\n\tfor _, api_part := range api_parts 
{\n\t\tfull_url.WriteString(api_part)\n\t}\n\n\tlogger.Logging(logger.DEBUG, full_url.String())\n\treturn full_url.String(), nil\n}", "func (c *Client) URL(path string, a ...interface{}) string {\n\tu, _ := urlx.Parse(c.addr)\n\n\tu.Path = fmt.Sprintf(path, a...)\n\n\treturn u.String()\n}", "func (o GroupBadgeOutput) RenderedLinkUrl() pulumi.StringOutput {\n\treturn o.ApplyT(func(v *GroupBadge) pulumi.StringOutput { return v.RenderedLinkUrl }).(pulumi.StringOutput)\n}", "func TargetHref(value string) *SimpleElement { return newSEString(\"targetHref\", value) }", "func URL(opts ...options.OptionFunc) string {\n\treturn singleFakeData(URLTag, func() interface{} {\n\t\topt := options.BuildOptions(opts)\n\t\ti := Internet{fakerOption: *opt}\n\t\tu, err := i.url()\n\t\tif err != nil {\n\t\t\tpanic(err.Error())\n\t\t}\n\t\treturn u\n\t}, opts...).(string)\n}", "func clean_url(cand string) string {\n // TODO: url pattern should be refined\n r, _ := regexp.Compile(\"^((http[s]?|ftp)://)?(www\\\\.)?(?P<body>[a-z]+\\\\.[a-z]+)$\")\n if r.MatchString(cand) {\n r2 := r.FindAllStringSubmatch(cand, -1)\n return r2[0][len(r2[0]) - 1]\n }\n return \"\"\n}", "func Href(value string) *SimpleElement { return newSEString(\"href\", value) }", "func (c *Client) BanURL(channels []string, host string, value []string) error {\n\treturn c.Do(channels, Request{Command: \"ban.url\", Host: host, Value: value})\n}", "func replaceAHrefs(orig_url url.URL, n *html.Node) {\n\tif n.Type == html.ElementNode && n.Data == \"a\" {\n\t\tfor i, a := range n.Attr {\n\t\t\tif a.Key == \"href\" {\n\t\t\t\ta.Val = createProxyableUrl(orig_url, a.Val)\n\t\t\t}\n\t\t\tn.Attr[i] = a\n\t\t}\n\t}\n\tfor c := n.FirstChild; c != nil; c = c.NextSibling {\n\t\treplaceAHrefs(orig_url, c)\n\t}\n}", "func (tu *TwitterURL) IsLinkable() {}", "func sanitizeSkylinks(links []string) []string {\n\tvar result []string\n\n\tfor _, link := range links {\n\t\ttrimmed := strings.TrimPrefix(link, \"sia://\")\n\t\tresult = 
append(result, trimmed)\n\t}\n\n\treturn result\n}", "func href2url(docUrl string, href string) string {\n\tif strings.HasPrefix(href, \"http://\") || strings.HasPrefix(href, \"https://\") {\n\t\treturn href\n\t}\n\tif strings.HasPrefix(href, \"/\") {\n\t\tr := regexp.MustCompile(`https?://[^/]+`)\n\t\tbaseUrl := r.FindAllString(docUrl, -1)[0]\n\t\treturn baseUrl + href\n\t}\n\treturn docUrl + \"/\" + href\n}", "func href(vuln *osv.Entry) string {\n\tfor _, affected := range vuln.Affected {\n\t\tif url := affected.DatabaseSpecific.URL; url != \"\" {\n\t\t\treturn url\n\t\t}\n\t}\n\tfor _, r := range vuln.References {\n\t\tif r.Type == \"WEB\" {\n\t\t\treturn r.URL\n\t\t}\n\t}\n\treturn fmt.Sprintf(\"https://pkg.go.dev/vuln/%s\", vuln.ID)\n}", "func (u *GithubGistUpsert) UpdateHTMLURL() *GithubGistUpsert {\n\tu.SetExcluded(githubgist.FieldHTMLURL)\n\treturn u\n}", "func (b *Builder) TextURL(s, url string) *Builder {\n\treturn b.appendMessage(s, func(offset, limit int) tg.MessageEntityClass {\n\t\treturn &tg.MessageEntityTextUrl{Offset: offset, Length: limit, URL: url}\n\t})\n}", "func (g GetenvValue) SafeURL() string {\n\tif g.value[len(g.value)-1] == '/' {\n\t\treturn g.value[:len(g.value)-1]\n\t}\n\n\treturn g.value\n}", "func (k *Keyboard) AddURLButton(text, uri string) *Keyboard {\n\treturn k.addInlineButton(text, uri, \"url\")\n}", "func (t *Team) AbsoluteURL(path string) string {\n\treturn fmt.Sprintf(\"%s%s\", t.teamConfig.HTTPURL, path)\n}", "func urlify(rawInput string) string {\n\tencoded := strings.TrimSpace(rawInput)\n\treturn strings.ReplaceAll(encoded, \" \", \"%20\")\n}", "func redactURLString(raw string) string {\n\tif !strings.ContainsRune(raw, '@') {\n\t\treturn raw\n\t}\n\tu, err := url.Parse(raw)\n\tif err != nil {\n\t\treturn raw\n\t}\n\treturn u.Redacted()\n}", "func URLize(path string) string {\n\n\tpath = strings.Replace(strings.TrimSpace(path), \" \", \"-\", -1)\n\tpath = strings.ToLower(path)\n\tpath = UnicodeSanitize(path)\n\treturn 
path\n}", "func (o GroupBadgeOutput) LinkUrl() pulumi.StringOutput {\n\treturn o.ApplyT(func(v *GroupBadge) pulumi.StringOutput { return v.LinkUrl }).(pulumi.StringOutput)\n}", "func ToURL(path string) string {\n\treturn filepath.Clean(path)\n}", "func (seg *Segmenter) CutUrl(str string, num ...bool) []string {\n\tif len(num) <= 0 {\n\t\t// seg.Num = true\n\t\tstr = SplitNums(str)\n\t}\n\ts := seg.Cut(str)\n\treturn seg.TrimSymbol(s)\n}", "func absoluteURL(link, baselink string) (string, error) {\n\t// scheme relative links, eg <script src=\"//example.com/script.js\">\n\tif len(link) > 1 && link[0:2] == \"//\" {\n\t\tbase, err := url.Parse(baselink)\n\t\tif err != nil {\n\t\t\treturn link, err\n\t\t}\n\t\tlink = base.Scheme + \":\" + link\n\t}\n\n\tu, err := url.Parse(link)\n\tif err != nil {\n\t\treturn link, err\n\t}\n\n\t// remove hashes\n\tu.Fragment = \"\"\n\n\tbase, err := url.Parse(baselink)\n\tif err != nil {\n\t\treturn link, err\n\t}\n\n\t// set global variable\n\tif baseDomain == \"\" {\n\t\tbaseDomain = base.Host\n\t}\n\n\tresult := base.ResolveReference(u)\n\n\t// ensure link is HTTP(S)\n\tif result.Scheme != \"http\" && result.Scheme != \"https\" {\n\t\treturn link, fmt.Errorf(\"Invalid URL: %s\", result.String())\n\t}\n\n\treturn result.String(), nil\n}", "func (u *GithubGistUpsertOne) UpdateHTMLURL() *GithubGistUpsertOne {\n\treturn u.Update(func(s *GithubGistUpsert) {\n\t\ts.UpdateHTMLURL()\n\t})\n}", "func (bT BitTorrent) URL() (string, error) {\n\tbaseURL, err := url.Parse(bT.Announce)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tif bT.PeerID == \"\" {\n\t\tbT.GenPeerID()\n\t\tinfoHash, err := bT.Info.InfoHash()\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t\tbT.InfoHash = fmt.Sprintf(\"%s\", infoHash)\n\t}\n\n\tparameters := url.Values{\n\t\t\"info_hash\": []string{bT.InfoHash},\n\t\t\"peer_id\": []string{bT.PeerID},\n\t\t\"port\": []string{bT.Port},\n\t\t\"uploaded\": []string{\"0\"},\n\t\t\"downloaded\": 
[]string{\"0\"},\n\t\t\"compact\": []string{\"1\"},\n\t\t\"left\": []string{fmt.Sprintf(\"%v\", bT.Info.Length)},\n\t}\n\tbaseURL.RawQuery = parameters.Encode()\n\treturn baseURL.String(), nil\n}", "func htmlLinkFormatter(url, text string) string {\n\treturn fmt.Sprintf(`<a href=\"%s\">%s</a>`, html.EscapeString(url), html.EscapeString(text))\n}", "func (p *Proxy) LinkHtml(ctx context.Context,\n\tlabel string,\n\tactionValue string,\n\tattributes html5tag.Attributes,\n) string {\n\tif attributes == nil {\n\t\tattributes = html5tag.NewAttributes()\n\t}\n\tattributes.Set(\"onclick\", \"return false;\") // make sure we do not follow the link if javascript is on.\n\tvar href string\n\tif attributes.Has(\"href\") {\n\t\thref = attributes.Get(\"href\")\n\t} else {\n\t\thref = page.GetContext(ctx).HttpContext.URL.RequestURI() // for non-javascript compatibility\n\t\tif offset := strings.Index(href, page.HtmlVarAction); offset >= 0 {\n\t\t\thref = href[:offset-1] // remove the variables we placed here ourselves\n\t\t}\n\t}\n\n\t// These next two lines allow the proxy to work even when javascript is off.\n\tav := page.HtmlVarAction + \"=\" + p.ID() + \"_\" + actionValue\n\tav += \"&\" + page.HtmlVarPagestate + \"=\" + crypt.SessionEncryptUrlValue(ctx, p.Page().StateID())\n\n\tif !strings.ContainsRune(href, '?') {\n\t\thref += \"?\" + av\n\t} else {\n\t\thref += \"&\" + av\n\t}\n\tattributes.Set(\"href\", href)\n\treturn p.TagHtml(label, actionValue, attributes, \"a\", false)\n}", "func (b *PagesClearCacheBuilder) URL(v string) *PagesClearCacheBuilder {\n\tb.Params[\"url\"] = v\n\treturn b\n}", "func cleanLink(base, link string) (string, error) {\n\tlink = RegexAnchors.ReplaceAllString(link, \"\")\n\tif len(link) == 0 {\n\t\treturn \"\", ErrInvalidLink\n\t}\n\tlinkURL, err := url.Parse(link)\n\tif err != nil {\n\t\treturn \"\", ErrInvalidLink\n\t}\n\n\tif validScheme(linkURL.Scheme) {\n\t\treturn link, nil\n\t}\n\n\tbaseURL, err := url.Parse(base)\n\tif err != nil 
{\n\t\treturn \"\", ErrInvalidLink\n\t} else if len(baseURL.Host) == 0 {\n\t\treturn \"\", ErrInvalidLink\n\t}\n\n\tif link[0] == '/' || link[len(link)-1] == '/' {\n\t\tlink = strings.Trim(link, \"/\")\n\t}\n\n\treturn strings.Join([]string{baseURL.Scheme, \"://\", baseURL.Host, \"/\", link}, \"\"), nil\n}", "func IsValidURL(address string) bool {\n\tif IsEmptyStr(address) {\n\t\treturn false\n\t}\n\n\treturn govalidator.IsURL(address)\n}", "func DownloadableURL(original string) (string, error) {\n\tif runtime.GOOS == \"windows\" {\n\t\t// If the distance to the first \":\" is just one character, assume\n\t\t// we're dealing with a drive letter and thus a file path.\n\t\tidx := strings.Index(original, \":\")\n\t\tif idx == 1 {\n\t\t\toriginal = \"file:///\" + original\n\t\t}\n\t}\n\n\turl, err := url.Parse(original)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tif url.Scheme == \"\" {\n\t\turl.Scheme = \"file\"\n\t}\n\n\tif url.Scheme == \"file\" {\n\t\t// Windows file handling is all sorts of tricky...\n\t\tif runtime.GOOS == \"windows\" {\n\t\t\t// If the path is using Windows-style slashes, URL parses\n\t\t\t// it into the host field.\n\t\t\tif url.Path == \"\" && strings.Contains(url.Host, `\\`) {\n\t\t\t\turl.Path = url.Host\n\t\t\t\turl.Host = \"\"\n\t\t\t}\n\n\t\t\t// For Windows absolute file paths, remove leading / prior to processing\n\t\t\t// since net/url turns \"C:/\" into \"/C:/\"\n\t\t\tif len(url.Path) > 0 && url.Path[0] == '/' {\n\t\t\t\turl.Path = url.Path[1:len(url.Path)]\n\t\t\t}\n\t\t}\n\n\t\t// Only do the filepath transformations if the file appears\n\t\t// to actually exist.\n\t\tif _, err := os.Stat(url.Path); err == nil {\n\t\t\turl.Path, err = filepath.Abs(url.Path)\n\t\t\tif err != nil {\n\t\t\t\treturn \"\", err\n\t\t\t}\n\n\t\t\turl.Path, err = filepath.EvalSymlinks(url.Path)\n\t\t\tif err != nil {\n\t\t\t\treturn \"\", err\n\t\t\t}\n\n\t\t\turl.Path = filepath.Clean(url.Path)\n\t\t}\n\n\t\tif runtime.GOOS == \"windows\" 
{\n\t\t\t// Also replace all backslashes with forwardslashes since Windows\n\t\t\t// users are likely to do this but the URL should actually only\n\t\t\t// contain forward slashes.\n\t\t\turl.Path = strings.Replace(url.Path, `\\`, `/`, -1)\n\t\t}\n\t}\n\n\t// Make sure it is lowercased\n\turl.Scheme = strings.ToLower(url.Scheme)\n\n\t// This is to work around issue #5927. This can safely be removed once\n\t// we distribute with a version of Go that fixes that bug.\n\t//\n\t// See: https://code.google.com/p/go/issues/detail?id=5927\n\tif url.Path != \"\" && url.Path[0] != '/' {\n\t\turl.Path = \"/\" + url.Path\n\t}\n\n\t// Verify that the scheme is something we support in our common downloader.\n\tsupported := []string{\"file\", \"http\", \"https\"}\n\tfound := false\n\tfor _, s := range supported {\n\t\tif url.Scheme == s {\n\t\t\tfound = true\n\t\t\tbreak\n\t\t}\n\t}\n\n\tif !found {\n\t\treturn \"\", fmt.Errorf(\"Unsupported URL scheme: %s\", url.Scheme)\n\t}\n\n\treturn url.String(), nil\n}", "func (i Internet) URL() string {\n\turl := i.Faker.RandomStringElement(urlFormats)\n\n\t// {{domain}}\n\turl = strings.Replace(url, \"{{domain}}\", i.Domain(), 1)\n\n\t// {{slug}}\n\turl = strings.Replace(url, \"{{slug}}\", i.Slug(), 1)\n\n\treturn url\n}", "func (u *User) HTMLURL() string {\n\treturn conf.Server.ExternalURL + u.Name\n}", "func (b *Builder) URL(s string) *Builder {\n\treturn b.appendMessage(s, func(offset, limit int) tg.MessageEntityClass {\n\t\treturn &tg.MessageEntityUrl{Offset: offset, Length: limit}\n\t})\n}", "func effectiveURL(a *analysis.Analyzer, diag analysis.Diagnostic) string {\n\tu := diag.URL\n\tif u == \"\" && diag.Category != \"\" {\n\t\tu = \"#\" + diag.Category\n\t}\n\tif base, err := urlpkg.Parse(a.URL); err == nil {\n\t\tif rel, err := urlpkg.Parse(u); err == nil {\n\t\t\tu = base.ResolveReference(rel).String()\n\t\t}\n\t}\n\treturn u\n}", "func (o ReleaseLinkOutput) Url() pulumi.StringOutput {\n\treturn o.ApplyT(func(v *ReleaseLink) 
pulumi.StringOutput { return v.Url }).(pulumi.StringOutput)\n}", "func ShortenURL(urlvar string, b *bitly.Client) string {\n\n\tshortURL, err := b.Links.Shorten(urlvar)\n\tif err != nil {\n\t\tlog.Panic(err)\n\t}\n\treturn shortURL.URL\n}", "func NormalizeURL(addr string) (*url.URL, error) {\n\taddr = strings.TrimSpace(addr)\n\tu, err := url.Parse(addr)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif u.Opaque != \"\" {\n\t\tu.Host = net.JoinHostPort(u.Scheme, u.Opaque)\n\t\tu.Opaque = \"\"\n\t} else if u.Path != \"\" && !strings.Contains(u.Path, \":\") {\n\t\tu.Host = net.JoinHostPort(u.Path, strconv.Itoa(config.DefaultServerPort))\n\t\tu.Path = \"\"\n\t} else if u.Scheme == \"\" {\n\t\tu.Host = u.Path\n\t\tu.Path = \"\"\n\t}\n\tif u.Scheme != \"https\" {\n\t\tu.Scheme = \"http\"\n\t}\n\t_, port, err := net.SplitHostPort(u.Host)\n\tif err != nil {\n\t\t_, port, err = net.SplitHostPort(u.Host + \":\" + strconv.Itoa(config.DefaultServerPort))\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tif port != \"\" {\n\t\t_, err = strconv.Atoi(port)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\treturn u, nil\n}", "func FormatURL(url string) string {\n\turl = strings.TrimSpace(url)\n\tif strings.Contains(url, \"\\\\\") {\n\t\turl = strings.ReplaceAll(url, \"\\\\\", \"/\")\n\t}\n\turl = strings.TrimRight(url, \"#/?\")\n\treturn url\n}", "func (u *GithubGistUpsertBulk) UpdateHTMLURL() *GithubGistUpsertBulk {\n\treturn u.Update(func(s *GithubGistUpsert) {\n\t\ts.UpdateHTMLURL()\n\t})\n}", "func urlify(s string) string {\n\tvar r strings.Builder\n\n\tfor i := 0; i < len(s); i++ {\n\t\tif string(s[i]) == \" \" {\n\t\t\tr.WriteString(\"%20\")\n\t\t} else {\n\t\t\tr.WriteString(string(s[i]))\n\t\t}\n\t}\n\treturn r.String()\n}", "func (o DomainOutput) CheckUrl() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v *Domain) pulumi.StringPtrOutput { return v.CheckUrl }).(pulumi.StringPtrOutput)\n}", "func (r *Attestor) urlNormalized() *Attestor {\n\tnormalized 
:= deepcopy.Copy(*r).(Attestor)\n\tnormalized.Name = dcl.SelfLinkToName(r.Name)\n\tnormalized.Description = dcl.SelfLinkToName(r.Description)\n\tnormalized.Project = dcl.SelfLinkToName(r.Project)\n\treturn &normalized\n}", "func (h *Host) ToURL() string {\n\treturn fmt.Sprintf(\"%s:%d\", h.Address, h.Port)\n}", "func URL(url string) string {\n\tscheme, host, _, path, query := unpackURL(url)\n\t// log.S(\"url\", url).S(\"host\", host).Debug(fmt.Sprintf(\"should discover: %v\", shouldDiscoverHost(host)))\n\tif !shouldDiscoverHost(host) {\n\t\treturn url\n\t}\n\tsrvs, err := Services(host)\n\tif err != nil {\n\t\tlog.Error(err)\n\t\treturn url\n\t}\n\t// log.I(\"len_srvs\", len(srvs)).Debug(\"service entries\")\n\tif len(srvs) == 0 {\n\t\treturn url\n\t}\n\tsrv := srvs[rand.Intn(len(srvs))]\n\treturn packURL(scheme, srv.String(), \"\", path, query)\n}", "func FormatWebhookURL(url string, event events.Event) string {\n\treturn strings.ReplaceAll(url, WebhookURLEvent, string(event))\n}", "func StandardizeURL(url string) string {\n\tlink := url\n\tvar schema, domain, path string\n\n\t// Try to get the schema\n\tslice := strings.SplitN(url, \"://\", 2)\n\tif len(slice) == 2 && len(slice[0]) < 10 { // schema exists\n\t\tschema = slice[0] + \"://\"\n\t\tlink = slice[1]\n\t} else {\n\t\tschema = \"http://\"\n\t}\n\n\t// Get the domain\n\tslice = strings.SplitN(link, \"/\", 2)\n\tif len(slice) == 2 {\n\t\tdomain = slice[0]\n\t\tpath = \"/\" + slice[1]\n\t} else {\n\t\tdomain = slice[0]\n\t\tpath = \"/\"\n\t}\n\n\tdomain, _ = idna.ToASCII(domain)\n\tlink = schema + domain + path\n\n\treturn link\n}", "func verifyURL(myUrl string) string {\n\tu, _ := url.Parse(myUrl)\n\n\tif u.Scheme != \"\" {\n\t\treturn myUrl\n\t}\n\treturn \"http://\" + myUrl\n}", "func mutateURL(url string) string {\n\treturn strings.Replace(url, \"/j/\", \"/wc/join/\", 1)\n}", "func (m *WorkforceIntegration) SetUrl(value *string)() {\n m.url = value\n}", "func EscapeURL(s string) string {\n\treturn 
got.URLQueryEscaper(s)\n}", "func (x XKCDStrip) URL() string {\n\treturn fmt.Sprintf(\"https://xkcd.com/%d\", x.ID)\n}", "func (r *Rietveld) Url(issueID int64) string {\n\tif issueID == 0 {\n\t\treturn r.url\n\t}\n\treturn fmt.Sprintf(\"%s/%d\", r.url, issueID)\n}", "func NewURL(userID uint, address string, threshold int) (*URL, error) {\n\turl := new(URL)\n\turl.UserId = userID\n\turl.Threshold = threshold\n\turl.FailedTimes = 0\n\n\tisValid := govalidator.IsURL(address)\n\tif !strings.HasPrefix(\"http://\", address) {\n\t\taddress = \"http://\" + address\n\t}\n\tif isValid {\n\t\t//valid URL address\n\t\turl.Address = address\n\t\treturn url, nil\n\t}\n\treturn nil, errors.New(\"not a valid URL address\")\n}", "func URL(data ValidationData) error {\n\tv, err := helper.ToString(data.Value)\n\tif err != nil {\n\t\treturn ErrInvalid{\n\t\t\tValidationData: data,\n\t\t\tFailure: \"is not a string\",\n\t\t\tMessage: data.Message,\n\t\t}\n\t}\n\n\tparsed, err := url.Parse(v)\n\tif err != nil {\n\t\treturn ErrInvalid{\n\t\t\tValidationData: data,\n\t\t\tFailure: \"is not a valid URL\",\n\t\t\tMessage: data.Message,\n\t\t}\n\t}\n\n\tif parsed.Scheme != \"http\" && parsed.Scheme != \"https\" {\n\t\treturn ErrInvalid{\n\t\t\tValidationData: data,\n\t\t\tFailure: fmt.Sprintf(\"has an invalid scheme '%s'\", parsed.Scheme),\n\t\t\tMessage: data.Message,\n\t\t}\n\t}\n\n\tif parsed.Host == \"\" || strings.IndexRune(parsed.Host, '\\\\') > 0 {\n\t\treturn ErrInvalid{\n\t\t\tValidationData: data,\n\t\t\tFailure: fmt.Sprintf(\"has an invalid host ('%s')\", parsed.Host),\n\t\t\tMessage: data.Message,\n\t\t}\n\t}\n\n\treturn nil\n}", "func (o *AddOn) HREF() string {\n\tif o != nil && o.bitmap_&4 != 0 {\n\t\treturn o.href\n\t}\n\treturn \"\"\n}", "func (r *Router) URL(name string, vars ...string) string {\n\tif route, ok := r.Router.NamedRoutes[name]; ok {\n\t\tu, err := r.Router.matcher.Build(route, vars...)\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t\treturn u\n\t}\n\treturn 
\"\"\n}", "func BitlyURLShorten(urlStr string) string {\n\tapiKey := os.Getenv(\"BITLY_TOKEN\")\n\tb := bitly.New(apiKey)\n\tshortURL, err := b.Links.Shorten(urlStr)\n\tif err != nil {\n\t\tlog.Errorf(\"Failed to sign request\", err)\n\t} else {\n\t\tlog.Debug(\"The bitly URL is\", shortURL)\n\t}\n\treturn shortURL.URL\n}", "func MakeUrl(address string) string {\n\n params := url.Values{}\n params.Add(\"near\", address)\n\n uri := fmt.Sprintf(\"%s?%s\", URL, params.Encode())\n\n return uri\n}", "func (r *Attestor) urlNormalized() *Attestor {\n\tnormalized := dcl.Copy(*r).(Attestor)\n\tnormalized.Name = dcl.SelfLinkToName(r.Name)\n\tnormalized.Description = dcl.SelfLinkToName(r.Description)\n\tnormalized.Project = dcl.SelfLinkToName(r.Project)\n\treturn &normalized\n}", "func (b *ServiceClusterBuilder) HREF(value string) *ServiceClusterBuilder {\n\tb.href = value\n\tb.bitmap_ |= 4\n\treturn b\n}", "func (lm LinksManager) CleanLinkParams(url *url.URL) bool {\n\t// we try to clean all URLs, not specific ones\n\treturn true\n}", "func (b *AddonInstallationBuilder) HREF(value string) *AddonInstallationBuilder {\n\tb.href = value\n\tb.bitmap_ |= 4\n\treturn b\n}", "func AppendURL(url_text string, append_text []string) (url string, err error) {\n\tappend_len := len(append_text)\n\tif append_len <= 0 {\n\t\treturn url_text, nil\n\t}\n\tlast_rune, width := utf8.DecodeLastRuneInString(url_text)\n\tif last_rune == utf8.RuneError && width == 1 {\n\t\treturn \"\", errors.New(fmt.Sprintf(\"'%s' is not a valid utf8 string\", url_text))\n\t}\n\tif append_len == 1 && last_rune == '/' {\n\t\turl = url_text + append_text[0]\n\t\treturn url, nil\n\n\t} else if append_len > 1 && last_rune == '/' {\n\t\turl = strings.Join(append([]string{url_text + append_text[0]}, append_text[1:]...), \"/\")\n\t\treturn url, nil\n\t}\n\turl = strings.Join(append([]string{url_text}, append_text...), \"/\")\n\treturn url, nil\n}", "func URL(s string) got.URL {\n\treturn got.URL(s)\n}", "func (b 
*FollowUpBuilder) Url(value string) *FollowUpBuilder {\n\tb.url = value\n\tb.bitmap_ |= 16384\n\treturn b\n}", "func (m *BookingBusiness) SetPublicUrl(value *string)() {\n err := m.GetBackingStore().Set(\"publicUrl\", value)\n if err != nil {\n panic(err)\n }\n}", "func generateURLIssue(h string) string {\n\tconst (\n\t\ttitle = \"Move your ass\"\n\t\turlFormat = \"https://github.com/sjeandeaux/nexus-cli/issues/new?title=%s&body=%s\"\n\t\tbodyFormat = \"Could you add the hash %q lazy man?\\n%s\"\n\t)\n\tescapedTitle := url.QueryEscape(title)\n\tbody := fmt.Sprintf(bodyFormat, h, information.Print())\n\tescapedBody := url.QueryEscape(body)\n\turlIssue := fmt.Sprintf(urlFormat, escapedTitle, escapedBody)\n\treturn urlIssue\n}", "func buildURL(url string, x, y, z int) string {\n\turl = strings.Replace(url, \"{x}\", strconv.Itoa(x), 1)\n\turl = strings.Replace(url, \"{y}\", strconv.Itoa(y), 1)\n\turl = strings.Replace(url, \"{z}\", strconv.Itoa(z), 1)\n\treturn url\n}", "func isURL(v string) bool {\n\tvalGen := pflagValueFuncMap[urlFlag]\n\treturn valGen().Set(v) == nil\n}", "func URL(s *httptest.Server) string {\n\treturn strings.Replace(s.URL, \"http\", \"ws\", 1)\n}", "func BuildURL(route string) string {\n\tprefix := os.Getenv(\"AWS_LAMBDA_RUNTIME_API\")\n\tif len(prefix) == 0 {\n\t\treturn fmt.Sprintf(\"http://localhost:9001%s\", route)\n\t}\n\treturn fmt.Sprintf(\"http://%s%s\", prefix, route)\n}", "func IsURLValid(value string) bool {\n\tcheck := value != \"\" && !strings.Contains(value, \".gif\") && !strings.Contains(value, \"logo\") && !strings.Contains(value, \"mobilebanner\")\n\n\tif check {\n\t\treturn strings.HasPrefix(value, \"http\") || strings.HasPrefix(value, \"https\")\n\t}\n\n\treturn check\n}" ]
[ "0.52412796", "0.5147074", "0.50879025", "0.5021607", "0.49071804", "0.48897803", "0.48651856", "0.4834832", "0.48083636", "0.48071218", "0.4795265", "0.47837985", "0.47502625", "0.47394225", "0.4733341", "0.47310492", "0.47305945", "0.47228217", "0.4715572", "0.47154", "0.47066662", "0.47008896", "0.46928576", "0.46910858", "0.46733606", "0.4666813", "0.46651357", "0.46502602", "0.46433145", "0.46423548", "0.46404752", "0.46353295", "0.4630457", "0.4620223", "0.46176836", "0.46131787", "0.46093136", "0.4607997", "0.46017453", "0.45999104", "0.45905638", "0.45857587", "0.45853943", "0.45840877", "0.45714846", "0.4569403", "0.4567161", "0.45542407", "0.45467266", "0.454488", "0.45406902", "0.45028764", "0.44873312", "0.44799915", "0.44783986", "0.44740924", "0.4462845", "0.44611317", "0.44609594", "0.44572997", "0.44550335", "0.4453449", "0.44501913", "0.4448752", "0.44452807", "0.443646", "0.4436013", "0.44356793", "0.44345385", "0.44342208", "0.44265288", "0.44148684", "0.4410505", "0.44078612", "0.44063443", "0.4393336", "0.43865782", "0.43777984", "0.4377606", "0.43702507", "0.43610692", "0.43514523", "0.43448067", "0.43427974", "0.43374193", "0.4336337", "0.43351358", "0.43321317", "0.43301448", "0.43292397", "0.4327983", "0.43262455", "0.43260548", "0.43253723", "0.43247643", "0.43230867", "0.4323012", "0.43193543", "0.43164158", "0.4309948" ]
0.802518
0
AsBool converts string to bool, in case of the value is empty or unknown, defaults to false
AsBool преобразует строку в логическое значение, при отсутствии значения или неизвестном значении по умолчанию устанавливается false
func AsBool(v string) bool { if v == "" { return false } out, _ := apiutils.ParseBool(v) return out }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (s *Value) asBool() (bool, error) {\n\t// A missing value is considered false\n\tif s == nil {\n\t\treturn false, nil\n\t}\n\tswitch s.Name {\n\tcase \"true\":\n\t\treturn true, nil\n\tcase \"false\":\n\t\treturn false, nil\n\tdefault:\n\t\treturn false, fmt.Errorf(\"invalid boolean: %s\", s.Name)\n\t}\n}", "func (v *Value) AsBool(dv bool) bool {\n\tif v.IsUndefined() {\n\t\treturn dv\n\t}\n\tswitch tv := v.raw.(type) {\n\tcase string:\n\t\tb, err := strconv.ParseBool(tv)\n\t\tif err != nil {\n\t\t\treturn dv\n\t\t}\n\t\treturn b\n\tcase int:\n\t\treturn tv == 1\n\tcase float64:\n\t\treturn tv == 1.0\n\tcase bool:\n\t\treturn tv\n\tcase time.Time:\n\t\treturn tv.UnixNano() > 0\n\tcase time.Duration:\n\t\treturn tv.Nanoseconds() > 0\n\t}\n\treturn dv\n}", "func (val stringValue) toBool() boolValue {\n\tif val.null {\n\t\treturn boolValue{false, true}\n\t}\n\treturn boolValue{true, false}\n}", "func Bool(value interface{}) bool {\r\n\ts := String(value)\r\n\tb, _ := strconv.ParseBool(s)\r\n\treturn b\r\n}", "func parseBool(asString string) (bool, error) {\n\tswitch asString {\n\tcase \"true\":\n\t\treturn true, nil\n\tcase \"false\":\n\t\treturn false, nil\n\tdefault:\n\t\treturn false, fmt.Errorf(\"could not parse %q as a bool\", asString)\n\t}\n}", "func (s *Str) Bool() bool {\n\tval, err := strconv.ParseBool(s.val)\n\tif err != nil {\n\t\ts.err = err\n\t}\n\treturn val\n}", "func ParseBool(str string) (bool, error) {}", "func StringToBool(s string, def bool) bool {\n\tv, err := strconv.ParseBool(s)\n\tif err != nil {\n\t\tlog.Warnf(\"failed to parse bool value: %s\", s)\n\t\treturn def\n\t}\n\treturn v\n}", "func (me StringData) toBoolean() bool {\n\tif b, err := strconv.ParseBool(me.val); err != nil {\n\t\treturn b\n\t}\n\treturn false\n}", "func Bool(val string) error {\n\tif strings.EqualFold(val, \"true\") || strings.EqualFold(val, \"false\") {\n\t\treturn nil\n\t}\n\n\treturn fmt.Errorf(\"invalid bool value '%s', can be only 'true' or 'false'\", 
val)\n}", "func ToBool(value interface{}) (bool, error) {\n\tvalue = indirect(value)\n\n\tvar s string\n\tswitch v := value.(type) {\n\tcase nil:\n\t\treturn false, nil\n\tcase bool:\n\t\treturn v, nil\n\tcase []byte:\n\t\tif len(v) == 0 {\n\t\t\treturn false, nil\n\t\t}\n\t\ts = string(v)\n\tcase string:\n\t\ts = v\n\tcase fmt.Stringer:\n\t\ts = v.String()\n\tdefault:\n\t\treturn !IsZero(v), nil\n\t}\n\n\tswitch s {\n\tcase \"t\", \"T\", \"1\", \"on\", \"On\", \"ON\", \"true\", \"True\", \"TRUE\", \"yes\", \"Yes\", \"YES\":\n\t\treturn true, nil\n\tcase \"f\", \"F\", \"0\", \"off\", \"Off\", \"OFF\", \"false\", \"False\", \"FALSE\", \"no\", \"No\", \"NO\", \"\":\n\t\treturn false, nil\n\tdefault:\n\t\treturn false, fmt.Errorf(\"unrecognized bool string: %s\", s)\n\t}\n}", "func AsBool(v interface{}) bool {\n\tif v == nil {\n\t\treturn false\n\t}\n\tr := reflect.ValueOf(v)\n\tv = AsValueRef(r).Interface()\n\tswitch v.(type) {\n\tcase int:\n\t\treturn v.(int) > 0\n\tcase int8:\n\t\treturn v.(int8) > 0\n\tcase int16:\n\t\treturn v.(int16) > 0\n\tcase int32:\n\t\treturn v.(int32) > 0\n\tcase int64:\n\t\treturn v.(int64) > 0\n\tcase uint:\n\t\treturn v.(uint) > 0\n\tcase uint8:\n\t\treturn v.(uint8) > 0\n\tcase uint16:\n\t\treturn v.(uint16) > 0\n\tcase uint32:\n\t\treturn v.(uint32) > 0\n\tcase uint64:\n\t\treturn v.(uint64) > 0\n\tcase float32:\n\t\treturn v.(float32) > 0\n\tcase float64:\n\t\treturn v.(float64) > 0\n\tcase []uint8:\n\t\tb, err := strconv.ParseBool(string(v.([]uint8)))\n\t\tif err == nil {\n\t\t\treturn b\n\t\t} else {\n\t\t\treturn len(v.([]uint8)) != 0\n\t\t}\n\tcase string:\n\t\tb, err := strconv.ParseBool(v.(string))\n\t\tif err == nil {\n\t\t\treturn b\n\t\t} else {\n\t\t\treturn len(v.(string)) != 0\n\t\t}\n\tcase bool:\n\t\treturn v.(bool)\n\tcase error:\n\t\treturn false\n\tdefault:\n\t\t// check nil and empty value\n\t\tswitch r.Kind() {\n\t\tcase reflect.Array:\n\t\t\treturn r.Len() != 0\n\t\tcase reflect.Map, reflect.Slice:\n\t\t\treturn 
!(r.IsNil() || r.Len() == 0)\n\t\tcase reflect.Interface, reflect.Ptr, reflect.Chan, reflect.Func:\n\t\t\treturn !r.IsNil()\n\t\t}\n\t\treturn !reflect.DeepEqual(v, reflect.Zero(r.Type()).Interface())\n\t}\n}", "func ToBoolean(str string) (bool, error) {\n\tres, err := strconv.ParseBool(str)\n\tif err != nil {\n\t\tres = false\n\t}\n\treturn res, err\n}", "func ToBoolean(str string) (bool, error) {\n\tres, err := strconv.ParseBool(str)\n\tif err != nil {\n\t\tres = false\n\t}\n\treturn res, err\n}", "func (s VerbatimString) ToBool() (bool, error) { return _verbatimString(s).ToBool() }", "func ToBool(v interface{}, def bool) bool {\r\n\tif b, ok := v.(bool); ok {\r\n\t\treturn b\r\n\t}\r\n\tif i, ok := v.(int); ok {\r\n\t\treturn i > 0\r\n\t}\r\n\tif i, ok := v.(float64); ok {\r\n\t\treturn i > 0\r\n\t}\r\n\tif i, ok := v.(float32); ok {\r\n\t\treturn i > 0\r\n\t}\r\n\tif ss, ok := v.([]string); ok {\r\n\t\tv = ss[0]\r\n\t}\r\n\tif s, ok := v.(string); ok {\r\n\t\tif s == \"on\" {\r\n\t\t\treturn true\r\n\t\t}\r\n\t\tif s == \"off\" || s == \"\" {\r\n\t\t\treturn false\r\n\t\t}\r\n\t\tif b, err := strconv.ParseBool(s); err == nil {\r\n\t\t\treturn b\r\n\t\t}\r\n\t}\r\n\r\n\treturn def\r\n\r\n}", "func StrToBool(s string) bool {\n\tif s == \"да\" {\n\t\treturn true\n\t}\n\n\tif v, err := strconv.ParseBool(s); err == nil {\n\t\treturn v\n\t}\n\n\treturn false\n}", "func StrToBool(s string) (bool, error) {\n\tclean := strings.TrimSpace(s)\n\n\tif regexp.MustCompile(`(?i)^(1|yes|true|y|t)$`).MatchString(clean) {\n\t\treturn true, nil\n\t}\n\n\tif regexp.MustCompile(`(?i)^(0|no|false|n|f)$`).MatchString(clean) {\n\t\treturn false, nil\n\t}\n\n\treturn false, fmt.Errorf(\"cannot convert string value '%s' into a boolean\", clean)\n}", "func TextToBool(value string) (result bool) {\n\tvalue = strings.ToLower(value)\n\tswitch value {\n\tcase \"yes\":\n\t\tresult = true\n\tcase \"true\":\n\t\tresult = true\n\tcase \"1\":\n\t\tresult = true\n\tdefault:\n\t\tresult = 
false\n\t}\n\treturn\n}", "func atob(str string) (value bool, err error) {\n\tv, err := strconv.ParseBool(str)\n\tif err == nil {\n\t\treturn v, nil\n\t}\n\n\tswitch str {\n\tcase \"y\", \"Y\", \"yes\", \"YES\", \"Yes\":\n\t\treturn true, nil\n\tcase \"n\", \"N\", \"no\", \"NO\", \"No\":\n\t\treturn false, nil\n\t}\n\n\t// Check extra characters, if any.\n\tboolExtra, ok := ExtraBoolString[str]\n\tif ok {\n\t\treturn boolExtra, nil\n\t}\n\n\treturn false, err // Return error of 'strconv.Atob'\n}", "func ParseBool(operand string) (value bool, err error) { return strconv.ParseBool(operand) }", "func Str2Bool(v interface{}) (t bool) {\n\tvar i = 0\n\n\tswitch v.(type) {\n\tcase string:\n\t\ti, _ = strconv.Atoi(v.(string))\n\n\tcase int:\n\t\ti = v.(int)\n\n\tcase bool:\n\t\tif v.(bool) == true {\n\t\t\ti = 1\n\t\t} else {\n\t\t\ti = 0\n\t\t}\n\t}\n\n\tif i > 0 {\n\t\tt = true\n\t}\n\n\treturn\n}", "func StringToBool(str String) Bool {\n\tv := &stringToBool{from: str}\n\tstr.AddListener(v)\n\treturn v\n}", "func Bool(i interface{}) bool {\n\tif i == nil {\n\t\treturn false\n\t}\n\tif v, ok := i.(bool); ok {\n\t\treturn v\n\t}\n\tif s := String(i); s != \"\" && s != \"0\" && s != \"false\" && s != \"off\" {\n\t\treturn true\n\t}\n\treturn false\n}", "func boolValue(s string) bool {\n\tswitch s {\n\tcase \"yes\", \"true\":\n\t\treturn true\n\t}\n\n\treturn false\n}", "func typeConvertBool(i interface{}) bool {\n\tif i == nil {\n\t\treturn false\n\t}\n\tif v, ok := i.(bool); ok {\n\t\treturn v\n\t}\n\tif s := typeConvertString(i); s != \"\" && s != \"0\" && s != \"false\" && s != \"off\" {\n\t\treturn true\n\t}\n\treturn false\n}", "func BoolConverter(str string, target reflect.Value) (ok bool) {\n\tb, err := strconv.ParseBool(str)\n\tif err != nil {\n\t\treturn\n\t}\n\n\ttarget.SetBool(b)\n\treturn true\n}", "func ToBool(value interface{}) bool {\n\tswitch value := value.(type) {\n\tcase bool:\n\t\treturn value\n\tcase *bool:\n\t\treturn *value\n\tcase 
string:\n\t\tswitch value {\n\t\tcase \"\", \"false\":\n\t\t\treturn false\n\t\t}\n\t\treturn true\n\tcase *string:\n\t\treturn ToBool(*value)\n\tcase float64:\n\t\tif value != 0 {\n\t\t\treturn true\n\t\t}\n\t\treturn false\n\tcase *float64:\n\t\treturn ToBool(*value)\n\tcase float32:\n\t\tif value != 0 {\n\t\t\treturn true\n\t\t}\n\t\treturn false\n\tcase *float32:\n\t\treturn ToBool(*value)\n\tcase int:\n\t\tif value != 0 {\n\t\t\treturn true\n\t\t}\n\t\treturn false\n\tcase *int:\n\t\treturn ToBool(*value)\n\t}\n\treturn false\n}", "func parseBoolFromString(content string, aggErr *AggregateError) bool {\n result, err := strconv.ParseBool(content)\n if err != nil {\n aggErr.Append(err)\n }\n return result\n}", "func Bool(v interface{}) *bool {\n\tswitch v.(type) {\n\tcase bool:\n\t\tval := v.(bool)\n\t\treturn &val\n\tcase int, uint, int32, int16, int8, int64, uint32, uint16, uint8, uint64, float32, float64:\n\t\tval, err := strconv.Atoi(fmt.Sprintf(\"%v\", v))\n\t\tif err != nil {\n\t\t\texception.Err(err, 500).Ctx(M{\"v\": v}).Throw()\n\t\t}\n\t\tres := false\n\t\tif val != 0 {\n\t\t\tres = true\n\t\t}\n\t\treturn &res\n\tdefault:\n\t\tval := fmt.Sprintf(\"%v\", v)\n\t\tres := false\n\t\tif val != \"\" {\n\t\t\tres = true\n\t\t}\n\t\treturn &res\n\t}\n}", "func OkToBool(ok string) bool {\n\tif ok == \"ok\" {\n\t\treturn true\n\t}\n\treturn false\n}", "func ParseBool(val interface{}) (value bool, err error) {\n\tif val != nil {\n\t\tswitch v := val.(type) {\n\t\tcase bool:\n\t\t\treturn v, nil\n\t\tcase string:\n\t\t\tswitch v {\n\t\t\tcase \"1\", \"t\", \"T\", \"true\", \"TRUE\", \"True\", \"YES\", \"yes\", \"Yes\", \"Y\", \"y\", \"ON\", \"on\", \"On\":\n\t\t\t\treturn true, nil\n\t\t\tcase \"0\", \"f\", \"F\", \"false\", \"FALSE\", \"False\", \"NO\", \"no\", \"No\", \"N\", \"n\", \"OFF\", \"off\", \"Off\":\n\t\t\t\treturn false, nil\n\t\t\t}\n\t\tcase int8, int32, int64:\n\t\t\tstrV := fmt.Sprintf(\"%s\", v)\n\t\t\tif strV == \"1\" {\n\t\t\t\treturn true, 
nil\n\t\t\t} else if strV == \"0\" {\n\t\t\t\treturn false, nil\n\t\t\t}\n\t\tcase float64:\n\t\t\tif v == 1 {\n\t\t\t\treturn true, nil\n\t\t\t} else if v == 0 {\n\t\t\t\treturn false, nil\n\t\t\t}\n\t\t}\n\t\treturn false, fmt.Errorf(\"parsing %q: invalid syntax\", val)\n\t}\n\treturn false, fmt.Errorf(\"parsing <nil>: invalid syntax\")\n}", "func ToBool(value interface{}) (bool, bool) {\n\tvar data bool\n\tswitch converted := value.(type) {\n\tcase string:\n\t\tvar err error\n\t\tdata, err = strconv.ParseBool(strings.ToLower(converted))\n\t\tif err != nil {\n\t\t\treturn false, false\n\t\t}\n\tcase float64, float32, int:\n\t\tdata = converted != 0\n\tcase bool:\n\t\tdata = converted\n\tdefault:\n\t\treturn false, false\n\t}\n\treturn data, true\n}", "func getBoolVal(input string) bool {\n\tinput = strings.ToLower(input)\n\tif input == \"yes\" || input == \"true\" {\n\t\treturn true\n\t}\n\treturn false\n}", "func toBool(val interface{}) bool {\n\tif val == nil || val == false {\n\t\treturn false\n\t}\n\treturn true\n}", "func flagToBool(f string) bool {\n\tif f == \"true\" {\n\t\treturn true\n\t} else {\n\t\treturn false\n\t}\n}", "func Bool(name string, defaultValue bool) bool {\n\tif strVal, ok := os.LookupEnv(name); ok {\n\t\tif res, err := strconv.ParseBool(strVal); err == nil {\n\t\t\treturn res\n\t\t}\n\t}\n\n\treturn defaultValue\n}", "func Bool(v interface{}, defaults ...bool) (b bool) {\n\tswitch tv := v.(type) {\n\tcase nil:\n\t\tif 1 < len(defaults) {\n\t\t\tb = defaults[1]\n\t\t}\n\tcase bool:\n\t\tb = tv\n\tcase string:\n\t\tvar err error\n\t\tif 1 < len(defaults) {\n\t\t\tb = defaults[1]\n\t\t} else if b, err = strconv.ParseBool(tv); err != nil {\n\t\t\tif 0 < len(defaults) {\n\t\t\t\tb = defaults[0]\n\t\t\t}\n\t\t}\n\tcase gen.Bool:\n\t\tb = bool(tv)\n\tcase gen.String:\n\t\tvar err error\n\t\tif 1 < len(defaults) {\n\t\t\tb = defaults[1]\n\t\t} else if b, err = strconv.ParseBool(string(tv)); err != nil {\n\t\t\tif 0 < len(defaults) {\n\t\t\t\tb = 
defaults[0]\n\t\t\t}\n\t\t}\n\tdefault:\n\t\tif 0 < len(defaults) {\n\t\t\tb = defaults[0]\n\t\t}\n\t}\n\treturn\n}", "func (v AnnotationValue) AsBool() bool {\n\treturn v.Value.(bool)\n}", "func ToNullableBoolean(value interface{}) *bool {\n\tif value == nil {\n\t\treturn nil\n\t}\n\n\tvar v string\n\n\tswitch value.(type) {\n\tcase bool:\n\t\tr := value.(bool)\n\t\treturn &r\n\n\tcase string:\n\t\tv = strings.ToLower(value.(string))\n\n\tcase time.Duration:\n\t\td := value.(time.Duration)\n\t\tr := d.Nanoseconds() > 0\n\t\treturn &r\n\n\tdefault:\n\t\tv = strings.ToLower(fmt.Sprint(value))\n\t}\n\n\tif v == \"1\" || v == \"true\" || v == \"t\" || v == \"yes\" || v == \"y\" {\n\t\tr := true\n\t\treturn &r\n\t}\n\n\tif v == \"0\" || v == \"false\" || v == \"f\" || v == \"no\" || v == \"n\" {\n\t\tr := false\n\t\treturn &r\n\t}\n\n\treturn nil\n}", "func (v Value) AsBool() bool {\n\treturn v.iface.(bool)\n}", "func (c *JSONElement) AsBool() bool {\n\tvalue := false\n\n\tif v, err := c.Bool(); err == nil {\n\t\tvalue = v\n\t} else if v, err := c.Json.String(); err == nil {\n\t\tif v == \"true\" || v == \"1\" {\n\t\t\tvalue = true\n\t\t}\n\t} else if v, err := c.Json.Int(); err == nil {\n\t\tif v == 1 {\n\t\t\tvalue = true\n\t\t}\n\t}\n\n\treturn value\n}", "func Bool(key string, def bool) bool {\n\tif s := String(key, \"\"); s != \"\" {\n\t\tif d, err := strconv.ParseBool(s); err == nil {\n\t\t\treturn d\n\t\t} else {\n\t\t\tLog(key, err)\n\t\t}\n\t}\n\treturn def\n}", "func (s *String) Bool() bool {\n\tif len(s.s) == 0 {\n\t\tpanic(\"Empty string not allowed (should be == None)\")\n\t}\n\treturn true\n}", "func parseBool(str string) (value bool, err error) {\n\tswitch str {\n\tcase \"1\", \"t\", \"T\", \"true\", \"TRUE\", \"True\", \"YES\", \"yes\", \"Yes\", \"ON\", \"on\", \"On\":\n\t\treturn true, nil\n\tcase \"0\", \"f\", \"F\", \"false\", \"FALSE\", \"False\", \"NO\", \"no\", \"No\", \"OFF\", \"off\", \"Off\":\n\t\treturn false, nil\n\t}\n\treturn false, 
fmt.Errorf(\"parsing \\\"%s\\\": invalid syntax\", str)\n}", "func isTrue(s string) bool {\n\tv, _ := strconv.ParseBool(s)\n\treturn v\n}", "func (f *flag) Bool() bool {\n\tvalue, err := strconv.ParseBool(f.String())\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\treturn value\n}", "func (p Parser) Bool(ctx context.Context) (*bool, error) {\n\tvalue, err := p.Source.String(ctx)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif value == nil {\n\t\treturn nil, nil\n\t}\n\n\tparsed, err := stringutil.ParseBool(*value)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &parsed, nil\n}", "func ConvertToBool(value string) bool {\n\tboolValue, err := strconv.ParseBool(value)\n\tif err != nil {\n\t\tutilsDiagnostics.ConvertToBoolErr(err, value)\n\t\treturn false\n\t}\n\treturn boolValue\n}", "func IsBool(val any) bool {\n\tif _, ok := val.(bool); ok {\n\t\treturn true\n\t}\n\n\tif typVal, ok := val.(string); ok {\n\t\t_, err := strutil.ToBool(typVal)\n\t\treturn err == nil\n\t}\n\treturn false\n}", "func StringBool(b bool) string {\n\tconst true = \"true\"\n\tconst false = \"false\"\n\tif b {\n\t\treturn true\n\t}\n\treturn false\n}", "func Bool(name string) bool {\n\treturn strings.EqualFold(String(name), \"true\")\n}", "func ExampleBool() {\n\n\t// Bool conversion from other bool values will be returned without\n\t// modification.\n\tfmt.Println(conv.Bool(true))\n\tfmt.Println(conv.Bool(false))\n\n\t// Bool conversion from strings consider the following values true:\n\t// \"t\", \"T\", \"true\", \"True\", \"TRUE\",\n\t// \t \"y\", \"Y\", \"yes\", \"Yes\", \"YES\", \"1\"\n\t//\n\t// It considers the following values false:\n\t// \"f\", \"F\", \"false\", \"False\", \"FALSE\",\n\t// \"n\", \"N\", \"no\", \"No\", \"NO\", \"0\"\n\tfmt.Println(conv.Bool(\"T\"))\n\tfmt.Println(conv.Bool(\"False\"))\n\n\t// Bool conversion from other supported types will return true unless it is\n\t// the zero value for the given 
type.\n\tfmt.Println(conv.Bool(int64(123)))\n\tfmt.Println(conv.Bool(int64(0)))\n\tfmt.Println(conv.Bool(time.Duration(123)))\n\tfmt.Println(conv.Bool(time.Duration(0)))\n\tfmt.Println(conv.Bool(time.Now()))\n\tfmt.Println(conv.Bool(time.Time{}))\n\n\t// All other types will return false.\n\tfmt.Println(conv.Bool(struct{ string }{\"\"}))\n\n\t// Output:\n\t// true <nil>\n\t// false <nil>\n\t// true <nil>\n\t// false <nil>\n\t// true <nil>\n\t// false <nil>\n\t// true <nil>\n\t// false <nil>\n\t// true <nil>\n\t// false <nil>\n\t// false cannot convert struct { string }{string:\"\"} (type struct { string }) to bool\n}", "func BoolToBool(bool_ bool) bool {\n\treturn bool_\n}", "func ParseToBool(data string) (bool, error) {\n\treturn strconv.ParseBool(data)\n}", "func CastToBool(i interface{}) (bool, error) {\n\tstr := CastOrEmpty(i)\n\treturn strconv.ParseBool(str)\n}", "func (ref *UIElement) ValueAsBool() (bool, error) {\n\treturn ref.BoolAttr(ValueAttribute)\n}", "func (nvp *NameValues) Bool(name string) (bool, bool) {\n\tvalue, _ := nvp.String(name)\n\treturn (value == \"true\" || value == \"yes\" || value == \"1\" || value == \"-1\" || value == \"on\"), true\n}", "func ToBool(v interface{}) bool {\n\tswitch v.(type) {\n\tcase bool:\n\t\treturn v.(bool)\n\tcase string:\n\t\tif strings.ToUpper(v.(string)) == \"FALSE\" {\n\t\t\treturn false\n\t\t}\n\t\tif strings.ToUpper(v.(string)) == \"TRUE\" {\n\t\t\treturn true\n\t\t}\n\t\treturn false\n\tcase int:\n\t\treturn v.(int) > 0\n\tcase int64:\n\t\treturn v.(int64) > 0\n\tcase float64:\n\t\treturn v.(float64) > 0\n\tdefault:\n\t\treturn false\n\t}\n}", "func (f *Form) Bool(param string, defaultValue bool) bool {\n\tvals, ok := f.values[param]\n\tif !ok {\n\t\treturn defaultValue\n\t}\n\tparamVal, err := strconv.ParseBool(vals[0])\n\tif err != nil {\n\t\tf.err = err\n\t\treturn defaultValue\n\t}\n\treturn paramVal\n}", "func GetBool(v interface{}) bool {\n\tswitch result := v.(type) {\n\tcase bool:\n\t\treturn 
result\n\tdefault:\n\t\tif d := GetString(v); d != \"\" {\n\t\t\tvalue, _ := strconv.ParseBool(d)\n\t\t\treturn value\n\t\t}\n\t}\n\treturn false\n}", "func normalizeBool(value string) string {\n\tif contains(strings.ToLower(value), truthy) {\n\t\treturn \"true\"\n\t}\n\treturn \"false\"\n}", "func ParseBool(s string) (bool, error) {\n\tswitch s {\n\tdefault:\n\t\tb, err := strconv.ParseBool(s)\n\t\tif err != nil {\n\t\t\treturn b, errz.Err(err)\n\t\t}\n\t\treturn b, nil\n\tcase \"1\", \"yes\", \"Yes\", \"YES\", \"y\", \"Y\":\n\t\treturn true, nil\n\tcase \"0\", \"no\", \"No\", \"NO\", \"n\", \"N\":\n\t\treturn false, nil\n\t}\n}", "func Bool(b bool) string {\n\treturn strconv.FormatBool(b)\n}", "func ParseBool(str string) bool {\n\tb, _ := strconv.ParseBool(str)\n\treturn b\n}", "func getEnvAsBool(name string, defaultVal bool) bool {\n\tvalStr := getEnv(name, \"\")\n\tif val, err := strconv.ParseBool(valStr); err == nil {\n\t\treturn val\n\t}\n\n\treturn defaultVal\n}", "func getEnvAsBool(name string, defaultVal bool) bool {\n\tvalStr := getEnv(name, \"\")\n\tif val, err := strconv.ParseBool(valStr); err == nil {\n\t\treturn val\n\t}\n\n\treturn defaultVal\n}", "func TestToBool(t *testing.T) {\n\t// conversion 0 to false\n\tresult := evaluator.ToBool(0)\n\tassert.False(t, result)\n\n\t// conversion 1 to true\n\tresult = evaluator.ToBool(1)\n\tassert.True(t, result)\n}", "func (data *Data) Bool(s ...string) bool {\n\treturn data.Interface(s...).(bool)\n}", "func ToBoolean(value interface{}) bool {\n\treturn ToBooleanWithDefault(value, false)\n}", "func parseBool(content []byte, aggErr *AggregateError) bool {\n result, err := strconv.ParseBool(string(content))\n if err != nil {\n aggErr.Append(err)\n }\n return result\n}", "func (setGroup *SettingGroup) GetValueAsBool(id string) (bool, error) {\n\t//Get the value\n\tvalue, err := setGroup.GetValueAsString(id)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\n\t//Now convert to bool\n\tvalueBool, err := 
strconv.ParseBool(value)\n\treturn valueBool, nil\n\n}", "func AssertBool(s string) bool {\n\tb, err := strconv.ParseBool(s)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn b\n}", "func AsBool() Option {\n\treturn func(c *conf.Config) {\n\t\tc.Expect = reflect.Bool\n\t}\n}", "func BoolStrict(name string, defaultValue bool) (bool, error) {\n\tif strVal, ok := os.LookupEnv(name); ok {\n\t\tres, err := strconv.ParseBool(strVal)\n\t\tif err != nil {\n\t\t\treturn false, err\n\t\t}\n\n\t\treturn res, nil\n\t}\n\n\treturn defaultValue, nil\n}", "func ConvBool(p payload.Safe) (bool, error) {\n\ts, ok := p.(bwrap)\n\tif !ok {\n\t\treturn false, errors.New(\"payload is not a string\")\n\t}\n\n\treturn bool(s), nil\n}", "func ParseBool(str string) (bool, error) {\n\tif str == \"on\" {\n\t\treturn true, nil\n\t}\n\tif str == \"off\" {\n\t\treturn false, nil\n\t}\n\treturn strconv.ParseBool(str)\n}", "func TestBool(tst *testing.T) {\n\n\t// Test bool\n\tb, err := StringToBool(\"true\")\n\tbrtesting.AssertEqual(tst, err, nil, \"StringToBool failed\")\n\tbrtesting.AssertEqual(tst, b, true, \"StringToBool failed\")\n\tb, err = StringToBool(\"True\")\n\tbrtesting.AssertEqual(tst, err, nil, \"StringToBool failed\")\n\tbrtesting.AssertEqual(tst, b, true, \"StringToBool failed\")\n\tb, err = StringToBool(\"TruE\")\n\tbrtesting.AssertEqual(tst, err, nil, \"StringToBool failed\")\n\tbrtesting.AssertEqual(tst, b, true, \"StringToBool failed\")\n\tb, err = StringToBool(\"false\")\n\tbrtesting.AssertEqual(tst, err, nil, \"StringToBool failed\")\n\tbrtesting.AssertEqual(tst, b, false, \"StringToBool failed\")\n\tb, err = StringToBool(\"go-bedrock\")\n\tbrtesting.AssertNotEqual(tst, err, nil, \"StringToBool failed\")\n}", "func (fa formulaArg) ToBool() formulaArg {\n\tvar b bool\n\tvar err error\n\tswitch fa.Type {\n\tcase ArgString:\n\t\tb, err = strconv.ParseBool(fa.String)\n\t\tif err != nil {\n\t\t\treturn newErrorFormulaArg(formulaErrorVALUE, err.Error())\n\t\t}\n\tcase 
ArgNumber:\n\t\tif fa.Boolean && fa.Number == 1 {\n\t\t\tb = true\n\t\t}\n\t}\n\treturn newBoolFormulaArg(b)\n}", "func Bool(name string) (bool, error) {\n\tv, err := getenv(name)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\treturn strconv.ParseBool(v)\n}", "func formatBool(v bool) string {\n\tif v {\n\t\treturn \"true\"\n\t}\n\treturn \"false\"\n}", "func BoolStr(b bool, s *string) {\n\tif b {\n\t\t*s = \"true\"\n\t} else {\n\t\t*s = \"false\"\n\t}\n}", "func (a Attributes) GetAsBoolWithDefault(key string, defaultValue bool) bool {\n\tswitch v := a[key].(type) {\n\tcase bool:\n\t\treturn v\n\tcase string:\n\t\tif result, err := strconv.ParseBool(v); err == nil {\n\t\t\treturn result\n\t\t}\n\t}\n\treturn defaultValue\n}", "func stringifyBool(b bool) string {\n\tif b {\n\t\treturn \"True\"\n\t}\n\treturn \"False\"\n}", "func (formatter) fBool(v *types.RecordValue) *types.RecordValue {\n\tif v.Value != strBoolTrue {\n\t\tv.Value = \"\"\n\t}\n\n\treturn v\n}", "func ParseBoolean(s string) Boolean {\n\tif s == \"\" {\n\t\treturn NullBoolean()\n\t}\n\n\treturn NewBoolean(s == \"true\")\n}", "func (r *Response) Bool() (bool, error) {\n\treturn strconv.ParseBool(r.String())\n}", "func ParseBool(str string) (val bool, isBool bool) {\n\t// Note: Not using strconv.ParseBool because I want it a bit looser (any casing) and to allow yes/no/off/on values.\n\tlstr := strings.ToLower(strings.TrimSpace(str))\n\tswitch lstr {\n\tcase \"false\", \"f\", \"0\", \"no\", \"n\", \"off\":\n\t\tisBool = true\n\tcase \"true\", \"t\", \"1\", \"yes\", \"y\", \"on\":\n\t\tval = true\n\t\tisBool = true\n\t}\n\treturn\n}", "func CastBool(val interface{}) (bool, bool) {\n\tswitch val.(type) {\n\tcase bool:\n\t\treturn val.(bool), true\n\tcase int:\n\t\treturn val.(int) != 0, true\n\tcase int8:\n\t\treturn val.(int8) != 0, true\n\tcase int16:\n\t\treturn val.(int16) != 0, true\n\tcase int32:\n\t\treturn val.(int32) != 0, true\n\tcase int64:\n\t\treturn val.(int64) != 0, true\n\tcase 
uint:\n\t\treturn val.(uint) != 0, true\n\tcase uint8:\n\t\treturn val.(uint8) != 0, true\n\tcase uint16:\n\t\treturn val.(uint16) != 0, true\n\tcase uint32:\n\t\treturn val.(uint32) != 0, true\n\tcase uint64:\n\t\treturn val.(uint64) != 0, true\n\tcase float32:\n\t\treturn val.(float32) != 0, true\n\tcase float64:\n\t\treturn val.(float64) != 0, true\n\tcase string:\n\t\tif bval, err := strconv.ParseBool(val.(string)); err != nil {\n\t\t\tif fval, ok := CastFloat(val.(string)); ok {\n\t\t\t\treturn fval != 0, true\n\t\t\t}\n\t\t\treturn false, false\n\t\t} else {\n\t\t\treturn bval, true\n\t\t}\n\t}\n\treturn false, false\n}", "func IsBool(val interface{}) bool {\n\tif _, ok := val.(bool); ok {\n\t\treturn true\n\t}\n\n\tif typVal, ok := val.(string); ok {\n\t\t_, err := ToBool(typVal)\n\t\treturn err == nil\n\t}\n\treturn false\n}", "func (v Value) Bool(defaults ...bool) bool {\n\t// Return the first default if the raw is undefined\n\tif v.raw == nil {\n\t\t// Make sure there's at least one thing in the list\n\t\tdefaults = append(defaults, false)\n\t\treturn defaults[0]\n\t}\n\n\tswitch t := v.raw.(type) {\n\tcase string:\n\t\tb, err := strconv.ParseBool(t)\n\t\tif err != nil {\n\t\t\tslog.Panicf(\"failed to parse bool: %v\", err)\n\t\t}\n\t\treturn b\n\n\tcase bool:\n\t\treturn t\n\n\tdefault:\n\t\tslog.Panicf(\"%v is of unsupported type %v\", t, reflect.TypeOf(t).String())\n\t}\n\n\treturn false\n}", "func ConvertToBool(value interface{}) (bool, bool) {\n\tif v, ok := value.(bool); ok {\n\t\treturn v, ok\n\t}\n\n\t// try converting \"true\" \"false\"\n\tif v, ok := value.(string); ok {\n\t\tif strings.ToLower(v) == \"true\" {\n\t\t\treturn true, true\n\t\t} else if strings.ToLower(v) == \"false\" {\n\t\t\treturn false, true\n\t\t}\n\t}\n\n\t// try convert as number\n\tv, ok := ConvertToInt8(value)\n\tif ok {\n\t\tif v == 1 {\n\t\t\treturn true, true\n\t\t} else if v == 0 {\n\t\t\treturn false, true\n\t\t}\n\t}\n\n\treturn false, false\n}", "func (me 
TdtypeType) IsBoolean() bool { return me.String() == \"boolean\" }", "func StringToBoolWithFormat(str String, format string) Bool {\n\tif format == \"%t\" { // Same as not using custom format.\n\t\treturn StringToBool(str)\n\t}\n\n\tv := &stringToBool{from: str, format: format}\n\tstr.AddListener(v)\n\treturn v\n}", "func (d Driver) BoolString(v bool) string {\n\treturn strconv.FormatBool(v)\n}", "func FakeBool(v interface{}) bool {\n\tswitch r := v.(type) {\n\tcase float64:\n\t\treturn r != 0\n\tcase string:\n\t\treturn r != \"\"\n\tcase bool:\n\t\treturn r\n\tcase nil:\n\t\treturn false\n\tdefault:\n\t\treturn true\n\t}\n}", "func (f *FlagSet) Bool(name string) bool {\n\tvalue := f.String(name)\n\tif value != \"\" {\n\t\tval, err := strconv.ParseBool(value)\n\t\tif err != nil {\n\t\t\treturn false\n\t\t}\n\t\treturn val\n\t}\n\treturn false\n}", "func (res Response) AsBool() (bool, error) {\n\treturn res.Bits.AsBool(), res.Error\n}", "func YesNo2Bool(val string) bool {\n\tif val != \"\" {\n\t\tif strings.ToLower(val) == \"yes\" {\n\t\t\treturn true\n\t\t} else {\n\t\t\treturn false\n\t\t}\n\t} else {\n\t\treturn false\n\t}\n}", "func ToBool(i interface{}) (bool, error) {\n\ti = indirect(i)\n\n\tswitch b := i.(type) {\n\tcase bool:\n\t\treturn b, nil\n\tcase nil:\n\t\treturn false, nil\n\tcase int:\n\t\tif i.(int) != 0 {\n\t\t\treturn true, nil\n\t\t}\n\t\treturn false, nil\n\tcase string:\n\t\treturn strconv.ParseBool(i.(string))\n\tdefault:\n\t\treturn false, fmt.Errorf(\"unable to cast %#v to bool\", i)\n\t}\n}" ]
[ "0.76321614", "0.7480224", "0.7370424", "0.7344179", "0.729552", "0.7221894", "0.7180088", "0.7171699", "0.71345377", "0.7087665", "0.70560706", "0.70353085", "0.7016963", "0.7016963", "0.69989306", "0.6966437", "0.6965041", "0.6953353", "0.69252783", "0.6895328", "0.68600845", "0.68598616", "0.68428993", "0.68333215", "0.6791774", "0.67740434", "0.67525893", "0.6751817", "0.67463815", "0.6715709", "0.67108727", "0.6699126", "0.66969556", "0.6679258", "0.6666134", "0.6650232", "0.66473275", "0.6643164", "0.66400635", "0.6638218", "0.66367465", "0.663148", "0.6614024", "0.6608295", "0.6589852", "0.65792376", "0.65463316", "0.6543714", "0.653524", "0.65288687", "0.65236133", "0.6522243", "0.65146446", "0.65097064", "0.6509268", "0.64971936", "0.6490878", "0.6472862", "0.64467645", "0.6445549", "0.64440507", "0.64408934", "0.64179784", "0.64118516", "0.64024806", "0.63837755", "0.63837755", "0.6379381", "0.63720506", "0.6360922", "0.6358412", "0.6349097", "0.63476634", "0.63462204", "0.6343388", "0.6341356", "0.6327867", "0.6320456", "0.6311369", "0.6308425", "0.6304961", "0.63037133", "0.63029945", "0.6300882", "0.62964827", "0.6294834", "0.6294422", "0.62896305", "0.6289209", "0.6287016", "0.6275975", "0.6272531", "0.6253642", "0.62410384", "0.6236598", "0.6233495", "0.6226951", "0.6220002", "0.621444", "0.6207518" ]
0.8361102
0
ParseAdvertiseAddr validates advertise address, makes sure it's not an unreachable or multicast address returns address split into host and port, port could be empty if not specified
ParseAdvertiseAddr проверяет адрес рассылки, убеждается, что он не является неразрешимым или многокастовым адресом, возвращает адрес, разделенный на хост и порт, порт может быть пустым, если не указан
func ParseAdvertiseAddr(advertiseIP string) (string, string, error) { advertiseIP = strings.TrimSpace(advertiseIP) host := advertiseIP port := "" if len(net.ParseIP(host)) == 0 && strings.Contains(advertiseIP, ":") { var err error host, port, err = net.SplitHostPort(advertiseIP) if err != nil { return "", "", trace.BadParameter("failed to parse address %q", advertiseIP) } if _, err := strconv.Atoi(port); err != nil { return "", "", trace.BadParameter("bad port %q, expected integer", port) } if host == "" { return "", "", trace.BadParameter("missing host parameter") } } ip := net.ParseIP(host) if len(ip) != 0 { if ip.IsUnspecified() || ip.IsMulticast() { return "", "", trace.BadParameter("unreachable advertise IP: %v", advertiseIP) } } return host, port, nil }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func parseAdvertiseAddr(advAddr string, port int) (string, int) {\n\treturn advAddr, port\n\n\t// bug: if use domain, always return empty host\n\t/*m, e := regexp.Match(ipv4Pattern, []byte(advAddr))\n\t// if parse error, use serve port and parsed ip address\n\tif e != nil {\n\t\treturn \"\", port\n\t}\n\tif m {\n\t\treturn advAddr, port\n\t}\n\n\tm, e1 := regexp.Match(ipv4WithPortPattern, []byte(advAddr))\n\t// if parse error, use serve port and parsed ip address\n\tif e1 != nil {\n\t\treturn \"\", port\n\t}\n\tif m {\n\t\t// 1 5\n\t\tregxp := regexp.MustCompile(ipv4WithPortPattern)\n\t\tadAddr := regxp.ReplaceAllString(advAddr, \"${1}\")\n\t\tadPort, _ := strconv.Atoi(regxp.ReplaceAllString(advAddr, \"${5}\"))\n\t\treturn adAddr, adPort\n\t}\n\treturn \"\", port*/\n}", "func parseAdvertiseAddr(advAddr string, port int) (string, int) {\n\tm, e := regexp.Match(ipv4Pattern, []byte(advAddr))\n\t// if parse error, use serve port and parsed ip address\n\tif e != nil {\n\t\treturn \"\", port\n\t}\n\tif m {\n\t\treturn advAddr, port\n\t}\n\n\tm, e1 := regexp.Match(ipv4WithPortPattern, []byte(advAddr))\n\t// if parse error, use serve port and parsed ip address\n\tif e1 != nil {\n\t\treturn \"\", port\n\t}\n\tif m {\n\t\t// 1 5\n\t\tregxp := regexp.MustCompile(ipv4WithPortPattern)\n\t\tadAddr := regxp.ReplaceAllString(advAddr, \"${1}\")\n\t\tadPort, _ := strconv.Atoi(regxp.ReplaceAllString(advAddr, \"${5}\"))\n\t\treturn adAddr, adPort\n\t}\n\treturn \"\", port\n}", "func calculateAdvertiseAddress(bindAddr, advertiseAddr string) (net.IP, error) {\n\tif advertiseAddr != \"\" {\n\t\tip := net.ParseIP(advertiseAddr)\n\t\tif ip == nil {\n\t\t\treturn nil, errors.Errorf(\"failed to parse advertise addr '%s'\", advertiseAddr)\n\t\t}\n\t\tif ip4 := ip.To4(); ip4 != nil {\n\t\t\tip = ip4\n\t\t}\n\t\treturn ip, nil\n\t}\n\n\tif isAny(bindAddr) {\n\t\tprivateIP, err := getPrivateAddress()\n\t\tif err != nil {\n\t\t\treturn nil, errors.Wrap(err, \"failed to get private 
IP\")\n\t\t}\n\t\tif privateIP == \"\" {\n\t\t\treturn nil, errors.New(\"no private IP found, explicit advertise addr not provided\")\n\t\t}\n\t\tip := net.ParseIP(privateIP)\n\t\tif ip == nil {\n\t\t\treturn nil, errors.Errorf(\"failed to parse private IP '%s'\", privateIP)\n\t\t}\n\t\treturn ip, nil\n\t}\n\n\tip := net.ParseIP(bindAddr)\n\tif ip == nil {\n\t\treturn nil, errors.Errorf(\"failed to parse bind addr '%s'\", bindAddr)\n\t}\n\treturn ip, nil\n}", "func parseHost(addr string) string {\n\tvar (\n\t\thost, port string\n\t\tdefaultAssigned bool\n\t)\n\n\tv := strings.Split(addr, \":\")\n\n\tswitch len(v) {\n\tcase 2:\n\t\thost = v[0]\n\t\tport = v[1]\n\n\t\tif host == \"\" {\n\t\t\thost = _DEFAULT_HOST\n\t\t\tdefaultAssigned = true\n\t\t}\n\n\t\tif port == \"\" {\n\t\t\tport = _DEFAULT_PORT\n\t\t\tdefaultAssigned = true\n\t\t}\n\n\t\tif defaultAssigned == false {\n\t\t\treturn addr // addr is already in required format\n\t\t}\n\t\tbreak\n\n\tcase 1:\n\t\thost = v[0]\n\t\tif host == \"\" {\n\t\t\thost = _DEFAULT_HOST\n\t\t}\n\t\tport = _DEFAULT_PORT\n\tcase 0:\n\t\tfallthrough\n\tdefault:\n\t\thost = _DEFAULT_HOST\n\t\tport = _DEFAULT_PORT\n\t\tbreak\n\t}\n\treturn strings.Join([]string{host, port}, \":\")\n}", "func ParseAddress(address string) (*Address, errors.TracerError) {\n\taddr := &Address{}\n\tif ValidateIPv6Address(address) {\n\t\tclean, testPort := cleanIPv6(address)\n\t\thasPort := false\n\t\tport := 0\n\t\tif testPort > 0 {\n\t\t\thasPort = true\n\t\t\tport = testPort\n\t\t}\n\t\treturn &Address{Host: clean, Port: port, IsIPv6: true, HasPort: hasPort}, nil\n\t}\n\tcolons := strings.Count(address, \":\")\n\tif colons > 1 {\n\t\treturn nil, errors.New(\"Invalid address: too many colons '%s'\", address)\n\t} else if colons == 0 {\n\t\treturn &Address{Host: address, HasPort: false}, nil\n\t}\n\tsplit := strings.Split(address, \":\")\n\taddr.Host = split[0]\n\tport, err := strconv.Atoi(split[1])\n\tif err != nil {\n\t\treturn nil, 
errors.New(\"address '%s' is invalid: could not parse port data, %s\", address, err)\n\t}\n\tif port <= 0 || port > math.MaxUint16 {\n\t\treturn nil, errors.New(\"port '%d' is not a valid port number, must be uint16\", port)\n\t}\n\taddr.Port = port\n\taddr.HasPort = true\n\treturn addr, nil\n}", "func CalculateAdvertiseIP(bindHost, advertiseHost string, resolver Resolver, logger log.Logger) (net.IP, error) {\n\t// Prefer advertise host, if it's given.\n\tif advertiseHost != \"\" {\n\t\t// Best case: parse a plain IP.\n\t\tif ip := net.ParseIP(advertiseHost); ip != nil {\n\t\t\tif ip4 := ip.To4(); ip4 != nil {\n\t\t\t\tip = ip4\n\t\t\t}\n\t\t\treturn ip, nil\n\t\t}\n\n\t\t// Otherwise, try to resolve it as if it's a hostname.\n\t\tips, err := resolver.LookupIPAddr(context.Background(), advertiseHost)\n\t\tif err == nil && len(ips) == 1 {\n\t\t\tif ip4 := ips[0].IP.To4(); ip4 != nil {\n\t\t\t\tips[0].IP = ip4\n\t\t\t}\n\t\t\treturn ips[0].IP, nil\n\t\t}\n\n\t\t// Didn't work, fall back to the bind host.\n\t\tif err == nil && len(ips) != 1 {\n\t\t\terr = fmt.Errorf(\"advertise host '%s' resolved to %d IPs\", advertiseHost, len(ips))\n\t\t}\n\t\tlevel.Warn(logger).Log(\"err\", err, \"msg\", \"falling back to bind host\")\n\t}\n\n\t// If bind host is all-zeroes, try to get a private IP.\n\tif bindHost == \"0.0.0.0\" {\n\t\tprivateIP, err := sockaddr.GetPrivateIP()\n\t\tif err != nil {\n\t\t\treturn nil, errors.Wrap(err, \"failed to deduce private IP from all-zeroes bind address\")\n\t\t}\n\t\tif privateIP == \"\" {\n\t\t\treturn nil, errors.Wrap(err, \"no private IP found, and explicit advertise address not provided\")\n\t\t}\n\t\tip := net.ParseIP(privateIP)\n\t\tif ip == nil {\n\t\t\treturn nil, errors.Errorf(\"failed to parse private IP '%s'\", privateIP)\n\t\t}\n\t\treturn ip, nil\n\t}\n\n\t// Otherwise, try to parse the bind host as an IP.\n\tif ip := net.ParseIP(bindHost); ip != nil {\n\t\treturn ip, nil\n\t}\n\n\t// And finally, try to resolve the bind 
host.\n\tips, err := resolver.LookupIPAddr(context.Background(), bindHost)\n\tif err == nil && len(ips) == 1 {\n\t\tif ip4 := ips[0].IP.To4(); ip4 != nil {\n\t\t\tips[0].IP = ip4\n\t\t}\n\t\treturn ips[0].IP, nil\n\t}\n\n\t// Didn't work. This time it's fatal.\n\tif err == nil && len(ips) != 1 {\n\t\terr = fmt.Errorf(\"bind host '%s' resolved to %d IPs\", bindHost, len(ips))\n\t}\n\treturn nil, errors.Wrap(err, \"bind host failed to resolve\")\n}", "func (c *OneConnection) ParseAddr(pl []byte) {\n\tb := bytes.NewBuffer(pl)\n\tcnt, _ := btc.ReadVLen(b)\n\tfor i := 0; i < int(cnt); i++ {\n\t\tvar buf [30]byte\n\t\tn, e := b.Read(buf[:])\n\t\tif n != len(buf) || e != nil {\n\t\t\tcommon.CountSafe(\"AddrError\")\n\t\t\tc.DoS(\"AddrError\")\n\t\t\t//println(\"ParseAddr:\", n, e)\n\t\t\tbreak\n\t\t}\n\t\ta := peersdb.NewPeer(buf[:])\n\t\tif !sys.ValidIp4(a.Ip4[:]) {\n\t\t\tcommon.CountSafe(\"AddrInvalid\")\n\t\t\t/*if c.Misbehave(\"AddrLocal\", 1) {\n\t\t\t\tbreak\n\t\t\t}*/\n\t\t\t//print(c.PeerAddr.Ip(), \" \", c.Node.Agent, \" \", c.Node.Version, \" addr local \", a.String(), \"\\n> \")\n\t\t} else if time.Unix(int64(a.Time), 0).Before(time.Now().Add(time.Hour)) {\n\t\t\tif time.Now().Before(time.Unix(int64(a.Time), 0).Add(peersdb.ExpirePeerAfter)) {\n\t\t\t\tk := qdb.KeyType(a.UniqID())\n\t\t\t\tv := peersdb.PeerDB.Get(k)\n\t\t\t\tif v != nil {\n\t\t\t\t\ta.Banned = peersdb.NewPeer(v[:]).Banned\n\t\t\t\t}\n\t\t\t\ta.Time = uint32(time.Now().Add(-5 * time.Minute).Unix()) // add new peers as not just alive\n\t\t\t\tif a.Time > uint32(time.Now().Unix()) {\n\t\t\t\t\tprintln(\"wtf\", a.Time, time.Now().Unix())\n\t\t\t\t}\n\t\t\t\tpeersdb.PeerDB.Put(k, a.Bytes())\n\t\t\t} else {\n\t\t\t\tcommon.CountSafe(\"AddrStale\")\n\t\t\t}\n\t\t} else {\n\t\t\tif c.Misbehave(\"AddrFuture\", 50) {\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t}\n}", "func ParseAddress(address string) (string, string) {\n\tsplit := strings.Split(address, \":\")\n\tip := split[0]\n\tport := split[1]\n\n\treturn ip, 
port\n}", "func parseListeningAddress(ctx *context.T, laddress string) (network string, address string, p flow.Protocol, err error) {\n\tparts := strings.SplitN(laddress, \"/\", 2)\n\tif len(parts) != 2 {\n\t\treturn \"\", \"\", nil, ErrorfInvalidAddress(ctx, \"invalid vine address %v, address must be of the form 'network/address/tag'\", laddress)\n\t}\n\tp, _ = flow.RegisteredProtocol(parts[0])\n\tif p == nil {\n\t\treturn \"\", \"\", nil, ErrorfNoRegisteredProtocol(ctx, \"no registered protocol: %v\", parts[0])\n\t}\n\treturn parts[0], parts[1], p, nil\n}", "func parseAddr(addr string) (string, string) {\n\tparsed := strings.SplitN(addr, \":\", 2)\n\treturn parsed[0], parsed[1]\n}", "func ParseAddress(addr string) Address {\n\t// Handle IPv6 address in form as \"[2001:4860:0:2001::68]\"\n\tlenAddr := len(addr)\n\tif lenAddr > 0 && addr[0] == '[' && addr[lenAddr-1] == ']' {\n\t\taddr = addr[1 : lenAddr-1]\n\t}\n\taddr = strings.TrimSpace(addr)\n\n\tip := net.ParseIP(addr)\n\tif ip != nil {\n\t\treturn IPAddress(ip)\n\t}\n\treturn DomainAddress(addr)\n}", "func ParseAddr(s string) (Addr, error) {\n\tcomma := strings.IndexByte(s, ',')\n\tif comma < 0 {\n\t\treturn Addr{}, serrors.New(\"invalid address: expected comma\", \"value\", s)\n\t}\n\tia, err := ParseIA(s[0:comma])\n\tif err != nil {\n\t\treturn Addr{}, err\n\t}\n\th, err := ParseHost(s[comma+1:])\n\tif err != nil {\n\t\treturn Addr{}, err\n\t}\n\treturn Addr{IA: ia, Host: h}, nil\n}", "func extractAddress(str string) string {\n\tvar addr string\n\n\tswitch {\n\tcase strings.Contains(str, `]`):\n\t\t// IPv6 address [2001:db8::1%lo0]:48467\n\t\taddr = strings.Split(str, `]`)[0]\n\t\taddr = strings.Split(addr, `%`)[0]\n\t\taddr = strings.TrimLeft(addr, `[`)\n\tdefault:\n\t\t// IPv4 address 192.0.2.1:48467\n\t\taddr = strings.Split(str, `:`)[0]\n\t}\n\treturn addr\n}", "func extractAddress(str string) string {\n\tvar addr string\n\n\tswitch {\n\tcase strings.Contains(str, `]`):\n\t\t// IPv6 address 
[2001:db8::1%lo0]:48467\n\t\taddr = strings.Split(str, `]`)[0]\n\t\taddr = strings.Split(addr, `%`)[0]\n\t\taddr = strings.TrimLeft(addr, `[`)\n\tdefault:\n\t\t// IPv4 address 192.0.2.1:48467\n\t\taddr = strings.Split(str, `:`)[0]\n\t}\n\treturn addr\n}", "func AdvertiseHost(listen string) string {\n\tif listen == \"0.0.0.0\" {\n\t\taddrs, err := net.InterfaceAddrs()\n\t\tif err != nil || len(addrs) == 0 {\n\t\t\treturn \"localhost\"\n\t\t}\n\n\t\tfor _, addr := range addrs {\n\t\t\tif ip, ok := addr.(*net.IPNet); ok && !ip.IP.IsLoopback() && ip.IP.To4() != nil {\n\t\t\t\treturn ip.IP.To4().String()\n\t\t\t}\n\t\t}\n\t\treturn \"localhost\"\n\t}\n\n\treturn listen\n}", "func parseAddr(text string) (*net.TCPAddr, error) {\n\tif text[0] == ':' {\n\t\ttext = \"0.0.0.0\" + text\n\t}\n\n\taddr := strings.Replace(text, \"public\", address.External().String(), 1)\n\treturn net.ResolveTCPAddr(\"tcp\", addr)\n}", "func SplitAddr(b []byte) Addr {\n\taddrLen := 1\n\tif len(b) < addrLen {\n\t\treturn nil\n\t}\n\n\tswitch b[0] {\n\tcase AtypDomainName:\n\t\tif len(b) < 2 {\n\t\t\treturn nil\n\t\t}\n\t\taddrLen = 1 + 1 + int(b[1]) + 2\n\tcase AtypIPv4:\n\t\taddrLen = 1 + net.IPv4len + 2\n\tcase AtypIPv6:\n\t\taddrLen = 1 + net.IPv6len + 2\n\tdefault:\n\t\treturn nil\n\n\t}\n\n\tif len(b) < addrLen {\n\t\treturn nil\n\t}\n\n\treturn b[:addrLen]\n}", "func parseEPRTtoAddr(line string) (string, string, error) {\n\taddr := strings.Split(line, \"|\")\n\n\tif len(addr) != 5 {\n\t\treturn \"\", \"\", fmt.Errorf(\"invalid data address\")\n\t}\n\n\tnetProtocol := addr[1]\n\tIP := addr[2]\n\n\t// check port is valid\n\tport := addr[3]\n\tif integerPort, err := strconv.Atoi(port); err != nil {\n\t\treturn \"\", \"\", fmt.Errorf(\"invalid data address\")\n\t} else if integerPort <= 0 || integerPort > 65535 {\n\t\treturn \"\", \"\", fmt.Errorf(\"invalid data address\")\n\t}\n\n\tswitch netProtocol {\n\tcase \"1\", \"2\":\n\t\t// use protocol 1 means IPv4. 
2 means IPv6\n\t\t// net.ParseIP for validate IP\n\t\tif net.ParseIP(IP) == nil {\n\t\t\treturn \"\", \"\", fmt.Errorf(\"invalid data address\")\n\t\t}\n\t\tbreak\n\tdefault:\n\t\t// wrong network protocol\n\t\treturn \"\", \"\", fmt.Errorf(\"unknown network protocol\")\n\t}\n\n\treturn IP, port, nil\n}", "func ParseCliAddr(ctx *cli.Context) (string, string) {\n\treturn ctx.GlobalString(\"address\"), ctx.GlobalString(\"port\")\n}", "func (b *Backend) ParseAddress(addr string) (err error) {\n\tif b.Addr, err = url.Parse(addr); err != nil {\n\t\treturn err\n\t}\n\n\tif b.Addr.Scheme == \"\" {\n\t\tb.Addr.Scheme = \"http\"\n\t}\n\n\thttps := b.Addr.Scheme == \"https\"\n\tb.Host = b.Addr.Host\n\n\tif b.Addr.Port() == \"\" {\n\t\tif https {\n\t\t\tb.Host += \":443\"\n\t\t} else {\n\t\t\tb.Host += \":80\"\n\t\t}\n\t}\n\n\treturn nil\n}", "func (_BaseAccessWallet *BaseAccessWalletFilterer) ParseDbgAddress(log types.Log) (*BaseAccessWalletDbgAddress, error) {\n\tevent := new(BaseAccessWalletDbgAddress)\n\tif err := _BaseAccessWallet.contract.UnpackLog(event, \"dbgAddress\", log); err != nil {\n\t\treturn nil, err\n\t}\n\tevent.Raw = log\n\treturn event, nil\n}", "func (p *AddressParser) Parse(address string) (*Address, error)", "func ParseAddress(s string) (Address, error) {\n\n\tvar family uint8\n\tvar sn uint64\n\tvar crcStr string\n\tcnt, err := fmt.Sscanf(s, \"%x.%x.%s\", &family, &sn, &crcStr)\n\n\tif (nil != err) || (3 != cnt) || (sn != (0xffffffffffff & sn)) {\n\t\treturn 0, errors.New(\"onewire: invalid address \" + s)\n\t}\n\ta := sn<<8 | (uint64(family) << 56)\n\n\tbuf := make([]byte, 8)\n\tbinary.LittleEndian.PutUint64(buf, sn<<8|(uint64(family)<<56))\n\n\tcrc := RevCrc8(buf[1:])\n\n\tif \"--\" != crcStr {\n\t\tvar c uint8\n\t\tcnt, err = fmt.Sscanf(crcStr, \"%x\", &c)\n\t\tif c != crc {\n\t\t\treturn 0, errors.New(\"onewire: invalid crc \" + s)\n\t\t}\n\t}\n\n\ta |= 0xff & uint64(crc)\n\n\treturn Address(a), nil\n}", "func parseInetAddr(af int, b []byte) (Addr, 
error) {\n\tswitch af {\n\tcase syscall.AF_INET:\n\t\tif len(b) < sizeofSockaddrInet {\n\t\t\treturn nil, errInvalidAddr\n\t\t}\n\t\ta := &Inet4Addr{}\n\t\tcopy(a.IP[:], b[4:8])\n\t\treturn a, nil\n\tcase syscall.AF_INET6:\n\t\tif len(b) < sizeofSockaddrInet6 {\n\t\t\treturn nil, errInvalidAddr\n\t\t}\n\t\ta := &Inet6Addr{ZoneID: int(nativeEndian.Uint32(b[24:28]))}\n\t\tcopy(a.IP[:], b[8:24])\n\t\tif a.IP[0] == 0xfe && a.IP[1]&0xc0 == 0x80 || a.IP[0] == 0xff && (a.IP[1]&0x0f == 0x01 || a.IP[1]&0x0f == 0x02) {\n\t\t\t// KAME based IPv6 protocol stack usually\n\t\t\t// embeds the interface index in the\n\t\t\t// interface-local or link-local address as\n\t\t\t// the kernel-internal form.\n\t\t\tid := int(bigEndian.Uint16(a.IP[2:4]))\n\t\t\tif id != 0 {\n\t\t\t\ta.ZoneID = id\n\t\t\t\ta.IP[2], a.IP[3] = 0, 0\n\t\t\t}\n\t\t}\n\t\treturn a, nil\n\tdefault:\n\t\treturn nil, errInvalidAddr\n\t}\n}", "func parseEnsAPIAddress(s string) (tld, endpoint string, addr common.Address) {\n\tisAllLetterString := func(s string) bool {\n\t\tfor _, r := range s {\n\t\t\tif !unicode.IsLetter(r) {\n\t\t\t\treturn false\n\t\t\t}\n\t\t}\n\t\treturn true\n\t}\n\tendpoint = s\n\tif i := strings.Index(endpoint, \":\"); i > 0 {\n\t\tif isAllLetterString(endpoint[:i]) && len(endpoint) > i+2 && endpoint[i+1:i+3] != \"//\" {\n\t\t\ttld = endpoint[:i]\n\t\t\tendpoint = endpoint[i+1:]\n\t\t}\n\t}\n\tif i := strings.Index(endpoint, \"@\"); i > 0 {\n\t\taddr = common.HexToAddress(endpoint[:i])\n\t\tendpoint = endpoint[i+1:]\n\t}\n\treturn\n}", "func ParseAddress(addr string) (*Address, error) {\n\taddr = strings.ToUpper(addr)\n\tl := len(addr)\n\tif l < 50 {\n\t\treturn nil, InvalidAccountAddrError{reason: \"length\"}\n\t}\n\ti := l - 50 // start index of hex\n\n\tidh, err := hex.DecodeString(addr[i:])\n\tif err != nil {\n\t\treturn nil, InvalidAccountAddrError{reason: \"hex\"}\n\t}\n\n\t_addr := &Address{}\n\t_addr.Code = addr[0:i]\n\t_addr.Type = AccountType(idh[0])\n\t_addr.Hash = idh[1:]\n\n\tif 
err = _addr.Validate(); err != nil {\n\t\treturn nil, err\n\t}\n\treturn _addr, nil\n}", "func (_TokenVesting *TokenVestingCaller) ParseAddr(opts *bind.CallOpts, data []byte) (common.Address, error) {\n\tvar (\n\t\tret0 = new(common.Address)\n\t)\n\tout := ret0\n\terr := _TokenVesting.contract.Call(opts, out, \"parseAddr\", data)\n\treturn *ret0, err\n}", "func parseBindAddr(s string) (address net.Addr, err error) {\n\tconst maxUnixLen = 106\n\n\t// '@' prefix specifies a Linux abstract domain socket.\n\tif runtime.GOOS == \"linux\" && strings.HasPrefix(s, \"@\") {\n\t\tif len(s) > maxUnixLen {\n\t\t\treturn nil, fmt.Errorf(\"sock file length must be less than %d characters\", maxUnixLen)\n\t\t}\n\t\treturn &net.UnixAddr{Name: s, Net: \"unix\"}, nil\n\t}\n\n\tif strings.Contains(s, \"/\") {\n\t\tif !filepath.IsAbs(s) {\n\t\t\treturn nil, errors.New(\"sock file must be an absolute path\")\n\t\t} else if len(s) > maxUnixLen {\n\t\t\treturn nil, fmt.Errorf(\"sock file length must be less than %d characters\", maxUnixLen)\n\t\t}\n\t\treturn &net.UnixAddr{Name: s, Net: \"unix\"}, nil\n\t}\n\n\t// For TCP, the supplied address string, s, is one of a port, a :port, or a host:port.\n\tip, port := net.IPv4(127, 0, 0, 1), 0\n\n\tif strings.Contains(s, \":\") {\n\t\thost, portString, err := net.SplitHostPort(s)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"invalid addr %q - must be provided as host:port\", s)\n\t\t}\n\t\tif host != \"\" {\n\t\t\tip = net.ParseIP(host)\n\t\t}\n\n\t\tport, err = strconv.Atoi(portString)\n\t} else {\n\t\tport, err = strconv.Atoi(s)\n\t}\n\n\tif err != nil || port < 1 || port > 65534 {\n\t\treturn nil, fmt.Errorf(\"invalid port %d - must be between 1 and 65534\", port)\n\t}\n\treturn &net.TCPAddr{IP: ip, Port: port}, nil\n}", "func parseDialingAddress(ctx *context.T, vaddress string) (network string, address string, tag string, p flow.Protocol, err error) {\n\tparts := strings.SplitN(vaddress, \"/\", 3)\n\tif len(parts) != 3 {\n\t\treturn 
\"\", \"\", \"\", nil, ErrorfInvalidAddress(ctx, \"invalid vine address %v, address must be of the form 'network/address/tag'\", vaddress)\n\t}\n\tp, _ = flow.RegisteredProtocol(parts[0])\n\tif p == nil {\n\t\treturn \"\", \"\", \"\", nil, ErrorfNoRegisteredProtocol(ctx, \"no registered protocol: %v\", parts[0])\n\t}\n\treturn parts[0], parts[1], parts[2], p, nil\n}", "func splitAddr(v string) (network, addr string, err error) {\n\tep := strings.Split(v, \"://\")\n\tif len(ep) != 2 {\n\t\terr = errInvalidAddress\n\t\treturn network, addr, err\n\t}\n\tnetwork = ep[0]\n\n\ttrans, ok := drivers.get(network)\n\tif !ok {\n\t\terr = fmt.Errorf(\"zmq4: unknown transport %q\", network)\n\t\treturn network, addr, err\n\t}\n\n\taddr, err = trans.Addr(ep[1])\n\treturn network, addr, err\n}", "func ValidateAddress(ipPort string, allowLocalhost bool) bool {\n\tipPort = whitespaceFilter.ReplaceAllString(ipPort, \"\")\n\tpts := strings.Split(ipPort, \":\")\n\tif len(pts) != 2 {\n\t\treturn false\n\t}\n\n\tip := net.ParseIP(pts[0])\n\tif ip == nil {\n\t\treturn false\n\t} else if ip.IsLoopback() {\n\t\tif !allowLocalhost {\n\t\t\treturn false\n\t\t}\n\t} else if !ip.IsGlobalUnicast() {\n\t\treturn false\n\t}\n\n\tport, err := strconv.ParseUint(pts[1], 10, 16)\n\tif err != nil || port < 1024 {\n\t\treturn false\n\t}\n\n\treturn true\n}", "func ParseDevAddr(input string) (addr DevAddr, err error) {\n\tbytes, err := ParseHEX(input, 4)\n\tif err != nil {\n\t\treturn\n\t}\n\tcopy(addr[:], bytes)\n\treturn\n}", "func ValidateAddress(address string) error {\n\t// TODO: this list is not extensive and needs to be changed once we allow DNS\n\t// names for external metrics endpoints\n\tconst invalidChars = `abcdefghijklmnopqrstuvwxyz/\\ `\n\n\taddress = strings.ToLower(address)\n\tif strings.ContainsAny(address, invalidChars) {\n\t\treturn errors.New(\"invalid character detected (required format: <IP>:<PORT>)\")\n\t}\n\n\t// \tcheck if port if specified\n\tif !strings.Contains(address, 
\":\") {\n\t\treturn errors.New(\"no port specified\")\n\t}\n\n\th, p, err := net.SplitHostPort(address)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif h == \"\" {\n\t\treturn errors.New(\"no IP listen address specified\")\n\t}\n\n\tif p == \"\" {\n\t\treturn errors.New(\"no port specified\")\n\t}\n\n\treturn nil\n}", "func SplitAddress(addr string) (string, int) {\n\ts := strings.Split(addr, \":\")\n\thostname := s[0]\n\tport, _ := strconv.Atoi(s[1])\n\treturn hostname, port\n}", "func (_TokenVesting *TokenVestingSession) ParseAddr(data []byte) (common.Address, error) {\n\treturn _TokenVesting.Contract.ParseAddr(&_TokenVesting.CallOpts, data)\n}", "func ParseAddress(address string) (*mail.Address, error)", "func parseAddress(address string) (scheme, host, port string, err error) {\n\tif address == \"\" {\n\t\treturn\n\t}\n\tif strings.Contains(address, \"://\") {\n\t\turl, err := url.Parse(address)\n\t\tif err != nil {\n\t\t\treturn \"\", \"\", \"\", err\n\t\t}\n\t\tscheme, address = url.Scheme, url.Host\n\t}\n\tif strings.Contains(address, \":\") {\n\t\thost, port, err = net.SplitHostPort(address)\n\t\tif err != nil {\n\t\t\thost = address\n\t\t\terr = nil\n\t\t}\n\t} else {\n\t\thost = address\n\t}\n\tif port == \"\" {\n\t\tswitch scheme {\n\t\tcase \"http\", \"ws\":\n\t\t\tport = \"80\"\n\t\tcase \"https\", \"wss\":\n\t\t\tport = \"443\"\n\t\t}\n\t}\n\treturn\n}", "func normalizeNetworkAddress(a, defaultHost, defaultPort string) (string, error) {\n\tif strings.Contains(a, \"://\") {\n\t\treturn a, fmt.Errorf(\"Address %s contains a protocol identifier, which is not allowed\", a)\n\t}\n\tif a == \"\" {\n\t\treturn defaultHost + \":\" + defaultPort, nil\n\t}\n\thost, port, err := net.SplitHostPort(a)\n\tif err != nil {\n\t\tif strings.Contains(err.Error(), \"missing port in address\") {\n\t\t\tnormalized := a + \":\" + defaultPort\n\t\t\thost, port, err = net.SplitHostPort(normalized)\n\t\t\tif err != nil {\n\t\t\t\treturn a, fmt.Errorf(\"Unable to address %s 
after port resolution: %v\", normalized, err)\n\t\t\t}\n\t\t} else {\n\t\t\treturn a, fmt.Errorf(\"Unable to normalize address %s: %v\", a, err)\n\t\t}\n\t}\n\tif host == \"\" {\n\t\thost = defaultHost\n\t}\n\tif port == \"\" {\n\t\tport = defaultPort\n\t}\n\treturn host + \":\" + port, nil\n}", "func normalizeNetworkAddress(a, defaultHost, defaultPort string) (string, error) {\n\tif strings.Contains(a, \"://\") {\n\t\treturn a, fmt.Errorf(\"Address %s contains a protocol identifier, which is not allowed\", a)\n\t}\n\tif a == \"\" {\n\t\treturn defaultHost + \":\" + defaultPort, nil\n\t}\n\thost, port, err := net.SplitHostPort(a)\n\tif err != nil {\n\t\tif strings.Contains(err.Error(), \"missing port in address\") {\n\t\t\tnormalized := a + \":\" + defaultPort\n\t\t\thost, port, err = net.SplitHostPort(normalized)\n\t\t\tif err != nil {\n\t\t\t\treturn a, fmt.Errorf(\"Unable to address %s after port resolution: %v\", normalized, err)\n\t\t\t}\n\t\t} else {\n\t\t\treturn a, fmt.Errorf(\"Unable to normalize address %s: %v\", a, err)\n\t\t}\n\t}\n\tif host == \"\" {\n\t\thost = defaultHost\n\t}\n\tif port == \"\" {\n\t\tport = defaultPort\n\t}\n\treturn host + \":\" + port, nil\n}", "func ParseAddress(address string) (common.Address, error) {\n\tif common.IsHexAddress(address) {\n\t\treturn common.HexToAddress(address), nil\n\t}\n\treturn common.Address{}, fmt.Errorf(\"invalid address: %v\", address)\n}", "func buildAddress(address string, zone string, l Listener) string {\n\taddr, _, err := net.SplitHostPort(address)\n\tif err != nil {\n\t\taddr = address\n\t}\n\tif addr == \"\" {\n\t\taddr = \"localhost\"\n\t}\n\n\tport := l.GetPort()\n\tif port > 0 && port != 80 && port != 443 {\n\t\taddr += \":\" + strconv.Itoa(port)\n\t}\n\n\treturn withZonePrefix(addr, zone)\n}", "func (_BaseAccessControlGroup *BaseAccessControlGroupFilterer) ParseDbgAddress(log types.Log) (*BaseAccessControlGroupDbgAddress, error) {\n\tevent := new(BaseAccessControlGroupDbgAddress)\n\tif err := 
_BaseAccessControlGroup.contract.UnpackLog(event, \"dbgAddress\", log); err != nil {\n\t\treturn nil, err\n\t}\n\tevent.Raw = log\n\treturn event, nil\n}", "func AddressParserParse(p *mail.AddressParser, address string) (*mail.Address, error)", "func parseAddresses(addrs []string) (iaddrs []iaddr.IPFSAddr, err error) {\n\tiaddrs = make([]iaddr.IPFSAddr, len(addrs))\n\tfor i, saddr := range addrs {\n\t\tiaddrs[i], err = iaddr.ParseString(saddr)\n\t\tif err != nil {\n\t\t\treturn nil, cmds.ClientError(\"invalid peer address: \" + err.Error())\n\t\t}\n\t}\n\treturn\n}", "func ParseAddress(address string) (string, int) {\n\tmatch, err := gregex.MatchString(`^(.+):(\\d+)$`, address)\n\tif err == nil {\n\t\ti, _ := strconv.Atoi(match[2])\n\t\treturn match[1], i\n\t}\n\treturn \"\", 0\n}", "func (_TokenVesting *TokenVestingCallerSession) ParseAddr(data []byte) (common.Address, error) {\n\treturn _TokenVesting.Contract.ParseAddr(&_TokenVesting.CallOpts, data)\n}", "func Extract(addr string) (string, error) {\n\t// if addr specified then its returned\n\tif len(addr) > 0 && (addr != \"0.0.0.0\" && addr != \"[::]\" && addr != \"::\") {\n\t\treturn addr, nil\n\t}\n\n\tifaces, err := net.Interfaces()\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"Failed to get interfaces! 
Err: %v\", err)\n\t}\n\n\t//nolint:prealloc\n\tvar addrs []net.Addr\n\tvar loAddrs []net.Addr\n\tfor _, iface := range ifaces {\n\t\tifaceAddrs, err := iface.Addrs()\n\t\tif err != nil {\n\t\t\t// ignore error, interface can dissapear from system\n\t\t\tcontinue\n\t\t}\n\t\tif iface.Flags&net.FlagLoopback != 0 {\n\t\t\tloAddrs = append(loAddrs, ifaceAddrs...)\n\t\t\tcontinue\n\t\t}\n\t\taddrs = append(addrs, ifaceAddrs...)\n\t}\n\taddrs = append(addrs, loAddrs...)\n\tfmt.Println(\"addrs\", addrs)\n\tvar ipAddr []byte\n\tvar publicIP []byte\n\n\tfor _, rawAddr := range addrs {\n\t\tvar ip net.IP\n\t\tswitch addr := rawAddr.(type) {\n\t\tcase *net.IPAddr:\n\t\t\tip = addr.IP\n\t\tcase *net.IPNet:\n\t\t\tip = addr.IP\n\t\tdefault:\n\t\t\tcontinue\n\t\t}\n\n\t\tif !isPrivateIP(ip.String()) {\n\t\t\tpublicIP = ip\n\t\t\tcontinue\n\t\t}\n\n\t\tipAddr = ip\n\t\tbreak\n\t}\n\n\t// return private ip\n\tif ipAddr != nil {\n\t\treturn net.IP(ipAddr).String(), nil\n\t}\n\n\t// return public or virtual ip\n\tif publicIP != nil {\n\t\treturn net.IP(publicIP).String(), nil\n\t}\n\n\treturn \"\", fmt.Errorf(\"No IP address found, and explicit IP not provided\")\n}", "func ParseAddress(tp string) error {\n\t// check source\n\tif tp == conf.TypeDump || tp == conf.TypeSync || tp == conf.TypeRump {\n\t\tif err := parseAddress(tp, conf.Options.SourceAddress, conf.Options.SourceType, true); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tif len(conf.Options.SourceAddressList) == 0 {\n\t\t\treturn fmt.Errorf(\"source address shouldn't be empty when type in {dump, sync, rump}\")\n\t\t}\n\t}\n\n\t// check target\n\tif tp == conf.TypeRestore || tp == conf.TypeSync || tp == conf.TypeRump {\n\t\tif err := parseAddress(tp, conf.Options.TargetAddress, conf.Options.TargetType, false); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tif len(conf.Options.TargetAddressList) == 0 {\n\t\t\treturn fmt.Errorf(\"target address shouldn't be empty when type in {restore, sync, rump}\")\n\t\t}\n\t}\n\n\treturn 
nil\n}", "func ParseAddrPort(s string) (Addr, uint16, error) {\n\thost, port, err := net.SplitHostPort(s)\n\tif err != nil {\n\t\treturn Addr{}, 0, serrors.WrapStr(\"invalid address: split host:port\", err, \"addr\", s)\n\t}\n\ta, err := ParseAddr(host)\n\tif err != nil {\n\t\treturn Addr{}, 0, serrors.WrapStr(\"invalid address: host invalid\", err, \"host\", host)\n\t}\n\tp, err := strconv.ParseUint(port, 10, 16)\n\tif err != nil {\n\t\treturn Addr{}, 0, serrors.WrapStr(\"invalid address: port invalid\", err, \"port\", port)\n\t}\n\treturn a, uint16(p), nil\n}", "func readAddr(r io.Reader, b []byte) (Addr, error) {\n\tif len(b) < MaxAddrLen {\n\t\treturn nil, io.ErrShortBuffer\n\t}\n\t_, err := io.ReadFull(r, b[:1]) // read 1st byte for address type\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tswitch b[0] {\n\tcase AtypIPv4:\n\t\t_, err = io.ReadFull(r, b[1:1+net.IPv4len+2])\n\t\treturn b[:1+net.IPv4len+2], err\n\tcase AtypDomainName:\n\t\t_, err = io.ReadFull(r, b[1:2]) // read 2nd byte for domain length\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\t_, err = io.ReadFull(r, b[2:2+int(b[1])+2])\n\t\treturn b[:1+1+int(b[1])+2], err\n\tcase AtypIPv6:\n\t\t_, err = io.ReadFull(r, b[1:1+net.IPv6len+2])\n\t\treturn b[:1+net.IPv6len+2], err\n\t}\n\n\treturn nil, ErrAddressNotSupported\n}", "func validChallengeAddr(a string) bool {\n\t// TODO: flesh this out. parse a, make configurable, support\n\t// IPv6. 
Good enough for now.\n\treturn strings.HasPrefix(a, \"10.\") || strings.HasPrefix(a, \"192.168.\")\n}", "func (ic *ifConfigurator) advertiseContainerAddr(containerNetNS string, containerIfaceName string, result *current.Result) error {\n\tif err := nsIsNSorErr(containerNetNS); err != nil {\n\t\treturn fmt.Errorf(\"%s is not a valid network namespace: %v\", containerNetNS, err)\n\t}\n\tif len(result.IPs) == 0 {\n\t\tklog.Warningf(\"Expected at least one IP address in CNI result, skip sending Gratuitous ARP\")\n\t\treturn nil\n\t}\n\t// Sending Gratuitous ARP is a best-effort action and is unlikely to fail as we have ensured the netns is valid.\n\tgo nsWithNetNSPath(containerNetNS, func(_ ns.NetNS) error {\n\t\tiface, err := netInterfaceByName(containerIfaceName)\n\t\tif err != nil {\n\t\t\tklog.Errorf(\"Failed to find container interface %s in ns %s: %v\", containerIfaceName, containerNetNS, err)\n\t\t\treturn nil\n\t\t}\n\t\tvar targetIPv4, targetIPv6 net.IP\n\t\tfor _, ipc := range result.IPs {\n\t\t\tif ipc.Address.IP.To4() != nil {\n\t\t\t\ttargetIPv4 = ipc.Address.IP\n\t\t\t} else {\n\t\t\t\ttargetIPv6 = ipc.Address.IP\n\t\t\t}\n\t\t}\n\t\tif targetIPv4 == nil && targetIPv6 == nil {\n\t\t\tklog.V(2).Infof(\"No IPv4 and IPv6 address found for container interface %s in ns %s, skip sending Gratuitous ARP/NDP\", containerIfaceName, containerNetNS)\n\t\t\treturn nil\n\t\t}\n\t\tticker := time.NewTicker(50 * time.Millisecond)\n\t\tdefer ticker.Stop()\n\t\tcount := 0\n\t\tfor {\n\t\t\t// Send gratuitous ARP/NDP to network in case of stale mappings for this IP address\n\t\t\t// (e.g. 
if a previous - deleted - Pod was using the same IP).\n\t\t\tif targetIPv4 != nil {\n\t\t\t\tif err := arpingGratuitousARPOverIface(targetIPv4, iface); err != nil {\n\t\t\t\t\tklog.Warningf(\"Failed to send gratuitous ARP #%d: %v\", count, err)\n\t\t\t\t}\n\t\t\t}\n\t\t\tif targetIPv6 != nil {\n\t\t\t\tif err := ndpGratuitousNDPOverIface(targetIPv6, iface); err != nil {\n\t\t\t\t\tklog.Warningf(\"Failed to send gratuitous NDP #%d: %v\", count, err)\n\t\t\t\t}\n\t\t\t}\n\t\t\tcount++\n\t\t\tif count == 3 {\n\t\t\t\tbreak\n\t\t\t}\n\t\t\t<-ticker.C\n\t\t}\n\t\treturn nil\n\t})\n\treturn nil\n}", "func normalizeAddress(addr, defaultPort string) string {\n\t_, _, err := net.SplitHostPort(addr)\n\tif err != nil {\n\t\treturn net.JoinHostPort(addr, defaultPort)\n\t}\n\treturn addr\n}", "func (_AccessIndexor *AccessIndexorFilterer) ParseDbgAddress(log types.Log) (*AccessIndexorDbgAddress, error) {\n\tevent := new(AccessIndexorDbgAddress)\n\tif err := _AccessIndexor.contract.UnpackLog(event, \"dbgAddress\", log); err != nil {\n\t\treturn nil, err\n\t}\n\tevent.Raw = log\n\treturn event, nil\n}", "func parseIp(args []string) {\n\tname := args[0]\n\taddr := net.ParseIP(name)\n\tif addr == nil {\n\t\tfmt.Println(\"Invalid address\")\n\t} else {\n\t\tfmt.Println(\"The address is\", addr.String())\n\t}\n}", "func MustParseAddr(s string) Addr {\n\ta, err := ParseAddr(s)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn a\n}", "func unpackAddr(value nlgo.Binary, af Af) (net.IP, error) {\n\tbuf := ([]byte)(value)\n\tsize := 0\n\n\tswitch af {\n\tcase syscall.AF_INET:\n\t\tsize = 4\n\tcase syscall.AF_INET6:\n\t\tsize = 16\n\tdefault:\n\t\treturn nil, fmt.Errorf(\"ipvs: unknown af=%d addr=%v\", af, buf)\n\t}\n\n\tif size > len(buf) {\n\t\treturn nil, fmt.Errorf(\"ipvs: short af=%d addr=%v\", af, buf)\n\t}\n\n\treturn (net.IP)(buf[:size]), nil\n}", "func ParseAddress(addr string) (proto string, path string, err error) {\n\tm := netAddrRx.FindStringSubmatch(addr)\n\tif m == nil 
{\n\t\treturn \"\", \"\", goof.WithField(\"address\", addr, \"invalid address\")\n\t}\n\treturn m[1], m[2], nil\n}", "func ipAddressFromAnnotation(svc *Service, cloud *gce.Cloud, ipVersion string) (string, error) {\n\tannotationVal, ok := svc.v[StaticL4AddressesAnnotationKey]\n\tif !ok {\n\t\treturn \"\", nil\n\t}\n\n\taddressNames := strings.Split(annotationVal, \",\")\n\n\t// Truncated to 2 values (this is technically maximum, 1 IPv4 and 1 IPv6 address)\n\t// to not make too many API calls.\n\tif len(addressNames) > maxNumberOfAddresses {\n\t\taddressNames = addressNames[:maxNumberOfAddresses]\n\t}\n\n\tfor _, addressName := range addressNames {\n\t\ttrimmedAddressName := strings.TrimSpace(addressName)\n\t\tcloudAddress, err := cloud.GetRegionAddress(trimmedAddressName, cloud.Region())\n\t\tif err != nil {\n\t\t\tif isNotFoundError(err) {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\treturn \"\", err\n\t\t}\n\t\tif cloudAddress.IpVersion == \"\" {\n\t\t\tcloudAddress.IpVersion = IPv4Version\n\t\t}\n\n\t\tif cloudAddress.IpVersion == ipVersion {\n\t\t\treturn cloudAddress.Address, nil\n\t\t}\n\t}\n\treturn \"\", nil\n}", "func initAddr(addr string) (string, error) {\n\thost, port, err := net.SplitHostPort(addr)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tif host == \"\" {\n\t\thost = \"127.0.0.1\"\n\t}\n\treturn net.JoinHostPort(host, port), nil\n}", "func ParseAddress(address string) (*ParsedAddress, error) {\n\taddressParts := &ParsedAddress{}\n\taddressList := strings.Split(address, \"/\")\n\tif len(addressList) != 3 {\n\t\treturn addressParts, logThenErrorf(\"invalid address string %s\", address)\n\t}\n\n\taddressParts = &ParsedAddress{\n\t\tLocationSegment: addressList[0],\n\t\tNetworkSegment: addressList[1],\n\t\tViewSegment: addressList[2],\n\t}\n\n\treturn addressParts, nil\n}", "func ParseAddress(addr interface{}) (a Address, err error) {\n\t// handle the allowed types\n\tswitch addrVal := addr.(type) {\n\tcase string: // simple string value\n\t\tif addrVal == 
\"\" {\n\t\t\terr = errors.New(\"Recipient.Address may not be empty\")\n\t\t} else {\n\t\t\ta.Email = addrVal\n\t\t}\n\n\tcase Address:\n\t\ta = addr.(Address)\n\n\tcase map[string]interface{}:\n\t\t// auto-parsed nested json object\n\t\tfor k, v := range addrVal {\n\t\t\tswitch vVal := v.(type) {\n\t\t\tcase string:\n\t\t\t\tif strings.EqualFold(k, \"name\") {\n\t\t\t\t\ta.Name = vVal\n\t\t\t\t} else if strings.EqualFold(k, \"email\") {\n\t\t\t\t\ta.Email = vVal\n\t\t\t\t} else if strings.EqualFold(k, \"header_to\") {\n\t\t\t\t\ta.HeaderTo = vVal\n\t\t\t\t}\n\t\t\tdefault:\n\t\t\t\terr = errors.New(\"strings are required for all Recipient.Address values\")\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\n\tcase map[string]string:\n\t\t// user-provided json literal (convenience)\n\t\tfor k, v := range addrVal {\n\t\t\tif strings.EqualFold(k, \"name\") {\n\t\t\t\ta.Name = v\n\t\t\t} else if strings.EqualFold(k, \"email\") {\n\t\t\t\ta.Email = v\n\t\t\t} else if strings.EqualFold(k, \"header_to\") {\n\t\t\t\ta.HeaderTo = v\n\t\t\t}\n\t\t}\n\n\tdefault:\n\t\terr = errors.Errorf(\"unsupported Recipient.Address value type [%T]\", addrVal)\n\t}\n\n\treturn\n}", "func parseLineToAddr(line string) (string, string, error) {\n\taddr := strings.Split(line, \",\")\n\n\tif len(addr) != 6 {\n\t\treturn \"\", \"\", fmt.Errorf(\"invalid data address\")\n\t}\n\n\t// Get IP string from line\n\tip := strings.Join(addr[0:4], \".\")\n\n\t// get port number from line\n\tport1, _ := strconv.Atoi(addr[4])\n\tport2, _ := strconv.Atoi(addr[5])\n\n\tport := (port1 << 8) + port2\n\n\t// check IP and Port is valid\n\tif net.ParseIP(ip) == nil {\n\t\treturn \"\", \"\", fmt.Errorf(\"invalid data address\")\n\t}\n\n\tif port <= 0 || port > 65535 {\n\t\treturn \"\", \"\", fmt.Errorf(\"invalid data address\")\n\t}\n\n\treturn ip, strconv.Itoa(port), nil\n}", "func parseAddrMsg(msg *netlink.Message) (*AddrEntry, bool, error) {\n\tvar ifamsg iproute2.IfAddrMsg\n\tif err := ifamsg.UnmarshalBinary(msg.Data); err != 
nil {\n\t\treturn nil, false, err\n\t}\n\n\tvar e AddrEntry\n\te.init()\n\te.Family = int(ifamsg.Family)\n\te.PrefixLen = int(ifamsg.Prefixlen)\n\te.Flags = AddrFlag(ifamsg.Flags)\n\te.Scope = AddrScope(ifamsg.Scope)\n\te.Ifindex = int(ifamsg.Index)\n\n\tad, err := netlink.NewAttributeDecoder(msg.Data[iproute2.SizeofIfAddrMsg:])\n\tif err != nil {\n\t\treturn &e, false, err\n\t}\n\n\tfor ad.Next() {\n\t\tswitch ad.Type() {\n\t\tcase unix.IFA_ADDRESS:\n\t\t\te.InterfaceAddr = net.IP(ad.Bytes())\n\t\tcase unix.IFA_LOCAL:\n\t\t\te.LocalAddr = net.IP(ad.Bytes())\n\t\tcase unix.IFA_LABEL:\n\t\t\te.Label = ad.String()\n\t\tcase unix.IFA_BROADCAST:\n\t\t\te.BroadcastAddr = net.IP(ad.Bytes())\n\t\tcase unix.IFA_ANYCAST:\n\t\t\te.AnycastAddr = net.IP(ad.Bytes())\n\t\tcase unix.IFA_CACHEINFO:\n\t\t\te.AddrInfo = new(iproute2.IfaCacheinfo)\n\t\t\t_ = e.AddrInfo.UnmarshalBinary(ad.Bytes())\n\t\tcase unix.IFA_MULTICAST:\n\t\t\te.MulticastAddr = net.IP(ad.Bytes())\n\t\tcase unix.IFA_FLAGS:\n\t\t\te.AddrFlags = AddrFlag(ad.Uint32())\n\t\t}\n\t}\n\terr = ad.Err()\n\treturn &e, err == nil, err\n}", "func addrToHost(addr string) string {\n\treturn strings.Split(addr, \":\")[0]\n}", "func extractIPAndPortFromAddresses(addresses []string) (string, string) {\n\tfor _, addr := range addresses {\n\t\taddrParts := strings.SplitN(addr, \"://\", 2)\n\t\tif len(addrParts) != 2 {\n\t\t\tlogrus.Errorf(\"invalid listening address %s: must be in format [protocol]://[address]\", addr)\n\t\t\tcontinue\n\t\t}\n\n\t\tswitch addrParts[0] {\n\t\tcase \"tcp\":\n\t\t\thost, port, err := net.SplitHostPort(addrParts[1])\n\t\t\tif err != nil {\n\t\t\t\tlogrus.Errorf(\"failed to split host and port from address: %v\", err)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\treturn host, port\n\t\tcase \"unix\":\n\t\t\tcontinue\n\t\tdefault:\n\t\t\tlogrus.Errorf(\"only unix socket or tcp address is support\")\n\t\t}\n\t}\n\treturn \"\", \"\"\n}", "func ResolveAddr(addr string) string {\n\tvar scheme string\n\thost, port, err 
:= net.SplitHostPort(addr)\n\tif err != nil {\n\t\turi, err := url.Parse(addr)\n\t\tif err != nil {\n\t\t\treturn \"0.0.0.0:\" + strconv.Itoa(FreePort())\n\t\t}\n\n\t\tif strings.Contains(uri.Host, \":\") {\n\t\t\tsub := strings.Index(uri.Host, \":\")\n\t\t\turi.Host = uri.Host[0:sub]\n\t\t}\n\n\t\tscheme = uri.Scheme\n\t\thost = uri.Host\n\t\tport = uri.Port()\n\t}\n\n\tif host == \"\" {\n\t\thost = \"0.0.0.0\"\n\t}\n\n\tif port == \"\" || zeros.MatchString(port) {\n\t\tport = strconv.Itoa(FreePort())\n\t}\n\n\tif scheme == \"\" {\n\t\treturn host + \":\" + port\n\t}\n\n\treturn scheme + \"://\" + host + \":\" + port\n}", "func parseAddress(mailAddress string) (address *mail.Address, err error) {\n\tstrimmed := strings.TrimSpace(mailAddress)\n\n\tif address, err = mail.ParseAddress(strimmed); err == nil {\n\t\treturn address, nil\n\t}\n\n\tlog.Printf(\"[mail] parseAddress: %s\\n\", err)\n\treturn nil, err\n}", "func ParseHost(s string) (*Host, error) {\n\tisValidHost := func(host string) bool {\n\t\tif host == \"\" {\n\t\t\treturn false\n\t\t}\n\n\t\tif ip := net.ParseIP(host); ip != nil {\n\t\t\treturn true\n\t\t}\n\n\t\t// host is not a valid IPv4 or IPv6 address\n\t\t// host may be a hostname\n\t\t// refer https://en.wikipedia.org/wiki/Hostname#Restrictions_on_valid_host_names\n\t\t// why checks are done like below\n\t\tif len(host) < 1 || len(host) > 253 {\n\t\t\treturn false\n\t\t}\n\n\t\tfor _, label := range strings.Split(host, \".\") {\n\t\t\tif len(label) < 1 || len(label) > 63 {\n\t\t\t\treturn false\n\t\t\t}\n\n\t\t\tif !hostLabelRegexp.MatchString(label) {\n\t\t\t\treturn false\n\t\t\t}\n\t\t}\n\n\t\treturn true\n\t}\n\n\tvar port Port\n\tvar isPortSet bool\n\thost, portStr, err := net.SplitHostPort(s)\n\tif err != nil {\n\t\tif !strings.Contains(err.Error(), \"missing port in address\") {\n\t\t\treturn nil, err\n\t\t}\n\t\thost = s\n\t\tportStr = \"\"\n\t} else {\n\t\tif port, err = ParsePort(portStr); err != nil {\n\t\t\treturn nil, 
err\n\t\t}\n\n\t\tisPortSet = true\n\t}\n\n\tif host != \"\" {\n\t\thost, err = trimIPv6(host)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\t// IPv6 requires a link-local address on every network interface.\n\t// `%interface` should be preserved.\n\ttrimmedHost := host\n\n\tif i := strings.LastIndex(trimmedHost, \"%\"); i > -1 {\n\t\t// `%interface` can be skipped for validity check though.\n\t\ttrimmedHost = trimmedHost[:i]\n\t}\n\n\tif !isValidHost(trimmedHost) {\n\t\treturn nil, errors.New(\"invalid hostname\")\n\t}\n\n\treturn &Host{\n\t\tName: host,\n\t\tPort: port,\n\t\tIsPortSet: isPortSet,\n\t}, nil\n}", "func DecodeAddr(address []byte) string {\n\tvar stringAddr string\n\tvar ip []byte\n\tvar port []byte\n\n\tip = address[:4]\n\tport = address[4:]\n\n\t// Decode IP\n\tfor index, octet := range ip {\n\t\tstringAddr = stringAddr + strconv.Itoa(int(octet))\n\t\tif index != 3 {\n\t\t\tstringAddr += \".\"\n\t\t}\n\t}\n\tstringAddr += \":\"\n\n\t// Decode Port\n\tb := make([]byte, 8)\n\tfor i := 0; i < 6; i++ {\n\t\tb[i] = byte(0)\n\t}\n\tb[6] = port[0]\n\tb[7] = port[1]\n\tp := binary.BigEndian.Uint64(b)\n\tstringAddr += strconv.FormatUint(p, 10)\n\t//fmt.Println(\"Complete IP:\", stringAddr)\n\treturn stringAddr\n}", "func ParseAddress(s string) (Address, error) {\n\tvar addr Address\n\terr := addr.parse(s)\n\treturn addr, err\n}", "func ParseFromIPAddr(ipNet *net.IPNet) (*IPv4Address, *IPv6Address, error) {\n\tif ipNet == nil {\n\t\treturn nil, nil, fmt.Errorf(\"Nil address: %v\", ipNet)\n\t}\n\n\tif v4Addr := ipNet.IP.To4(); v4Addr != nil {\n\t\tcidr, _ := ipNet.Mask.Size()\n\t\tret := NewIPv4AddressFromBytes(v4Addr, uint(cidr))\n\t\treturn &ret, nil, nil\n\t}\n\tif v6Addr := ipNet.IP.To16(); v6Addr != nil {\n\t\tcidr, _ := ipNet.Mask.Size()\n\t\tret := NewIPv6Address(v6Addr, uint(cidr))\n\t\treturn nil, &ret, nil\n\t}\n\n\treturn nil, nil, fmt.Errorf(\"couldn't parse either v4 or v6 address: %v\", ipNet)\n}", "func IPFromAddr(addr string) 
string {\n\t// check for IPv6\n\tif strings.Count(addr, \":\") > 1 {\n\t\tip := addr\n\t\t// assume also has suffix if it has prefix for IPv6\n\t\tif strings.HasPrefix(addr, \"[\") {\n\t\t\tip = ip[1:strings.Index(ip, \"]\")]\n\t\t}\n\t\t// otherwise assume only IP present and check for zone\n\t\tzoneIdx := strings.LastIndex(ip, \"%\")\n\t\tif zoneIdx != -1 {\n\t\t\tip = ip[:zoneIdx]\n\t\t}\n\t\treturn ip\n\t}\n\n\t// IPv4\n\tval := strings.LastIndex(addr, \":\")\n\tif val == -1 {\n\t\treturn addr // assume valid IP without port\n\t}\n\tip := addr[:val]\n\treturn ip\n}", "func DecodeAddress(b []byte) (net.IP, []byte, error) {\n\tif len(b) < 6 {\n\t\treturn nil, nil, errors.New(\"too short\")\n\t}\n\n\t// IPv4\n\tif b[0] == 4 && b[1] == 4 {\n\t\treturn net.IP(b[2:6]), b[6:], nil\n\t}\n\n\t// IPv6\n\tif len(b) < 18 {\n\t\treturn nil, nil, errors.New(\"too short\")\n\t}\n\tif b[0] == 6 && b[1] == 16 {\n\t\treturn net.IP(b[2:18]), b[18:], nil\n\t}\n\n\treturn nil, nil, errors.New(\"unrecognized format\")\n}", "func GetAddr(addr string) string {\n\tif addr == \"\" {\n\t\tif real, err := GetMainIP(); err == nil {\n\t\t\treturn real + \":0\"\n\t\t}\n\t}\n\n\tip, port, err := net.SplitHostPort(addr)\n\tif err == nil && (ip == \"\" || ip == \"0.0.0.0\") {\n\t\tif realIP, err := GetMainIP(); err == nil {\n\t\t\treturn net.JoinHostPort(realIP, port)\n\t\t}\n\t}\n\n\treturn addr\n}", "func ParseAddrPort(str string) (ip net.IP, port uint16, err error) {\n\t// See func net.SplitHostPort(hostport string) (host, port string, err error)\n\tpair := strings.Split(str, \":\")\n\tif len(pair) == 2 {\n\t\tip = net.ParseIP(pair[0])\n\t\tif ip != nil {\n\t\t\tvar v uint64\n\t\t\tv, err = strconv.ParseUint(pair[1], 10, 16)\n\t\t\tif err == nil {\n\t\t\t\tport = uint16(v)\n\t\t\t} else {\n\t\t\t\terr = errf(\"\\\"%s\\\" is invalid port specifier\", pair[1])\n\t\t\t}\n\t\t} else {\n\t\t\terr = errf(\"\\\"%s\\\" not a valid IP address\", pair[0])\n\t\t}\n\t} else {\n\t\terr = 
errf(\"\\\"%s\\\" is missing port specifier\", str)\n\t}\n\treturn\n}", "func ParseAddress(address string) Address {\n\tif !TrackPositions {\n\t\treturn 0\n\t}\n\taddr, _ := strconv.ParseUint(address, 0, 64)\n\n\treturn Address(addr)\n}", "func parseListeners(addrs []string) ([]string, []string, error) {\n\tipv4ListenAddrs := make([]string, 0, len(addrs)*2)\n\tipv6ListenAddrs := make([]string, 0, len(addrs)*2)\n\tfor _, addr := range addrs {\n\t\thost, _, err := net.SplitHostPort(addr)\n\t\tif err != nil {\n\t\t\t// Shouldn't happen due to already being normalized.\n\t\t\treturn nil, nil, err\n\t\t}\n\n\t\t// Empty host or host of * on plan9 is both IPv4 and IPv6.\n\t\tif host == \"\" || (host == \"*\" && runtime.GOOS == \"plan9\") {\n\t\t\tipv4ListenAddrs = append(ipv4ListenAddrs, addr)\n\t\t\tipv6ListenAddrs = append(ipv6ListenAddrs, addr)\n\t\t\tcontinue\n\t\t}\n\n\t\t// Parse the IP.\n\t\tip := net.ParseIP(host)\n\t\tif ip == nil {\n\t\t\treturn nil, nil, fmt.Errorf(\"'%s' is not a valid IP \"+\n\t\t\t\t\"address\", host)\n\t\t}\n\n\t\t// To4 returns nil when the IP is not an IPv4 address, so use\n\t\t// this determine the address type.\n\t\tif ip.To4() == nil {\n\t\t\tipv6ListenAddrs = append(ipv6ListenAddrs, addr)\n\t\t} else {\n\t\t\tipv4ListenAddrs = append(ipv4ListenAddrs, addr)\n\t\t}\n\t}\n\treturn ipv4ListenAddrs, ipv6ListenAddrs, nil\n}", "func ExtractAddressFromReverse(reverseName string) string {\n\tsearch := \"\"\n\n\tf := reverse\n\n\tswitch {\n\tcase strings.HasSuffix(reverseName, IP4arpa):\n\t\tsearch = strings.TrimSuffix(reverseName, IP4arpa)\n\tcase strings.HasSuffix(reverseName, IP6arpa):\n\t\tsearch = strings.TrimSuffix(reverseName, IP6arpa)\n\t\tf = reverse6\n\tdefault:\n\t\treturn \"\"\n\t}\n\n\t// Reverse the segments and then combine them.\n\treturn f(strings.Split(search, \".\"))\n}", "func SplitHostPort(hostport string) (host, port string, err error) {\n\tj, k := 0, 0\n\n\t// The port starts after the last colon.\n\ti := last(hostport, 
':')\n\tif i < 0 {\n\t\tgoto missingPort\n\t}\n\n\tif hostport[0] == '[' {\n\t\t// Expect the first ']' just before the last ':'.\n\t\tend := byteIndex(hostport, ']')\n\t\tif end < 0 {\n\t\t\terr = &AddrError{\"missing ']' in address\", hostport}\n\t\t\treturn\n\t\t}\n\t\tswitch end + 1 {\n\t\tcase len(hostport):\n\t\t\t// There can't be a ':' behind the ']' now.\n\t\t\tgoto missingPort\n\t\tcase i:\n\t\t\t// The expected result.\n\t\tdefault:\n\t\t\t// Either ']' isn't followed by a colon, or it is\n\t\t\t// followed by a colon that is not the last one.\n\t\t\tif hostport[end+1] == ':' {\n\t\t\t\tgoto tooManyColons\n\t\t\t}\n\t\t\tgoto missingPort\n\t\t}\n\t\thost = hostport[1:end]\n\t\tj, k = 1, end+1 // there can't be a '[' resp. ']' before these positions\n\t} else {\n\t\thost = hostport[:i]\n\t\tif byteIndex(host, ':') >= 0 {\n\t\t\tgoto tooManyColons\n\t\t}\n\t\tif byteIndex(host, '%') >= 0 {\n\t\t\tgoto missingBrackets\n\t\t}\n\t}\n\tif byteIndex(hostport[j:], '[') >= 0 {\n\t\terr = &AddrError{\"unexpected '[' in address\", hostport}\n\t\treturn\n\t}\n\tif byteIndex(hostport[k:], ']') >= 0 {\n\t\terr = &AddrError{\"unexpected ']' in address\", hostport}\n\t\treturn\n\t}\n\n\tport = hostport[i+1:]\n\treturn\n\nmissingPort:\n\terr = &AddrError{\"missing port in address\", hostport}\n\treturn\n\ntooManyColons:\n\terr = &AddrError{\"too many colons in address\", hostport}\n\treturn\n\nmissingBrackets:\n\terr = &AddrError{\"missing brackets in address\", hostport}\n\treturn\n}", "func ResolveAddress(lookupIPFunc lookup.LookupIPFunc, dataplane *core_mesh.DataplaneResource) (*core_mesh.DataplaneResource, error) {\n\tvar ips, aips []net.IP\n\tvar err error\n\tvar update_ip, update_aip bool = false, false\n\tif ips, err = lookupIPFunc(dataplane.Spec.Networking.Address); err != nil {\n\t\treturn nil, err\n\t}\n\tif len(ips) == 0 {\n\t\treturn nil, errors.Errorf(\"can't resolve address %v\", dataplane.Spec.Networking.Address)\n\t}\n\tif 
dataplane.Spec.Networking.Address != ips[0].String() {\n\t\tupdate_ip = true\n\t}\n\tif dataplane.Spec.Networking.AdvertisedAddress != \"\" {\n\t\tif aips, err = lookupIPFunc(dataplane.Spec.Networking.AdvertisedAddress); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tif len(aips) == 0 {\n\t\t\treturn nil, errors.Errorf(\"can't resolve address %v\", dataplane.Spec.Networking.AdvertisedAddress)\n\t\t}\n\t\tif dataplane.Spec.Networking.AdvertisedAddress != aips[0].String() {\n\t\t\tupdate_aip = true\n\t\t}\n\t}\n\n\tif update_ip || update_aip { // only if we resolve any address, in most cases this is IP not a hostname\n\t\tdpSpec := proto.Clone(dataplane.Spec).(*mesh_proto.Dataplane)\n\t\tif update_ip {\n\t\t\tdpSpec.Networking.Address = ips[0].String()\n\t\t}\n\t\tif update_aip {\n\t\t\tdpSpec.Networking.AdvertisedAddress = aips[0].String()\n\t\t}\n\t\treturn &core_mesh.DataplaneResource{\n\t\t\tMeta: dataplane.Meta,\n\t\t\tSpec: dpSpec,\n\t\t}, nil\n\t}\n\treturn dataplane, nil\n}", "func ParseOptIAAddress(data []byte) (*OptIAAddress, error) {\n\tvar opt OptIAAddress\n\tbuf := uio.NewBigEndianBuffer(data)\n\topt.IPv6Addr = net.IP(buf.CopyN(net.IPv6len))\n\topt.PreferredLifetime = buf.Read32()\n\topt.ValidLifetime = buf.Read32()\n\tif err := opt.Options.FromBytes(buf.ReadAll()); err != nil {\n\t\treturn nil, err\n\t}\n\treturn &opt, buf.FinError()\n}", "func predictAddr(t *netutil.IPTracker) (net.IP, int) {\n\tep := t.PredictEndpoint()\n\tif ep == \"\" {\n\t\treturn nil, 0\n\t}\n\tipString, portString, _ := net.SplitHostPort(ep)\n\tip := net.ParseIP(ipString)\n\tport, err := strconv.ParseInt(portString, 10, 16)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn ip, int(port)\n}", "func Extract(addr string) (string, error) {\n\t// if addr specified then its returned\n\tif len(addr) > 0 && (addr != \"0.0.0.0\" && addr != \"[::]\" && addr != \"::\") {\n\t\treturn addr, nil\n\t}\n\n\treturn LocalIP, nil\n}", "func parseListenString(s string) (string, string, error) 
{\n\tif s == \"\" {\n\t\treturn \"\", \"\", fmt.Errorf(\"patroni configuration option 'restapi.listen' not found\")\n\t}\n\n\tif s == \"::\" {\n\t\treturn \"[::1]\", \"8008\", nil\n\t}\n\n\tparts := strings.Split(s, \":\")\n\n\tvar addr, port string\n\tvar ip net.IP\n\n\tif len(parts) != 1 {\n\t\tip = net.ParseIP(strings.Join(parts[0:len(parts)-1], \":\"))\n\t\tport = parts[len(parts)-1]\n\t} else {\n\t\tip = net.ParseIP(parts[0])\n\t\tport = \"8008\"\n\t}\n\n\t// Convert 'unspecified' address to loopback. Wraps IPv6 addresses into square brackets (required for net/http).\n\tif ip.Equal(net.IPv4zero) {\n\t\taddr = \"127.0.0.1\"\n\t} else if ip.Equal(net.IPv6unspecified) {\n\t\taddr = fmt.Sprintf(\"[%s]\", net.IPv6loopback.String())\n\t} else {\n\t\tif ip.To4() != nil {\n\t\t\taddr = ip.String()\n\t\t} else {\n\t\t\taddr = fmt.Sprintf(\"[%s]\", ip.String())\n\t\t}\n\t}\n\n\treturn addr, port, nil\n}", "func NormalizeAddr(addr string) (string, error) {\n\tu, err := ParseAddr(addr)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\thost, _, err := net.SplitHostPort(u.Host)\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"failed to parse host-port pair: %v\", err)\n\t} else if host == \"\" {\n\t\treturn \"\", fmt.Errorf(\"no hostname in address: %q\", addr)\n\t}\n\treturn u.String(), nil\n}", "func getDefaultAddr(addr string, n int) string {\n\ta := strings.Split(addr, \":\")\n\tport, _ := strconv.Atoi(a[len(a)-1])\n\ta[len(a)-1] = strconv.Itoa(port + n)\n\treturn strings.Join(a, \":\")\n}", "func ParseAddr(addr string) (*url.URL, error) {\n\tu, err := url.Parse(addr)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif u.Scheme != SchemaADC && u.Scheme != SchemaADCS {\n\t\treturn u, fmt.Errorf(\"unsupported protocol: %q\", u.Scheme)\n\t}\n\tu.Path = strings.TrimRight(u.Path, \"/\")\n\treturn u, nil\n}", "func ParseIPHostFromString(addrStr string) (string, uint16, error) {\n\tip, port, err := net.SplitHostPort(addrStr)\n\tif err != nil {\n\t\treturn \"\", 0, 
err\n\t}\n\tportUint, err := strconv.ParseUint(port, 10, 16)\n\tif err != nil {\n\t\treturn \"\", 0, err\n\t}\n\n\treturn ip, uint16(portUint), nil\n}", "func decodePeerAddress(chunk string) string {\n\tip := net.IPv4(chunk[0], chunk[1], chunk[2], chunk[3])\n\tremotePort := 256*int(chunk[4]) + int(chunk[5]) // Port is given in network encoding.\n\treturn fmt.Sprintf(\"%s:%d\", ip.String(), remotePort)\n}", "func (n *Net) ResolveUDPAddr(network, address string) (*net.UDPAddr, error) {\n\ta := &net.UDPAddr{\n\t\tPort: 0,\n\t\tIP: net.IPv4(127, 0, 0, 1),\n\t}\n\thost, port, err := net.SplitHostPort(address)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif a.IP = net.ParseIP(host); a.IP == nil {\n\t\t// Probably we should use virtual DNS here.\n\t\treturn nil, errors.New(\"bad IP\")\n\t}\n\tif a.Port, err = strconv.Atoi(port); err != nil {\n\t\treturn nil, err\n\t}\n\treturn a, nil\n}", "func ResolveUDPAddr(network, address string) (*UDPAddr, error) {\n\t// TODO: make sure network is 'udp'\n\t// separate domain from port, if any\n\tr := strings.Split(address, \":\")\n\taddr, err := ActiveDevice.GetDNS(r[0])\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tip := IP(addr)\n\tif len(r) > 1 {\n\t\tport, e := strconv.Atoi(r[1])\n\t\tif e != nil {\n\t\t\treturn nil, e\n\t\t}\n\t\treturn &UDPAddr{IP: ip, Port: port}, nil\n\t}\n\n\treturn &UDPAddr{IP: ip}, nil\n}", "func (_Contract *ContractFilterer) ParseAddrChanged(log types.Log) (*ContractAddrChanged, error) {\n\tevent := new(ContractAddrChanged)\n\tif err := _Contract.contract.UnpackLog(event, \"AddrChanged\", log); err != nil {\n\t\treturn nil, err\n\t}\n\treturn event, nil\n}", "func (s *ProvisionedServer) Address() string { return s.Server.AdvertiseIP }", "func extractIPv4(ptr string) string {\n\ts := strings.Replace(ptr, \".in-addr.arpa\", \"\", 1)\n\twords := strings.Split(s, \".\")\n\tfor i, j := 0, len(words)-1; i < j; i, j = i+1, j-1 {\n\t\twords[i], words[j] = words[j], words[i]\n\t}\n\treturn strings.Join(words, 
\".\")\n}", "func IsAddress(a string) bool {\n\tif len(a) > 0 && a[:3] == string(binary.PrefixAccountPubkey) {\n\t\treturn true\n\t}\n\treturn false\n}", "func ListenAddresses(value string) ([]string, error) {\n\taddresses := make([]string, 0)\n\n\tif value == \"\" {\n\t\treturn addresses, nil\n\t}\n\n\tlocalHost, localPort, err := net.SplitHostPort(value)\n\tif err != nil {\n\t\tlocalHost = value\n\t\tlocalPort = DefaultPort\n\t}\n\n\tif localHost == \"0.0.0.0\" || localHost == \"::\" || localHost == \"[::]\" {\n\t\tifaces, err := net.Interfaces()\n\t\tif err != nil {\n\t\t\treturn addresses, err\n\t\t}\n\n\t\tfor _, i := range ifaces {\n\t\t\taddrs, err := i.Addrs()\n\t\t\tif err != nil {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tfor _, addr := range addrs {\n\t\t\t\tvar ip net.IP\n\t\t\t\tswitch v := addr.(type) {\n\t\t\t\tcase *net.IPNet:\n\t\t\t\t\tip = v.IP\n\t\t\t\tcase *net.IPAddr:\n\t\t\t\t\tip = v.IP\n\t\t\t\t}\n\n\t\t\t\tif !ip.IsGlobalUnicast() {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\n\t\t\t\tif ip.To4() == nil {\n\t\t\t\t\tif localHost == \"0.0.0.0\" {\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t}\n\t\t\t\t\taddresses = append(addresses, fmt.Sprintf(\"[%s]:%s\", ip, localPort))\n\t\t\t\t} else {\n\t\t\t\t\taddresses = append(addresses, fmt.Sprintf(\"%s:%s\", ip, localPort))\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t} else {\n\t\tif strings.Contains(localHost, \":\") {\n\t\t\taddresses = append(addresses, fmt.Sprintf(\"[%s]:%s\", localHost, localPort))\n\t\t} else {\n\t\t\taddresses = append(addresses, fmt.Sprintf(\"%s:%s\", localHost, localPort))\n\t\t}\n\t}\n\n\treturn addresses, nil\n}", "func parseOptClientLinkLayerAddress(data []byte) (*optClientLinkLayerAddress, error) {\n\tvar opt optClientLinkLayerAddress\n\tbuf := uio.NewBigEndianBuffer(data)\n\topt.LinkLayerType = iana.HWType(buf.Read16())\n\topt.LinkLayerAddress = buf.ReadAll()\n\treturn &opt, buf.FinError()\n}", "func (s *Setting) CheckAddress(adr string, hasPort, isEmptyHost bool) error {\n\tif strings.HasSuffix(adr, 
\".onion\") {\n\t\tif s.UseTor {\n\t\t\treturn nil\n\t\t}\n\t\treturn ErrTorAddress\n\t}\n\th, p, err2 := net.SplitHostPort(adr)\n\tif err2 != nil && hasPort {\n\t\treturn err2\n\t}\n\tif err2 == nil && !hasPort {\n\t\treturn errors.New(\"should not have port number\")\n\t}\n\tif hasPort {\n\t\tpo, err := strconv.Atoi(p)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif po > 0xffff || po <= 0 {\n\t\t\treturn errors.New(\"illegal port number\")\n\t\t}\n\t\tadr = h\n\t}\n\tif adr == \"\" {\n\t\tif !isEmptyHost {\n\t\t\treturn errors.New(\"empty host name\")\n\t\t}\n\t\treturn nil\n\t}\n\t_, err2 = net.LookupIP(adr)\n\treturn err2\n}", "func parseIPAndPort(input string) (string, int, error) {\n\tseparator := strings.LastIndex(input, \":\")\n\tif separator == -1 {\n\t\treturn \"\", 0, errors.New(\"cannot parse IP and port correctly\")\n\t}\n\tIPStr := input[0:separator]\n\tif IPStr[0] == '[' {\n\t\tIPStr = IPStr[1 : len(IPStr)-1]\n\t}\n\tfor _, prefix := range localIPv4 {\n\t\tif strings.HasPrefix(IPStr, prefix) {\n\t\t\treturn \"\", 0, errors.New(\"ignore this IP address\")\n\t\t}\n\t}\n\toutputIP := net.ParseIP(IPStr)\n\tif outputIP == nil {\n\t\treturn \"\", 0, errors.New(\"invalid IP address\")\n\t}\n\n\tport, err := strconv.Atoi(input[separator+1:])\n\tif err != nil {\n\t\treturn \"\", 0, errors.New(\"invalid IP port\")\n\t}\n\treturn IPStr, port, nil\n}" ]
[ "0.8340167", "0.8205273", "0.7002564", "0.638477", "0.6307578", "0.6307132", "0.6303563", "0.6131761", "0.61101985", "0.6086822", "0.6052451", "0.60465807", "0.5911784", "0.5911784", "0.59074295", "0.5907354", "0.58481467", "0.575326", "0.57085407", "0.5657779", "0.5657514", "0.5645455", "0.56334203", "0.5629601", "0.56295305", "0.5621209", "0.5616896", "0.5603567", "0.5593392", "0.5580358", "0.5573355", "0.55725026", "0.55687046", "0.5561967", "0.5528769", "0.5520014", "0.5518512", "0.55170566", "0.55170566", "0.5516679", "0.5497167", "0.54890287", "0.5478747", "0.54338837", "0.5412715", "0.5403231", "0.5402765", "0.540043", "0.5383037", "0.5381386", "0.5378193", "0.535518", "0.5346018", "0.534566", "0.53361344", "0.5333118", "0.5331835", "0.53231114", "0.530948", "0.53014886", "0.5299297", "0.52983034", "0.5288783", "0.5283077", "0.5273748", "0.5261342", "0.52536565", "0.52411807", "0.52387226", "0.5223436", "0.5222616", "0.5220266", "0.5214675", "0.5208065", "0.5201532", "0.5185989", "0.5181148", "0.5180541", "0.5179498", "0.51750064", "0.5174316", "0.5173106", "0.51727796", "0.5162102", "0.5150736", "0.5142128", "0.51311284", "0.5126323", "0.512302", "0.512183", "0.51162857", "0.51034033", "0.5102507", "0.5093374", "0.5089154", "0.50860536", "0.5082816", "0.5062692", "0.5058135", "0.5057238" ]
0.84405524
0
StringsSliceFromSet returns a sorted strings slice from set
StringsSliceFromSet возвращает отсортированный срез строк из множества
func StringsSliceFromSet(in map[string]struct{}) []string { if in == nil { return nil } out := make([]string, 0, len(in)) for key := range in { out = append(out, key) } sort.Strings(out) return out }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func StrSliceSet(slice []string) []string {\n\tset := make([]string, 0)\n\ttempMap := make(map[string]bool, len(slice))\n\tfor _, v := range slice {\n\t\tif !tempMap[v] {\n\t\t\tset = append(set, v)\n\t\t\ttempMap[v] = true\n\t\t}\n\t}\n\n\treturn set\n}", "func (set StringSet) ToSlice() []string {\n\tif n := set.Len(); n > 0 {\n\t\tresult := make([]string, 0, n)\n\t\tfor str := range set {\n\t\t\tresult = append(result, str)\n\t\t}\n\t\treturn result\n\t}\n\treturn nil\n}", "func (s *Set) StringSlice() []string {\n\tslice := make([]string, 0, s.Size())\n\n\ts.mutex.Lock()\n\tfor k := range s.m {\n\t\tslice = append(slice, k.(string))\n\t}\n\ts.mutex.Unlock()\n\n\treturn slice\n}", "func (s StringSet) ToSlice() []string {\n\tret := make([]string, len(s))\n\tidx := 0\n\tfor v := range s {\n\t\tret[idx] = v\n\t\tidx++\n\t}\n\tsort.Strings(ret)\n\treturn ret\n}", "func (s StringSet) Slice() []string {\n\tt := make([]string, 0, len(s))\n\tfor k := range s {\n\t\tt = append(t, k)\n\t}\n\treturn t\n}", "func SetToSlice(set map[string]struct{}) []string {\n\tdata := make([]string, 0, len(set))\n\tfor key := range set {\n\t\tdata = append(data, key)\n\t}\n\treturn data\n}", "func ToStringSlice(set mapset.Set) []string {\n\tif set == nil {\n\t\treturn nil\n\t}\n\tslice := set.ToSlice()\n\tresult := make([]string, len(slice))\n\tfor i, item := range slice {\n\t\tresult[i] = item.(string)\n\t}\n\treturn result\n}", "func (s StringSet) ToSlice() []string {\n\tslice := []string{}\n\tfor value := range s {\n\t\tslice = append(slice, value)\n\t}\n\treturn slice\n}", "func NewStringSetFromSlice(start []string) StringSet {\n\tret := make(StringSet)\n\tfor _, s := range start {\n\t\tret.Add(s)\n\t}\n\treturn ret\n}", "func StringSliceToSet(slice []string) String {\n\tset := make(String, len(slice))\n\tfor _, s := range slice {\n\t\tset.Add(s)\n\t}\n\treturn set\n}", "func (queryParametersBag) uniqueStringsSlice(in []string) []string {\n\tkeys := make(map[string]bool)\n\tout := 
make([]string, 0)\n\n\tfor _, entry := range in {\n\t\tif _, ok := keys[entry]; !ok {\n\t\t\tkeys[entry] = true\n\t\t\tout = append(out, entry)\n\t\t}\n\t}\n\n\treturn out\n}", "func (s StringSet) AsSlice() []string {\n\tresult := make([]string, len(s), len(s))\n\ti := 0\n\tfor v := range s {\n\t\tresult[i] = v\n\t\ti++\n\t}\n\treturn result\n}", "func StringSet(s sets.String) zapcore.ObjectMarshalerFunc {\n\treturn func(enc zapcore.ObjectEncoder) error {\n\t\tenc.AddString(\"keys\", strings.Join(s.UnsortedList(), \",\"))\n\t\treturn nil\n\t}\n}", "func copyAndSortStringSlice(s []string) []string {\n\tsc := make([]string, 0, len(s))\n\tsc = append(sc, s...)\n\n\tsort.Strings(sc)\n\treturn sc\n}", "func UniqueStringSlice(ins ...[]string) []string {\n\tkeys := make(map[string]bool)\n\tlist := []string{}\n\tfor _, in := range ins {\n\t\tfor _, k := range in {\n\t\t\tif _, value := keys[k]; !value {\n\t\t\t\tkeys[k] = true\n\t\t\t\tlist = append(list, k)\n\t\t\t}\n\t\t}\n\t}\n\treturn list\n}", "func (s Set) Slice() []string {\n\tvar i uint64\n\n\tk := make([]string, len(s))\n\n\tfor key := range s {\n\t\tk[i] = key\n\t\ti++\n\t}\n\n\treturn k\n}", "func (ss Set) Slice() []string {\n\tslc := make([]string, 0, len(ss))\n\tfor k := range ss {\n\t\tslc = append(slc, k)\n\t}\n\n\treturn slc\n}", "func Strings(data []string) []string {\n\tsort.Strings(data)\n\tn := Uniq(sort.StringSlice(data))\n\treturn data[:n]\n}", "func StringSet() *StringSetFilter {\r\n\tf := new(StringSetFilter)\r\n\tf.strcase = STRING_RAWCASE\r\n\tf.delimiter = \",\"\r\n\tf.minCount = 0\r\n\tf.maxCount = types.MaxInt\r\n\treturn f\r\n}", "func (s Set) Slice() []string {\n\ttoReturn := make([]string, s.Len())\n\ti := 0\n\tfor st := range s {\n\t\ttoReturn[i] = st\n\t\ti++\n\t}\n\treturn toReturn\n}", "func StringSliceSubset(a []string, b []string) error {\n\taset := make(map[string]bool)\n\tfor _, v := range a {\n\t\taset[v] = true\n\t}\n\n\tfor _, v := range b {\n\t\t_, ok := aset[v]\n\t\tif !ok 
{\n\t\t\treturn trace.BadParameter(\"%v not in set\", v)\n\t\t}\n\n\t}\n\treturn nil\n}", "func (tickerSet TickerSet) ToSlice() []string {\n\ttickerSlice := make([]string, 0)\n\tfor ticker, _ := range tickerSet {\n\t\ttickerSlice = append(tickerSlice, ticker)\n\t}\n\treturn tickerSlice\n}", "func (c *Context) StringSet(strings ...string) *AST {\n\tset := &AST{\n\t\trawCtx: c.raw,\n\t\trawAST: C.Z3_mk_empty_set(\n\t\t\tc.raw,\n\t\t\tc.StringSort().rawSort,\n\t\t),\n\t}\n\tfor _, content := range strings {\n\t\tC.Z3_mk_set_add(\n\t\t\tc.raw,\n\t\t\tset.rawAST,\n\t\t\tc.Str(content).rawAST,\n\t\t)\n\t}\n\treturn set\n}", "func NewStringSet() *Set {\n\treturn NewCustomSet(func(l, r interface{}) bool {\n\t\treturn l.(string) < r.(string)\n\t})\n}", "func (p StringSlice) Sort() { Sort(p) }", "func (cl *CommandLineInterface) StringSliceFlagOnFlagSet(flagSet *pflag.FlagSet, name string, shorthand *string, defaultValue []string, description string) {\n\tif defaultValue == nil {\n\t\tcl.nilDefaults[name] = true\n\t\tdefaultValue = []string{}\n\t}\n\tif shorthand != nil {\n\t\tcl.Flags[name] = flagSet.StringSliceP(name, string(*shorthand), defaultValue, description)\n\t\treturn\n\t}\n\tcl.Flags[name] = flagSet.StringSlice(name, defaultValue, description)\n}", "func (m refCountedUrlSet) getAsStringSlice() []string {\n\ta := make([]string, 0, len(m))\n\tfor u := range m {\n\t\ta = append(a, u)\n\t}\n\treturn a\n}", "func (s *Set) Strings() []string {\n\titems := make([]string, 0, s.Size())\n\tfor k := range s.m {\n\t\titems = append(items, k)\n\t}\n\treturn items\n}", "func Strings(a []string) { Sort(StringSlice(a)) }", "func NewStringSet(ss []string) StringSet {\n\tres := StringSet{}\n\tfor _, s := range ss {\n\t\tres[s] = struct{}{}\n\t}\n\treturn res\n}", "func StringSlicesUnion(one, two []string) []string {\n\tvar union []string\n\tunion = append(union, one...)\n\tunion = append(union, two...)\n\treturn OnlyUnique(union)\n}", "func (f *FlagSet) StringSlice(name string) 
[]string {\n\ts := f.Lookup(name)\n\tif s != nil {\n\t\treturn (s.Value.(*stringSlice)).Value()\n\t}\n\treturn nil\n}", "func filterSet(autocompletions sets.String, sub string, ignoreCase bool, inclusionFunc func(string, string) bool) sets.String {\n\tif sub == \"\" {\n\t\treturn autocompletions\n\t}\n\tif ignoreCase {\n\t\tsub = strings.ToLower(sub)\n\t}\n\tret := sets.NewString()\n\tfor _, item := range autocompletions.List() {\n\t\tif ignoreCase {\n\t\t\titem = strings.ToLower(item)\n\t\t}\n\t\tif inclusionFunc(item, sub) {\n\t\t\tret.Insert(item)\n\t\t}\n\t}\n\treturn ret\n}", "func expandSetToStrings(strings []interface{}) []string {\n\texpandedStrings := make([]string, len(strings))\n\tfor i, v := range strings {\n\t\texpandedStrings[i] = v.(string)\n\t}\n\n\treturn expandedStrings\n}", "func NewStringSet() StringSet {\n\treturn make(StringSet)\n}", "func (s String) Sorted() []string {\n\tslice := s.ToSlice()\n\tsort.Strings(slice)\n\treturn slice\n}", "func (s StringSlice) StringSlice() []string {\n\tss := []string{}\n\tfor _, tmp := range s {\n\t\tif len(tmp) <= 2 {\n\t\t\tcontinue\n\t\t}\n\t\tss = append(ss, tmp)\n\t}\n\treturn ss\n}", "func (this *HandlerBase) getStringSlice(s string) []string {\n\ta := this.get(s)\n\tif m, ok := a.([]interface{}); ok {\n\t\tsl := make([]string, len(m))\n\t\tfor i, v := range m {\n\t\t\tif s, ok := v.(string); ok {\n\t\t\t\tsl[i] = string(s)\n\t\t\t}\n\t\t}\n\t\treturn sl\n\t}\n\treturn nil\n}", "func (s *Set) Slice() []string {\n\tn := len(s.m)\n\tif n == 0 {\n\t\treturn nil\n\t}\n\tarr := make([]string, 0, n)\n\tfor val := range s.m {\n\t\tarr = append(arr, val)\n\t}\n\treturn arr\n}", "func NewFromSlice(sl []string) Set {\n\ttoReturn := New()\n\tfor _, s := range sl {\n\t\ttoReturn.Add(s)\n\t}\n\treturn toReturn\n}", "func toList(set map[string]struct{}) []string {\n\tlist := make([]string, 0, len(set))\n\tfor item, _ := range set {\n\t\tlist = append(list, item)\n\t}\n\tsort.Strings(list)\n\treturn list\n}", "func (v 
*InvoiceCollection) stringSlice(vsizes, dsizes []int) (\n\tvdata [][]string, rvsizes []int,\n\tddata [][][]string, rdsizes []int) {\n\trvsizes, rdsizes = vsizes, dsizes\n\tvdata = make([][]string, len(*v))\n\tddata = make([][][]string, len(*v))\n\tfor i, p := range *v {\n\t\tvdata[i] = p.stringSlice(i + 1)\n\t\tfor k, f := range vdata[i] {\n\t\t\t_, _, n := util.CountChars(f)\n\t\t\trvsizes[k] = util.Imax(rvsizes[k], n)\n\t\t}\n\n\t\tddata[i] = make([][]string, len(p.Details))\n\t\tfor j, d := range p.Details {\n\t\t\tddata[i][j] = d.stringSlice(j + 1)\n\t\t\tfor k, f := range ddata[i][j] {\n\t\t\t\t_, _, n := util.CountChars(f)\n\t\t\t\trdsizes[k] = util.Imax(rdsizes[k], n)\n\t\t\t}\n\t\t}\n\t}\n\treturn vdata, rvsizes, ddata, rdsizes\n}", "func (set KeySet) ToSlice() []Key {\n\tkeys := []Key{}\n\n\tfor key := range set {\n\t\tkeys = append(keys, key)\n\t}\n\n\treturn keys\n}", "func FirstUniqueStrings(list []string) []string {\n\tk := 0\nouter:\n\tfor i := 0; i < len(list); i++ {\n\t\tfor j := 0; j < k; j++ {\n\t\t\tif list[i] == list[j] {\n\t\t\t\tcontinue outer\n\t\t\t}\n\t\t}\n\t\tlist[k] = list[i]\n\t\tk++\n\t}\n\treturn list[:k]\n}", "func MergeStringSlices(a []string, b []string) []string {\n\tset := sets.NewString(a...)\n\tset.Insert(b...)\n\treturn set.UnsortedList()\n}", "func TopicSetFromSlice(s []string) TopicSet {\n\tvar ts = TopicSet{}\n\tfor _, t := range s {\n\t\tts[t] = nil\n\t}\n\treturn ts\n}", "func (set UInt64Set) Slice(sorted bool) UInt64Slice {\n\tslice := NewUInt64Slice(0, len(set))\n\tfor x := range set {\n\t\tslice = append(slice, x)\n\t}\n\tif sorted {\n\t\tslice.Sort()\n\t}\n\treturn slice\n}", "func (s StringSet) GetAll() []string {\n\tres := []string{}\n\tfor item := range s {\n\t\tres = append(res, item)\n\t}\n\treturn res\n}", "func RemoveDuplicatedStrings(slice []string) []string {\n\tresult := []string{}\n\n\tcheck := make(map[string]bool)\n\tfor _, element := range slice {\n\t\tcheck[element] = true\n\t}\n\n\tfor key := range 
check {\n\t\tresult = append(result, key)\n\t}\n\n\treturn result\n}", "func NewStringSlice(n ...string) *Slice { return NewSlice(sort.StringSlice(n)) }", "func NewStringSet() StringSet {\n\treturn &stringSetImpl{\n\t\tentries: make(map[string]bool),\n\t}\n}", "func StringSliceExtractUnique(strSlice []string) (result []string) {\n\tif strSlice == nil {\n\t\treturn []string{}\n\t} else if len(strSlice) <= 1 {\n\t\treturn strSlice\n\t} else {\n\t\tfor _, v := range strSlice {\n\t\t\tif !StringSliceContains(&result, v) {\n\t\t\t\tresult = append(result, v)\n\t\t\t}\n\t\t}\n\n\t\treturn result\n\t}\n}", "func getSetValues(s set) []string {\n\tvar retVal []string\n\tfor k := range s {\n\t\tretVal = append(retVal, k)\n\t}\n\treturn retVal\n}", "func (set *SetString) Slice() SliceString {\n\tset.lock.Lock()\n\tkeys := make(SliceString, len(set.cache))\n\ti := 0\n\tfor k := range set.cache {\n\t\tkeys[i] = k\n\t}\n\tset.lock.Unlock()\n\treturn keys\n}", "func (s S) SetSlice(key, value string, before, after int) (slice []string, err error) {\n\tvar vv SortedString\n\terr = s.ReadModify(key, &vv, func(_ interface{}) (r bool) {\n\t\tslice = vv.Slice(value, before, after)\n\t\treturn\n\t})\n\treturn\n}", "func getstrings(fullpath string) mapset.Set {\n\tfileStrings, _ := exec.Command(\"strings\", fullpath).CombinedOutput()\n\tstrArray := strings.Split(string(fileStrings), \"\\n\")\n\tstringSet := mapset.NewSet()\n\n\tfor _, s := range strArray {\n\t\tstringSet.Add(s)\n\t}\n\n\treturn stringSet\n}", "func NewStringSet(values ...string) StringSet {\n\ts := make(StringSet, len(values))\n\tfor _, v := range values {\n\t\ts.Add(v)\n\t}\n\treturn s\n}", "func (s *Set) SortedSlice() []uint32 {\n\tss := s.Slice()\n\tsort.Slice(ss, func(i, j int) bool {\n\t\treturn ss[i] < ss[j]\n\t})\n\treturn ss\n}", "func StringMapToSlice(in map[string]string) []string {\n\tret := []string{}\n\n\tfor _, val := range in {\n\t\tret = append(ret, val)\n\t}\n\n\tsort.Strings(ret)\n\n\treturn ret\n\n}", 
"func StringSlice(src []*string) []string {\n\tdst := make([]string, len(src))\n\tfor i := 0; i < len(src); i++ {\n\t\tif src[i] != nil {\n\t\t\tdst[i] = *(src[i])\n\t\t}\n\t}\n\treturn dst\n}", "func Strings(s []string) int {\n\treturn Sort(sort.StringSlice(s))\n}", "func NewStringSet() *StringSet {\n\treturn &StringSet{\n\t\tmembers: make(map[string]struct{}),\n\t}\n}", "func SortedList(set map[string]bool) []string {\n\tvar ret []string\n\tfor s := range set {\n\t\tret = append(ret, s)\n\t}\n\tsort.Strings(ret)\n\treturn ret\n}", "func (s StringSet) Keys() []string {\n\tret := make([]string, 0, len(s))\n\tfor v := range s {\n\t\tret = append(ret, v)\n\t}\n\treturn ret\n}", "func NewStringSet(initItems ...string) StringSet {\n\tnewSet := StringSet{}\n\tfor _, item := range initItems {\n\t\tnewSet.Add(item)\n\t}\n\treturn newSet\n}", "func expandStringSet(configured *schema.Set) []*string {\n\treturn expandStringList(configured.List())\n}", "func (s stringSet) list() []string {\n\tl := make([]string, 0, len(s))\n\tfor k := range s {\n\t\tl = append(l, k)\n\t}\n\tsort.Strings(l)\n\treturn l\n}", "func stringSetDifference(aa, bb []string) []string {\n\trr := []string{}\n\nLoopStrings:\n\tfor _, a := range aa {\n\t\tfor _, b := range bb {\n\t\t\tif reflect.DeepEqual(a, b) {\n\t\t\t\tcontinue LoopStrings\n\t\t\t}\n\t\t}\n\n\t\trr = append(rr, a)\n\t}\n\n\treturn rr\n}", "func NewStringSet(elems ...string) StringSet {\n\tres := StringSet{Set: make(map[string]bool)}\n\tfor _, elem := range elems {\n\t\tres.Set[elem] = true\n\t}\n\treturn res\n}", "func GetSortedKeySlice(m map[string]string) []string {\n\tkeys := make([]string, 0)\n\tfor key := range m {\n\t\tkeys = append(keys, key)\n\t}\n\tsort.Sort(sort.StringSlice(keys))\n\treturn keys\n}", "func OfStrings(strings ...string) (s StringSet) {\n\treturn Strings(strings)\n}", "func NewStringSet() *StringSet {\n\treturn &StringSet{make(map[string]bool), make([]string, 0), false}\n}", "func RemoveFromSlice(slice []string, 
values ...string) []string {\n\toutput := make([]string, 0, len(slice))\n\n\tremove := make(map[string]bool)\n\tfor _, value := range values {\n\t\tremove[value] = true\n\t}\n\n\tfor _, s := range slice {\n\t\t_, ok := remove[s]\n\t\tif ok {\n\t\t\tcontinue\n\t\t}\n\t\toutput = append(output, s)\n\t}\n\n\treturn output\n}", "func SortedUniqueStrings(list []string) []string {\n\tunique := FirstUniqueStrings(list)\n\tsort.Strings(unique)\n\treturn unique\n}", "func (c StringArrayCollection) Slice(keys ...int) Collection {\n\tvar d = make([]string, len(c.value))\n\tcopy(d, c.value)\n\tif len(keys) == 1 {\n\t\treturn StringArrayCollection{\n\t\t\tvalue: d[keys[0]:],\n\t\t}\n\t} else {\n\t\treturn StringArrayCollection{\n\t\t\tvalue: d[keys[0] : keys[0]+keys[1]],\n\t\t}\n\t}\n}", "func StringSliceMap(ss []string, fn func(string) string) []string {\n\tss2 := make([]string, len(ss))\n\tfor i, v := range ss {\n\t\tss2[i] = fn(v)\n\t}\n\n\treturn ss2\n}", "func (ss *StringSet) ElementsSorted() []string {\n\telements := ss.Elements()\n\tsort.Slice(elements, func(i, j int) bool { return elements[i] < elements[j] })\n\treturn elements\n}", "func (opts *Opts) StringSlice() (sl []string) {\n\topts.updateOptions()\n\tfor option, ok := range opts.options {\n\t\tif ok {\n\t\t\tsl = append(sl, option)\n\t\t}\n\t}\n\treturn\n}", "func (set *StringMap) SetToArray() []string {\n\tset.RWlock.RLock()\n\tdefer set.RWlock.RUnlock()\n\tkeys := make([]string, 0)\n\tfor k := range set.set {\n\t\tkeys = append(keys, k)\n\t}\n\treturn keys\n}", "func (this *DynMap) GetStringSlice(key string) ([]string, bool) {\n\tlst, ok := this.Get(key)\n\tif !ok {\n\t\treturn nil, false\n\t}\n\tswitch v := lst.(type) {\n\tcase []string:\n\t\treturn v, true\n\tcase []interface{}:\n\t\tretlist := make([]string, 0)\n\t\tfor _, tmp := range v {\n\t\t\tin := ToString(tmp)\n\t\t\tretlist = append(retlist, in)\n\t\t}\n\t\treturn retlist, true\n\t}\n\treturn nil, false\n}", "func stringSliceIntersect(s, t []string) 
[]string {\n\tvar res []string\n\tm := make(map[string]bool, len(s))\n\tfor _, x := range s {\n\t\tm[x] = true\n\t}\n\tfor _, y := range t {\n\t\tif m[y] {\n\t\t\tres = append(res, y)\n\t\t}\n\t}\n\treturn res\n}", "func Union(sets ...StringSet) StringSet {\n\ts := make(StringSet)\n\tfor _, t := range sets {\n\t\tfor k := range t {\n\t\t\ts.Add(k)\n\t\t}\n\t}\n\treturn s\n}", "func diffStrings(src ...[]string) []string {\n\tfirst := make(map[string]struct{})\n\tunique := make(map[string]struct{})\n\n\tfor i, srci := range src {\n\t\tfor _, v := range srci {\n\t\t\tif i == 0 {\n\t\t\t\tfirst[v] = struct{}{}\n\t\t\t} else {\n\t\t\t\tif _, ok := first[v]; !ok {\n\t\t\t\t\tunique[v] = struct{}{}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\tkeys := make([]string, len(unique))\n\ti := 0\n\n\tfor k := range unique {\n\t\tkeys[i] = k\n\t\ti++\n\t}\n\n\treturn keys\n}", "func uniqueStrings(s []string) []string {\n\tunique := make(map[string]bool, len(s))\n\tus := make([]string, len(unique))\n\tfor _, elem := range s {\n\t\tif len(elem) != 0 {\n\t\t\tif !unique[elem] {\n\t\t\t\tus = append(us, elem)\n\t\t\t\tunique[elem] = true\n\t\t\t}\n\t\t}\n\t}\n\treturn us\n}", "func removeDuplicates(stringSlices ...[]string) []string {\n\tuniqueMap := map[string]bool{}\n\n\tfor _, stringSlice := range stringSlices {\n\t\tfor _, str := range stringSlice {\n\t\t\tuniqueMap[str] = true\n\t\t}\n\t}\n\n\t// Create a slice with the capacity of unique items\n\t// This capacity make appending flow much more efficient\n\tresult := make([]string, 0, len(uniqueMap))\n\n\tfor key := range uniqueMap {\n\t\tresult = append(result, key)\n\t}\n\n\treturn result\n}", "func StringSlice(s []string, err error) []string {\n\tif err != nil {\n\t\tpanic(err.Error())\n\t}\n\n\treturn s\n}", "func NewStringSet(lists ...[]string) StringSet {\n\tret := make(map[string]bool)\n\tfor _, list := range lists {\n\t\tfor _, entry := range list {\n\t\t\tret[entry] = true\n\t\t}\n\t}\n\treturn ret\n}", "func Strings(s []string, 
caseInsensitive bool) {\n\tif caseInsensitive {\n\t\tsort.Sort(stringSlice(s))\n\t} else {\n\t\tsort.Strings(s)\n\t}\n}", "func Strings(values []string) StringSet {\n\ts := make(StringSet)\n\tfor _, str := range values {\n\t\ts[str] = struct{}{}\n\t}\n\treturn s\n}", "func DedupeSortSlice(s []string, modifier func(string) string) []string {\n\to := DedupeSlice(s, modifier)\n\tsort.Strings(o)\n\treturn o\n}", "func ShardStrings(s []string, shardSize int) [][]string {\n\tif len(s) == 0 {\n\t\treturn nil\n\t}\n\tret := make([][]string, 0, (len(s)+shardSize-1)/shardSize)\n\tfor len(s) > shardSize {\n\t\tret = append(ret, s[0:shardSize])\n\t\ts = s[shardSize:]\n\t}\n\tif len(s) > 0 {\n\t\tret = append(ret, s)\n\t}\n\treturn ret\n}", "func sortSliceString(dataSlice sliceStringInterface, keyOrder, sortDirection string) sliceStringInterface {\n\tindex := make(PairList, len(dataSlice))\n\ti := 0\n\tfor k, v := range dataSlice {\n\t\tindex[i] = Pair{v[keyOrder].(string), k}\n\t\ti++\n\t}\n\tsort.Sort(index)\n\n\torderedDataSlice := make(sliceStringInterface, len(dataSlice))\n\tif sortDirection == \"asc\" {\n\t\tfor k, v := range index {\n\t\t\torderedDataSlice[k] = dataSlice[v.Value]\n\t\t}\n\t} else {\n\t\tfor k, v := range index {\n\t\t\torderedDataSlice[len(dataSlice)-k-1] = dataSlice[v.Value]\n\t\t}\n\t}\n\treturn orderedDataSlice\n}", "func ExampleIntSet_Slice() {\n\ts1 := gset.NewIntSet()\n\ts1.Add([]int{1, 2, 3, 4}...)\n\tfmt.Println(s1.Slice())\n\n\t// May Output:\n\t// [1, 2, 3, 4]\n}", "func (s String) ToSlice() []string {\n\tres := make([]string, 0)\n\tfor k := range s {\n\t\tres = append(res, k)\n\t}\n\treturn res\n}", "func removeStringFromSlice(str string, slice []string) []string {\n\tfor i, v := range slice {\n\t\tif v == str {\n\t\t\t//append the subslice of all elements after this one, to the sublice of all elements before this one\n\t\t\treturn append(slice[:i], slice[i+1:]...)\n\t\t}\n\t}\n\n\t//if the string was not present, just return the slice 
back\n\treturn slice\n}", "func UniqueStrings(s []string) []string {\n\tkeys := make(map[string]bool, len(s))\n\tlist := []string{}\n\n\tfor _, entry := range s {\n\t\tif _, value := keys[entry]; !value {\n\t\t\tkeys[entry] = true\n\t\t\tlist = append(list, entry)\n\t\t}\n\t}\n\n\treturn list\n}", "func DecodeStringSet(blob []byte) (*StringSet, error) {\n\tss := NewStringSet()\n\tbuf := bytes.NewBuffer(blob)\n\tdec := gob.NewDecoder(buf)\n\terr := dec.Decode(&ss.members)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn ss, nil\n}", "func SelectPrefixInStringSlice(prefix string, items []string) []string {\n\n\tl := len(prefix)\n\n\tvar results []string\n\n\t// iterate through the slice of items\n\tfor _, item := range items {\n\n\t\t// check the item length is geater than or equal to the prefix length\n\t\t// this ensures no out of bounds memory errors will occur\n\t\tif len(item) >= l {\n\t\t\tif prefix == item[:l] {\n\t\t\t\tresults = append(results, item)\n\t\t\t}\n\t\t}\n\t}\n\n\treturn results\n}", "func IntersectionSlice(one []string, two []string) []string {\n\tm1 := make(map[string]struct{})\n\tfor _, e := range one {\n\t\tif e == \"\" {\n\t\t\tcontinue\n\t\t}\n\t\tm1[e] = struct{}{}\n\t}\n\tm2 := make(map[string]struct{})\n\tfor _, e := range two {\n\t\tif e == \"\" {\n\t\t\tcontinue\n\t\t}\n\t\tm2[e] = struct{}{}\n\t}\n\tfor key := range m1 {\n\t\tif _, ok := m2[key]; !ok {\n\t\t\tdelete(m1, key)\n\t\t}\n\t}\n\ts := make([]string, 0, len(m1))\n\tfor key := range m1 {\n\t\ts = append(s, key)\n\t}\n\tsort.Strings(s)\n\treturn s\n}", "func OnlyUnique(slice []string) []string {\n\tuniqMap := make(map[string]struct{})\n\tfor _, v := range slice {\n\t\tuniqMap[v] = struct{}{}\n\t}\n\n\tuniqSlice := make([]string, 0, len(uniqMap))\n\n\tkeys := make([]string, 0, len(uniqMap))\n\tfor k := range uniqMap {\n\t\tkeys = append(keys, k)\n\t}\n\tsort.Strings(keys)\n\tuniqSlice = append(uniqSlice, keys...)\n\n\treturn uniqSlice\n}" ]
[ "0.7395667", "0.70148724", "0.6999877", "0.69945043", "0.69452065", "0.6828592", "0.6691718", "0.669042", "0.6537828", "0.64243567", "0.641947", "0.63832146", "0.6322088", "0.62684745", "0.62402517", "0.62099415", "0.6133305", "0.60791206", "0.6033735", "0.6019684", "0.60185736", "0.6000332", "0.5990574", "0.5971549", "0.5957457", "0.5927781", "0.5909564", "0.5882885", "0.58540154", "0.5821667", "0.58215857", "0.57886046", "0.5785382", "0.5780324", "0.5772196", "0.5761199", "0.57473755", "0.571585", "0.5706161", "0.56939733", "0.5679153", "0.5668431", "0.56633186", "0.56620383", "0.5657056", "0.5646248", "0.56129694", "0.56099904", "0.5598388", "0.55948895", "0.5593206", "0.558793", "0.55861187", "0.55823", "0.5577969", "0.5571112", "0.55604225", "0.55514336", "0.55507195", "0.5532341", "0.5526158", "0.5518267", "0.5512108", "0.5504869", "0.5491781", "0.54910356", "0.54904777", "0.5470335", "0.5466717", "0.5466551", "0.54528874", "0.545049", "0.5433288", "0.5423347", "0.54194474", "0.5417994", "0.5414889", "0.5401876", "0.5395807", "0.5393886", "0.53859645", "0.5382767", "0.5381976", "0.537445", "0.5373625", "0.53725606", "0.5371492", "0.5369686", "0.535884", "0.5351194", "0.53490186", "0.53469795", "0.5341149", "0.5338002", "0.5318319", "0.53142923", "0.5313247", "0.53093255", "0.53051925", "0.5303803" ]
0.81835073
0
ParseOnOff parses whether value is "on" or "off", parameterName is passed for error reporting purposes, defaultValue is returned when no value is set
ParseOnOff анализирует, является ли значение "включено" или "отключено", parameterName передается для целей отчетности об ошибках, defaultValue возвращается, если значение не задано
func ParseOnOff(parameterName, val string, defaultValue bool) (bool, error) { switch val { case teleport.On: return true, nil case teleport.Off: return false, nil case "": return defaultValue, nil default: return false, trace.BadParameter("bad %q parameter value: %q, supported values are on or off", parameterName, val) } }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (dps *domainParser) di2OnOff() {\n\tdps.defaultValue = dps.onOffDefaultValue\n\tdps.customParseID = dps.onOffCustomParseID\n\tdps.checkEndedCorrect = dps.onOffCheckEndedCorrect\n\tdps.appendQP = dps.onOffAppendQp\n}", "func ParseBool(str string) (bool, error) {\n\tif str == \"on\" {\n\t\treturn true, nil\n\t}\n\tif str == \"off\" {\n\t\treturn false, nil\n\t}\n\treturn strconv.ParseBool(str)\n}", "func (f flagBool) Parse(value string) interface{} {\n\tswitch value {\n\tcase \"1\", \"t\", \"T\", \"true\", \"TRUE\", \"True\", \"y\", \"Y\", \"yes\", \"YES\", \"Yes\":\n\t\treturn true\n\t}\n\treturn false\n}", "func (f flagString) Parse(value string) interface{} {\n\treturn value\n}", "func ParseBoolP(cmd *cobra.Command, name string) (*bool, error) {\n\tflagRaw, err := cmd.Flags().GetString(name)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar flagVal *bool\n\tss, err := strconv.ParseBool(flagRaw)\n\tif err != nil && flagRaw != \"\" {\n\t\treturn nil, err\n\t}\n\n\tif flagRaw != \"\" && err == nil {\n\t\treturn &ss, nil\n\t}\n\n\treturn flagVal, nil\n}", "func (f *Form) Bool(param string, defaultValue bool) bool {\n\tvals, ok := f.values[param]\n\tif !ok {\n\t\treturn defaultValue\n\t}\n\tparamVal, err := strconv.ParseBool(vals[0])\n\tif err != nil {\n\t\tf.err = err\n\t\treturn defaultValue\n\t}\n\treturn paramVal\n}", "func NamedBoolDefault(name string, def bool) func(http.ResponseWriter, url.Values, martini.Context) {\n\treturn func(w http.ResponseWriter, query url.Values, m martini.Context) {\n\t\tvalue_string := query.Get(name)\n\t\tvalue, err := strconv.ParseBool(value_string)\n\n\t\tif \"\" == value_string {\n\t\t\tm.Map(NamedBoolParameter(def))\n\t\t\treturn\n\t\t}\n\n\t\tif nil != err {\n\t\t\thttp.Error(w, fmt.Sprintf(\"\\\"%s\\\" is not a boolean\"), 422)\n\t\t}\n\n\t\tm.Map(NamedBoolParameter(value))\n\t}\n}", "func ParseBool(operand string) (value bool, err error) { return strconv.ParseBool(operand) }", "func (dps *domainParser) 
onOffDefaultValue() (tmpIDs []string, queryPieceIDs map[string]bool) {\n\ttmpIDs = []string{onoff_default_id}\n\tqueryPieceIDs = map[string]bool{onoff_default_id: true}\n\treturn\n}", "func ParseBool(str string) (bool, error) {}", "func ToBool(v interface{}, def bool) bool {\r\n\tif b, ok := v.(bool); ok {\r\n\t\treturn b\r\n\t}\r\n\tif i, ok := v.(int); ok {\r\n\t\treturn i > 0\r\n\t}\r\n\tif i, ok := v.(float64); ok {\r\n\t\treturn i > 0\r\n\t}\r\n\tif i, ok := v.(float32); ok {\r\n\t\treturn i > 0\r\n\t}\r\n\tif ss, ok := v.([]string); ok {\r\n\t\tv = ss[0]\r\n\t}\r\n\tif s, ok := v.(string); ok {\r\n\t\tif s == \"on\" {\r\n\t\t\treturn true\r\n\t\t}\r\n\t\tif s == \"off\" || s == \"\" {\r\n\t\t\treturn false\r\n\t\t}\r\n\t\tif b, err := strconv.ParseBool(s); err == nil {\r\n\t\t\treturn b\r\n\t\t}\r\n\t}\r\n\r\n\treturn def\r\n\r\n}", "func parseBool(str string) (value bool, err error) {\n\tswitch str {\n\tcase \"1\", \"t\", \"T\", \"true\", \"TRUE\", \"True\", \"YES\", \"yes\", \"Yes\", \"ON\", \"on\", \"On\":\n\t\treturn true, nil\n\tcase \"0\", \"f\", \"F\", \"false\", \"FALSE\", \"False\", \"NO\", \"no\", \"No\", \"OFF\", \"off\", \"Off\":\n\t\treturn false, nil\n\t}\n\treturn false, fmt.Errorf(\"parsing \\\"%s\\\": invalid syntax\", str)\n}", "func ParseBool(q url.Values, name string) (bool, bool, error) {\n\tstringVal := q.Get(name)\n\tif stringVal == \"\" {\n\t\treturn false, false, nil\n\t}\n\n\tval, err := strconv.ParseBool(stringVal)\n\tif err != nil {\n\t\treturn false, false, trace.BadParameter(\n\t\t\t\"'%v': expected 'true' or 'false', got %v\", name, stringVal)\n\t}\n\treturn val, true, nil\n}", "func parseBoolean(s *scanner) (bool, error) {\n\tif s.eof() || s.data[s.off] != '?' 
{\n\t\treturn false, &UnmarshalError{s.off, ErrInvalidBooleanFormat}\n\t}\n\ts.off++\n\n\tif s.eof() {\n\t\treturn false, &UnmarshalError{s.off, ErrInvalidBooleanFormat}\n\t}\n\n\tswitch s.data[s.off] {\n\tcase '0':\n\t\ts.off++\n\n\t\treturn false, nil\n\tcase '1':\n\t\ts.off++\n\n\t\treturn true, nil\n\t}\n\n\treturn false, &UnmarshalError{s.off, ErrInvalidBooleanFormat}\n}", "func AutotypeValue(input interface{}) interface{} {\n\tif strValue, ok := input.(string); ok {\n\t\tif intVal, err := strconv.ParseInt(strValue, 10, 64); err == nil {\n\t\t\treturn intVal\n\t\t} else if floatVal, err := strconv.ParseFloat(strValue, 64); err == nil {\n\t\t\treturn floatVal\n\t\t} else if strValue == \"true\" {\n\t\t\treturn true\n\t\t} else if strValue == \"false\" {\n\t\t\treturn false\n\t\t} else if strValue == \"null\" {\n\t\t\treturn nil\n\t\t}\n\t}\n\n\treturn input\n}", "func ParseBool(str string) (val bool, isBool bool) {\n\t// Note: Not using strconv.ParseBool because I want it a bit looser (any casing) and to allow yes/no/off/on values.\n\tlstr := strings.ToLower(strings.TrimSpace(str))\n\tswitch lstr {\n\tcase \"false\", \"f\", \"0\", \"no\", \"n\", \"off\":\n\t\tisBool = true\n\tcase \"true\", \"t\", \"1\", \"yes\", \"y\", \"on\":\n\t\tval = true\n\t\tisBool = true\n\t}\n\treturn\n}", "func OptionalURLParamBool(request *http.Request, name string) (null.Bool, IResponse) {\n\tvalueStr := chi.URLParam(request, name)\n\tif valueStr == \"\" {\n\t\treturn null.Bool{}, nil\n\t}\n\n\tvalue, err := strconv.ParseBool(valueStr)\n\tif err != nil {\n\t\treturn null.Bool{}, BadRequest(request, \"Invalid url param %s (value: '%s'): %s\", name, valueStr, err)\n\t}\n\n\treturn null.BoolFrom(value), nil\n}", "func ParseFlagBool(args []string) (bool, int, error) {\n\tif strings.ContainsAny(args[0], \"= \") {\n\t\tparts := strings.SplitN(args[0], \"=\", 2)\n\t\tif len(parts) == 1 {\n\t\t\tparts = strings.SplitN(args[0], \" \", 2)\n\t\t}\n\t\tif len(parts) == 2 {\n\t\t\tval, isBool := 
ParseBool(parts[1])\n\t\t\tif !isBool {\n\t\t\t\treturn false, 0, fmt.Errorf(\"invalid %s bool value: [%s]\", parts[0], parts[1])\n\t\t\t}\n\t\t\treturn val, 0, nil\n\t\t}\n\t\treturn false, 0, fmt.Errorf(\"unable to split flag and value from string: [%s]\", args[0])\n\t}\n\tif len(args) > 1 {\n\t\tval, isBool := ParseBool(args[1])\n\t\tif isBool {\n\t\t\treturn val, 1, nil\n\t\t}\n\t}\n\treturn true, 0, nil\n}", "func ParseBool(val interface{}) (value bool, err error) {\n\tif val != nil {\n\t\tswitch v := val.(type) {\n\t\tcase bool:\n\t\t\treturn v, nil\n\t\tcase string:\n\t\t\tswitch v {\n\t\t\tcase \"1\", \"t\", \"T\", \"true\", \"TRUE\", \"True\", \"YES\", \"yes\", \"Yes\", \"Y\", \"y\", \"ON\", \"on\", \"On\":\n\t\t\t\treturn true, nil\n\t\t\tcase \"0\", \"f\", \"F\", \"false\", \"FALSE\", \"False\", \"NO\", \"no\", \"No\", \"N\", \"n\", \"OFF\", \"off\", \"Off\":\n\t\t\t\treturn false, nil\n\t\t\t}\n\t\tcase int8, int32, int64:\n\t\t\tstrV := fmt.Sprintf(\"%s\", v)\n\t\t\tif strV == \"1\" {\n\t\t\t\treturn true, nil\n\t\t\t} else if strV == \"0\" {\n\t\t\t\treturn false, nil\n\t\t\t}\n\t\tcase float64:\n\t\t\tif v == 1 {\n\t\t\t\treturn true, nil\n\t\t\t} else if v == 0 {\n\t\t\t\treturn false, nil\n\t\t\t}\n\t\t}\n\t\treturn false, fmt.Errorf(\"parsing %q: invalid syntax\", val)\n\t}\n\treturn false, fmt.Errorf(\"parsing <nil>: invalid syntax\")\n}", "func URLParamBool(request *http.Request, name string) (bool, IResponse) {\n\tvalueStr := chi.URLParam(request, name)\n\tvalue, err := strconv.ParseBool(valueStr)\n\tif err != nil {\n\t\treturn false, BadRequest(request, \"Invalid url param %s (value: '%s'): %s\", name, valueStr, err)\n\t}\n\n\treturn value, nil\n}", "func ParseBooleanDefaultFalse(s string) Boolean {\n\tif s == \"\" {\n\t\treturn NewBoolean(false)\n\t}\n\n\treturn NewBoolean(s == \"true\")\n}", "func ValBool(k string, d bool, p map[string]string) (v bool, err error) {\n\n\tbStr, ok := p[k]\n\tif !ok {\n\t\tv = d\n\t\treturn\n\t}\n\n\ttErr := 
fmt.Errorf(\"invalid value for the parameter %s\", k)\n\tb, err := strconv.ParseBool(bStr)\n\tif err != nil {\n\t\terr = tErr\n\t\treturn\n\t}\n\n\tv = b\n\n\treturn\n}", "func Bool(name string, defaultValue bool) bool {\n\tif strVal, ok := os.LookupEnv(name); ok {\n\t\tif res, err := strconv.ParseBool(strVal); err == nil {\n\t\t\treturn res\n\t\t}\n\t}\n\n\treturn defaultValue\n}", "func ParseBooleanDefaultTrue(s string) Boolean {\n\tif s == \"\" {\n\t\treturn NewBoolean(true)\n\t}\n\n\treturn NewBoolean(s == \"true\")\n}", "func onOffParseFile(path string) (*parseResult, error) {\n\tif pr, ok := hasOnOffParseDone[path]; ok {\n\t\treturn pr, nil\n\t}\n\tf, err := os.Open(path)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer f.Close()\n\n\treader := bufio.NewReader(f)\n\tdps := newDomainParser()\n\tdps.di2OnOff()\n\tpr, err := dps.parse(reader)\n\thasOnOffParseDone[path] = pr\n\treturn pr, err\n}", "func boolHandler(set func(bool) error, get func() bool) http.HandlerFunc {\n\treturn func(w http.ResponseWriter, r *http.Request) {\n\t\tvars := mux.Vars(r)\n\n\t\tval, err := strconv.ParseBool(vars[\"value\"])\n\t\tif err != nil {\n\t\t\tjsonError(w, http.StatusBadRequest, err)\n\t\t\treturn\n\t\t}\n\n\t\terr = set(val)\n\t\tif err != nil {\n\t\t\tjsonError(w, http.StatusNotAcceptable, err)\n\t\t\treturn\n\t\t}\n\n\t\tjsonResult(w, get())\n\t}\n}", "func getBoolParamFromURL(r *http.Request, key string) (bool, error) {\n\tval, err := hchi.GetStringFromURL(r, key)\n\tif err != nil {\n\t\treturn false, errors.Wrapf(err, \"loading %s from URL\", key)\n\t}\n\n\tif val == \"true\" {\n\t\treturn true, nil\n\t}\n\tif val == \"false\" || val == \"\" {\n\t\treturn false, nil\n\t}\n\n\treturn false, problem.MakeInvalidFieldProblem(key, errors.New(\"invalid bool value\"))\n}", "func Parse(config interface{}) { Define(config); flag.Parse() }", "func (this *OptionBool) Parse(arg string) error {\n argint, err := strconv.Atoi(arg)\n if err != nil {\n return err\n }\n\n 
//this.opt_storage = argint!=0;\n var storage *bool\n storage = this.opt_storage.(*bool)\n\n *storage = argint != 0\n\n return nil\n}", "func isFlagged(tagValue *string) bool {\n\tif tagValue == nil {\n\t\treturn false\n\t}\n\tb, err := strconv.ParseBool(*tagValue)\n\tif err == nil {\n\t\treturn b\n\t}\n\treturn false\n}", "func Bool(v interface{}, defaults ...bool) (b bool) {\n\tswitch tv := v.(type) {\n\tcase nil:\n\t\tif 1 < len(defaults) {\n\t\t\tb = defaults[1]\n\t\t}\n\tcase bool:\n\t\tb = tv\n\tcase string:\n\t\tvar err error\n\t\tif 1 < len(defaults) {\n\t\t\tb = defaults[1]\n\t\t} else if b, err = strconv.ParseBool(tv); err != nil {\n\t\t\tif 0 < len(defaults) {\n\t\t\t\tb = defaults[0]\n\t\t\t}\n\t\t}\n\tcase gen.Bool:\n\t\tb = bool(tv)\n\tcase gen.String:\n\t\tvar err error\n\t\tif 1 < len(defaults) {\n\t\t\tb = defaults[1]\n\t\t} else if b, err = strconv.ParseBool(string(tv)); err != nil {\n\t\t\tif 0 < len(defaults) {\n\t\t\t\tb = defaults[0]\n\t\t\t}\n\t\t}\n\tdefault:\n\t\tif 0 < len(defaults) {\n\t\t\tb = defaults[0]\n\t\t}\n\t}\n\treturn\n}", "func QueryBoolParam(r *http.Request, param string, defaultValue bool) bool {\n\tvalue := r.URL.Query().Get(param)\n\tif value == \"\" {\n\t\treturn defaultValue\n\t}\n\n\tval, err := strconv.ParseBool(value)\n\n\tif err != nil {\n\t\treturn defaultValue\n\t}\n\n\treturn val\n}", "func parseBool(asString string) (bool, error) {\n\tswitch asString {\n\tcase \"true\":\n\t\treturn true, nil\n\tcase \"false\":\n\t\treturn false, nil\n\tdefault:\n\t\treturn false, fmt.Errorf(\"could not parse %q as a bool\", asString)\n\t}\n}", "func (a *Args) IsOff(s string) bool {\n\treturn !a.IsOn(s)\n}", "func Bool(v string) (bool, bool, error) {\n\tswitch os.Getenv(v) {\n\tcase \"true\":\n\t\treturn true, true, nil\n\tcase \"false\":\n\t\treturn false, true, nil\n\tcase \"\":\n\t\treturn false, false, nil\n\tdefault:\n\t\treturn false, false, fmt.Errorf(\"%s must be 'true' or 'false'\", v)\n\t}\n}", "func parseOrDefault(req 
*http.Request, requestParam string, defaultValue int) int {\n\tresult := defaultValue\n\tparamAsInt, ok := req.URL.Query()[requestParam]\n\tif !ok || len(paramAsInt[0]) < 1 {\n\t\tlog.Printf(\"no parameter provided in request --> defaulting to %d \\n\", defaultValue)\n\t} else {\n\t\tintValue, err := strconv.Atoi(paramAsInt[0])\n\t\tif err != nil {\n\t\t\tlog.Printf(\"something went wrong parsing the parameter --> defaulting to %d \\n\", defaultValue)\n\t\t\tlog.Println(err)\n\t\t} else {\n\t\t\tresult = intValue\n\t\t}\n\t}\n\treturn result\n}", "func (e Entry) BoolDefault(def bool) (bool, error) {\n\tv := e.ValueRaw\n\tif v == nil {\n\t\treturn def, errFindParse.Format(\"bool\", e.Key)\n\t}\n\n\tif vBoolean, ok := v.(bool); ok {\n\t\treturn vBoolean, nil\n\t}\n\n\tif vString, ok := v.(string); ok {\n\t\tb, err := strconv.ParseBool(vString)\n\t\tif err != nil {\n\t\t\treturn def, err\n\t\t}\n\t\treturn b, nil\n\t}\n\n\tif vInt, ok := v.(int); ok {\n\t\tif vInt == 1 {\n\t\t\treturn true, nil\n\t\t}\n\t\treturn false, nil\n\t}\n\n\treturn def, errFindParse.Format(\"bool\", e.Key)\n}", "func ParseQueryBool(param string, request *http.Request, params imageserver.Params) error {\n\ts := request.URL.Query().Get(param)\n\tif s == \"\" {\n\t\treturn nil\n\t}\n\tb, err := strconv.ParseBool(s)\n\tif err != nil {\n\t\treturn newParseTypeParamError(param, \"bool\", err)\n\t}\n\tparams.Set(param, b)\n\treturn nil\n}", "func assignValueBool(params map[string]interface{}, name string, out *bool) error {\n\tif raw, ok := params[name]; ok {\n\t\tval, ok := raw.(bool)\n\t\tif !ok {\n\t\t\treturn fmt.Errorf(\"Expecting %s to be a boolean\", name)\n\t\t}\n\t\t*out = val\n\t\tdelete(params, name)\n\t}\n\treturn nil\n}", "func parseModeFlag() {\n\tswitch modeStr {\n\tcase \"iteration\":\n\t\tmode = coloring.IterationCount\n\tcase \"modulo\":\n\t\tmode = coloring.Modulo\n\tcase \"vector\":\n\t\tmode = coloring.VectorField\n\tcase \"orbit\":\n\t\tmode = 
coloring.OrbitLength\n\tdefault:\n\t\tlogrus.Fatalln(\"invalid coloring function:\", modeStr)\n\t}\n}", "func (k *Parameters) ParseFlags(defaultEndpoint string, printVersionInfoFun func(), storeParamsMap map[string]string) {\n\tvar params []*StrParamDetail\n\tk.ConfigFile = StrParamDetail{Name: \"config-file\", ShortName: \"k\", Usage: \"Configuration file.\"}\n\tparams = append(params, &k.ConfigFile)\n\tk.Endpoint = StrParamDetail{Name: \"endpoint\", DefaultValue: defaultEndpoint, Usage: \"Server config: Endpoint the server listen and serve.\"}\n\tparams = append(params, &k.Endpoint)\n\tk.Insecure = StrParamDetail{Name: \"insecure\", DefaultValue: strconv.FormatBool(DefaultInsecure), Usage: \"Server config: Disable transport security.\"}\n\tparams = append(params, &k.Insecure)\n\tk.EnableAuthz = StrParamDetail{Name: \"enable-authz\", DefaultValue: strconv.FormatBool(DefaultEnableAuthz), Usage: \"Server config: Enable authorization check.\"}\n\tparams = append(params, &k.EnableAuthz)\n\tk.CertPath = StrParamDetail{Name: \"cert\", Usage: \"Server config: Server certifice file path.\"}\n\tparams = append(params, &k.CertPath)\n\tk.KeyPath = StrParamDetail{Name: \"key\", Usage: \"Server config: Server key file path.\"}\n\tparams = append(params, &k.KeyPath)\n\tk.ClientCertPath = StrParamDetail{Name: \"client-cert\", ShortName: \"c\", Usage: \"Server config: Client certifice file path.\"}\n\tparams = append(params, &k.ClientCertPath)\n\tk.ForceClientCert = StrParamDetail{Name: \"force-client-cert\", ShortName: \"f\", Usage: \"Server config: Force Client certification.\"}\n\tparams = append(params, &k.ForceClientCert)\n\n\tk.StoreType = StrParamDetail{Name: \"store-type\", DefaultValue: DefaultStoreType, Usage: \"Store config: Policy store type, etcd or file.\"}\n\tparams = append(params, &k.StoreType)\n\tk.StoreWatchEnabled = StrParamDetail{Name: \"enable-watch\", DefaultValue: strconv.FormatBool(DefaultStoreWatchEnabled), Usage: \"Evaluator config: Whether enable watch 
store changes.\"}\n\tparams = append(params, &k.StoreWatchEnabled)\n\n\t// Log configurations\n\tk.LogConf.LogLevel = StrParamDetail{Name: \"log-level\", Usage: \"Log config: log level, available levels are panic, fatal, error, warn, info and debug.\"}\n\tparams = append(params, &k.LogConf.LogLevel)\n\tk.LogConf.LogFormatter = StrParamDetail{Name: \"log-formatter\", Usage: \"Log config: log formatter, available values are text and json.\"}\n\tparams = append(params, &k.LogConf.LogFormatter)\n\tk.LogConf.LogReportCaller = StrParamDetail{Name: \"log-reportcaller\", DefaultValue: strconv.FormatBool(false), Usage: \"Log config: if the caller(file, line and function) is included in the log entry.\"}\n\tparams = append(params, &k.LogConf.LogReportCaller)\n\tk.LogConf.LogFileName = StrParamDetail{Name: \"log-filename\", Usage: \"Log config: log file name.\"}\n\tparams = append(params, &k.LogConf.LogFileName)\n\tk.LogConf.LogMaxSize = StrParamDetail{Name: \"log-maxsize\", Usage: \"Log config: maximum size in megabytes of the log file before it gets rotated.\"}\n\tparams = append(params, &k.LogConf.LogMaxSize)\n\tk.LogConf.LogCompress = StrParamDetail{Name: \"log-compress\", Usage: \"Log config: if the rotated log files should be compressed.\"}\n\tparams = append(params, &k.LogConf.LogCompress)\n\tk.LogConf.LogMaxBackups = StrParamDetail{Name: \"log-maxbackups\", Usage: \"Log config: maximum number of old log files to retain.\"}\n\tparams = append(params, &k.LogConf.LogMaxBackups)\n\tk.LogConf.LogMaxAge = StrParamDetail{Name: \"log-maxage\", Usage: \"Log config: maximum number of days to retain old log files.\"}\n\tparams = append(params, &k.LogConf.LogMaxAge)\n\tk.LogConf.LogLocalTime = StrParamDetail{Name: \"log-localtime\", Usage: \"Log config: if local time is used for formatting the timestamps in backup files.\"}\n\tparams = append(params, &k.LogConf.LogLocalTime)\n\n\t// Audit Log configurations\n\tk.AuditLogConf.LogLevel = StrParamDetail{Name: \"auditlog-level\", 
DefaultValue: DefaultAuditLogLevel, Usage: \"Audit Log config: log level, available levels are panic, fatal, error, warn, info and debug.\"}\n\tparams = append(params, &k.AuditLogConf.LogLevel)\n\tk.AuditLogConf.LogFormatter = StrParamDetail{Name: \"auditlog-formatter\", DefaultValue: DefaultAuditLogFormatter, Usage: \"Audit Log config: log formatter, available values are text and json.\"}\n\tparams = append(params, &k.AuditLogConf.LogFormatter)\n\tk.AuditLogConf.LogReportCaller = StrParamDetail{Name: \"auditlog-reportcaller\", DefaultValue: strconv.FormatBool(false), Usage: \"Audit Log config: if the caller(file, line and function) is included in the log entry.\"}\n\tparams = append(params, &k.AuditLogConf.LogReportCaller)\n\tk.AuditLogConf.LogFileName = StrParamDetail{Name: \"auditlog-filename\", DefaultValue: DefaultAuditLogFilename, Usage: \"Audit Log config: log file name.\"}\n\tparams = append(params, &k.AuditLogConf.LogFileName)\n\tk.AuditLogConf.LogMaxSize = StrParamDetail{Name: \"auditlog-maxsize\", DefaultValue: DefaultAuditLogMaxSize, Usage: \"Audit Log config: maximum size in megabytes of the log file before it gets rotated.\"}\n\tparams = append(params, &k.AuditLogConf.LogMaxSize)\n\tk.AuditLogConf.LogCompress = StrParamDetail{Name: \"auditlog-compress\", DefaultValue: \"false\", Usage: \"Audit Log config: if the rotated log files should be compressed.\"}\n\tparams = append(params, &k.AuditLogConf.LogCompress)\n\tk.AuditLogConf.LogMaxBackups = StrParamDetail{Name: \"auditlog-maxbackups\", DefaultValue: DefaultAuditLogMaxBackups, Usage: \"Audit Log config: maximum number of old log files to retain.\"}\n\tparams = append(params, &k.AuditLogConf.LogMaxBackups)\n\tk.AuditLogConf.LogMaxAge = StrParamDetail{Name: \"auditlog-maxage\", DefaultValue: DefaultAuditLogMaxAge, Usage: \"Audit Log config: maximum number of days to retain old log files.\"}\n\tparams = append(params, &k.AuditLogConf.LogMaxAge)\n\tk.AuditLogConf.LogLocalTime = StrParamDetail{Name: 
\"auditlog-localtime\", DefaultValue: \"false\", Usage: \"Audit Log config: if local time is used for formatting the timestamps in backup files.\"}\n\tparams = append(params, &k.AuditLogConf.LogLocalTime)\n\n\tk.AsserterConf.AsserterEndpoint = StrParamDetail{Name: \"asserter-endpoint\", Usage: \"Assertion server endpoint.\"}\n\tparams = append(params, &k.AsserterConf.AsserterEndpoint)\n\tk.AsserterConf.AsserterClientKeyPath = StrParamDetail{Name: \"asserter-client-key\", Usage: \"Assertion service client key file.\"}\n\tparams = append(params, &k.AsserterConf.AsserterClientKeyPath)\n\tk.AsserterConf.AsserterClientCertPath = StrParamDetail{Name: \"asserter-client-cert\", Usage: \"Assertion service client cert file.\"}\n\tparams = append(params, &k.AsserterConf.AsserterClientCertPath)\n\tk.AsserterConf.AsserterCaPath = StrParamDetail{Name: \"asserter-ca-cert\", Usage: \"Assertion service CA cert file.\"}\n\tparams = append(params, &k.AsserterConf.AsserterCaPath)\n\tk.AsserterConf.AsserterClientTimeout = StrParamDetail{Name: \"asserter-client-timeout\", DefaultValue: DefaultAsserterClientTimeout, Usage: \"Assertion service client http timeout value.\"}\n\tparams = append(params, &k.AsserterConf.AsserterClientTimeout)\n\n\tpflag.BoolVarP(&k.Version, \"version\", \"\", false, \"print version information\")\n\n\tfor _, paramDetail := range params {\n\t\tpflag.StringVarP(&(paramDetail.Value), paramDetail.Name, paramDetail.ShortName, paramDetail.DefaultValue, paramDetail.Usage)\n\t}\n\tpflag.Parse()\n\n\tif k.Version {\n\t\tprintVersionInfoFun()\n\t\tos.Exit(0)\n\t}\n\n\tif len(k.ConfigFile.Value) == 0 {\n\t\tenvVarName := FlagToEnv(k.ConfigFile.Name)\n\t\tval := os.Getenv(envVarName)\n\t\tif len(val) != 0 {\n\t\t\tk.ConfigFile.Value = val\n\t\t}\n\t}\n\n\tvar conf *cfg.Config\n\tif k.ConfigFile.Value != \"\" {\n\t\tvar err error\n\t\tconf, err = cfg.ReadConfig(k.ConfigFile.Value)\n\t\tif err != nil {\n\t\t\tfmt.Fprintf(os.Stderr, \"Fail to parse config file %s, error is 
%v. \\n\", k.ConfigFile.Value, err)\n\t\t\tk.usage()\n\t\t}\n\t} else {\n\t\tconf = nil\n\t}\n\n\tpflag.VisitAll(func(f *pflag.Flag) {\n\t\tkey := FlagToEnv(f.Name)\n\t\tif !f.Changed {\n\t\t\t//if not set from command line, search it from environment variable\n\t\t\tval := os.Getenv(key)\n\t\t\tif val != \"\" {\n\t\t\t\tf.Value.Set(val)\n\t\t\t} else {\n\t\t\t\t//if not set from environment variable, search it from config file\n\t\t\t\tswitch f.Name {\n\t\t\t\tcase k.Endpoint.Name:\n\t\t\t\t\tif conf != nil && conf.ServerConfig != nil && len(conf.ServerConfig.Endpoint) != 0 {\n\t\t\t\t\t\tf.Value.Set(conf.ServerConfig.Endpoint)\n\t\t\t\t\t}\n\t\t\t\tcase k.Insecure.Name:\n\t\t\t\t\tif conf != nil && conf.ServerConfig != nil && len(conf.ServerConfig.Insecure) != 0 {\n\t\t\t\t\t\tf.Value.Set(conf.ServerConfig.Insecure)\n\t\t\t\t\t}\n\t\t\t\tcase k.EnableAuthz.Name:\n\t\t\t\t\tif conf != nil && conf.ServerConfig != nil && len(conf.ServerConfig.EnableAuthz) != 0 {\n\t\t\t\t\t\tf.Value.Set(conf.ServerConfig.EnableAuthz)\n\t\t\t\t\t}\n\t\t\t\tcase k.KeyPath.Name:\n\t\t\t\t\tif conf != nil && conf.ServerConfig != nil && len(conf.ServerConfig.KeyPath) != 0 {\n\t\t\t\t\t\tf.Value.Set(conf.ServerConfig.KeyPath)\n\t\t\t\t\t}\n\t\t\t\tcase k.CertPath.Name:\n\t\t\t\t\tif conf != nil && conf.ServerConfig != nil && len(conf.ServerConfig.CertPath) != 0 {\n\t\t\t\t\t\tf.Value.Set(conf.ServerConfig.CertPath)\n\t\t\t\t\t}\n\t\t\t\tcase k.ForceClientCert.Name:\n\t\t\t\t\tif conf != nil && conf.ServerConfig != nil {\n\t\t\t\t\t\tf.Value.Set(strconv.FormatBool(conf.ServerConfig.ForceClientCert))\n\t\t\t\t\t}\n\t\t\t\tcase k.ClientCertPath.Name:\n\t\t\t\t\tif conf != nil && conf.ServerConfig != nil && len(conf.ServerConfig.ClientCertPath) != 0 {\n\t\t\t\t\t\tf.Value.Set(conf.ServerConfig.ClientCertPath)\n\t\t\t\t\t}\n\t\t\t\tcase k.StoreType.Name:\n\t\t\t\t\tif conf != nil && conf.StoreConfig != nil && len(conf.StoreConfig.StoreType) != 0 
{\n\t\t\t\t\t\tf.Value.Set(conf.StoreConfig.StoreType)\n\t\t\t\t\t}\n\t\t\t\tcase k.StoreWatchEnabled.Name:\n\t\t\t\t\tif conf != nil {\n\t\t\t\t\t\tf.Value.Set(strconv.FormatBool(conf.EnableWatch))\n\t\t\t\t\t}\n\t\t\t\t// Log configurations\n\t\t\t\tcase k.LogConf.LogLevel.Name:\n\t\t\t\t\tif conf != nil && conf.LogConfig != nil {\n\t\t\t\t\t\tf.Value.Set(conf.LogConfig.Level)\n\t\t\t\t\t}\n\t\t\t\tcase k.LogConf.LogFormatter.Name:\n\t\t\t\t\tif conf != nil && conf.LogConfig != nil {\n\t\t\t\t\t\tf.Value.Set(conf.LogConfig.Formatter)\n\t\t\t\t\t}\n\t\t\t\tcase k.LogConf.LogReportCaller.Name:\n\t\t\t\t\tif conf != nil && conf.LogConfig != nil {\n\t\t\t\t\t\tf.Value.Set(strconv.FormatBool(conf.LogConfig.SetReportCaller))\n\t\t\t\t\t}\n\t\t\t\tcase k.LogConf.LogFileName.Name:\n\t\t\t\t\tif conf != nil && conf.LogConfig != nil && conf.LogConfig.RotationConfig != nil {\n\t\t\t\t\t\tf.Value.Set(conf.LogConfig.RotationConfig.Filename)\n\t\t\t\t\t}\n\t\t\t\tcase k.LogConf.LogMaxSize.Name:\n\t\t\t\t\tif conf != nil && conf.LogConfig != nil && conf.LogConfig.RotationConfig != nil {\n\t\t\t\t\t\tf.Value.Set(strconv.Itoa(conf.LogConfig.RotationConfig.MaxSize))\n\t\t\t\t\t}\n\t\t\t\tcase k.LogConf.LogMaxAge.Name:\n\t\t\t\t\tif conf != nil && conf.LogConfig != nil && conf.LogConfig.RotationConfig != nil {\n\t\t\t\t\t\tf.Value.Set(strconv.Itoa(conf.LogConfig.RotationConfig.MaxAge))\n\t\t\t\t\t}\n\t\t\t\tcase k.LogConf.LogMaxBackups.Name:\n\t\t\t\t\tif conf != nil && conf.LogConfig != nil && conf.LogConfig.RotationConfig != nil {\n\t\t\t\t\t\tf.Value.Set(strconv.Itoa(conf.LogConfig.RotationConfig.MaxBackups))\n\t\t\t\t\t}\n\t\t\t\tcase k.LogConf.LogCompress.Name:\n\t\t\t\t\tif conf != nil && conf.LogConfig != nil && conf.LogConfig.RotationConfig != nil {\n\t\t\t\t\t\tf.Value.Set(strconv.FormatBool(conf.LogConfig.RotationConfig.Compress))\n\t\t\t\t\t}\n\t\t\t\tcase k.LogConf.LogLocalTime.Name:\n\t\t\t\t\tif conf != nil && conf.LogConfig != nil && conf.LogConfig.RotationConfig != 
nil {\n\t\t\t\t\t\tf.Value.Set(strconv.FormatBool(conf.LogConfig.RotationConfig.LocalTime))\n\t\t\t\t\t}\n\t\t\t\t// Audit Log configurations\n\t\t\t\tcase k.AuditLogConf.LogLevel.Name:\n\t\t\t\t\tif conf != nil && conf.AuditLogConfig != nil {\n\t\t\t\t\t\tf.Value.Set(conf.AuditLogConfig.Level)\n\t\t\t\t\t}\n\t\t\t\tcase k.AuditLogConf.LogFormatter.Name:\n\t\t\t\t\tif conf != nil && conf.AuditLogConfig != nil {\n\t\t\t\t\t\tf.Value.Set(conf.AuditLogConfig.Formatter)\n\t\t\t\t\t}\n\t\t\t\tcase k.AuditLogConf.LogReportCaller.Name:\n\t\t\t\t\tif conf != nil && conf.AuditLogConfig != nil {\n\t\t\t\t\t\tf.Value.Set(strconv.FormatBool(conf.AuditLogConfig.SetReportCaller))\n\t\t\t\t\t}\n\t\t\t\tcase k.AuditLogConf.LogFileName.Name:\n\t\t\t\t\tif conf != nil && conf.AuditLogConfig != nil && conf.AuditLogConfig.RotationConfig != nil {\n\t\t\t\t\t\tf.Value.Set(conf.AuditLogConfig.RotationConfig.Filename)\n\t\t\t\t\t}\n\t\t\t\tcase k.AuditLogConf.LogMaxSize.Name:\n\t\t\t\t\tif conf != nil && conf.AuditLogConfig != nil && conf.AuditLogConfig.RotationConfig != nil {\n\t\t\t\t\t\tf.Value.Set(strconv.Itoa(conf.AuditLogConfig.RotationConfig.MaxSize))\n\t\t\t\t\t}\n\t\t\t\tcase k.AuditLogConf.LogMaxAge.Name:\n\t\t\t\t\tif conf != nil && conf.AuditLogConfig != nil && conf.AuditLogConfig.RotationConfig != nil {\n\t\t\t\t\t\tf.Value.Set(strconv.Itoa(conf.AuditLogConfig.RotationConfig.MaxAge))\n\t\t\t\t\t}\n\t\t\t\tcase k.AuditLogConf.LogMaxBackups.Name:\n\t\t\t\t\tif conf != nil && conf.AuditLogConfig != nil && conf.AuditLogConfig.RotationConfig != nil {\n\t\t\t\t\t\tf.Value.Set(strconv.Itoa(conf.AuditLogConfig.RotationConfig.MaxBackups))\n\t\t\t\t\t}\n\t\t\t\tcase k.AuditLogConf.LogCompress.Name:\n\t\t\t\t\tif conf != nil && conf.AuditLogConfig != nil && conf.AuditLogConfig.RotationConfig != nil {\n\t\t\t\t\t\tf.Value.Set(strconv.FormatBool(conf.AuditLogConfig.RotationConfig.Compress))\n\t\t\t\t\t}\n\t\t\t\tcase k.AuditLogConf.LogLocalTime.Name:\n\t\t\t\t\tif conf != nil && 
conf.AuditLogConfig != nil && conf.AuditLogConfig.RotationConfig != nil {\n\t\t\t\t\t\tf.Value.Set(strconv.FormatBool(conf.AuditLogConfig.RotationConfig.LocalTime))\n\t\t\t\t\t}\n\t\t\t\t\t// Asserter webhook configurations\n\t\t\t\tcase k.AsserterConf.AsserterEndpoint.Name:\n\t\t\t\t\tif conf != nil && conf.AsserterWebhookConfig != nil {\n\t\t\t\t\t\tf.Value.Set(conf.AsserterWebhookConfig.Endpoint)\n\t\t\t\t\t}\n\t\t\t\tcase k.AsserterConf.AsserterCaPath.Name:\n\t\t\t\t\tif conf != nil && conf.AsserterWebhookConfig != nil {\n\t\t\t\t\t\tf.Value.Set(conf.AsserterWebhookConfig.CACert)\n\t\t\t\t\t}\n\t\t\t\tcase k.AsserterConf.AsserterClientCertPath.Name:\n\t\t\t\t\tif conf != nil && conf.AsserterWebhookConfig != nil {\n\t\t\t\t\t\tf.Value.Set(conf.AsserterWebhookConfig.ClientCert)\n\t\t\t\t\t}\n\t\t\t\tcase k.AsserterConf.AsserterClientKeyPath.Name:\n\t\t\t\t\tif conf != nil && conf.AsserterWebhookConfig != nil {\n\t\t\t\t\t\tf.Value.Set(conf.AsserterWebhookConfig.ClientKey)\n\t\t\t\t\t}\n\t\t\t\tcase k.AsserterConf.AsserterClientTimeout.Name:\n\t\t\t\t\tif conf != nil && conf.AsserterWebhookConfig != nil {\n\t\t\t\t\t\tf.Value.Set(string(conf.AsserterWebhookConfig.HTTPTimeout))\n\t\t\t\t\t}\n\t\t\t\tdefault:\n\t\t\t\t\t//\n\t\t\t\t}\n\n\t\t\t\tkey, ok := storeParamsMap[f.Name]\n\t\t\t\tif ok {\n\t\t\t\t\tif conf != nil && conf.StoreConfig != nil && conf.StoreConfig.StoreProps != nil {\n\t\t\t\t\t\tif value, ok := conf.StoreConfig.StoreProps[key]; ok {\n\t\t\t\t\t\t\tswitch x := value.(type) {\n\t\t\t\t\t\t\tcase bool:\n\t\t\t\t\t\t\t\tf.Value.Set(strconv.FormatBool(value.(bool)))\n\t\t\t\t\t\t\tcase int:\n\t\t\t\t\t\t\t\tf.Value.Set(strconv.Itoa(value.(int)))\n\t\t\t\t\t\t\tcase string:\n\t\t\t\t\t\t\t\tf.Value.Set(value.(string))\n\t\t\t\t\t\t\tdefault:\n\t\t\t\t\t\t\t\tfmt.Printf(\"Unsupported type: %T\\n\", x)\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\n\t\t}\n\t})\n\n\tfmt.Printf(\"parameters:%v\\n\", k)\n}", "func (_Mevsky 
*MevskyFilterer) ParseTurnedOff(log types.Log) (*MevskyTurnedOff, error) {\n\tevent := new(MevskyTurnedOff)\n\tif err := _Mevsky.contract.UnpackLog(event, \"TurnedOff\", log); err != nil {\n\t\treturn nil, err\n\t}\n\tevent.Raw = log\n\treturn event, nil\n}", "func (BooleanLiteral) paramValueNode() {}", "func getBoolVal(input string) bool {\n\tinput = strings.ToLower(input)\n\tif input == \"yes\" || input == \"true\" {\n\t\treturn true\n\t}\n\treturn false\n}", "func Bool(key string, def bool) bool {\n\tif s := String(key, \"\"); s != \"\" {\n\t\tif d, err := strconv.ParseBool(s); err == nil {\n\t\t\treturn d\n\t\t} else {\n\t\t\tLog(key, err)\n\t\t}\n\t}\n\treturn def\n}", "func LoggingOnOffHandler(w http.ResponseWriter, r *http.Request) {\n\tInfo(\"Toggling Debug and Trace Logs\")\n\n\tresponseString := \"Changing Logging: \"\n\n\t// toggle trace\n\ttrace := r.URL.Query().Get(\"trace\")\n\ttrace = strings.ToLower(trace)\n\tif trace == \"on\" {\n\t\tInfo(\"Toggling Trace Logs on\")\n\t\tresponseString += \"Trace: on \"\n\t\tTraceLogger = log.New(os.Stdout, Tag+\" TRACE: \", loggerFlags)\n\t} else if trace == \"off\" {\n\t\tInfo(\"Toggling Trace Logs off\")\n\t\tresponseString += \"Trace: off \"\n\t\tTraceLogger = log.New(ioutil.Discard, Tag+\" TRACE: \", loggerFlags)\n\t}\n\n\t// toggle debug\n\tdebug := r.URL.Query().Get(\"debug\")\n\tdebug = strings.ToLower(debug)\n\tif debug == \"on\" {\n\t\tInfo(\"Toggling Debug Logs on\")\n\t\tresponseString += \"Debug: on \"\n\t\tDebugLogger = log.New(os.Stdout, Tag+\" DEBUG: \", loggerFlags)\n\t} else if debug == \"off\" {\n\t\tInfo(\"Toggling Debug Logs off \")\n\t\tresponseString += \"Debug: off \"\n\t\tDebugLogger = log.New(ioutil.Discard, Tag+\" DEBUG: \", loggerFlags)\n\t}\n\n\tw.Write([]byte(responseString))\n}", "func (f *flag) Bool() bool {\n\tvalue, err := strconv.ParseBool(f.String())\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\treturn value\n}", "func ParseBool(s string) (bool, error) {\n\tswitch s 
{\n\tdefault:\n\t\tb, err := strconv.ParseBool(s)\n\t\tif err != nil {\n\t\t\treturn b, errz.Err(err)\n\t\t}\n\t\treturn b, nil\n\tcase \"1\", \"yes\", \"Yes\", \"YES\", \"y\", \"Y\":\n\t\treturn true, nil\n\tcase \"0\", \"no\", \"No\", \"NO\", \"n\", \"N\":\n\t\treturn false, nil\n\t}\n}", "func ParseBoolean(s string) Boolean {\n\tif s == \"\" {\n\t\treturn NullBoolean()\n\t}\n\n\treturn NewBoolean(s == \"true\")\n}", "func getBoolValue(i *ini.File, section, key string, vdefault bool) bool {\n\treturn i.Section(section).Key(key).MustBool(vdefault)\n}", "func getIsOn(internal *DeviceUpdateMessage) bool {\n\ton, ok := internal.State[enums.PropOn]\n\tif !ok {\n\t\treturn getIsOnDeviceSpecific(internal)\n\t}\n\n\treturn on.(bool)\n}", "func (fOpenMode FileOpenMode) ParseString(\n valueString string,\n caseSensitive bool) (FileOpenMode, error) {\n\n ePrefix := \"FileOpenMode.ParseString() \"\n\n fOpenMode.checkInitializeMaps(false)\n\n result := FileOpenMode(0)\n\n lenValueStr := len(valueString)\n\n if strings.HasSuffix(valueString, \"()\") {\n valueString = valueString[0 : lenValueStr-2]\n lenValueStr -= 2\n }\n\n if lenValueStr < 3 {\n return result,\n fmt.Errorf(ePrefix+\n \"Input parameter 'valueString' is INVALID! valueString='%v' \", valueString)\n }\n\n var ok bool\n var idx int\n\n if caseSensitive {\n\n if !strings.HasPrefix(valueString, \"Mode\") {\n valueString = \"Mode\" + valueString\n }\n\n idx, ok = mFileOpenModeStringToInt[valueString]\n\n if !ok {\n return FileOpenMode(0),\n fmt.Errorf(ePrefix+\n \"'valueString' did NOT MATCH a FileOpenMode. valueString='%v' \", valueString)\n }\n\n result = FileOpenMode(idx)\n\n } else {\n\n valueString = strings.ToLower(valueString)\n\n if !strings.HasPrefix(valueString, \"mode\") {\n valueString = \"mode\" + valueString\n }\n\n idx, ok = mFileOpenModeLwrCaseStringToInt[valueString]\n\n if !ok {\n return FileOpenMode(0),\n fmt.Errorf(ePrefix+\n \"'valueString' did NOT MATCH a FileOpenMode. 
valueString='%v' \", valueString)\n }\n\n result =\n FileOpenMode(idx)\n }\n\n return result, nil\n}", "func ParseFlag(input string, flags []*Flag) (*Flag, string, error) {\n\tkeyvalue := strings.SplitN(input, \"=\", 2)\n\tkey := keyvalue[0]\n\tvalue := \"\"\n\tif len(keyvalue) == 2 {\n\t\tvalue = keyvalue[1]\n\t}\n\tif len(key) > 2 && key[1] != '-' {\n\t\treturn ParseFlag(key[:2]+\"=\"+key[2:], flags)\n\t}\n\tfor _, flag := range flags {\n\t\tif (flag.Char != \"\" && key == \"-\"+flag.Char) || key == \"--\"+flag.Name {\n\t\t\tif flag.HasValue {\n\t\t\t\tif value == \"\" {\n\t\t\t\t\treturn nil, \"\", errors.New(flag.String() + \" needs a value\")\n\t\t\t\t}\n\t\t\t\treturn flag, value, nil\n\t\t\t}\n\t\t\tif value != \"\" {\n\t\t\t\treturn nil, \"\", errors.New(flag.String() + \" does not take a value\")\n\t\t\t}\n\t\t\treturn flag, \"\", nil\n\t\t}\n\t}\n\treturn nil, \"\", nil\n}", "func (o BoolObj) Parse() ([][]string, error) {\n\treturn [][]string{\n\t\t{string(*o.Prefix)},\n\t\t{strconv.FormatBool(o.Val)},\n\t}, nil\n}", "func flagToBool(f string) bool {\n\tif f == \"true\" {\n\t\treturn true\n\t} else {\n\t\treturn false\n\t}\n}", "func (ctx *serverRequestContextImpl) GetBoolQueryParm(name string) (bool, error) {\n\tvar err error\n\n\tvalue := false\n\tparam := ctx.req.URL.Query().Get(name)\n\tif param != \"\" {\n\t\tvalue, err = strconv.ParseBool(strings.ToLower(param))\n\t\tif err != nil {\n\t\t\treturn false, caerrors.NewHTTPErr(400, caerrors.ErrUpdateConfigRemoveAff, \"Failed to correctly parse value of '%s' query parameter: %s\", name, err)\n\t\t}\n\t}\n\n\treturn value, nil\n}", "func GetBool(name string) bool {\n\t//params, err := url.ParseQuery(r.URL.RawQuery)\n\t//if err != nil {\n\t//\treturn false\n\t//}\n\n\t//value, ok := params[name]\n\t//if !ok {\n\t//\treturn false\n\t//}\n\n\tstrValue := strings.Join([]string{\"\", \"\"}, \"\")\n\tif strValue == \"\" {\n\t\treturn true\n\t}\n\n\tboolValue, err := strconv.ParseBool(strValue)\n\tif err != nil 
{\n\t\treturn false\n\t}\n\n\treturn boolValue\n}", "func (p *Parser) parseBoolean() asti.ExpressionI {\n\treturn &ast.Boolean{Token: p.curToken, Value: p.curTokenIs(tokentype.TRUE)}\n}", "func BoolValue(t bool) Value {\n\tif t {\n\t\treturn Value{Typ: ':', IntegerV: 1}\n\t}\n\treturn Value{Typ: ':', IntegerV: 0}\n}", "func (v Value) Bool(defaults ...bool) bool {\n\t// Return the first default if the raw is undefined\n\tif v.raw == nil {\n\t\t// Make sure there's at least one thing in the list\n\t\tdefaults = append(defaults, false)\n\t\treturn defaults[0]\n\t}\n\n\tswitch t := v.raw.(type) {\n\tcase string:\n\t\tb, err := strconv.ParseBool(t)\n\t\tif err != nil {\n\t\t\tslog.Panicf(\"failed to parse bool: %v\", err)\n\t\t}\n\t\treturn b\n\n\tcase bool:\n\t\treturn t\n\n\tdefault:\n\t\tslog.Panicf(\"%v is of unsupported type %v\", t, reflect.TypeOf(t).String())\n\t}\n\n\treturn false\n}", "func internalNewOptionalBoolValue(p *optionalBool) pflag.Value {\n\tp.present = false\n\treturn (*optionalBoolValue)(p)\n}", "func validateBoolParam(ctx *HttpContext, param *HttpParam) {\n\n\tparam.Raw = retrieveParamValue(ctx, param).(string)\n\n\tif len(param.Raw) == 0 && param.Required {\n\t\tappendInvalidErrorCode(ctx, param)\n\t\treturn\n\t}\n\n\tif len(param.Raw) == 0 { return }\n\n\tif val, err := strconv.ParseBool(param.Raw); err != nil {\n\t\tappendInvalidErrorCode(ctx, param)\n\t} else {\n\t\tparam.setPresentValue(val)\n\t}\n}", "func OptionalQueryParamBool(request *http.Request, name string) (null.Bool, IResponse) {\n\tvalueStr := request.URL.Query().Get(name)\n\tif valueStr == \"\" {\n\t\treturn null.Bool{}, nil\n\t}\n\n\tvalue, err := strconv.ParseBool(valueStr)\n\tif err != nil {\n\t\treturn null.Bool{}, BadRequest(request, \"Invalid query param %s (value: '%s'): %s\", name, valueStr, err)\n\t}\n\n\treturn null.BoolFrom(value), nil\n}", "func (m *DeviceHealthScriptBooleanParameter) 
GetFieldDeserializers()(map[string]func(i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode)(error)) {\n res := m.DeviceHealthScriptParameter.GetFieldDeserializers()\n res[\"defaultValue\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetBoolValue()\n if err != nil {\n return err\n }\n if val != nil {\n m.SetDefaultValue(val)\n }\n return nil\n }\n return res\n}", "func boolValue(s string) bool {\n\tswitch s {\n\tcase \"yes\", \"true\":\n\t\treturn true\n\t}\n\n\treturn false\n}", "func FlagParse() (err error) {\n\tConfigFlagsRegister()\n\tflag.Parse()\n\treturn ConfigFlagsProcess()\n}", "func (w waf) Parse(ing *extensions.Ingress) (interface{}, error) {\n\ts, err := parser.GetStringAnnotation(wafAnn, ing)\n\tif err != nil {\n\t\treturn Config{}, nil\n\t}\n\tif !wafAnnRegex.MatchString(s) {\n\t\tglog.Warningf(\"ignoring invalid WAF option '%v' on %v/%v\", s, ing.Namespace, ing.Name)\n\t\treturn Config{}, nil\n\t}\n\treturn Config{\n\t\tMode: s,\n\t}, nil\n}", "func (parser *Parser) parsePredefined(resolvedInput string, targetType reflect.Type) (interface{}, error) {\n\tvar result interface{}\n\tvar err error\n\tswitch targetType.Kind() {\n\tcase reflect.Bool:\n\t\tresult, err = strconv.ParseBool(resolvedInput)\n\tcase reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:\n\t\tresult, err = strconv.ParseInt(resolvedInput, 0, targetType.Bits())\n\tcase reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:\n\t\tresult, err = strconv.ParseUint(resolvedInput, 0, targetType.Bits())\n\tcase reflect.Float32, reflect.Float64:\n\t\tresult, err = strconv.ParseFloat(resolvedInput, targetType.Bits())\n\tcase reflect.String:\n\t\tresult, err = resolvedInput, nil\n\tdefault:\n\t\tresult, err = nil, errors.New(\"\")\n\t}\n\tif err == nil {\n\t\treturn reflect.ValueOf(result).Convert(targetType).Interface(), nil\n\t}\n\treturn nil, toErrorf(\"Could not 
convert '%v' to type '%v'\", resolvedInput, targetType.String())\n}", "func parseBoolEx(repr string) (value bool, err error) {\n\tif value, err = strconv.ParseBool(repr); err != nil {\n\t\tswitch repr {\n\t\tcase \"y\", \"yes\", \"YES\", \"Yes\":\n\t\t\treturn true, nil\n\t\tcase \"n\", \"no\", \"NO\", \"No\":\n\t\t\treturn false, nil\n\t\t}\n\t}\n\n\treturn\n}", "func parseAutoRemoveParam(req *http.Request) (bool, error) {\n\tc := req.URL.Query().Get(\"autoremove\")\n\tvar autoRemove bool\n\tvar err error\n\n\tif c != \"\" {\n\t\tautoRemove, err = strconv.ParseBool(c)\n\t\tif err != nil {\n\t\t\treturn autoRemove, errAutoRemoveNotBool\n\t\t}\n\t}\n\n\treturn autoRemove, nil\n}", "func (c *Validator) GetBool(key string, def ...bool) (bool, error) {\n\tstrv := c.Input.Query(key)\n\tif len(strv) == 0 && len(def) > 0 {\n\t\treturn def[0], nil\n\t}\n\treturn strconv.ParseBool(strv)\n}", "func BoolConverter(str string, target reflect.Value) (ok bool) {\n\tb, err := strconv.ParseBool(str)\n\tif err != nil {\n\t\treturn\n\t}\n\n\ttarget.SetBool(b)\n\treturn true\n}", "func flagValue(flags []string, name string) string {\n\tisBool := booleanFlag(name)\n\tfor i, arg := range flags {\n\t\tif val := strings.TrimPrefix(arg, name+\"=\"); val != arg {\n\t\t\t// -name=value\n\t\t\treturn val\n\t\t}\n\t\tif arg == name { // -name ...\n\t\t\tif isBool {\n\t\t\t\t// -name, equivalent to -name=true\n\t\t\t\treturn \"true\"\n\t\t\t}\n\t\t\tif i+1 < len(flags) {\n\t\t\t\t// -name value\n\t\t\t\treturn flags[i+1]\n\t\t\t}\n\t\t}\n\t}\n\treturn \"\"\n}", "func (formatter) fBool(v *types.RecordValue) *types.RecordValue {\n\tif v.Value != strBoolTrue {\n\t\tv.Value = \"\"\n\t}\n\n\treturn v\n}", "func parseBoolFromString(content string, aggErr *AggregateError) bool {\n result, err := strconv.ParseBool(content)\n if err != nil {\n aggErr.Append(err)\n }\n return result\n}", "func ParseBool(str string) bool {\n\tb, _ := strconv.ParseBool(str)\n\treturn b\n}", "func QueryParamBool(request 
*http.Request, name string) (bool, IResponse) {\n\tvalueStr := request.URL.Query().Get(name)\n\tvalue, err := strconv.ParseBool(valueStr)\n\tif err != nil {\n\t\treturn false, BadRequest(request, \"Invalid query param %s (value: '%s'): %s\", name, valueStr, err)\n\t}\n\n\treturn value, nil\n}", "func StringToBool(s string, def bool) bool {\n\tv, err := strconv.ParseBool(s)\n\tif err != nil {\n\t\tlog.Warnf(\"failed to parse bool value: %s\", s)\n\t\treturn def\n\t}\n\treturn v\n}", "func ParseFlagString(args []string) (string, int, error) {\n\tif strings.ContainsAny(args[0], \"= \") {\n\t\tparts := strings.SplitN(args[0], \"=\", 2)\n\t\tif len(parts) == 1 {\n\t\t\tparts = strings.SplitN(args[0], \" \", 2)\n\t\t}\n\t\tif len(parts) == 2 {\n\t\t\tfor _, c := range []string{`'`, `\"`} {\n\t\t\t\tif parts[1][:1] == c && parts[1][len(parts[1])-1:] == c {\n\t\t\t\t\treturn parts[1][1 : len(parts[1])-1], 0, nil\n\t\t\t\t}\n\t\t\t}\n\t\t\treturn parts[1], 0, nil\n\t\t}\n\t\treturn \"\", 0, fmt.Errorf(\"unable to split flag and value from string: [%s]\", args[0])\n\t}\n\tif len(args) > 1 {\n\t\treturn args[1], 1, nil\n\t}\n\treturn \"\", 0, fmt.Errorf(\"no value provided after %s flag\", args[0])\n}", "func NewSchemaChangerModeFromString(val string) (_ NewSchemaChangerMode, ok bool) {\n\tswitch strings.ToUpper(val) {\n\tcase \"OFF\":\n\t\treturn UseNewSchemaChangerOff, true\n\tcase \"ON\":\n\t\treturn UseNewSchemaChangerOn, true\n\tcase \"UNSAFE_ALWAYS\":\n\t\treturn UseNewSchemaChangerUnsafeAlways, true\n\tdefault:\n\t\treturn 0, false\n\t}\n}", "func (m *DeviceHealthScriptBooleanParameter) GetDefaultValue()(*bool) {\n val, err := m.GetBackingStore().Get(\"defaultValue\")\n if err != nil {\n panic(err)\n }\n if val != nil {\n return val.(*bool)\n }\n return nil\n}", "func flagValue(flags []string, name string) string {\n\tfor i, arg := range flags {\n\t\tif val := strings.TrimPrefix(arg, name+\"=\"); val != arg {\n\t\t\t// -name=value\n\t\t\treturn val\n\t\t}\n\t\tif arg 
== name {\n\t\t\tif i+1 < len(flags) {\n\t\t\t\tif val := flags[i+1]; !strings.HasPrefix(val, \"-\") {\n\t\t\t\t\t// -name value\n\t\t\t\t\treturn flags[i+1]\n\t\t\t\t}\n\t\t\t}\n\t\t\t// -name, equivalent to -name=true\n\t\t\treturn \"true\"\n\t\t}\n\t}\n\treturn \"\"\n}", "func ParseValue(in string) (string, []string) {\n\top, field := parseOp(in)\n\treturn op, parseDelimited(field, ListDelimiter)\n}", "func (c Controller) GetBool(key string, def ...bool) bool {\n\tif v := string(c.QueryArgs().Peek(key)); v != \"\" {\n\t\ttmp, _ := strconv.ParseBool(v)\n\t\treturn tmp\n\t}\n\tif len(def) > 0 {\n\t\treturn def[0]\n\t}\n\treturn false\n}", "func (_Mevsky *MevskyFilterer) ParseTurnedOn(log types.Log) (*MevskyTurnedOn, error) {\n\tevent := new(MevskyTurnedOn)\n\tif err := _Mevsky.contract.UnpackLog(event, \"TurnedOn\", log); err != nil {\n\t\treturn nil, err\n\t}\n\tevent.Raw = log\n\treturn event, nil\n}", "func Bool(v interface{}) *bool {\n\tswitch v.(type) {\n\tcase bool:\n\t\tval := v.(bool)\n\t\treturn &val\n\tcase int, uint, int32, int16, int8, int64, uint32, uint16, uint8, uint64, float32, float64:\n\t\tval, err := strconv.Atoi(fmt.Sprintf(\"%v\", v))\n\t\tif err != nil {\n\t\t\texception.Err(err, 500).Ctx(M{\"v\": v}).Throw()\n\t\t}\n\t\tres := false\n\t\tif val != 0 {\n\t\t\tres = true\n\t\t}\n\t\treturn &res\n\tdefault:\n\t\tval := fmt.Sprintf(\"%v\", v)\n\t\tres := false\n\t\tif val != \"\" {\n\t\t\tres = true\n\t\t}\n\t\treturn &res\n\t}\n}", "func (c *Configure) ReadBoolValue(key string, def bool) bool {\n\tv := c.Get(key)\n\tif v == \"\" {\n\t\treturn def\n\t}\n\tvalue, err := strconv.ParseBool(v )\n\tif err != nil {\n\t\treturn false\n\t}\n\treturn value\n\n}", "func GetBool(v interface{}) bool {\n\tswitch result := v.(type) {\n\tcase bool:\n\t\treturn result\n\tdefault:\n\t\tif d := GetString(v); d != \"\" {\n\t\t\tvalue, _ := strconv.ParseBool(d)\n\t\t\treturn value\n\t\t}\n\t}\n\treturn false\n}", "func MakeBoolOrDefault(in *bool, defaultValue bool) 
*google_protobuf.BoolValue {\n\tif in == nil {\n\t\treturn &google_protobuf.BoolValue{\n\t\t\tValue: defaultValue,\n\t\t}\n\t}\n\n\treturn &google_protobuf.BoolValue{\n\t\tValue: *in,\n\t}\n}", "func GetDefaultBool(in bool) bool {\n\treturn in\n}", "func ParseBool(key string) (bool, error) {\n\tval := os.Getenv(key)\n\n\tif val == \"\" {\n\t\treturn false, notFoundError(key, \"ParseBool\")\n\t}\n\n\tparsedVal, err := strconv.ParseBool(val)\n\tif err != nil {\n\t\treturn false, &EnvError{\"ParseBool\", key, err}\n\t}\n\n\treturn parsedVal, err\n}", "func ParseOp(opString string) (Op, error) {\n\tvar (\n\t\tempty Op\n\t\top Op\n\t)\n\toperationMatch := operationPattern.FindStringSubmatch(opString)\n\tif len(operationMatch) != 2 {\n\t\treturn empty, errors.New(\"operation should surrounded by {}\")\n\t}\n\n\topIndexMatch := opIndexPattern.FindStringSubmatch(operationMatch[1])\n\tif len(opIndexMatch) == 2 {\n\t\topIndex, err := strconv.Atoi(strings.Trim(opIndexMatch[1], \" \"))\n\t\tif err != nil {\n\t\t\treturn empty, err\n\t\t}\n\t\top.Index = IntOptional{opIndex}\n\t}\n\n\topTimeMatch := opTimePattern.FindStringSubmatch(operationMatch[1])\n\tif len(opTimeMatch) == 2 {\n\t\topTime, err := strconv.Atoi(strings.Trim(opTimeMatch[1], \" \"))\n\t\tif err != nil {\n\t\t\treturn empty, err\n\t\t}\n\t\top.Time = time.Unix(0, int64(opTime))\n\t}\n\n\topProcessMatch := opProcessPattern.FindStringSubmatch(operationMatch[1])\n\tif len(opProcessMatch) == 2 {\n\t\tif opProcessMatch[1] == \":nemesis\" {\n\t\t\top.Process.Set(NemesisProcessMagicNumber)\n\t\t} else {\n\t\t\topProcess, err := strconv.Atoi(strings.Trim(opProcessMatch[1], \" \"))\n\t\t\tif err != nil {\n\t\t\t\treturn empty, err\n\t\t\t}\n\t\t\top.Process.Set(opProcess)\n\t\t}\n\t}\n\n\topTypeMatch := opTypePattern.FindStringSubmatch(operationMatch[1])\n\tif len(opTypeMatch) != 2 {\n\t\treturn empty, errors.New(\"operation should have :type field\")\n\t}\n\tswitch opTypeMatch[1] {\n\tcase \":invoke\":\n\t\top.Type = 
OpTypeInvoke\n\tcase \":ok\":\n\t\top.Type = OpTypeOk\n\tcase \":fail\":\n\t\top.Type = OpTypeFail\n\tcase \":info\":\n\t\top.Type = OpTypeInfo\n\tdefault:\n\t\treturn empty, errors.Errorf(\"invalid type, %s\", opTypeMatch[1])\n\t}\n\n\topValueMatch := opValuePattern.FindStringSubmatch(operationMatch[1])\n\t// can values be empty?\n\tif len(opValueMatch) == 2 {\n\t\tmopContent := strings.Trim(opValueMatch[1], \" \")\n\t\tif mopContent != \"\" {\n\t\t\tmopMatches := mopPattern.FindAllStringSubmatch(mopContent, -1)\n\t\t\tfor _, mopMatch := range mopMatches {\n\t\t\t\tif len(mopMatch) != 5 {\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t\tkey := strings.Trim(mopMatch[3], \" \")\n\t\t\t\tvar value MopValueType\n\t\t\t\tmopValueMatches := mopValuePattern.FindStringSubmatch(mopMatch[4])\n\t\t\t\tif len(mopValueMatches) == 2 {\n\t\t\t\t\tvalues := []int{}\n\t\t\t\t\ttrimVal := strings.Trim(mopValueMatches[1], \"[\")\n\t\t\t\t\ttrimVal = strings.Trim(trimVal, \"]\")\n\t\t\t\t\tif trimVal != \"\" {\n\t\t\t\t\t\tfor _, valStr := range strings.Split(trimVal, \" \") {\n\t\t\t\t\t\t\tval, err := strconv.Atoi(valStr)\n\t\t\t\t\t\t\tif err != nil {\n\t\t\t\t\t\t\t\treturn empty, err\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\tvalues = append(values, val)\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t\tvalue = values\n\t\t\t\t} else {\n\t\t\t\t\ttrimVal := strings.Trim(mopMatch[4], \" \")\n\t\t\t\t\tif trimVal == \"nil\" {\n\t\t\t\t\t\tvalue = nil\n\t\t\t\t\t} else {\n\t\t\t\t\t\tval, err := strconv.Atoi(trimVal)\n\t\t\t\t\t\tif err != nil {\n\t\t\t\t\t\t\treturn empty, err\n\t\t\t\t\t\t}\n\t\t\t\t\t\tvalue = val\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\tvar mop Mop\n\t\t\t\tswitch mopMatch[2] {\n\t\t\t\tcase \"append\":\n\t\t\t\t\tmop = Append(key, value.(int))\n\t\t\t\tcase \"r\":\n\t\t\t\t\tif value != nil {\n\t\t\t\t\t\tmop = Read(key, value.([]int))\n\t\t\t\t\t} else {\n\t\t\t\t\t\tmop = Read(key, nil)\n\t\t\t\t\t}\n\t\t\t\tdefault:\n\t\t\t\t\tpanic(\"unreachable\")\n\t\t\t\t}\n\t\t\t\tif op.Value == nil 
{\n\t\t\t\t\tdestArray := make([]Mop, 0)\n\t\t\t\t\top.Value = &destArray\n\t\t\t\t}\n\t\t\t\t*op.Value = append(*op.Value, mop)\n\t\t\t}\n\t\t}\n\t}\n\n\treturn op, nil\n}", "func (fm *FieldModelFlagsSimple) GetValue(value *FlagsSimple) error {\n return fm.GetValueDefault(value, FlagsSimple(0))\n}", "func updateBoolFromFlag(cmd *cobra.Command, v *bool, key string) {\n\tif cmd.Flags().Changed(key) {\n\t\t*v = viper.GetBool(key)\n\t}\n}", "func ParseStatus(state string) Status {\n\tswitch strings.ToUpper(state) {\n\tcase \"OUTAGE\":\n\t\treturn OUTAGE\n\tcase \"MAJOR\":\n\t\treturn MAJOR\n\tcase \"MINOR\":\n\t\treturn MINOR\n\tcase \"OK\":\n\t\treturn OK\n\tdefault:\n\t\treturn OUTAGE\n\t}\n}", "func (f *Flag) defaultIsZeroValue() bool {\n\tswitch f.Value.(type) {\n\tcase boolFlag:\n\t\treturn f.DefValue == \"false\"\n\tcase *durationValue:\n\t\t// Beginning in Go 1.7, duration zero values are \"0s\"\n\t\treturn f.DefValue == \"0\" || f.DefValue == \"0s\"\n\tcase *intValue, *int8Value, *int32Value, *int64Value, *uintValue, *uint8Value, *uint16Value, *uint32Value, *uint64Value, *countValue, *float32Value, *float64Value:\n\t\treturn f.DefValue == \"0\"\n\tcase *stringValue:\n\t\treturn f.DefValue == \"\"\n\tcase *ipValue, *ipMaskValue, *ipNetValue:\n\t\treturn f.DefValue == \"<nil>\"\n\tcase *intSliceValue, *stringSliceValue, *stringArrayValue:\n\t\treturn f.DefValue == \"[]\"\n\tdefault:\n\t\tswitch f.Value.String() {\n\t\tcase \"false\":\n\t\t\treturn true\n\t\tcase \"<nil>\":\n\t\t\treturn true\n\t\tcase \"\":\n\t\t\treturn true\n\t\tcase \"0\":\n\t\t\treturn true\n\t\t}\n\t\treturn false\n\t}\n}", "func parseSortFlag(flagName string, flags *pflag.FlagSet) v1.SortOrder {\n\tvalue := \"\"\n\tif v, err := flags.GetString(flagName); err == nil {\n\t\tvalue = strings.ToLower(v)\n\t}\n\n\tswitch value {\n\tcase \"asc\":\n\t\treturn v1.SortOrder_Asc\n\tcase \"desc\":\n\t\treturn v1.SortOrder_Desc\n\tdefault:\n\t\treturn v1.SortOrder_Undefined\n\t}\n}", "func (me 
TviewRefreshModeEnumType) IsOnRequest() bool { return me == \"onRequest\" }", "func Parse(m FlagMap) {\n\tflag.Parse()\n\tflag.VisitAll(func(f *flag.Flag) {\n\t\tmapping := Flag{}\n\t\tif s, ok := m[f.Name]; ok {\n\t\t\tif len(s.Name) > 0 {\n\t\t\t\tmapping.Name = s.Name\n\t\t\t}\n\t\t\tif s.Filter != nil {\n\t\t\t\tmapping.Filter = s.Filter\n\t\t\t}\n\t\t}\n\t\tif len(mapping.Name) == 0 {\n\t\t\tmapping.Name = strings.ToUpper(strings.Replace(f.Name, \"-\", \"_\", -1))\n\t\t}\n\t\tif mapping.Filter == nil {\n\t\t\tmapping.Filter = func(s string) string { return s }\n\t\t}\n\t\tif v := os.Getenv(mapping.Name); len(v) > 0 {\n\t\t\tf.Value.Set(mapping.Filter(v))\n\t\t}\n\t})\n}", "func (fOpenType FileOpenType) ParseString(\n valueString string,\n caseSensitive bool) (FileOpenType, error) {\n\n ePrefix := \"FileOpenType.ParseString() \"\n\n fOpenType.checkInitializeMaps(false)\n\n result := FileOpenType(0)\n\n lenValueStr := len(valueString)\n\n if strings.HasSuffix(valueString, \"()\") {\n valueString = valueString[0 : lenValueStr-2]\n lenValueStr -= 2\n }\n\n if lenValueStr < 3 {\n return result,\n fmt.Errorf(ePrefix+\n \"Input parameter 'valueString' is INVALID! valueString='%v' \", valueString)\n }\n\n var ok bool\n var idx int\n\n if caseSensitive {\n\n if !strings.HasPrefix(valueString, \"Type\") {\n valueString = \"Type\" + valueString\n }\n\n idx, ok = mFileOpenTypeStringToInt[valueString]\n\n if !ok {\n return FileOpenType(0),\n fmt.Errorf(ePrefix+\n \"'valueString' did NOT MATCH a FileOpenType. valueString='%v' \", valueString)\n }\n\n result = FileOpenType(idx)\n\n } else {\n\n valueString = strings.ToLower(valueString)\n\n if !strings.HasPrefix(valueString, \"type\") {\n valueString = \"type\" + valueString\n }\n\n idx, ok = mFileOpenTypeLwrCaseStringToInt[valueString]\n\n if !ok {\n return FileOpenType(0),\n fmt.Errorf(ePrefix+\n \"'valueString' did NOT MATCH a FileOpenType. 
valueString='%v' \", valueString)\n }\n\n result =\n FileOpenType(idx)\n }\n\n return result, nil\n}" ]
[ "0.56482726", "0.5562257", "0.5544399", "0.5209864", "0.5193872", "0.5183491", "0.51610637", "0.51105624", "0.5067777", "0.50530106", "0.5005197", "0.4955115", "0.49473256", "0.49443442", "0.4923302", "0.49199948", "0.49097046", "0.48919663", "0.48528236", "0.4831881", "0.47686684", "0.47660202", "0.4759802", "0.475832", "0.4756866", "0.46989515", "0.4688759", "0.46885583", "0.46858177", "0.46536285", "0.46514457", "0.46509397", "0.46480742", "0.464771", "0.46458536", "0.4642464", "0.46230674", "0.46128467", "0.46044528", "0.46011853", "0.4594015", "0.45860073", "0.4547251", "0.45416772", "0.4538139", "0.449131", "0.4484553", "0.44539034", "0.44317263", "0.44245106", "0.4420204", "0.44089636", "0.43926036", "0.43854716", "0.4379263", "0.43739718", "0.4371565", "0.43544528", "0.43489546", "0.4347758", "0.43392053", "0.43382442", "0.43281335", "0.43276453", "0.43273976", "0.43257824", "0.431898", "0.43188292", "0.4302453", "0.43015116", "0.42956987", "0.42925814", "0.42779908", "0.42687303", "0.4268698", "0.42648664", "0.42645562", "0.42643517", "0.42630795", "0.42544964", "0.4250056", "0.42471611", "0.42425978", "0.4242403", "0.42295763", "0.42226705", "0.42206922", "0.42103982", "0.4194116", "0.41865808", "0.41810435", "0.4179507", "0.41784546", "0.41782928", "0.4166169", "0.41441712", "0.41425732", "0.41412678", "0.4136381", "0.41333908" ]
0.87817115
0
IsGroupMember returns whether currently logged user is a member of a group
IsGroupMember возвращает, является ли текущий авторизованный пользователь членом группы
func IsGroupMember(gid int) (bool, error) { groups, err := os.Getgroups() if err != nil { return false, trace.ConvertSystemError(err) } for _, group := range groups { if group == gid { return true, nil } } return false, nil }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (s *GroupService) isGroupMember(groupId, userId string) (bool, error) {\n\tvar condition = map[string]interface{}{\n\t\t\"groupId\": groupId,\n\t\t\"userId\": userId,\n\t}\n\tmemberProfile, err := groupRepo.FindOneMember(condition)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\treturn memberProfile == nil, nil\n}", "func (g *Group) IsMember(userID uint) bool {\n\tfor _, u := range g.Users {\n\t\tif u.UserID == userID {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func IsMember(claims jwtgo.Claims, groups []string, scopes []string) bool {\n\tmapClaims, err := MapClaims(claims)\n\tif err != nil {\n\t\treturn false\n\t}\n\t// O(n^2) loop\n\tfor _, userGroup := range GetGroups(mapClaims, scopes) {\n\t\tfor _, group := range groups {\n\t\t\tif userGroup == group {\n\t\t\t\treturn true\n\t\t\t}\n\t\t}\n\t}\n\treturn false\n}", "func (mv *MembershipValidator) IsInGroup(\n\tpublicKey *operator.PublicKey,\n) bool {\n\taddress, err := mv.signing.PublicKeyToAddress(publicKey)\n\tif err != nil {\n\t\tmv.logger.Errorf(\"cannot convert public key to chain address: [%v]\", err)\n\t\treturn false\n\t}\n\n\t_, isInGroup := mv.members[address.String()]\n\treturn isInGroup\n}", "func (m *Manager) IsMember(globalID, username string) (ismember bool, err error) {\n\tmatches, err := m.collection.Find(bson.M{\"globalid\": globalID, \"members\": username}).Count()\n\tismember = (matches > 0)\n\treturn\n}", "func (c *client) IsMember(org, user string) (bool, error) {\n\tc.log(\"IsMember\", org, user)\n\tif org == user {\n\t\t// Make it possible to run a couple of plugins on personal repos.\n\t\treturn true, nil\n\t}\n\tcode, err := c.request(&request{\n\t\tmethod: http.MethodGet,\n\t\tpath: fmt.Sprintf(\"/orgs/%s/members/%s\", org, user),\n\t\torg: org,\n\t\texitCodes: []int{204, 404, 302},\n\t}, nil)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\tif code == 204 {\n\t\treturn true, nil\n\t} else if code == 404 {\n\t\treturn false, nil\n\t} else if code == 302 
{\n\t\treturn false, fmt.Errorf(\"requester is not %s org member\", org)\n\t}\n\t// Should be unreachable.\n\treturn false, fmt.Errorf(\"unexpected status: %d\", code)\n}", "func (fc *fakeClient) IsMember(org, user string) (bool, error) {\n\tfor _, m := range fc.orgMembers[org] {\n\t\tif m == user {\n\t\t\treturn true, nil\n\t\t}\n\t}\n\treturn false, nil\n}", "func (c *Client) IsMember(org, user string) (bool, error) {\n\tresp, err := c.request(http.MethodGet, fmt.Sprintf(\"%s/orgs/%s/members/%s\", c.base, org, user), nil)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\tdefer resp.Body.Close()\n\tif resp.StatusCode == 204 {\n\t\treturn true, nil\n\t} else if resp.StatusCode == 404 {\n\t\treturn false, nil\n\t} else if resp.StatusCode == 302 {\n\t\treturn false, fmt.Errorf(\"requester is not %s org member\", org)\n\t}\n\treturn false, fmt.Errorf(\"unexpected status: %s\", resp.Status)\n}", "func (m *Member) IsMember() bool { return m.Role == MemberRoleMember }", "func (u *UserInfoLDAPSource) IsgroupmemberorNot(groupname string, username string) (bool, string, error) {\n\n\tAllUsersinGroup, GroupmanagedbyValue, err := u.GetusersofaGroup(groupname)\n\tif err != nil {\n\t\tlog.Println(err)\n\t\treturn false, GroupmanagedbyValue, err\n\t}\n\tfor _, entry := range AllUsersinGroup {\n\t\tif entry == username {\n\t\t\treturn true, GroupmanagedbyValue, nil\n\t\t}\n\t}\n\treturn false, GroupmanagedbyValue, nil\n}", "func (c *Settings) IsMember(teams []*Team) bool {\n\tfor _, team := range teams {\n\t\tif c.Orgs[team.Login] {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func (g *Group) IsMyGroup(u *User) bool {\n\n\tif g.IsAdmin(u) {\n\t\treturn true\n\t}\n\n\tfor _, user := range g.Users {\n\t\tif user == u.Username {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func (i *Installation) IsInGroup() bool {\n\treturn i.GroupID != nil\n}", "func (s *SyncStorage) IsMember(ns string, group string, member interface{}) (bool, error) {\n\tretVal, err := 
s.getDbBackend(ns).SIsMember(getNsPrefix(ns)+group, member)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\treturn retVal, err\n}", "func (ctx *TestContext) UserIsAMemberOfTheGroup(user, group string) error {\n\terr := ctx.ThereIsAUserWith(getParameterString(map[string]string{\n\t\t\"group_id\": user,\n\t\t\"user\": user,\n\t}))\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn ctx.GroupIsAChildOfTheGroup(user, group)\n}", "func (htGroup *HTGroup) IsUserInGroup(user string, group string) bool {\n\tgroups := htGroup.GetUserGroups(user)\n\treturn containsGroup(groups, group)\n}", "func UserInGroup(u *user.User, g *Group) (bool, error) {\n\treturn userInGroup(u, g)\n}", "func (ctx *TestContext) UserIsAManagerOfTheGroupAndCanWatchItsMembers(user, group string) error {\n\treturn ctx.UserIsAManagerOfTheGroupWith(getParameterString(map[string]string{\n\t\t\"id\": group,\n\t\t\"user_id\": user,\n\t\t\"name\": group,\n\t\t\"can_watch_members\": strTrue,\n\t}))\n}", "func (app *App) GroupContainsMember(ctx context.Context, groupName, uid string) (bool, error) {\n\tif uid == \"\" {\n\t\treturn false, constants.ErrEmptyUID\n\t}\n\treturn app.groups.GroupContainsMember(ctx, groupName, uid)\n}", "func (c *EtcdGroupService) GroupContainsMember(ctx context.Context, groupName, uid string) (bool, error) {\n\tctxT, cancel := context.WithTimeout(ctx, transactionTimeout)\n\tdefer cancel()\n\tetcdRes, err := clientInstance.Txn(ctxT).\n\t\tIf(clientv3.Compare(clientv3.CreateRevision(groupKey(groupName)), \">\", 0)).\n\t\tThen(clientv3.OpGet(memberKey(groupName, uid), clientv3.WithCountOnly())).\n\t\tCommit()\n\n\tif err != nil {\n\t\treturn false, err\n\t}\n\tif !etcdRes.Succeeded {\n\t\treturn false, constants.ErrGroupNotFound\n\t}\n\treturn etcdRes.Responses[0].GetResponseRange().GetCount() > 0, nil\n}", "func (u *User) HasGroup(group string) bool {\r\n\tfor _, g := range u.Groups {\r\n\t\tif g == group {\r\n\t\t\treturn true\r\n\t\t}\r\n\t}\r\n\treturn false\r\n}", "func (b 
*Handler) IsMember(pubKeyBLS []byte, round uint64, step uint8, maxSize int) bool {\n\treturn b.Committee(round, step, maxSize).IsMember(pubKeyBLS)\n}", "func isMember(role string) bool {\n\t// Possible values are \"COLLABORATOR\", \"CONTRIBUTOR\", \"FIRST_TIMER\", \"FIRST_TIME_CONTRIBUTOR\", \"MEMBER\", \"OWNER\", or \"NONE\".\n\tswitch role {\n\tcase \"COLLABORATOR\", \"MEMBER\", \"OWNER\":\n\t\treturn true\n\tdefault:\n\t\treturn false\n\t}\n}", "func (c *Chat) IsGroup() bool {\n\treturn c.Type == \"group\"\n}", "func (a *Account) IsMember(userID uuid.UUID) bool {\n\tfor _, value := range a.AccountUser {\n\t\tif value.ID == userID {\n\t\t\treturn true\n\t\t}\n\t}\n\n\treturn false\n}", "func (set Set) IsMember(ctx context.Context, member string) (bool, error) {\n\treq := newRequest(\"*3\\r\\n$9\\r\\nSISMEMBER\\r\\n$\")\n\treq.addString2(set.name, member)\n\tres, err := set.c.cmdInt(ctx, req)\n\treturn res == 1, err\n}", "func (m *Message) IsGroup() bool {\n\treturn m.From.ID != m.Chat.ID\n}", "func (g *slimGrouping) IsIn(m types.Member, grp types.Group) (bool, error) {\n\tgroups, err := g.GroupsOf(m)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\n\t_, ok := groups[grp]\n\treturn ok, nil\n}", "func (_AuthContract *AuthContractCaller) IsMember(opts *bind.CallOpts, arg0 common.Address) (bool, error) {\n\tvar (\n\t\tret0 = new(bool)\n\t)\n\tout := ret0\n\terr := _AuthContract.contract.Call(opts, out, \"isMember\", arg0)\n\treturn *ret0, err\n}", "func (_AuthContract *AuthContractCallerSession) IsMember(arg0 common.Address) (bool, error) {\n\treturn _AuthContract.Contract.IsMember(&_AuthContract.CallOpts, arg0)\n}", "func (c *Channel) IsMember(cl *Client) bool {\n\tfor _, v := range c.Members {\n\t\tif v == cl {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func (s *Server) userHasGroupByName(phone string, grpNameOrAlias string) bool {\n\n\tfor _, v := range s.groups {\n\t\tif v.AdminPhone == phone {\n\t\t\tif v.Name == grpNameOrAlias || v.Alias == 
grpNameOrAlias {\n\t\t\t\treturn true\n\t\t\t}\n\t\t\treturn false\n\t\t}\n\t}\n\n\treturn false\n}", "func (t *TeamsService) IsUserMember(teamID, userID int) (bool, *simpleresty.Response, error) {\n\tisMember := false\n\turlStr := t.client.http.RequestURL(\"/team/%d/user/%d\", teamID, userID)\n\n\t// Set the correct authentication header\n\tt.client.setAuthTokenHeader(t.client.accountAccessToken)\n\n\t// Execute the request\n\tresponse, getErr := t.client.http.Get(urlStr, nil, nil)\n\tif getErr != nil {\n\t\treturn false, response, getErr\n\t}\n\n\t// Per API documentation, the response returns a 200 if user belongs to the team\n\tif response.StatusCode == 200 {\n\t\tisMember = true\n\t}\n\n\treturn isMember, response, nil\n}", "func (o *Permissao) GetIsGroup() bool {\n\tif o == nil || o.IsGroup == nil {\n\t\tvar ret bool\n\t\treturn ret\n\t}\n\treturn *o.IsGroup\n}", "func (_AuthContract *AuthContractSession) IsMember(arg0 common.Address) (bool, error) {\n\treturn _AuthContract.Contract.IsMember(&_AuthContract.CallOpts, arg0)\n}", "func VerifyUserInGroup(token string, groupName string) bool {\n\tvar payload string\n\tpayload = strings.Split(token, \".\")[1]\n\tsDec, err := base64.RawStdEncoding.DecodeString(payload)\n\tif err != nil {\n\t\tfmt.Println(\"payload:\", payload)\n\t\tfmt.Println(\"error:\", err)\n\t\treturn false\n\t}\n\tvar result map[string]interface{}\n\tjson.Unmarshal([]byte(sDec), &result)\n\n\tif groups, ok := result[\"cognito:groups\"]; ok {\n\t\tfor _, v := range groups.([]interface{}) {\n\t\t\tif groupName == v.(string) {\n\t\t\t\treturn true\n\t\t\t}\n\t\t}\n\t}\n\treturn false\n}", "func (_ConsortiumManagement *ConsortiumManagementCallerSession) IsMember(addr common.Address) (bool, error) {\n\treturn _ConsortiumManagement.Contract.IsMember(&_ConsortiumManagement.CallOpts, addr)\n}", "func (o *Permissao) HasIsGroup() bool {\n\tif o != nil && o.IsGroup != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func (b *Handler) IsMember(pubKeyBLS 
[]byte, round uint64, step uint8) bool {\n\treturn b.Handler.IsMember(pubKeyBLS, round, step, config.ConsensusSelectionCommitteeSize)\n}", "func (_ConsortiumManagement *ConsortiumManagementCaller) IsMember(opts *bind.CallOpts, addr common.Address) (bool, error) {\n\tvar out []interface{}\n\terr := _ConsortiumManagement.contract.Call(opts, &out, \"isMember\", addr)\n\n\tif err != nil {\n\t\treturn *new(bool), err\n\t}\n\n\tout0 := *abi.ConvertType(out[0], new(bool)).(*bool)\n\n\treturn out0, err\n\n}", "func (u *UserRecord) HasGroup(group string) bool {\n\tfor _, g := range u.Groups {\n\t\tif g == group {\n\t\t\treturn true\n\t\t}\n\t}\n\n\treturn false\n}", "func (ctx *TestContext) IAmAManagerOfTheGroupAndCanWatchItsMembers(group string) error {\n\treturn ctx.UserIsAManagerOfTheGroupWith(getParameterString(map[string]string{\n\t\t\"id\": group,\n\t\t\"user_id\": ctx.user,\n\t\t\"name\": group,\n\t\t\"can_watch_members\": strTrue,\n\t}))\n}", "func (lc *LdapConfig) CheckGroupMembership(username, group string) (bool, error) {\n\tre := regexp.MustCompile(\"CN=([a-zA-Z0-9_-]+?),\")\n\n\terr := lc.ldapsConnect()\n\tif err != nil {\n\t\treturn false, err\n\t}\n\n\tsearchRequest := ldap.NewSearchRequest(\n\t\tlc.BaseDN,\n\t\tldap.ScopeWholeSubtree, ldap.NeverDerefAliases, 0, 0, false,\n\t\tfmt.Sprintf(\"(&(objectClass=group)(CN=%s))\", group),\n\t\t[]string{\"member\"},\n\t\tnil,\n\t)\n\n\ts, err := lc.Conn.Search(searchRequest)\n\tif err != nil {\n\t\tlog.Printf(\"Group search failed: %v\", err)\n\t\treturn false, err\n\t}\n\n\tif len(s.Entries) != 1 {\n\t\treturn false, fmt.Errorf(\"Group '%s' does not exist or too many results\", group)\n\t}\n\n\tfor _, entry := range s.Entries {\n\t\tmemberDNs := entry.GetAttributeValues(\"member\")\n\t\tif len(memberDNs) == 0 {\n\t\t\treturn false, fmt.Errorf(\"Group '%s' does not have any members\", group)\n\t\t}\n\t\tfor _, memberDN := range memberDNs {\n\t\t\tmember := re.FindStringSubmatch(memberDN)\n\t\t\tif 
strings.ToLower(username) == strings.ToLower(member[1]) {\n\t\t\t\treturn true, nil\n\t\t\t}\n\t\t}\n\t}\n\n\treturn false, fmt.Errorf(\"User '%s' is not member of group '%s'\", username, group)\n}", "func (g ScimGroup) HasMember(memberID string) bool {\n\tfor _, member := range g.Members {\n\t\tif member.Value == memberID {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func (_ConsortiumManagement *ConsortiumManagementSession) IsMember(addr common.Address) (bool, error) {\n\treturn _ConsortiumManagement.Contract.IsMember(&_ConsortiumManagement.CallOpts, addr)\n}", "func (g *Group) HasMembers() bool {\n\treturn len(g.Allow) != 0 || len(g.Disallow) != 0 || g.CrawlDelay != \"\"\n}", "func IsMember(next echo.HandlerFunc) echo.HandlerFunc {\n\treturn func(c echo.Context) error {\n\t\tvar (\n\t\t\tres = response.Echo{C: c}\n\t\t)\n\t\tuser := c.Get(\"user\").(*jwt.Token)\n\t\tclaims := user.Claims.(jwt.MapClaims)\n\t\trole := claims[\"role\"].(bool)\n\t\tif !role {\n\t\t\tres.Response(http.StatusUnauthorized, \"Unauthorized\", nil)\n\t\t\treturn echo.ErrUnauthorized\n\t\t}\n\t\tres.Response(http.StatusOK, \"\", nil)\n\t\treturn next(c)\n\t}\n}", "func (g *Group) Match(o Owner) bool {\n\tif g.Equal(o) {\n\t\treturn true\n\t}\n\tif g.GetOwner().Match(o) {\n\t\treturn true\n\t}\n\tfor _, m := range g.GetPerm(\"%member%\") {\n\t\tif m.Match(o) {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func (u *User) IsPublicMember(orgId int64) bool {\n\treturn IsPublicMembership(orgId, u.ID)\n}", "func (s *Session) IsMemberAdmin() bool {\n\tif s.data.IsGenesis {\n\t\treturn true\n\t}\n\t// TODO\n\treturn false\n}", "func IsGroupIsUserGroupOrWhitelisted(name string, whitelist ...string) bool {\n\t// check whitelist of groups\n\tfor _, el := range whitelist {\n\t\tif el == name {\n\t\t\treturn true\n\t\t}\n\t}\n\n\tgroup, err := user.LookupGroup(name)\n\tif err != nil {\n\t\treturn false // fail on lookup error\n\t}\n\n\tgid, err := strconv.ParseUint(group.Gid, 10, 
32)\n\tif err != nil {\n\t\treturn false // fail on parse error\n\t}\n\n\tminGID, maxGiD := ReadUserGIDRange(LoginDefsPath)\n\n\tif gid < minGID {\n\t\treturn false // group not in lower range\n\t}\n\n\tif gid > maxGiD {\n\t\treturn false // group not in upper range\n\t}\n\n\treturn true\n}", "func (g *Group) IsAdmin(u *User) bool {\n\n\tif u.IsAdmin() {\n\t\treturn true\n\t}\n\n\tfor _, admin := range g.Admins {\n\t\tif admin == u.Username {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func (u *User) IsUser() bool {\n\treturn u.UserGroupID == USER\n}", "func (m *Member) IsGuest() bool { return m.Role == MemberRoleGuest }", "func (g *Godis) SIsMember(key, member string) bool {\n\treturn g.cmdInt(\"SISMEMBER\", key, member) == 1\n}", "func (u *User) In(g *Group) (bool, error) {\n\treturn userInGroup(u, g)\n}", "func UserInGroup(e *Engine, userId int64, groupId int64) (bool, error) {\n\tres, _, err := e.RawSelect(Filter(\"autoscope_user_groups\", map[string]interface{}{\n\t\t\"user_id\": userId,\n\t\t\"group_id\": groupId,\n\t}))\n\tif err != nil { return false, err }\n\treturn res.Next(), nil\n}", "func (ctx *TestContext) IsAMemberOfTheGroup(childGroupName, parentGroupName string) {\n\tctx.addGroupGroup(parentGroupName, childGroupName)\n}", "func IsAdmin(username, groupName string) (bool, error) {\n\tgroup, err := GetGroup(groupName)\n\tif err != nil {\n\t\treturn false, fmt.Errorf(\"getGroup(%s) err: %s\", groupName, err)\n\t}\n\n\taccount, err := GetAccount(username)\n\tif err != nil {\n\t\treturn false, fmt.Errorf(\"getAccount(%s) err: %s\", username, err)\n\t}\n\n\tselector := Selector{\n\t\t\"sourceId\": group.Id,\n\t\t\"sourceName\": \"JGroup\",\n\t\t\"targetId\": account.Id,\n\t\t\"targetName\": \"JAccount\",\n\t\t\"as\": \"admin\",\n\t}\n\n\tcount, err := RelationshipCount(selector)\n\tif err != nil {\n\t\treturn false, fmt.Errorf(\"checkAdminRelationship err: %s\", err)\n\t}\n\n\treturn count == 1, nil\n}", "func (a *UserGroupAuthZBasic) 
CanGetGroup(ctx context.Context, curUser model.User, gid int) error {\n\treturn nil\n}", "func (c *collection) hasGroup(g string) bool {\n\treturn c.has(c.groups, g)\n}", "func (r *marathonClient) HasGroup(name string) (bool, error) {\n\turi := fmt.Sprintf(\"%s/%s\", marathonAPIGroups, trimRootPath(name))\n\terr := r.apiCall(\"GET\", uri, \"\", nil)\n\tif err != nil {\n\t\tif apiErr, ok := err.(*APIError); ok && apiErr.ErrCode == ErrCodeNotFound {\n\t\t\treturn false, nil\n\t\t}\n\t\treturn false, err\n\t}\n\treturn true, nil\n}", "func (c Customer) ContainsGroup(groupName string) bool {\n\tfor _, g := range c.Groups {\n\t\tif g == groupName {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func (d UserData) HasGroups() bool {\n\treturn d.ModelData.Has(models.NewFieldName(\"Groups\", \"group_ids\"))\n}", "func (m *Group) GetMemberOf()([]DirectoryObjectable) {\n return m.memberOf\n}", "func (w *Widget) IsMemberChannel(channelID string) bool {\n\tfor activeID := range w.activeChannels {\n\t\tif channelID == activeID {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func (p *Plugin) HasGroupExpression(user *security.User, resourceTenant string, groupExpression grp.GroupExpression) bool {\n\n\t// no resource tenant is not ok, there can be no default on this layer\n\tif resourceTenant == \"\" {\n\t\treturn false\n\t}\n\n\t// what we have now is the slice of groups that the user has\n\t// (including \"on behalf\", with concrete cluster-tenant or wildcard \"all\")\n\t// \"on behalf\"-groups do not have cluster-tenant because it is already evaluated for the concrete tenant to act\n\n\tfor i := range user.Groups {\n\t\tgrpCtx, err := p.grpr.ParseGroupName(string(user.Groups[i]))\n\t\tif err != nil {\n\t\t\tcontinue\n\t\t}\n\n\t\t// check if group maches for any of the tenants\n\t\tif resourceTenant == grp.Any {\n\t\t\tif groupExpression.Matches(*grpCtx) {\n\t\t\t\treturn true\n\t\t\t}\n\t\t\tcontinue\n\t\t}\n\t\t// resource belongs to own tenant\n\t\tif 
strings.EqualFold(user.Tenant, resourceTenant) && grpCtx.OnBehalfTenant == \"\" {\n\t\t\tif groupExpression.Matches(*grpCtx) {\n\t\t\t\treturn true\n\t\t\t}\n\t\t\tcontinue\n\t\t}\n\t\t// resource belongs to other tenant, access \"on behalf\": if group is for resource-tenant or for \"all\" then check\n\t\tif strings.EqualFold(grpCtx.OnBehalfTenant, resourceTenant) || grpCtx.OnBehalfTenant == grp.All {\n\t\t\tif groupExpression.Matches(*grpCtx) {\n\t\t\t\treturn true\n\t\t\t}\n\t\t\tcontinue\n\t\t}\n\t}\n\n\treturn false\n\n}", "func (i UserGroupAccess) IsAUserGroupAccess() bool {\n\tfor _, v := range _UserGroupAccessValues {\n\t\tif i == v {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func HasGroup(r *http.Request, searchGroups ...string) bool {\n\tgroupMap := r.Context().Value(GroupCtxKey).(map[string]bool)\n\n\tfor _, searchGroup := range searchGroups {\n\t\tif _, ok := groupMap[searchGroup]; ok {\n\t\t\treturn true\n\t\t}\n\t}\n\n\treturn false\n}", "func (ctx *TestContext) ThereIsAGroup(group string) error {\n\treturn ctx.ThereIsAGroupWith(getParameterString(map[string]string{\n\t\t\"id\": group,\n\t\t\"name\": group,\n\t}))\n}", "func MemberHasPermission(s *discordgo.Session, guildID string, userID string, permission int64) (bool, error) {\n\tmember, err := s.State.Member(guildID, userID)\n\tif err != nil {\n\t\tif member, err = s.GuildMember(guildID, userID); err != nil {\n\t\t\treturn false, err\n\t\t}\n\t}\n\tg, err := s.Guild(guildID)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\n\tif g.OwnerID == userID {\n\t\treturn true, nil\n\t}\n\t// Iterate through the role IDs stored in member.Roles\n\t// to check permissions\n\tfor _, roleID := range member.Roles {\n\t\trole, err := s.State.Role(guildID, roleID)\n\t\tif err != nil {\n\t\t\treturn false, err\n\t\t}\n\t\tif role.Permissions&permission != 0 {\n\t\t\treturn true, nil\n\t\t}\n\t}\n\n\treturn false, nil\n}", "func (s UserSet) HasGroup(groupID string) bool {\n\tres := 
s.Collection().Call(\"HasGroup\", groupID)\n\tresTyped, _ := res.(bool)\n\treturn resTyped\n}", "func isAllowedUser(request admissionctl.Request) bool {\n\tif utils.SliceContains(request.UserInfo.Username, allowedUsers) {\n\t\treturn true\n\t}\n\n\tfor _, group := range sreAdminGroups {\n\t\tif utils.SliceContains(group, request.UserInfo.Groups) {\n\t\t\treturn true\n\t\t}\n\t}\n\n\treturn false\n}", "func (ctx *TestContext) ICanWatchGroup(groupName string) error {\n\treturn ctx.UserIsAManagerOfTheGroupWith(getParameterString(map[string]string{\n\t\t\"id\": groupName,\n\t\t\"user_id\": ctx.user,\n\t\t\"name\": groupName,\n\t\t\"can_watch_members\": strTrue,\n\t}))\n}", "func (m *User) GetMemberOf()([]DirectoryObjectable) {\n return m.memberOf\n}", "func IsGroupExist(uid int64, name string) (bool, error) {\n\tif len(name) == 0 {\n\t\treturn false, nil\n\t}\n\treturn x.Where(\"id!=?\", uid).Get(&Group{Name: name})\n}", "func (o *Permissao) SetIsGroup(v bool) {\n\to.IsGroup = &v\n}", "func (o *User) GetMemberOfOk() ([]Group, bool) {\n\tif o == nil || o.MemberOf == nil {\n\t\treturn nil, false\n\t}\n\treturn o.MemberOf, true\n}", "func isInACL(c context.Context, acl *api.PrefixMetadata_ACL) (bool, error) {\n\tcaller := string(auth.CurrentIdentity(c)) // e.g. 
\"user:abc@example.com\"\n\n\tvar groups []string\n\tfor _, p := range acl.Principals {\n\t\tif p == caller {\n\t\t\treturn true, nil // the caller was specified in ACLs explicitly\n\t\t}\n\t\tif s := strings.SplitN(p, \":\", 2); len(s) == 2 && s[0] == \"group\" {\n\t\t\tgroups = append(groups, s[1])\n\t\t}\n\t}\n\n\tyes, err := auth.IsMember(c, groups...)\n\tif err != nil {\n\t\treturn false, errors.Annotate(err, \"failed to check group memberships when checking ACLs\").Err()\n\t}\n\treturn yes, nil\n}", "func MemberHasPermission(s *discordgo.Session, guildID string, userID string, perm int) bool {\n\t// Get the guild member\n\tm, err := s.State.Member(guildID, userID)\n\tif err != nil {\n\t\tif m, err = s.GuildMember(guildID, userID); err != nil {\n\t\t\treturn false\n\t\t}\n\t}\n\t// Iterate through all roles to check permissions\n\tfor _, roleID := range m.Roles {\n\t\t// Get the role\n\t\trole, err := s.State.Role(guildID, roleID)\n\t\t// Make sure the role exists\n\t\tif err != nil {\n\t\t\treturn false\n\t\t}\n\t\t// Check if the role's permissions contains the sought after permission\n\t\tif role.Permissions&perm != 0 {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func (e PolicyEntity) IsGroup() bool {\n\treturn e.Type == entityTypeGroup\n}", "func (sid *SteamID) IsGroupChat() bool {\n\treturn !!(sid.Type == TypeChat && (sid.Instance&chatInstanceFlagClan) != 0)\n}", "func (s *Session) IsServiceMember() bool {\n\treturn s.ServiceMemberID != uuid.Nil\n}", "func (o *User) HasMemberOf() bool {\n\tif o != nil && o.MemberOf != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func (o *User) HasMemberOf() bool {\n\tif o != nil && o.MemberOf != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func IsLoggedIn(r *http.Request) (bool, error) {\n\tsession, err := getSession(r)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\tt := session.Values[\"accessToken\"]\n\tif t == nil {\n\t\treturn false, nil\n\t}\n\tstoredToken, ok := t.(string)\n\tif !ok 
{\n\t\treturn false, fmt.Errorf(\"bad type of %q value in session: %v\", \"accessToken\", err)\n\t}\n\tgp := session.Values[\"gplusID\"]\n\tif t == nil {\n\t\treturn false, nil\n\t}\n\tgplusId, ok := gp.(string)\n\tif !ok {\n\t\treturn false, fmt.Errorf(\"bad type of %q value in session: %v\", \"gplusID\", err)\n\t}\n\treturn storedToken != \"\" && isAllowed(gplusId), nil\n}", "func MemberHasPermission(s *discordgo.Session, guildID string, userID string, permission int) (bool, error) {\n\tif permission <= 0 {\n\t\treturn true, nil\n\t}\n\n\t// https://github.com/bwmarrin/discordgo/wiki/FAQ#determining-if-a-role-has-a-permission\n\tmember, err := s.State.Member(guildID, userID)\n\tif err != nil {\n\t\tif member, err = s.GuildMember(guildID, userID); err != nil {\n\t\t\treturn false, err\n\t\t}\n\t}\n\n\t// Iterate through the role IDs stored in member.Roles\n\t// to check permissions\n\tfor _, roleID := range member.Roles {\n\t\trole, err := s.State.Role(guildID, roleID)\n\t\tif err != nil {\n\t\t\treturn false, err\n\t\t}\n\t\tif role.Permissions&permission != 0 {\n\t\t\treturn true, nil\n\t\t}\n\t}\n\n\treturn false, nil\n}", "func (ctx *TestContext) IAmAManagerOfTheGroup(group string) error {\n\treturn ctx.UserIsAManagerOfTheGroupWith(getParameterString(map[string]string{\n\t\t\"id\": group,\n\t\t\"user_id\": ctx.user,\n\t\t\"name\": group,\n\t\t\"can_watch_members\": \"false\",\n\t}))\n}", "func (d *DB) IsAdmin(uuid string) (bool, error) {\n\tcnt := 0\n\tr := d.db.QueryRow(\"SELECT COUNT(*) FROM teammember INNER JOIN username ON teamuuid = uuid WHERE useruuid = $1 AND username = $2\", uuid, teamNameAdmin)\n\terr := r.Scan(&cnt)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\n\treturn cnt == 1, nil\n}", "func (u *User) IsAdmin() bool {\n\treturn u.UserGroupID == ADMIN\n}", "func (q *QueryGVR) containsGroup(groups []*metav1.APIGroup, group string) bool {\n\tfor _, grp := range groups {\n\t\tif strings.EqualFold(grp.Name, group) {\n\t\t\treturn 
true\n\t\t}\n\t}\n\treturn false\n}", "func (app *App) GroupMembers(ctx context.Context, groupName string) ([]string, error) {\n\treturn app.groups.GroupMembers(ctx, groupName)\n}", "func (db *MongoDBRooms) HasMember(memberID, roomID string) (bool, error) {\n\n\thasMember := false\n\n\tsession, err := mgo.Dial(db.HOST.URI)\n\tif err != nil {\n\t\treturn hasMember, errors.New(\"error dialing the database\")\n\t}\n\tdefer session.Close()\n\n\tvar room RoomStruct\n\n\t// search query for room\n\tfind := bson.M{\"_id\": bson.ObjectIdHex(roomID)}\n\terr = session.DB(db.HOST.NAME).C(db.COLLECTION).Find(find).One(&room)\n\n\tfor _, _memberID := range room.MemberIDs {\n\t\tif _memberID == memberID {\n\t\t\thasMember = true\n\t\t\tbreak\n\t\t}\n\t}\n\n\tif err != nil {\n\t\treturn hasMember, errors.New(\"error finding the document\")\n\t}\n\treturn hasMember, nil\n}", "func (dag *DdgAdminGroup) Exists() bool { //ddg_admin_group\n\treturn dag._exists\n}", "func (p *G1Affine) IsInSubGroup() bool {\n\tvar _p G1Jac\n\t_p.FromAffine(p)\n\treturn _p.IsInSubGroup()\n}", "func (c Chat) IsGroupChat() bool {\n\treturn c.Type != \"private\"\n}", "func (o *Permissao) GetIsGroupOk() (*bool, bool) {\n\tif o == nil || o.IsGroup == nil {\n\t\treturn nil, false\n\t}\n\treturn o.IsGroup, true\n}", "func HaveIAskedMember(s *discordgo.Session, member string) bool {\n\tc, err := s.UserChannelCreate(member)\n\tif err != nil {\n\t\treturn false\n\t}\n\tmessages, err := s.ChannelMessages(c.ID, 10, \"\", \"\", \"\") // reading 10 messages to overcome possible user-sent messages\n\tif err != nil {\n\t\treturn false\n\t}\n\tfor _, message := range messages {\n\t\tif message.Author.Bot {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func (c *client) TeamHasMember(org string, teamID int, memberLogin string) (bool, error) {\n\tdurationLogger := c.log(\"TeamHasMember\", teamID, memberLogin)\n\tdefer durationLogger()\n\n\tprojectMaintainers, err := c.ListTeamMembers(org, teamID, RoleAll)\n\tif err 
!= nil {\n\t\treturn false, err\n\t}\n\tfor _, person := range projectMaintainers {\n\t\tif NormLogin(person.Login) == NormLogin(memberLogin) {\n\t\t\treturn true, nil\n\t\t}\n\t}\n\treturn false, nil\n}", "func (ctx *TestContext) UserIsAManagerOfTheGroupWith(parameters string) error {\n\terr := ctx.ThereIsAGroupWith(parameters)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// We create a parent group of which the user is the manager.\n\tgroup := ctx.getParameterMap(parameters)\n\n\tcanWatchMembers := \"0\"\n\tcanGrantGroupAccess := \"0\"\n\twatchedGroupName := group[\"user_id\"] + \" manages \" + referenceToName(group[\"name\"])\n\n\tif group[\"can_watch_members\"] == strTrue {\n\t\tcanWatchMembers = \"1\"\n\t\twatchedGroupName += \" with can_watch_members\"\n\t}\n\tif group[\"can_grant_group_access\"] == strTrue {\n\t\tcanGrantGroupAccess = \"1\"\n\t\twatchedGroupName += \" with can_grant_group_access\"\n\t}\n\n\terr = ctx.ThereIsAGroupWith(getParameterString(map[string]string{\n\t\t\"id\": watchedGroupName,\n\t\t\"name\": watchedGroupName,\n\t}))\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tctx.IsAMemberOfTheGroup(group[\"id\"], watchedGroupName)\n\n\tctx.addGroupManager(group[\"user_id\"], watchedGroupName, canWatchMembers, canGrantGroupAccess)\n\n\treturn nil\n}" ]
[ "0.76126474", "0.7386575", "0.73728687", "0.7257297", "0.7106678", "0.6925676", "0.6812919", "0.67271173", "0.67248964", "0.66902804", "0.6681328", "0.6652703", "0.6634036", "0.6619776", "0.657318", "0.64903814", "0.64772445", "0.64528537", "0.63925785", "0.6369088", "0.6359759", "0.63573694", "0.63542205", "0.6330751", "0.62784445", "0.62735087", "0.6266872", "0.624544", "0.62349355", "0.6209475", "0.620026", "0.61901104", "0.61734724", "0.61572963", "0.6152183", "0.6151992", "0.6113642", "0.60998946", "0.60814357", "0.6066931", "0.605734", "0.60565954", "0.6027011", "0.6017587", "0.598068", "0.5940163", "0.5925166", "0.59237367", "0.5874775", "0.58684903", "0.5844566", "0.5839029", "0.582963", "0.58260256", "0.57648593", "0.57309186", "0.5712062", "0.5699605", "0.5683727", "0.5671324", "0.563709", "0.55956984", "0.557315", "0.5566756", "0.55630046", "0.55621207", "0.5552294", "0.55197036", "0.55150825", "0.5512918", "0.54987144", "0.5485967", "0.5485232", "0.5459601", "0.5451298", "0.5437292", "0.542229", "0.5408021", "0.5398786", "0.5390968", "0.5381086", "0.53694266", "0.5366776", "0.5361372", "0.5361372", "0.53600496", "0.5348922", "0.5348099", "0.53216136", "0.5321024", "0.529885", "0.5284517", "0.528294", "0.528091", "0.52618986", "0.52573484", "0.52573097", "0.5247586", "0.5223666", "0.52061695" ]
0.7899438
0
DNSName extracts DNS name from host:port string.
DNSName извлекает имя DNS из строки host:port.
func DNSName(hostport string) (string, error) { host, err := Host(hostport) if err != nil { return "", trace.Wrap(err) } if ip := net.ParseIP(host); len(ip) != 0 { return "", trace.BadParameter("%v is an IP address", host) } return host, nil }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func GetHostName(hostAddr string) string {\n\treturn strings.Split(hostAddr, base.UrlPortNumberDelimiter)[0]\n}", "func GetHostname(addr string) string {\n\treturn strings.Split(addr, \":\")[0]\n}", "func hostname(hostport string) (string, error) {\n\thost, _, err := net.SplitHostPort(hostport)\n\treturn host, err\n}", "func ExtractHostName(urlStr string) (HostNames, error) {\n\thn := &HostNames{\n\t\tURL: \"\",\n\t\tHostName: \"\",\n\t}\n\n\tu, err := url.Parse(urlStr)\n\n\tif err != nil {\n\t\tlog.Error(err)\n\t\treturn *hn, err\n\t}\n\n\tisSchema, err := IsSchema(urlStr)\n\n\tif err != nil {\n\t\tlog.Error(err)\n\t\treturn *hn, err\n\t}\n\n\tif u.Hostname() != \"\" && true == isSchema {\n\t\thn.URL = u.Scheme + \"://\" + u.Hostname()\n\t\thn.HostName = u.Hostname()\n\t}\n\n\treturn *hn, nil\n}", "func DnsDecoder(urlStr string) (*string, *string, error) {\n\tu, err := url.Parse(urlStr)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\thostTmp := u.Host\n\tIP := Dns(u.Host)\n\tif IP != nil {\n\t\tu.Host = IP.String()\n\t\turlStr = u.String()\n\t\treturn &urlStr, &hostTmp, nil\n\t}\n\treturn nil, nil, fmt.Errorf(\"dnsDecoder fail\")\n}", "func parseHost(addr string) string {\n\tvar (\n\t\thost, port string\n\t\tdefaultAssigned bool\n\t)\n\n\tv := strings.Split(addr, \":\")\n\n\tswitch len(v) {\n\tcase 2:\n\t\thost = v[0]\n\t\tport = v[1]\n\n\t\tif host == \"\" {\n\t\t\thost = _DEFAULT_HOST\n\t\t\tdefaultAssigned = true\n\t\t}\n\n\t\tif port == \"\" {\n\t\t\tport = _DEFAULT_PORT\n\t\t\tdefaultAssigned = true\n\t\t}\n\n\t\tif defaultAssigned == false {\n\t\t\treturn addr // addr is already in required format\n\t\t}\n\t\tbreak\n\n\tcase 1:\n\t\thost = v[0]\n\t\tif host == \"\" {\n\t\t\thost = _DEFAULT_HOST\n\t\t}\n\t\tport = _DEFAULT_PORT\n\tcase 0:\n\t\tfallthrough\n\tdefault:\n\t\thost = _DEFAULT_HOST\n\t\tport = _DEFAULT_PORT\n\t\tbreak\n\t}\n\treturn strings.Join([]string{host, port}, \":\")\n}", "func addrToHost(addr string) string {\n\treturn 
strings.Split(addr, \":\")[0]\n}", "func HostNameandPort(node string) (host, port string, ipv6 bool, err error) {\n\ttokens := []string{}\n\n\t// Set _IPv6 based on input address\n\tipv6, err = IsIPv6(node)\n\n\tif err != nil {\n\t\treturn \"\", \"\", false, err\n\t}\n\n\terr = nil\n\t// For IPv6\n\tif ipv6 {\n\t\t// Then the url should be of the form [::1]:8091\n\t\ttokens = strings.Split(node, \"]:\")\n\t\thost = strings.Replace(tokens[0], \"[\", \"\", 1)\n\n\t} else {\n\t\t// For IPv4\n\t\ttokens = strings.Split(node, \":\")\n\t\thost = tokens[0]\n\t}\n\n\tif len(tokens) == 2 {\n\t\tport = tokens[1]\n\t} else {\n\t\tport = \"\"\n\t}\n\n\treturn\n}", "func parseHostPort(str string) (string, string) {\n\tvar (\n\t\thost string\n\t\tport string\n\n\t\ti = strings.Index(str, \":\")\n\t)\n\tif i == -1 {\n\t\treturn str, \"\"\n\t}\n\n\thost = str[:i]\n\tport = str[i+1:]\n\n\treturn host, port\n}", "func Hostname() (string, error)", "func hostnameInSNI(name string) string {\n\thost := name\n\tif len(host) > 0 && host[0] == '[' && host[len(host)-1] == ']' {\n\t\thost = host[1 : len(host)-1]\n\t}\n\tif i := strings.LastIndex(host, \"%\"); i > 0 {\n\t\thost = host[:i]\n\t}\n\tif net.ParseIP(host) != nil {\n\t\treturn \"\"\n\t}\n\tfor len(name) > 0 && name[len(name)-1] == '.' 
{\n\t\tname = name[:len(name)-1]\n\t}\n\treturn name\n}", "func getHostNameAndPort(hostInfo string) (string, int, error) {\n\thost := strings.SplitN(hostInfo, \":\", -1)\n\tif len(host) != 2 {\n\t\treturn \"\", 0, fmt.Errorf(\"expected hostname:port, got %s\", host)\n\t}\n\n\tport, err := strconv.Atoi(host[1])\n\tif err != nil {\n\t\treturn \"\", 0, fmt.Errorf(\"invalid port number, got %s\", host[1])\n\t}\n\n\treturn host[0], port, nil\n}", "func ExtractHost(address string) string {\n\thost, _, _ := net.SplitHostPort(address)\n\tif host == \"\" {\n\t\treturn \"localhost\"\n\t}\n\treturn host\n}", "func GetHostAddr(hostName string, port uint16) string {\n\treturn hostName + base.UrlPortNumberDelimiter + strconv.FormatInt(int64(port), base.ParseIntBase)\n}", "func (internet Internet) DomainName(v reflect.Value) (interface{}, error) {\n\treturn internet.domainName()\n}", "func ParseHost(s string) (*Host, error) {\n\tisValidHost := func(host string) bool {\n\t\tif host == \"\" {\n\t\t\treturn false\n\t\t}\n\n\t\tif ip := net.ParseIP(host); ip != nil {\n\t\t\treturn true\n\t\t}\n\n\t\t// host is not a valid IPv4 or IPv6 address\n\t\t// host may be a hostname\n\t\t// refer https://en.wikipedia.org/wiki/Hostname#Restrictions_on_valid_host_names\n\t\t// why checks are done like below\n\t\tif len(host) < 1 || len(host) > 253 {\n\t\t\treturn false\n\t\t}\n\n\t\tfor _, label := range strings.Split(host, \".\") {\n\t\t\tif len(label) < 1 || len(label) > 63 {\n\t\t\t\treturn false\n\t\t\t}\n\n\t\t\tif !hostLabelRegexp.MatchString(label) {\n\t\t\t\treturn false\n\t\t\t}\n\t\t}\n\n\t\treturn true\n\t}\n\n\tvar port Port\n\tvar isPortSet bool\n\thost, portStr, err := net.SplitHostPort(s)\n\tif err != nil {\n\t\tif !strings.Contains(err.Error(), \"missing port in address\") {\n\t\t\treturn nil, err\n\t\t}\n\t\thost = s\n\t\tportStr = \"\"\n\t} else {\n\t\tif port, err = ParsePort(portStr); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tisPortSet = true\n\t}\n\n\tif host != \"\" 
{\n\t\thost, err = trimIPv6(host)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\t// IPv6 requires a link-local address on every network interface.\n\t// `%interface` should be preserved.\n\ttrimmedHost := host\n\n\tif i := strings.LastIndex(trimmedHost, \"%\"); i > -1 {\n\t\t// `%interface` can be skipped for validity check though.\n\t\ttrimmedHost = trimmedHost[:i]\n\t}\n\n\tif !isValidHost(trimmedHost) {\n\t\treturn nil, errors.New(\"invalid hostname\")\n\t}\n\n\treturn &Host{\n\t\tName: host,\n\t\tPort: port,\n\t\tIsPortSet: isPortSet,\n\t}, nil\n}", "func ParseDSN(name string) apmsql.DSNInfo {\n\tif pos := strings.IndexRune(name, '?'); pos >= 0 {\n\t\tname = name[:pos]\n\t}\n\treturn apmsql.DSNInfo{\n\t\tDatabase: name,\n\t}\n}", "func parseHost(host string) string {\n\trealHost, _, _ := net.SplitHostPort(host)\n\tif realHost != \"\" {\n\t\treturn realHost\n\t}\n\treturn host\n}", "func parseHostname(hostname string) (string, error) {\n\t// TODO does the hostname even need to be parsed?\n\treturn hostname, nil\n}", "func parseAddr(addr string) (string, string) {\n\tparsed := strings.SplitN(addr, \":\", 2)\n\treturn parsed[0], parsed[1]\n}", "func HostWithoutPort(s string) string {\n\tif strings.Contains(s, \":\") {\n\t\treturn strings.Split(s, \":\")[0]\n\t}\n\treturn s\n}", "func (p *Printer) Hostname(ip, port string, ns, pod, svc string, names []string) (host string) {\n\thost = ip\n\tif p.opts.enableIPTranslation {\n\t\tif pod != \"\" {\n\t\t\t// path.Join omits the slash if ns is empty\n\t\t\thost = path.Join(ns, pod)\n\t\t} else if svc != \"\" {\n\t\t\thost = path.Join(ns, svc)\n\t\t} else if len(names) != 0 {\n\t\t\thost = strings.Join(names, \",\")\n\t\t}\n\t}\n\n\tif port != \"\" && port != \"0\" {\n\t\treturn net.JoinHostPort(host, port)\n\t}\n\n\treturn host\n}", "func DnsDomain(s string) string {\n\tl := strings.Split(s, \"/\")\n\t// start with 1, to strip /skydns\n\tfor i, j := 1, len(l)-1; i < j; i, j = i+1, j-1 {\n\t\tl[i], l[j] = 
l[j], l[i]\n\t}\n\treturn dns.Fqdn(strings.Join(l[2:len(l)-1], \".\"))\n}", "func Fqdn(hostname string) string {\n\taddrs, err := net.LookupIP(hostname)\n\tif err != nil {\n\t\treturn hostname\n\t}\n\n\tfor _, addr := range addrs {\n\t\tif ipv4 := addr.To4(); ipv4 != nil {\n\t\t\tip, err := ipv4.MarshalText()\n\t\t\tif err != nil {\n\t\t\t\treturn hostname\n\t\t\t}\n\t\t\thosts, err := net.LookupAddr(string(ip))\n\t\t\tif err != nil || len(hosts) == 0 {\n\t\t\t\treturn hostname\n\t\t\t}\n\t\t\treturn hosts[0]\n\t\t}\n\t}\n\treturn hostname\n}", "func buildHostName(subDomainPrefix string, subDomainSuffix string, subDomain string, domain string) string {\n\treturn joinNonEmpty([]interface{}{joinNonEmpty([]interface{}{subDomainPrefix, subDomain, subDomainSuffix}, \"-\"), domain}, \".\")\n}", "func GetNameServer() string {\n\t// run command: netsh interface ip show dnsservers\n\tif out, _, err := util.RunAndWait(exec.Command(\"netsh\",\n\t\t\"interface\",\n\t\t\"ip\",\n\t\t\"show\",\n\t\t\"dnsservers\",\n\t)); err != nil {\n\t\tlog.Error().Msgf(\"Failed to get dns server\")\n\t\treturn \"\"\n\t} else {\n\t\tr, _ := regexp.Compile(\"[0-9]+\\\\.[0-9]+\\\\.[0-9]+\\\\.[0-9]+\")\n\t\treturn r.FindString(out)\n\t}\n}", "func SplitHostPort(hostport string) (host, port string, err error) {\n\tj, k := 0, 0\n\n\t// The port starts after the last colon.\n\ti := last(hostport, ':')\n\tif i < 0 {\n\t\tgoto missingPort\n\t}\n\n\tif hostport[0] == '[' {\n\t\t// Expect the first ']' just before the last ':'.\n\t\tend := byteIndex(hostport, ']')\n\t\tif end < 0 {\n\t\t\terr = &AddrError{\"missing ']' in address\", hostport}\n\t\t\treturn\n\t\t}\n\t\tswitch end + 1 {\n\t\tcase len(hostport):\n\t\t\t// There can't be a ':' behind the ']' now.\n\t\t\tgoto missingPort\n\t\tcase i:\n\t\t\t// The expected result.\n\t\tdefault:\n\t\t\t// Either ']' isn't followed by a colon, or it is\n\t\t\t// followed by a colon that is not the last one.\n\t\t\tif hostport[end+1] == ':' {\n\t\t\t\tgoto 
tooManyColons\n\t\t\t}\n\t\t\tgoto missingPort\n\t\t}\n\t\thost = hostport[1:end]\n\t\tj, k = 1, end+1 // there can't be a '[' resp. ']' before these positions\n\t} else {\n\t\thost = hostport[:i]\n\t\tif byteIndex(host, ':') >= 0 {\n\t\t\tgoto tooManyColons\n\t\t}\n\t\tif byteIndex(host, '%') >= 0 {\n\t\t\tgoto missingBrackets\n\t\t}\n\t}\n\tif byteIndex(hostport[j:], '[') >= 0 {\n\t\terr = &AddrError{\"unexpected '[' in address\", hostport}\n\t\treturn\n\t}\n\tif byteIndex(hostport[k:], ']') >= 0 {\n\t\terr = &AddrError{\"unexpected ']' in address\", hostport}\n\t\treturn\n\t}\n\n\tport = hostport[i+1:]\n\treturn\n\nmissingPort:\n\terr = &AddrError{\"missing port in address\", hostport}\n\treturn\n\ntooManyColons:\n\terr = &AddrError{\"too many colons in address\", hostport}\n\treturn\n\nmissingBrackets:\n\terr = &AddrError{\"missing brackets in address\", hostport}\n\treturn\n}", "func DomainName(opts ...options.OptionFunc) string {\n\treturn singleFakeData(DomainNameTag, func() interface{} {\n\t\topt := options.BuildOptions(opts)\n\t\ti := Internet{fakerOption: *opt}\n\t\td, err := i.domainName()\n\t\tif err != nil {\n\t\t\tpanic(err.Error())\n\t\t}\n\t\treturn d\n\t}, opts...).(string)\n}", "func HostPort(urlStr string) (string, error) {\n\t// TODO: rename this function to URLHostPort instead, like\n\t// ListenHostPort below.\n\tu, err := url.Parse(urlStr)\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"could not parse %q as a url: %v\", urlStr, err)\n\t}\n\tif u.Scheme == \"\" {\n\t\treturn \"\", fmt.Errorf(\"url %q has no scheme\", urlStr)\n\t}\n\thostPort := u.Host\n\tif hostPort == \"\" || strings.HasPrefix(hostPort, \":\") {\n\t\treturn \"\", fmt.Errorf(\"url %q has no host\", urlStr)\n\t}\n\tidx := strings.Index(hostPort, \"]\")\n\tif idx == -1 {\n\t\tidx = 0\n\t}\n\tif !strings.Contains(hostPort[idx:], \":\") {\n\t\tif u.Scheme == \"https\" {\n\t\t\thostPort += \":443\"\n\t\t} else {\n\t\t\thostPort += \":80\"\n\t\t}\n\t}\n\treturn hostPort, nil\n}", 
"func DNSResolve(host string) string {\n\taddress, err := net.ResolveIPAddr(\"ip\", host)\n\tif err != nil {\n\t\treturn \"\"\n\t}\n\treturn address.String()\n}", "func getHost(host_url string) (host string, err error) {\n\tu, err := url.Parse(host_url)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn strings.Split(u.Host, \":\")[0], nil\n}", "func getHostName(staxName string, nodeName string) string {\n\treturn fmt.Sprintf(\"%s-%s\", staxName, nodeName)\n}", "func LookupDNSHostCNAME(domain string) string {\n\tif nsRecord, err := net.LookupCNAME(domain); err == nil {\n\t\treturn nsRecord\n\t}\n\treturn \"\"\n}", "func main() {\n\tflag.StringVar(&addr, \"addr\", \"127.0.0.1\", \"host address to lookup\")\n\tflag.Parse()\n\n\tname, err := net.LookupAddr(addr)\n\tif err != nil {\n\t\tfmt.Println(err)\n\t\tos.Exit(1)\n\t}\n\n\tfmt.Println(name)\n}", "func Host(hostname string) (string, error) {\n\tif hostname == \"\" {\n\t\treturn \"\", trace.BadParameter(\"missing parameter hostname\")\n\t}\n\t// if this is IPv4 or V6, return as is\n\tif ip := net.ParseIP(hostname); len(ip) != 0 {\n\t\treturn hostname, nil\n\t}\n\t// has no indication of port, return, note that\n\t// it will not break ipv6 as it always has at least one colon\n\tif !strings.Contains(hostname, \":\") {\n\t\treturn hostname, nil\n\t}\n\thost, _, err := SplitHostPort(hostname)\n\tif err != nil {\n\t\treturn \"\", trace.Wrap(err)\n\t}\n\treturn host, nil\n}", "func (o DnsDomainOutput) DomainName() pulumi.StringOutput {\n\treturn o.ApplyT(func(v *DnsDomain) pulumi.StringOutput { return v.DomainName }).(pulumi.StringOutput)\n}", "func withoutPort(addr string) string {\n\tif h, _, err := net.SplitHostPort(addr); err == nil {\n\t\treturn h\n\t}\n\treturn addr\n}", "func findMDA(host string) (string, error) {\n\tresults, err := net.LookupMX(host)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tif len(results) == 0 {\n\t\treturn \"\", errors.New(\"No MX records found\")\n\t}\n\n\t// todo: support for 
multiple MX records\n\th := results[0].Host\n\treturn h[:len(h)-1] + \":25\", nil\n}", "func Resolve(q string) (ip net.IP, port uint16, target string, err error) {\n c := new(dns.Client)\n m := new(dns.Msg)\n m.SetQuestion(dns.Fqdn(q), dns.TypeSRV)\n m.RecursionDesired = true\n\n dns_server := \"127.0.0.1:8600\"\n if len(os.Args) > 1 {\n dns_server = os.Args[1]\n }\n fmt.Printf(\"Using dns server: %v\\n\", dns_server)\n\n r, _, err := c.Exchange(m, dns_server)\n if r == nil {\n log.Fatalf(\"error: %s\\n\", err.Error())\n }\n\n if r.Rcode != dns.RcodeSuccess {\n log.Fatalf(\"dns lookup failed\\n\")\n }\n\n for _, srv := range r.Answer {\n port = srv.(*dns.SRV).Port\n target = srv.(*dns.SRV).Target\n\n fmt.Printf(\"%v %v\\n\", port, target)\n\n for _, a := range r.Extra {\n if target != a.(*dns.A).Hdr.Name {\n continue\n }\n ip = a.(*dns.A).A\n fmt.Printf(\"%v %v\\n\", target, ip)\n return\n }\n }\n\n log.Fatalf(\"no DNS record found\\n\")\n return\n}", "func extractAddress(str string) string {\n\tvar addr string\n\n\tswitch {\n\tcase strings.Contains(str, `]`):\n\t\t// IPv6 address [2001:db8::1%lo0]:48467\n\t\taddr = strings.Split(str, `]`)[0]\n\t\taddr = strings.Split(addr, `%`)[0]\n\t\taddr = strings.TrimLeft(addr, `[`)\n\tdefault:\n\t\t// IPv4 address 192.0.2.1:48467\n\t\taddr = strings.Split(str, `:`)[0]\n\t}\n\treturn addr\n}", "func extractAddress(str string) string {\n\tvar addr string\n\n\tswitch {\n\tcase strings.Contains(str, `]`):\n\t\t// IPv6 address [2001:db8::1%lo0]:48467\n\t\taddr = strings.Split(str, `]`)[0]\n\t\taddr = strings.Split(addr, `%`)[0]\n\t\taddr = strings.TrimLeft(addr, `[`)\n\tdefault:\n\t\t// IPv4 address 192.0.2.1:48467\n\t\taddr = strings.Split(str, `:`)[0]\n\t}\n\treturn addr\n}", "func getFqdnHostname(osHost string) (string, error) {\n\tips, err := lookupIp(osHost)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tfor _, ip := range ips {\n\t\thosts, err := lookupAddr(ip.String())\n\t\tif err != nil || len(hosts) == 0 
{\n\t\t\treturn \"\", err\n\t\t}\n\t\tif hosts[0] == \"localhost\" {\n\t\t\tcontinue\n\t\t}\n\t\ttrace.Hostname(\"found FQDN hosts: %s\", strings.Join(hosts, \", \"))\n\t\treturn strings.TrimSuffix(hosts[0], \".\"), nil\n\t}\n\treturn \"\", errors.New(\"can't lookup FQDN\")\n}", "func (o TCPHealthCheckOutput) PortName() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v TCPHealthCheck) *string { return v.PortName }).(pulumi.StringPtrOutput)\n}", "func DomainName() string {\n\treturn fmt.Sprintf(\"%s.%s\",\n\t\tAlpha(\"\", 14),\n\t\tItem(\"net\", \"com\", \"org\", \"io\", \"gov\"))\n}", "func ParseAddrPort(str string) (ip net.IP, port uint16, err error) {\n\t// See func net.SplitHostPort(hostport string) (host, port string, err error)\n\tpair := strings.Split(str, \":\")\n\tif len(pair) == 2 {\n\t\tip = net.ParseIP(pair[0])\n\t\tif ip != nil {\n\t\t\tvar v uint64\n\t\t\tv, err = strconv.ParseUint(pair[1], 10, 16)\n\t\t\tif err == nil {\n\t\t\t\tport = uint16(v)\n\t\t\t} else {\n\t\t\t\terr = errf(\"\\\"%s\\\" is invalid port specifier\", pair[1])\n\t\t\t}\n\t\t} else {\n\t\t\terr = errf(\"\\\"%s\\\" not a valid IP address\", pair[0])\n\t\t}\n\t} else {\n\t\terr = errf(\"\\\"%s\\\" is missing port specifier\", str)\n\t}\n\treturn\n}", "func ValidateNameserverIpAndPort(nameServer string) (string, string, error) {\n\tif ip := net.ParseIP(nameServer); ip != nil {\n\t\treturn ip.String(), \"53\", nil\n\t}\n\n\thost, port, err := net.SplitHostPort(nameServer)\n\tif err != nil {\n\t\treturn \"\", \"\", err\n\t}\n\tif ip := net.ParseIP(host); ip == nil {\n\t\treturn \"\", \"\", fmt.Errorf(\"bad IP address: %q\", host)\n\t}\n\tif p, err := strconv.Atoi(port); err != nil || p < 1 || p > 65535 {\n\t\treturn \"\", \"\", fmt.Errorf(\"bad port number: %q\", port)\n\t}\n\treturn host, port, nil\n}", "func HostPort(addr string, port interface{}) string {\n\thost := addr\n\tif strings.Count(addr, \":\") > 0 {\n\t\thost = fmt.Sprintf(\"[%s]\", addr)\n\t}\n\t// TODO check for NATS 
case\n\tif v, ok := port.(string); ok {\n\t\tif v == \"\" {\n\t\t\treturn fmt.Sprintf(\"%s\", host)\n\t\t}\n\t}\n\treturn fmt.Sprintf(\"%s:%v\", host, port)\n}", "func GetHost(URLString string) string {\n\tu, err := url.Parse(URLString)\n\tif err != nil {\n\t\treturn \"\"\n\t}\n\n\treturn u.Hostname()\n}", "func getsockname(fd int, rsa *unix.RawSockaddrAny, addrlen *socklen) (err error)", "func callboxHostName(dut *dut.DUT) (string, error) {\n\tdutHost := dut.HostName()\n\tif host, _, err := net.SplitHostPort(dutHost); err == nil {\n\t\tdutHost = host\n\t}\n\n\tdutHost = strings.TrimSuffix(dutHost, \".cros\")\n\tif dutHost == \"localhost\" {\n\t\treturn \"\", errors.Errorf(\"unable to parse hostname from: %q, localhost not supported\", dutHost)\n\t}\n\n\tif ip := net.ParseIP(dutHost); ip != nil {\n\t\treturn \"\", errors.Errorf(\"unable to parse hostname from: %q, ip:port format not supported\", dutHost)\n\t}\n\n\thostname := strings.Split(dutHost, \"-\")\n\tif len(hostname) < 2 {\n\t\treturn \"\", errors.Errorf(\"unable to parse hostname from: %q, unknown name format\", dutHost)\n\t}\n\n\t// CallboxManager expects callbox hostnames to end in .cros\n\thostname = hostname[0 : len(hostname)-1]\n\treturn fmt.Sprintf(\"%s.cros\", strings.Join(hostname, \"-\")), nil\n}", "func (o TCPHealthCheckResponseOutput) PortName() pulumi.StringOutput {\n\treturn o.ApplyT(func(v TCPHealthCheckResponse) string { return v.PortName }).(pulumi.StringOutput)\n}", "func LooselyGetHost(hostport string) string {\n\thoststart, hostend := 0, 0\n\tif len(hostport) >= 1 && hostport[0] == '[' {\n\t\thoststart = 1\n\t\thostend = strings.IndexByte(hostport, ']')\n\t} else {\n\t\thostend = strings.IndexByte(hostport, ':')\n\t}\n\tif hostend < 0 {\n\t\thostend = len(hostport)\n\t}\n\treturn hostport[hoststart:hostend]\n}", "func SplitHostPort(hostport string) (host, port string, err error) {\n\taddrErr := func(addr, why string) (host, port string, err error) {\n\t\treturn \"\", \"\", 
&net.AddrError{Err: why, Addr: addr}\n\t}\n\n\thoststart, hostend := 0, 0\n\tportstart := len(hostport)\n\tif len(hostport) >= 1 && hostport[0] == '[' {\n\t\thoststart = 1\n\t\thostend = strings.IndexByte(hostport, ']')\n\t\tif hostend < 0 {\n\t\t\treturn addrErr(hostport, \"missing ']' in address\")\n\t\t}\n\t\tportstart = hostend + 1\n\t} else {\n\t\thostend = strings.IndexByte(hostport, ':')\n\t\tif hostend < 0 {\n\t\t\thostend = len(hostport)\n\t\t}\n\t\tportstart = hostend\n\t}\n\tif portstart < len(hostport) {\n\t\tif hostport[portstart] != ':' {\n\t\t\treturn addrErr(hostport, \"invalid character at the end of address, expecting ':'\")\n\t\t}\n\t\tportstart += 1\n\t}\n\n\tport = hostport[portstart:]\n\thost = hostport[hoststart:hostend]\n\n\tif strings.IndexByte(port, ':') >= 0 {\n\t\treturn addrErr(hostport, \"too many colons in suspected port number\")\n\t}\n\tif strings.IndexByte(port, ']') >= 0 {\n\t\treturn addrErr(hostport, \"unexpected ']' in port\")\n\t}\n\tif strings.IndexByte(port, '[') >= 0 {\n\t\treturn addrErr(hostport, \"unexpected '[' in port\")\n\t}\n\tif strings.IndexByte(host, '[') >= 0 {\n\t\treturn addrErr(hostport, \"unexpected '[' in host\")\n\t}\n\tif strings.IndexByte(host, ']') >= 0 {\n\t\treturn addrErr(hostport, \"unexpected ']' in host\")\n\t}\n\n\treturn host, port, nil\n}", "func (sd *ServiceDiscovery) ResolveName(name string) (string, string) {\n\tname = stripDomain(name)\n\tif strings.HasSuffix(name, sd.Domain) {\n\t\tname = name[0 : len(name)-len(sd.Domain)-1]\n\t}\n\tseparator := fmt.Sprintf(\".%s\", serviceSuffix)\n\tvar service string\n\tvar dc string\n\tif strings.Contains(name, separator) {\n\t\tcols := strings.Split(name, separator)\n\t\tservice, dc = cols[0], stripDomain(cols[1])\n\t\tif len(dc) < 1 {\n\t\t\tdc = sd.Dc\n\t\t}\n\t} else {\n\t\tservice, dc = name, sd.Dc\n\t}\n\treturn service, dc\n}", "func fixHostPort(address string, defaultPort int) (fixed string, err error) {\n\t// If the address is wrapped in 
brackets, append a port if necessary.\n\tif address[0] == '[' {\n\t\tend := strings.IndexByte(address, ']')\n\t\tswitch {\n\t\tcase end < 0:\n\t\t\treturn \"\", errors.New(\"missing ']' in address\")\n\t\tcase end+1 == len(address):\n\t\t\treturn fmt.Sprintf(\"%s:%d\", address, defaultPort), nil\n\t\tcase address[end+1] == ':':\n\t\t\treturn address, nil\n\t\tdefault:\n\t\t\treturn \"\", errors.New(\"unexpected character following ']' in address\")\n\t\t}\n\t}\n\n\t// No colons? Must be a port-less IPv4 or domain address.\n\tlast := strings.LastIndexByte(address, ':')\n\tif last < 0 {\n\t\treturn fmt.Sprintf(\"%s:%d\", address, defaultPort), nil\n\t}\n\n\t// Exactly one colon? A port have been included along with an IPv4 or\n\t// domain address. (IPv6 addresses are guaranteed to have more than one\n\t// colon.)\n\tprev := strings.LastIndexByte(address[:last], ':')\n\tif prev < 0 {\n\t\treturn address, nil\n\t}\n\n\t// Two or more colons means we must have an IPv6 address without a port.\n\treturn fmt.Sprintf(\"[%s]:%d\", address, defaultPort), nil\n}", "func Dns(host string) *net.IP {\n\tfor _, dnsServer := range appConfig.Dnsservers {\n\t\tIP := dnss(host, dnsServer+\":53\")\n\t\tif IP != nil {\n\t\t\treturn IP\n\t\t}\n\t}\n\treturn nil\n}", "func (d *Device) GetDNS(domain string) (string, error) {\n\td.Set(TCPDNSLookup, \"\\\"\"+domain+\"\\\"\")\n\tresp, err := d.Response(1000)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tif !strings.Contains(string(resp), \":\") {\n\t\treturn \"\", errors.New(\"GetDNS error:\" + string(resp))\n\t}\n\tr := strings.Split(string(resp), \":\")\n\tif len(r) != 2 {\n\t\treturn \"\", errors.New(\"Invalid domain lookup result\")\n\t}\n\tres := strings.Split(r[1], \"\\r\\n\")\n\treturn strings.Trim(res[0], `\"`), nil\n}", "func NormalizeAddr(addr string) (string, error) {\n\tu, err := ParseAddr(addr)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\thost, _, err := net.SplitHostPort(u.Host)\n\tif err != nil {\n\t\treturn \"\", 
fmt.Errorf(\"failed to parse host-port pair: %v\", err)\n\t} else if host == \"\" {\n\t\treturn \"\", fmt.Errorf(\"no hostname in address: %q\", addr)\n\t}\n\treturn u.String(), nil\n}", "func prepareMatchName(matchName string) string {\n\treturn dns.FQDN(matchName)\n}", "func (c *Conn) GetHostName(property string) (string, error) {\n\tp, err := c.object.GetProperty(dbusInterface + \".\" + property)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tv, b := p.Value().(string)\n\tif !b {\n\t\treturn \"\", fmt.Errorf(\"Empty value received: %s\", property)\n\t}\n\n\treturn v, nil\n}", "func GetHostname(host ...string) string {\n\tif len(host) > 0 && strings.TrimSpace(host[0]) != \"\" {\n\t\tif h, ok := hostHostList[host[0]]; ok {\n\t\t\treturn h\n\t\t}\n\t\tstr := GetCmdStr(\"host %s\", host[0])\n\t\th := \"\"\n\t\tif strings.Contains(str, \"has address\") {\n\t\t\th = GetCmdStr(\"host %s|awk '{print $1}'\", host[0])\n\t\t\thostHostList[host[0]] = h\n\t\t} else if strings.Contains(str, \"domain name pointer\") {\n\t\t\th = strings.TrimSuffix(GetCmdStr(\"host %s|awk '{print $5}'\", host[0]), \".\")\n\t\t\thostHostList[host[0]] = h\n\t\t}\n\t\treturn h\n\t}\n\tif chostname == \"\" {\n\t\th, err := os.Hostname()\n\t\tif err != nil {\n\t\t\tchostname = GetCmdStr(\"hostname\")\n\t\t} else {\n\t\t\tchostname = h\n\t\t}\n\t}\n\treturn chostname\n}", "func parseAdvertiseAddr(advAddr string, port int) (string, int) {\n\treturn advAddr, port\n\n\t// bug: if use domain, always return empty host\n\t/*m, e := regexp.Match(ipv4Pattern, []byte(advAddr))\n\t// if parse error, use serve port and parsed ip address\n\tif e != nil {\n\t\treturn \"\", port\n\t}\n\tif m {\n\t\treturn advAddr, port\n\t}\n\n\tm, e1 := regexp.Match(ipv4WithPortPattern, []byte(advAddr))\n\t// if parse error, use serve port and parsed ip address\n\tif e1 != nil {\n\t\treturn \"\", port\n\t}\n\tif m {\n\t\t// 1 5\n\t\tregxp := regexp.MustCompile(ipv4WithPortPattern)\n\t\tadAddr := 
regxp.ReplaceAllString(advAddr, \"${1}\")\n\t\tadPort, _ := strconv.Atoi(regxp.ReplaceAllString(advAddr, \"${5}\"))\n\t\treturn adAddr, adPort\n\t}\n\treturn \"\", port*/\n}", "func splitHost(host string) string {\n\treturn strings.Split(host, \":\")[0]\n}", "func DNSName(str string) bool {\n\tif str == \"\" || len(strings.Replace(str, \".\", \"\", -1)) > 255 {\n\t\t// constraints already violated\n\t\treturn false\n\t}\n\treturn rxDNSName.MatchString(str)\n}", "func getAddrString(host string, port string) string {\n\treturn fmt.Sprintf(\"%s:%s\", host, port)\n}", "func dbnameOfDSN(dsn string) (string, string) {\n\tvar dbname string\n\ti := strings.LastIndex(dsn, \"/\")\n\tif i >= 0 {\n\t\tdbname = dsn[i+1:] // save the database name\n\t\tj := strings.Index(dbname, \"?\")\n\t\tif j >= 0 {\n\t\t\tdbname = dbname[:j]\n\t\t}\n\t\tdsn = dsn[:i+1] // stomp on the database name in conf. Requires trailing '/'.\n\t}\n\n\treturn dbname, dsn\n}", "func HostPort(urlStr string) (string, error) {\n\tu, err := url.Parse(urlStr)\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"could not parse %q as a url: %v\", urlStr, err)\n\t}\n\tif u.Scheme == \"\" {\n\t\treturn \"\", fmt.Errorf(\"url %q has no scheme\", urlStr)\n\t}\n\thostPort := u.Host\n\tif hostPort == \"\" || strings.HasPrefix(hostPort, \":\") {\n\t\treturn \"\", fmt.Errorf(\"url %q has no host\", urlStr)\n\t}\n\tidx := strings.Index(hostPort, \"]\")\n\tif idx == -1 {\n\t\tidx = 0\n\t}\n\tif !strings.Contains(hostPort[idx:], \":\") {\n\t\tif u.Scheme == \"https\" {\n\t\t\thostPort += \":443\"\n\t\t} else {\n\t\t\thostPort += \":80\"\n\t\t}\n\t}\n\treturn hostPort, nil\n}", "func (d DNSSeed) String() string {\n\treturn d.Host\n}", "func Resolve(addr string) (string, error) {\n\tip, port, err := net.SplitHostPort(addr)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tips, err := net.LookupHost(ip)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn net.JoinHostPort(ips[0], port), nil\n}", "func dnsDecodeString(raw 
string) ([]byte, error) {\n\tpad := 8 - (len(raw) % 8)\n\tnb := []byte(raw)\n\tif pad != 8 {\n\t\tnb = make([]byte, len(raw)+pad)\n\t\tcopy(nb, raw)\n\t\tfor index := 0; index < pad; index++ {\n\t\t\tnb[len(raw)+index] = '='\n\t\t}\n\t}\n\treturn sliverBase32.DecodeString(string(nb))\n}", "func parseBindAddr(s string) (address net.Addr, err error) {\n\tconst maxUnixLen = 106\n\n\t// '@' prefix specifies a Linux abstract domain socket.\n\tif runtime.GOOS == \"linux\" && strings.HasPrefix(s, \"@\") {\n\t\tif len(s) > maxUnixLen {\n\t\t\treturn nil, fmt.Errorf(\"sock file length must be less than %d characters\", maxUnixLen)\n\t\t}\n\t\treturn &net.UnixAddr{Name: s, Net: \"unix\"}, nil\n\t}\n\n\tif strings.Contains(s, \"/\") {\n\t\tif !filepath.IsAbs(s) {\n\t\t\treturn nil, errors.New(\"sock file must be an absolute path\")\n\t\t} else if len(s) > maxUnixLen {\n\t\t\treturn nil, fmt.Errorf(\"sock file length must be less than %d characters\", maxUnixLen)\n\t\t}\n\t\treturn &net.UnixAddr{Name: s, Net: \"unix\"}, nil\n\t}\n\n\t// For TCP, the supplied address string, s, is one of a port, a :port, or a host:port.\n\tip, port := net.IPv4(127, 0, 0, 1), 0\n\n\tif strings.Contains(s, \":\") {\n\t\thost, portString, err := net.SplitHostPort(s)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"invalid addr %q - must be provided as host:port\", s)\n\t\t}\n\t\tif host != \"\" {\n\t\t\tip = net.ParseIP(host)\n\t\t}\n\n\t\tport, err = strconv.Atoi(portString)\n\t} else {\n\t\tport, err = strconv.Atoi(s)\n\t}\n\n\tif err != nil || port < 1 || port > 65534 {\n\t\treturn nil, fmt.Errorf(\"invalid port %d - must be between 1 and 65534\", port)\n\t}\n\treturn &net.TCPAddr{IP: ip, Port: port}, nil\n}", "func (o ZoneSoaRecordOutput) HostName() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v ZoneSoaRecord) *string { return v.HostName }).(pulumi.StringPtrOutput)\n}", "func (o ZoneSoaRecordOutput) HostName() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v ZoneSoaRecord) *string { 
return v.HostName }).(pulumi.StringPtrOutput)\n}", "func (d DNS64) Name() string { return \"dns64\" }", "func getHost(line string) (string, error) {\n\tsplit := strings.Split(line, \" \")\n\tif len(split) == 1 {\n\t\t// plain domain list format\n\t\treturn split[0], nil\n\t} else if len(split) == 2 {\n\t\t// hosts file format\n\t\treturn split[1], nil\n\t} else {\n\t\treturn \"\", errParseHosts\n\t}\n}", "func (o HTTP2HealthCheckOutput) PortName() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v HTTP2HealthCheck) *string { return v.PortName }).(pulumi.StringPtrOutput)\n}", "func (o SSLHealthCheckOutput) PortName() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v SSLHealthCheck) *string { return v.PortName }).(pulumi.StringPtrOutput)\n}", "func GetHost(loc string, www, decode bool) (string, error) {\n\tparsed, err := url.Parse(loc)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\thost, _, err := net.SplitHostPort(parsed.Host)\n\tif err != nil {\n\t\thost = parsed.Host\n\t}\n\tif www {\n\t\tre := regexp.MustCompile(`^www\\.`)\n\t\thost = re.ReplaceAllString(host, \"\")\n\t}\n\n\tif decode {\n\t\treturn idna.ToASCII(host)\n\t}\n\n\treturn host, nil\n}", "func (o TCPHealthCheckPtrOutput) PortName() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v *TCPHealthCheck) *string {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn v.PortName\n\t}).(pulumi.StringPtrOutput)\n}", "func (o HTTPSHealthCheckResponseOutput) PortName() pulumi.StringOutput {\n\treturn o.ApplyT(func(v HTTPSHealthCheckResponse) string { return v.PortName }).(pulumi.StringOutput)\n}", "func Parse(s string) (Name, error) {\n\tformattedName := strings.Trim(strings.ToLower(s), \".\")\n\n\tif strings.HasPrefix(formattedName, \"*.\") {\n\t\tformattedName = strings.Replace(formattedName, \"*.\", \"\", 1)\n\t}\n\tif strings.HasPrefix(formattedName, \"@.\") {\n\t\tformattedName = strings.Replace(formattedName, \"@.\", \"\", 1)\n\t}\n\n\tif len(formattedName) == 0 {\n\t\treturn Name{}, 
fmt.Errorf(\"domain name is empty\")\n\t}\n\n\tvar err error\n\tformattedName, err = idna.ToASCII(formattedName)\n\tif err != nil {\n\t\treturn Name{}, fmt.Errorf(\"domain name %s is invalid: %w\", s, err)\n\t}\n\n\tif err = Validate(formattedName); err != nil {\n\t\treturn Name{}, fmt.Errorf(\"domain name %s is invalid: %w\", s, err)\n\t}\n\n\trule := publicsuffix.DefaultList.Find(formattedName, publicsuffix.DefaultFindOptions)\n\tif rule == nil {\n\t\treturn Name{}, fmt.Errorf(\"domain name %s is invalid: no rule found\", s)\n\t}\n\n\tcategory := eTLDUndefined\n\tif rule.Private {\n\t\tcategory = eTLDPrivate\n\t} else if len(rule.Value) > 0 {\n\t\t// empty value indicates the default rule\n\t\tcategory = eTLDICANN\n\t}\n\n\tdecomposedName := rule.Decompose(formattedName)\n\tif decomposedName[1] == \"\" {\n\t\t// no TLD found, which means it's already a TLD\n\t\treturn Name{\n\t\t\tlabels: []string{formattedName},\n\t\t\tcategory: category,\n\t\t}, nil\n\t}\n\n\tlabelsNoTDL := strings.TrimSuffix(formattedName, decomposedName[1])\n\tlabelsNoTDL = strings.TrimSuffix(labelsNoTDL, \".\")\n\n\tif len(labelsNoTDL) == 0 {\n\t\treturn Name{\n\t\t\tlabels: []string{decomposedName[1]},\n\t\t\tcategory: category,\n\t\t}, nil\n\t}\n\n\treturn Name{\n\t\tlabels: append(strings.Split(labelsNoTDL, \".\"), decomposedName[1]),\n\t\tcategory: category,\n\t}, nil\n}", "func stripPort(s string) string {\n\tix := strings.IndexRune(s, ':')\n\tif ix == -1 {\n\t\treturn s\n\t}\n\treturn s[:ix]\n}", "func (n Node) Hostname() string {\n\tparts := strings.Split(n.Name, \"@\")\n\tif len(parts) >= 2 {\n\t\treturn parts[1]\n\t}\n\treturn \"\"\n}", "func (o TCPHealthCheckResponsePtrOutput) PortName() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v *TCPHealthCheckResponse) *string {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn &v.PortName\n\t}).(pulumi.StringPtrOutput)\n}", "func (o HTTPHealthCheckResponseOutput) PortName() pulumi.StringOutput {\n\treturn o.ApplyT(func(v 
HTTPHealthCheckResponse) string { return v.PortName }).(pulumi.StringOutput)\n}", "func (s *Server) DSN(suffix string) string {\n\treturn fmt.Sprintf(\"root@tcp(127.0.0.1:%d)/%s\", s.Port, suffix)\n}", "func shortHostname(hostname string) string {\n\tif i := strings.Index(hostname, \".\"); i >= 0 {\n\t\treturn hostname[:i]\n\t}\n\treturn hostname\n}", "func shortHostname(hostname string) string {\n\tif i := strings.Index(hostname, \".\"); i >= 0 {\n\t\treturn hostname[:i]\n\t}\n\treturn hostname\n}", "func extractIPv4(ptr string) string {\n\ts := strings.Replace(ptr, \".in-addr.arpa\", \"\", 1)\n\twords := strings.Split(s, \".\")\n\tfor i, j := 0, len(words)-1; i < j; i, j = i+1, j-1 {\n\t\twords[i], words[j] = words[j], words[i]\n\t}\n\treturn strings.Join(words, \".\")\n}", "func RemovePort(addressWithPort string) string {\n\thost, _, err := net.SplitHostPort(addressWithPort)\n\tif err != nil {\n\t\tklog.Errorf(\"Split host and port for a service name has an error:%v\\n\", err)\n\t\t// returning the original address instead if the address has a incorrect format\n\t\treturn addressWithPort\n\t}\n\treturn host\n}", "func DecodeAddr(address []byte) string {\n\tvar stringAddr string\n\tvar ip []byte\n\tvar port []byte\n\n\tip = address[:4]\n\tport = address[4:]\n\n\t// Decode IP\n\tfor index, octet := range ip {\n\t\tstringAddr = stringAddr + strconv.Itoa(int(octet))\n\t\tif index != 3 {\n\t\t\tstringAddr += \".\"\n\t\t}\n\t}\n\tstringAddr += \":\"\n\n\t// Decode Port\n\tb := make([]byte, 8)\n\tfor i := 0; i < 6; i++ {\n\t\tb[i] = byte(0)\n\t}\n\tb[6] = port[0]\n\tb[7] = port[1]\n\tp := binary.BigEndian.Uint64(b)\n\tstringAddr += strconv.FormatUint(p, 10)\n\t//fmt.Println(\"Complete IP:\", stringAddr)\n\treturn stringAddr\n}", "func lookupDomainName(domainName string) string {\n\tif du, ok := domainUuid[domainName]; ok {\n\t\treturn du\n\t}\n\treturn \"\"\n}", "func SplitAddress(addr string) (string, int) {\n\ts := strings.Split(addr, \":\")\n\thostname := 
s[0]\n\tport, _ := strconv.Atoi(s[1])\n\treturn hostname, port\n}", "func (o HTTP2HealthCheckResponseOutput) PortName() pulumi.StringOutput {\n\treturn o.ApplyT(func(v HTTP2HealthCheckResponse) string { return v.PortName }).(pulumi.StringOutput)\n}", "func splitHostPort(addr string) (string, int, error) {\n\thost, sPort, err := net.SplitHostPort(addr)\n\tif err != nil {\n\t\tlog.Errorf(\"Could not split network address: %v\", err)\n\t\treturn \"\", 0, errors.Wrap(err)\n\t}\n\tport, err := strconv.Atoi(sPort)\n\tif err != nil {\n\t\tlog.Errorf(\"No port number found %v\", err)\n\t\treturn \"\", 0, errors.Wrap(err)\n\t}\n\treturn host, port, nil\n}", "func Get() (host string, domain string, full string, err error) {\n\thost, err = os.Hostname()\n\tif err != nil {\n\t\treturn\n\t}\n\thost = removeTrailingDot(host)\n\thost, domain = split2(host, '.')\n\tif domain != \"\" {\n\t\tfull = host + \".\" + domain\n\t} else {\n\t\tfull, err = resolveNetFullname(host)\n\t\tif err == nil {\n\t\t\tfull = removeTrailingDot(full)\n\t\t\thost, domain = split2(full, '.')\n\t\t}\n\t}\n\treturn\n}", "func (o SSLHealthCheckResponseOutput) PortName() pulumi.StringOutput {\n\treturn o.ApplyT(func(v SSLHealthCheckResponse) string { return v.PortName }).(pulumi.StringOutput)\n}", "func splitDockerDomain(name string) (domain, remainder string) {\n\ti := strings.IndexRune(name, '/')\n\tif i == -1 || (!strings.ContainsAny(name[:i], \".:\") && name[:i] != \"localhost\") {\n\t\tdomain, remainder = defaultDomain, name\n\t} else {\n\t\tdomain, remainder = name[:i], name[i+1:]\n\t}\n\tif domain == legacyDefaultDomain {\n\t\tdomain = defaultDomain\n\t}\n\tif domain == defaultDomain && !strings.ContainsRune(remainder, '/') {\n\t\tremainder = officialRepoName + \"/\" + remainder\n\t}\n\treturn\n}", "func splitDockerDomain(name string) (domain, remainder string) {\n\ti := strings.IndexRune(name, '/')\n\tif i == -1 || (!strings.ContainsAny(name[:i], \".:\") && name[:i] != \"localhost\") {\n\t\tdomain, 
remainder = defaultDomain, name\n\t} else {\n\t\tdomain, remainder = name[:i], name[i+1:]\n\t}\n\tif domain == legacyDefaultDomain {\n\t\tdomain = defaultDomain\n\t}\n\tif domain == defaultDomain && !strings.ContainsRune(remainder, '/') {\n\t\tremainder = officialRepoName + \"/\" + remainder\n\t}\n\treturn\n}", "func hostnameForService(svc string) string {\n\n\tparts := strings.Split(svc, \"/\")\n\tif len(parts) < 2 {\n\t\treturn parts[0]\n\t}\n\tif len(parts) > 2 {\n\t\tlog.Printf(\"Malformated service identifier [%s] - Hostname will be truncated\", svc)\n\t}\n\treturn fmt.Sprintf(\"%s.%s\", parts[1], parts[0])\n\n}" ]
[ "0.6603416", "0.6248293", "0.61732805", "0.61518425", "0.60280794", "0.59298724", "0.5926241", "0.5838026", "0.5804168", "0.57999206", "0.57859087", "0.5661244", "0.56175256", "0.559655", "0.5595708", "0.5587074", "0.5566612", "0.55407655", "0.55281454", "0.5504938", "0.55014837", "0.54476506", "0.5445435", "0.5429305", "0.541024", "0.54022217", "0.5393858", "0.53937256", "0.5377149", "0.5371813", "0.53676784", "0.5345948", "0.5339239", "0.53294593", "0.53161824", "0.5315227", "0.52965254", "0.52807206", "0.52790046", "0.5272594", "0.5272594", "0.5267359", "0.5263078", "0.5256207", "0.52482736", "0.52449256", "0.5244048", "0.5240452", "0.5234649", "0.52339226", "0.5228818", "0.5220929", "0.5220192", "0.52177763", "0.5207422", "0.5205988", "0.520409", "0.5202219", "0.5199763", "0.51988316", "0.51981926", "0.51903987", "0.51881224", "0.5181254", "0.51797265", "0.5179401", "0.5174353", "0.5173946", "0.51570493", "0.5151287", "0.51480293", "0.5134729", "0.5134729", "0.51208085", "0.51129586", "0.5112419", "0.5111724", "0.51080287", "0.51045626", "0.51002204", "0.50960535", "0.5087783", "0.50862175", "0.50801736", "0.5079264", "0.5076809", "0.50730866", "0.50730866", "0.50698197", "0.5068049", "0.5067723", "0.5063858", "0.5056818", "0.50492495", "0.504386", "0.50406754", "0.5038285", "0.5035944", "0.5035944", "0.5033655" ]
0.7793767
0
MultiCloser implements io.Close, it sequentially calls Close() on each object
MultiCloser реализует io.Close, последовательно вызывает Close() для каждого объекта
func MultiCloser(closers ...io.Closer) io.Closer { return &multiCloser{ closers: closers, } }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (mc *MultiCloser) Close() error {\n\tmc.mu.Lock()\n\tdefer mc.mu.Unlock()\n\tresult := &multierror.Error{ErrorFormat: utils.SingleLineErrorFormatter}\n\n\tfor _, closer := range mc.closers {\n\t\tif err := closer.Close(); err != nil {\n\t\t\tresult = multierror.Append(result, err)\n\t\t}\n\t}\n\n\tmc.closers = []io.Closer{}\n\treturn result.ErrorOrNil()\n}", "func (m *IOClosers) Close() (err error) {\n\tfor _, c := range m.closers {\n\t\tif err = c.Close(); err != nil {\n\t\t\tlogger.Errorf(\"Error closing write strream: %s\", err.Error())\n\t\t}\n\t}\n\treturn\n}", "func (mw *MultiWriter) Close() error {\n\tmw.Lock()\n\tdefer mw.Unlock()\n\n\tvar err error\n\n\tfor _, out := range mw.outputs {\n\t\tif e1 := out.Close(); e1 != nil {\n\t\t\terr = e1\n\t\t}\n\t}\n\tmw.outputs = nil\n\tmw.closed = true\n\treturn err\n}", "func (mw *multiWriter) Close() error {\n\tmw.Lock()\n\tfor _, w := range mw.writers {\n\t\tw.Close()\n\t}\n\tmw.writers = nil\n\tmw.Unlock()\n\treturn nil\n}", "func closeAll(closers ...xclose.Closer) error {\n\tmultiErr := xerrors.NewMultiError()\n\tfor _, closer := range closers {\n\t\tif err := closer.Close(); err != nil {\n\t\t\tmultiErr = multiErr.Add(err)\n\t\t}\n\t}\n\treturn multiErr.FinalError()\n}", "func closeMultipleSrvs(srvs []*httptest.Server) {\n\tfor _, srv := range srvs {\n\t\tsrv.Close()\n\t}\n}", "func (bc *BatchCloser) Close() error {\n\tvar errs errorsbp.Batch\n\tfor _, closer := range bc.closers {\n\t\terrs.AddPrefix(fmt.Sprintf(\"%#v\", closer), closer.Close())\n\t}\n\treturn errs.Compile()\n}", "func (d *Death) closeObjects(closer closer, done chan<- closer) {\n\terr := closer.C.Close()\n\tif err != nil {\n\t\td.log.Error(err)\n\t\tcloser.Err = err\n\t}\n\tdone <- closer\n}", "func (c *Closer) CloseAll() {\n\tfor i := len(c.closers) - 1; i >= 0; i-- {\n\t\tClose(c.closers[i])\n\t}\n}", "func (p *AsyncPipeline) Close() error {\n\tvar firstErr error\n\tfor _, op := range p.ops {\n\t\terr := op.Close()\n\t\tif firstErr == 
nil {\n\t\t\tfirstErr = err\n\t\t}\n\t}\n\treturn firstErr\n}", "func (c *Client) Close() error {\n\tfor _, wgc := range c.cs {\n\t\tif err := wgc.Close(); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}", "func (m *MultiConnPool) Close() {\n\tfor _, p := range m.Pools {\n\t\tp.Close()\n\t}\n}", "func (w *MultiWriter) Close() error {\n\terrs := new(multierror.Error)\n\tfor _, w := range w.writers {\n\t\terrs = multierror.Append(w.Close())\n\t}\n\treturn errs.ErrorOrNil()\n}", "func (i *Iterator) Close() {}", "func (e *BaseExecutor) Close() error {\n\tvar firstErr error\n\tfor _, src := range e.children {\n\t\tif err := src.Close(); err != nil && firstErr == nil {\n\t\t\tfirstErr = err\n\t\t}\n\t}\n\treturn firstErr\n}", "func (c *OneClient) Close() error {\n\tmultierror := errors.NewMultiError(nil)\n\tc.mu.RLock()\n\tfor _, v := range c.xclients {\n\t\terr := v.Close()\n\t\tif err != nil {\n\t\t\tmultierror.Append(err)\n\t\t}\n\t}\n\tc.mu.RUnlock()\n\n\tif len(multierror.Errors) == 0 {\n\t\treturn nil\n\t}\n\treturn multierror\n}", "func (mlog *MultiLogger) Close() {\n\tmlog.Lock()\n\tmlog.isClosed = true\n\tclose(mlog.qerr)\n\tclose(mlog.qout)\n\t<-mlog.flushq\n\t<-mlog.flushq\n\tmlog.Unlock()\n}", "func (c *Consumer) closeAll() {\n\tclose(c.messages)\n\tclose(c.errors)\n\tc.zoo.Close()\n\tc.consumer.Close()\n\tif c.ownClient {\n\t\tc.client.Close()\n\t}\n}", "func (c *Closer) Close() (err error) {\n\tc.o.Do(func() {\n\t\t// Get close funcs\n\t\tc.m.Lock()\n\t\tfs := append([]CloseFunc{}, c.fs...)\n\t\tc.m.Unlock()\n\n\t\t// Loop through closers\n\t\tvar errs []error\n\t\tfor _, f := range fs {\n\t\t\tif errC := f(); errC != nil {\n\t\t\t\terrs = append(errs, errC)\n\t\t\t}\n\t\t}\n\n\t\t// Process errors\n\t\tif len(errs) == 1 {\n\t\t\terr = errs[0]\n\t\t} else if len(errs) > 1 {\n\t\t\terr = astierror.NewMultiple(errs)\n\t\t}\n\t})\n\treturn\n}", "func (c *RPCClient) Close() {\n\tfor _, conn := range c.pool {\n\t\tconn.Close()\n\t}\n}", "func (cr 
*ChainReader) Close() error {\n\tfor _, reader := range cr.readers {\n\t\terr := reader.Close()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}", "func (p *Pipeline) Close() {\n\tfor _, pool := range p.pools {\n\t\tclose(pool.terminate)\n\t\tpool.done.Wait()\n\t\tpool.factory.Destroy()\n\t}\n}", "func CloseAll() {\n\tkrw.reader.Lock()\n\tdefer krw.reader.Unlock()\n\t// Closing all opened Readers Connections\n\tfor rp, rc := range krw.Readers {\n\t\trc.Close()\n\t\tdelete(krw.Readers, rp)\n\t}\n\n\tkrw.writer.Lock()\n\tdefer krw.writer.Unlock()\n\t// Closing all opened Writers Connections\n\tfor wp, wc := range krw.Writers {\n\t\twc.Close()\n\t\tdelete(krw.Writers, wp)\n\t}\n}", "func (w *writer) Close() error {\n\tfor name, file := range w.files {\n\t\tif file != nil {\n\t\t\tfile.Close()\n\t\t\tdelete(w.files, name)\n\t\t}\n\t}\n\treturn nil\n}", "func PoolCloseAll(pools []Pool) {\n\tfor _, p := range pools {\n\t\tp.Close()\n\t}\n}", "func (d *Death) closeInMass(closable ...io.Closer) (err error) {\n\n\tcount := len(closable)\n\tsentToClose := make(map[int]closer)\n\t//call close async\n\tdoneClosers := make(chan closer, count)\n\tfor i, c := range closable {\n\t\tname, pkgPath := getPkgPath(c)\n\t\tcloser := closer{Index: i, C: c, Name: name, PKGPath: pkgPath}\n\t\tgo d.closeObjects(closer, doneClosers)\n\t\tsentToClose[i] = closer\n\t}\n\n\t// wait on channel for notifications.\n\ttimer := time.NewTimer(d.timeout)\n\tfailedClosers := []closer{}\n\tfor {\n\t\tselect {\n\t\tcase <-timer.C:\n\t\t\ts := \"failed to close: \"\n\t\t\tpkgs := []string{}\n\t\t\tfor _, c := range sentToClose {\n\t\t\t\tpkgs = append(pkgs, fmt.Sprintf(\"%s/%s\", c.PKGPath, c.Name))\n\t\t\t\td.log.Error(\"Failed to close: \", c.PKGPath, \"/\", c.Name)\n\t\t\t}\n\t\t\treturn fmt.Errorf(\"%s\", fmt.Sprintf(\"%s %s\", s, strings.Join(pkgs, \", \")))\n\t\tcase closer := <-doneClosers:\n\t\t\tdelete(sentToClose, closer.Index)\n\t\t\tcount--\n\t\t\tif closer.Err != nil 
{\n\t\t\t\tfailedClosers = append(failedClosers, closer)\n\t\t\t}\n\n\t\t\td.log.Debug(count, \" object(s) left\")\n\t\t\tif count != 0 || len(sentToClose) != 0 {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tif len(failedClosers) != 0 {\n\t\t\t\terrString := generateErrString(failedClosers)\n\t\t\t\treturn fmt.Errorf(\"errors from closers: %s\", errString)\n\t\t\t}\n\n\t\t\treturn nil\n\t\t}\n\t}\n}", "func closeResources(handler http.Handler, closers ...io.Closer) http.Handler {\n\treturn http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tfor _, closer := range closers {\n\t\t\tdefer closer.Close()\n\t\t}\n\t\thandler.ServeHTTP(w, r)\n\t})\n}", "func (p *tubePool) closeAll(head tube) {\n\tvar next tube\n\tfor head != nil {\n\t\tnext = head.Next()\n\t\thead.SetNext(nil)\n\t\thead.Close()\n\t\thead = next\n\t}\n}", "func (sr *shardResult) Close() {\n\tfor _, series := range sr.blocks {\n\t\tseries.Blocks.Close()\n\t}\n}", "func (r *Reader) Close() error {\n\tvar err error\n\tfor i, n := 0, r.NumR(); i < n; i++ {\n\t\tvar _err error\n\t\tterm := termReader(r.R(i))\n\t\tif term != nil {\n\t\t\t_err = term()\n\t\t}\n\t\tif err == nil && _err != nil {\n\t\t\terr = _err\n\t\t}\n\t}\n\treturn err\n}", "func CloseAll() {\n\tfor _, ps := range poolMaps {\n\t\tfor _, c := range ps {\n\t\t\t_ = c.Close()\n\t\t}\n\t}\n}", "func (iter *BatchObjectIter) Close() {\n\tclose(iter.oidCh)\n}", "func (c *Context) Close() {\n\tfor _, storage := range c.storages {\n\t\tstorage.Disk.Close()\n\t\tc.setEOF()\n\t}\n}", "func Close() {\n\tlog4go.Debug(\"resources destroy, pid:%v\", os.Getpid())\n\tfor name, r := range resources {\n\t\terr := r.Close()\n\t\tif err != nil {\n\t\t\tlog4go.Error(\"resources[%s] destroy failed:%s\", name, err.Error())\n\t\t} else {\n\t\t\tlog4go.Info(\"resources[%s] destroy finish\", name)\n\t\t}\n\t}\n}", "func (c *Container) Close() {\n\tfor _, d := range c.dependencies {\n\t\tif dep, ok := d.(Dependency); ok {\n\t\t\tdep.Close()\n\t\t}\n\t}\n}", "func 
(cl *CompositeLogger) Close() (err error) {\n\tcl.mu.Lock()\n\tdefer cl.mu.Unlock()\n\n\tfor i, logger := range cl.loggers {\n\t\tif i == 0 {\n\t\t\terr = logger.Close()\n\t\t} else {\n\t\t\tlogger.Close()\n\t\t}\n\t}\n\treturn\n}", "func (r *RemoteSSH) Close() error {\n\tvar err error\n\tfor i := 0; i < r.n; i++ {\n\t\tclient := <-r.pool\n\t\terr = client.SendGoodbye()\n\t}\n\treturn err\n}", "func (h *proxyHandler) close() {\n\tfor _, image := range h.images {\n\t\terr := image.src.Close()\n\t\tif err != nil {\n\t\t\t// This shouldn't be fatal\n\t\t\tlogrus.Warnf(\"Failed to close image %s: %v\", transports.ImageName(image.cachedimg.Reference()), err)\n\t\t}\n\t}\n}", "func (p Pipe) Close() error {\n\tfor receiver := range p.receivers {\n\t\t// errors from one of the receivers shouldn't affect any others\n\t\treceiver.Close()\n\t}\n\treturn nil\n}", "func (trans *Transcoder) Close() (err error) {\n\tfor _, stream := range trans.streams {\n\t\tif stream.aenc != nil {\n\t\t\tstream.aenc.Close()\n\t\t\tstream.aenc = nil\n\t\t}\n\t\tif stream.adec != nil {\n\t\t\tstream.adec.Close()\n\t\t\tstream.adec = nil\n\t\t}\n\t}\n\ttrans.streams = nil\n\treturn\n}", "func (writer *FileLogWriter) Close() {\n\tfor l := writer.level; l <= _LEVEL_MAX; l++ {\n\t\twriter.files[l].close()\n\t\twriter.files[l] = nil\n\t}\n}", "func (obj *Object) Close() error {\n\tif obj == nil {\n\t\treturn nil\n\t}\n\n\tif obj.closefuncs == nil {\n\t\treturn nil\n\t}\n\n\tvar mErr *multierror.Error\n\tfor _, f := range obj.closefuncs {\n\t\terr := f()\n\t\tif err != nil {\n\t\t\tmErr = multierror.Append(mErr, err)\n\t\t}\n\t}\n\treturn errors.WithStack(helpers.FlattenMultiError(mErr))\n}", "func (l *ChannelList) Close() {\n\tchannels := make([]*Channel, 0, l.Count())\n\tfor _, c := range l.channels {\n\t\tc.Lock()\n\t\tfor _, c := range c.data {\n\t\t\tchannels = append(channels, c)\n\t\t}\n\t\tc.Unlock()\n\t}\n\t// close all channels\n\tfor _, c := range channels {\n\t\tif err := c.Close(); err != 
nil {\n\t\t\tlog.Error(\"c.Close() error(%v)\", err)\n\t\t}\n\t}\n}", "func (p *Pool) Close() error {\n\tp.mu.Lock()\n\tdefer p.mu.Unlock()\n\n\tclose(p.items)\n\tfor v := range p.items {\n\t\tif c, ok := v.(closer); ok {\n\t\t\tif err := c.Close(); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\n\treturn nil\n}", "func (p *Pools) Close() {\n\tfor _, pool := range p.pools {\n\t\tpool.close()\n\t}\n\tp.Flush(true)\n}", "func CloseAsyncPipeline() {\n\tfor _, pool := range asyncPipelinePools {\n\t\tfor _, pipeline := range pool.pipelines {\n\t\t\tpipeline.Close()\n\t\t}\n\t}\n\tasyncPipelinePools = nil\n}", "func (self *Transcoder) Close() (err error) {\n\tfor _, stream := range self.streams {\n\t\tif stream.aenc != nil {\n\t\t\tstream.aenc.Close()\n\t\t\tstream.aenc = nil\n\t\t}\n\t\tif stream.adec != nil {\n\t\t\tstream.adec.Close()\n\t\t\tstream.adec = nil\n\t\t}\n\t}\n\tself.streams = nil\n\treturn\n}", "func (transmuxer *Transmuxer) Close() {\n\tif transmuxer.closed {\n\t\treturn\n\t}\n\n\tfor _, streamer := range transmuxer.Streamers {\n\t\tstreamer.Close()\n\t}\n\n\ttransmuxer.FinalStream.Close()\n\n\ttransmuxer.closed = true\n\ttransmuxer.running = false\n}", "func (iter *logIterator) close() {\n\tfor _, f := range iter.pending {\n\t\t_ = f.Close()\n\t}\n}", "func (b *Balancer) Close() (err error) {\n\tfor _, b := range b.selector {\n\t\tif e := b.Close(); e != nil {\n\t\t\terr = e\n\t\t}\n\t}\n\treturn\n}", "func (r *ShardReader) Close() {\n\tctx := vcontext.Background()\n\tfor f := range r.fieldReaders {\n\t\tfr := r.fieldReaders[f]\n\t\tif fr != nil {\n\t\t\tif fr.rio != nil { // fr.rio =nil on error\n\t\t\t\tfr.err.Set(fr.rio.Finish())\n\t\t\t}\n\t\t\tif fr.in != nil { // fr.in =nil on error\n\t\t\t\tr.err.Set(fr.in.Close(ctx))\n\t\t\t}\n\t\t}\n\t}\n}", "func (x *Indexer) Close() error {\n\tdefer x.lock.Close()\n\tfor i := 0; i < x.config.NumShatters; i++ {\n\t\tx.shatter <- &shatterReq{shutdown: true}\n\t\t// no more shatters running, each 
waits\n\t\t// for all shards to complete before\n\t\t// returning => shards are no longer busy.\n\t}\n\tfor i := 0; i < x.config.NumShards; i++ {\n\t\tclose(x.shards[i].PostChan())\n\t}\n\tif err := x.config.Write(); err != nil {\n\t\treturn err\n\t}\n\tif err := x.writeFiles(); err != nil {\n\t\treturn err\n\t}\n\tif err := x.dmds.Close(); err != nil {\n\t\treturn err\n\t}\n\terrs := make(chan error, len(x.shards))\n\tfor i := range x.shards {\n\t\tb := &x.shards[i]\n\t\tgo func(b *shard.Indexer) {\n\t\t\terrs <- b.Close()\n\t\t}(b)\n\t}\n\tvar err error\n\tfor i := range x.shards {\n\t\tierr := <-errs\n\t\tif err != nil && ierr != nil {\n\t\t\tlog.Printf(\"dupy.Index.Close: dropping error %s from bucket %d\", ierr, i)\n\t\t} else if ierr != nil {\n\t\t\terr = ierr\n\t\t}\n\t}\n\treturn err\n}", "func ReadCloserClose(rc *zip.ReadCloser,) error", "func (e *HTTPExecuter) Close() {}", "func (c *RemoteHTTP) Close() {\n\tc.HTTPClient = nil\n\n\tif c.SSHSession != nil {\n\t\tc.SSHSession.Close()\n\t\tc.SSHSession = nil\n\t}\n\n\tif c.SSHClient != nil {\n\t\tc.SSHClient.Close()\n\t\tc.SSHClient = nil\n\t}\n}", "func (nopCloser) Close() error { return nil }", "func (a *API) Close() {\n\ta.lock.Lock()\n\tdefer a.lock.Unlock()\n\n\tfor _, clients := range a.clients {\n\t\tfor _, client := range clients {\n\t\t\tclient.Close()\n\t\t}\n\t}\n\tfor k := range a.clients {\n\t\tdelete(a.clients, k)\n\t}\n}", "func (s *RandomAggr) Close() error {\n\tvar err error\n\tfor _, v := range s.sources {\n\t\tif closer, ok := v.Reader.(io.Closer); ok {\n\t\t\titemErr := closer.Close()\n\t\t\tif err == nil {\n\t\t\t\terr = itemErr\n\t\t\t}\n\t\t}\n\t}\n\n\treturn err\n}", "func (s IOStreams) Close() error {\n\t// TODO\n\treturn nil\n}", "func (c *refCountedCloser) Close(ctx context.Context) error {\n\tremaining := c.refCount.Add(-1)\n\n\tif remaining != 0 {\n\t\treturn nil\n\t}\n\n\tif c.closed.Load() {\n\t\tpanic(\"already closed\")\n\t}\n\n\tc.closed.Store(true)\n\n\tvar errors 
[]error\n\n\tfor _, closer := range c.closers {\n\t\terrors = append(errors, closer(ctx))\n\t}\n\n\t//nolint:wrapcheck\n\treturn multierr.Combine(errors...)\n}", "func (a *AppTracer) Close() {\n\tfor _, cli := range a.ClientList {\n\t\tcli.Close()\n\t}\n}", "func (b *Batch) Close() {\n}", "func (c *Copier) Close() {\n\tc.once.Do(func() {\n\t\tif c.dst != nil {\n\t\t\tfor _, d := range c.dst {\n\t\t\t\tif err := d.Close(); err != nil {\n\t\t\t\t\tlogrus.Errorf(\"close log driver failure %s\", err.Error())\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\tclose(c.closed)\n\t})\n}", "func (mu *MultipartUpload) Close() error {\n\terr := mu.multiWriter.Close()\n\tif err != nil {\n\t\treturn errors.Wrap(err, 1)\n\t}\n\n\terr = mu.bufferedWriter.Flush()\n\tif err != nil {\n\t\treturn errors.Wrap(err, 1)\n\t}\n\n\terr = mu.pipeWriter.Close()\n\tif err != nil {\n\t\treturn errors.Wrap(err, 1)\n\t}\n\n\treturn nil\n}", "func (fr *FormatReaders) Close() (rtnerr error) {\n\tvar err error\n\tfor i := len(fr.readers) - 1; i >= 0; i-- {\n\t\terr = fr.readers[i].rdr.Close()\n\t\tif err != nil {\n\t\t\trtnerr = err // tracking last error\n\t\t}\n\t}\n\treturn rtnerr\n}", "func (p *hardwareProfiler) Close() error {\n\tvar err error\n\tfor _, profiler := range p.profilers {\n\t\terr = multierr.Append(err, profiler.Close())\n\t}\n\treturn err\n}", "func (s *ModelSubscriber) Close() error {\n\tfor _, rec := range s.receivers {\n\t\tif closer, _ := rec.(io.Closer); closer != nil {\n\t\t\tif err := closer.Close(); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\treturn nil\n}", "func Close() {\n\tatomic.StoreInt64(&writingStopFlag, 1)\n\twritingFilesLock.Lock()\n\tfor name, fh := range writingFiles {\n\t\tfh.Sync()\n\t\tfh.Close()\n\t\tlog.Debug(\"fh-close\", \"name\", name)\n\t}\n\twritingFilesLock.Unlock()\n}", "func CloseAll() {\n\tserfClients.closeAllSerfs()\n}", "func (c FinalOutput) Close() {}", "func (x *Data) Close() {\n for _, c := range x.productChannels {\n close(c)\n }\n 
x.processorsRunning.Wait()\n close(x.ResultChannel)\n}", "func (r *recorders) close() {\n\tr.Lock()\n\tdefer r.Unlock()\n\n\tfor _, re := range r.list {\n\t\tre.Close()\n\t}\n}", "func (c *Closer) Close() error {\n\tc.CloseAll()\n\treturn nil\n}", "func (it *BaseLibraryContentObjectCreatedIterator) Close() error {\n\tit.sub.Unsubscribe()\n\treturn nil\n}", "func (it *DogsOfRomeScoobyIterator) Close() error {\n\tit.sub.Unsubscribe()\n\treturn nil\n}", "func (b *bufCloser) Close() error { return nil }", "func (b *Balancer) Close() error {\n\tvar cErr error\n\n\tfor _, backend := range b.pool {\n\t\tif err := backend.close(); err != nil {\n\t\t\tcErr = err\n\t\t}\n\t}\n\n\treturn cErr\n}", "func (m compKeyMap) closeAll(blockIdx uint64, txIdx uint64, availableIdx uint64) error {\n\tfor _, d := range m {\n\t\terr := d.writer.CloseList()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t// Check if block height was reached\n\t\tif blockIdx < availableIdx || (d.searchBlockLimit <= blockIdx && d.searchTxLimit <= txIdx) {\n\t\t\terr = d.writer.AddField(\"blockStoreHeightSufficient\", true)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t} else {\n\t\t\terr = d.writer.AddField(\"blockStoreHeightSufficient\", false)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t\terr = d.writer.CloseObject()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\terr = d.writer.Close()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}", "func (r *relation) Close() {\n\tfor _, v := range r.mp {\n\t\tv.Close()\n\t}\n}", "func (_m *MockMultiReaderIterator) Close() {\n\t_m.ctrl.Call(_m, \"Close\")\n}", "func (c *Client) Close() {\n\tfor i, client := range clients {\n\t\tif client == c {\n\t\t\tclients = append(clients[:i], clients[i+1:]...)\n\t\t\tbreak\n\t\t}\n\t}\n\n\tclose(c.send)\n\tc.conn.Close()\n\tlog.Printf(\"close connection. 
addr: %s\", c.conn.RemoteAddr())\n}", "func (m *agentEndpointsManager) close() {\n\tm.mutex.Lock()\n\tdefer m.mutex.Unlock()\n\tlogger.Info(\"closing all agent endpoints...\")\n\tfor _, endpoint := range m.endpoints {\n\t\tlogger.Infof(\"closing agent (id == %s) endpoint\", endpoint.id)\n\t\tendpoint.close()\n\t}\n}", "func (conn *Connection) Close() {\n\tclose(conn.directChan)\n\tclose(conn.rpcChan)\n\tfor direct := range conn.directChan {\n\t\terr := direct.Close()\n\t\tif err != nil{\n\t\t\tlog.Errorln(err)\n\t\t}\n\t}\n\tfor client := range conn.rpcChan {\n\t\terr := client.Close()\n\t\tif err != nil{\n\t\t\tlog.Errorln(err)\n\t\t}\n\t}\n}", "func (it *BaseContentContentObjectCreateIterator) Close() error {\n\tit.sub.Unsubscribe()\n\treturn nil\n}", "func (c *Client) Close() error {\n\tfor _, s := range c.subs {\n\t\ts.Unsubscribe()\n\t}\n\treturn nil\n}", "func (c *ClosablePool) Close(timeout time.Duration) error {\n\tstarted := time.Now()\n\n\ttiers := []int{}\n\tfor i := range c.closables {\n\t\ttiers = append(tiers, i)\n\t}\n\tsort.Ints(tiers)\n\n\tfor _, i := range tiers {\n\t\ttier := c.closables[i]\n\t\tfor j := range tier {\n\t\t\ttier[j].CloseAsync()\n\t\t}\n\t\tfor j := range tier {\n\t\t\tif err := tier[j].WaitForClose(timeout - time.Since(started)); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t\tdelete(c.closables, i)\n\t}\n\treturn nil\n}", "func (e *RawExecutor) close() {\n\tif e != nil {\n\t\tfor _, m := range e.mappers {\n\t\t\tm.Close()\n\t\t}\n\t}\n}", "func (pool Pool) Close() error {\n\tfor conn := range pool.connC {\n\t\tfor _, handler := range pool.connCloseHandler {\n\t\t\tif err := handler(conn); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t\tif err := conn.Close(); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}", "func (c *Client) Close() error {\n\tc.mu.Lock()\n\tfactories := c.pool\n\tc.pool = nil\n\tc.mu.Unlock()\n\n\tif factories == nil {\n\t\treturn nil\n\t}\n\n\tvar (\n\t\terrInfo []string\n\t\terr 
error\n\t)\n\n\tfor _, c := range factories {\n\t\twrapperCli, ok := c.(*WrapperClient)\n\t\tif !ok {\n\t\t\terrInfo = append(errInfo, \"failed to convert Factory interface to *WrapperClient\")\n\t\t\tcontinue\n\t\t}\n\n\t\tif err := wrapperCli.client.Close(); err != nil {\n\t\t\terrInfo = append(errInfo, err.Error())\n\t\t\tcontinue\n\t\t}\n\t}\n\n\tif len(errInfo) > 0 {\n\t\terr = fmt.Errorf(\"failed to close client pool: %s\", errInfo)\n\t}\n\treturn err\n}", "func (f *factory) Close() error {\n\tif f.closed.CAS(false, true) {\n\t\tf.mutex.Lock()\n\t\tdefer f.mutex.Unlock()\n\n\t\tfor _, page := range f.pages {\n\t\t\tif err := page.Close(); err != nil {\n\t\t\t\tpageLogger.Error(\"close mapped page data err\",\n\t\t\t\t\tlogger.String(\"path\", f.path), logger.Error(err))\n\t\t\t}\n\t\t}\n\t}\n\treturn nil\n}", "func parallelMultiReader(readers ...io.Reader) io.ReadCloser {\n\tr, w := io.Pipe()\n\n\terrChs := make([]chan error, len(readers))\n\tfor j, reader := range readers {\n\t\terrChs[j] = make(chan error, 1)\n\t\tgo func(errCh chan<- error, r io.Reader) {\n\t\t\t_, err := io.Copy(w, r)\n\t\t\terrCh <- err\n\t\t}(errChs[j], reader)\n\t}\n\n\tgo func() {\n\t\t// After all readers have stopped, propagate EOF.\n\t\tfor j, errCh := range errChs {\n\t\t\tif err := <-errCh; err != nil {\n\t\t\t\tglog.Warningf(\"Error copying from reader %d: %s\", j, err)\n\t\t\t}\n\t\t}\n\t\tw.Close()\n\t}()\n\n\treturn r\n}", "func (coll *Collection) Close() []error {\n\terrs := []error{}\n\tfor secName, prog := range coll.Programs {\n\t\tif errTmp := prog.Close(); errTmp != nil {\n\t\t\terrs = append(errs, errors.Wrapf(errTmp, \"couldn't close program %s\", secName))\n\t\t}\n\t}\n\tfor secName, m := range coll.Maps {\n\t\tif errTmp := m.Close(); errTmp != nil {\n\t\t\terrs = append(errs, errors.Wrapf(errTmp, \"couldn't close map %s\", secName))\n\t\t}\n\t}\n\treturn errs\n}", "func CloseEnvs(envs []Env) {\n\tfor _, e := range envs {\n\t\te.Close()\n\t}\n}", "func 
TryClose(maybeClosers ...interface{}) {\n\tfor _, maybeCloser := range maybeClosers {\n\t\tif closer, ok := maybeCloser.(io.Closer); ok {\n\t\t\t_ = closer.Close()\n\t\t}\n\t}\n}", "func (nc *NoiseClient) Close() error {\n\tnc.mu.Lock()\n\tconns := nc.connPool\n\tnc.connPool = nil\n\tnc.mu.Unlock()\n\n\tvar errors []error\n\tfor _, c := range conns {\n\t\tif err := c.Close(); err != nil {\n\t\t\terrors = append(errors, err)\n\t\t}\n\t}\n\treturn multierr.New(errors...)\n}", "func (it *SimpleMultiSigExecuteIterator) Close() error {\n\tit.sub.Unsubscribe()\n\treturn nil\n}", "func (p *pool) close() {\n\tif p.closed {\n\t\treturn\n\t}\n\tp.mu.Lock()\n\tdefer p.mu.Unlock()\n\n\tp.closed = true\n\tclose(p.readyChannel)\n\n\tfor connIndex := range p.connList {\n\t\tp.connList[connIndex].close()\n\t}\n\tp.connList = nil\n}", "func (c *Channel) Close() {\n\tfor uuid := range c.clients.m {\n\t\tc.removeClient(uuid)\n\t}\n}", "func (c *ChannelPool) Close() {\n\tc.mu.Lock()\n\tconns := c.conns\n\tc.conns = nil\n\tc.factory = nil\n\tc.mu.Unlock()\n\n\tif conns == nil {\n\t\treturn\n\t}\n\n\tclose(conns)\n\tfor conn := range conns {\n\t\tconn.Close()\n\t}\n}", "func (it *DogsOfRomeRomulusIterator) Close() error {\n\tit.sub.Unsubscribe()\n\treturn nil\n}" ]
[ "0.7285341", "0.69266", "0.68220395", "0.6799848", "0.6782212", "0.676067", "0.67010254", "0.6576231", "0.6567459", "0.65182304", "0.6500774", "0.64879596", "0.645716", "0.6314351", "0.62897563", "0.62881887", "0.62770766", "0.62508285", "0.62197375", "0.62140936", "0.61650044", "0.6159571", "0.6154363", "0.61302245", "0.612434", "0.61194974", "0.6111266", "0.61098665", "0.6094182", "0.60907876", "0.608526", "0.60816634", "0.606246", "0.6045381", "0.60430133", "0.60076827", "0.6002082", "0.59970796", "0.59916246", "0.59908706", "0.5966395", "0.59507716", "0.59501046", "0.5914453", "0.59108484", "0.59042484", "0.59042156", "0.5904178", "0.5889518", "0.58724564", "0.5854216", "0.5847633", "0.5846979", "0.58469033", "0.5837563", "0.58216405", "0.5811925", "0.57858", "0.5785144", "0.5767068", "0.5764046", "0.5740587", "0.57404643", "0.5733851", "0.57287663", "0.57279736", "0.57244056", "0.5707181", "0.5701457", "0.5701434", "0.57011676", "0.56997406", "0.56844276", "0.5668773", "0.56683636", "0.56656724", "0.56648684", "0.56645423", "0.5660724", "0.5653165", "0.5652253", "0.5642935", "0.5640629", "0.5634419", "0.562989", "0.56199354", "0.5617954", "0.5616917", "0.56143755", "0.5602238", "0.55901134", "0.55900913", "0.55890715", "0.5588813", "0.55876184", "0.55703855", "0.5560555", "0.555829", "0.5555988", "0.5555861" ]
0.74036545
0
OpaqueAccessDenied returns a generic NotFound instead of AccessDenied so as to avoid leaking the existence of secret resources.
OpaqueAccessDenied возвращает общий NotFound вместо AccessDenied, чтобы избежать утечки информации о существовании секретных ресурсов.
func OpaqueAccessDenied(err error) error { if trace.IsAccessDenied(err) { return trace.NotFound("not found") } return trace.Wrap(err) }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (ctx *ShowSecretsContext) NotFound() error {\n\tctx.ResponseData.WriteHeader(404)\n\treturn nil\n}", "func ErrAccessDenied(w http.ResponseWriter, r *http.Request) {\n\tAccessDeniedWithErr(w, r, errors.New(\"Forbidden\"))\n}", "func (aee *ActiveEndpointsError) Forbidden() {}", "func Forbidden(msg string) ErrorResponse {\n\tif msg == \"\" {\n\t\tmsg = \"You are not authorized to perform the requested action.\"\n\t}\n\treturn ErrorResponse{\n\t\tStatus: http.StatusForbidden,\n\t\tMessage: msg,\n\t}\n}", "func Forbidden(err error) Response {\n\tmessage := \"not authorized\"\n\tif err != nil {\n\t\tmessage = err.Error()\n\t}\n\treturn &errorResponse{\n\t\tcode: http.StatusForbidden,\n\t\tmsg: message,\n\t}\n}", "func AccessDeniedWithErr(w http.ResponseWriter, r *http.Request, err error) {\n\tdata := []byte(`{ \"error\": \"ERR_FORBIDDEN\" }`)\n\n\tsendError(w, r, err, http.StatusForbidden, data)\n}", "func (ctx *AcceptOfferContext) NotFound() error {\n\tctx.ResponseData.WriteHeader(404)\n\treturn nil\n}", "func (he *HTTPErrors) NotFound(ctx *Context) {\n\the.Emit(http.StatusNotFound, ctx)\n}", "func (ctx *ShowVerificationContext) NotFound() error {\n\tctx.ResponseData.WriteHeader(404)\n\treturn nil\n}", "func TestAccessDenied(t *testing.T) {\n\trunTest(t, func(s *Session) {\n\t\ts.Handle(\"model\", res.Access(func(r res.AccessRequest) {\n\t\t\tr.AccessDenied()\n\t\t}))\n\t}, func(s *Session) {\n\t\tinb := s.Request(\"access.test.model\", nil)\n\t\ts.GetMsg(t).\n\t\t\tAssertSubject(t, inb).\n\t\t\tAssertError(t, res.ErrAccessDenied)\n\t})\n}", "func (ctx *ShowUserContext) NotFound(r error) error {\n\tctx.ResponseData.Header().Set(\"Content-Type\", \"application/vnd.goa.error\")\n\treturn ctx.ResponseData.Service.Send(ctx.Context, 404, r)\n}", "func (ctx *ShowProfileContext) NotFound() error {\n\tctx.ResponseData.WriteHeader(404)\n\treturn nil\n}", "func NotFound(w http.ResponseWriter, r *http.Request) { Error(w, \"404 page not found\", http.StatusNotFound) }", 
"func (ctx *ShowBottleContext) NotFound() error {\n\tctx.ResponseData.WriteHeader(404)\n\treturn nil\n}", "func (ctx *ShowBottleContext) NotFound() error {\n\tctx.ResponseData.WriteHeader(404)\n\treturn nil\n}", "func assertSecretNotFound(\n\tctx context.Context,\n\tf *framework.Framework,\n\tnamespacedName types.NamespacedName,\n) error {\n\tsecret := &corev1.Secret{}\n\terr := f.Client.Get(ctx, namespacedName, secret)\n\tif err == nil {\n\t\treturn fmt.Errorf(\"secret '%s' still found\", namespacedName)\n\t}\n\tif errors.IsNotFound(err) {\n\t\treturn nil\n\t}\n\treturn err\n}", "func AccessDenied() ErrorBuilder {\n\treturn &defaultErrorBuilder{\n\t\terr: \"access_denied\",\n\t\terrorDescription: \"The authorization request was denied.\",\n\t}\n}", "func (r Response) Forbidden(code string, payload Payload, header ...ResponseHeader) {\n\tr.Response(code, http.Forbidden, payload, header...)\n}", "func NotFound(ctx context.Context, w http.ResponseWriter, r *http.Request) error {\n\thttp.NotFound(w, r)\n\treturn nil\n}", "func NotFound(ctx context.Context, w http.ResponseWriter, r *http.Request) error {\n\thttp.NotFound(w, r)\n\treturn nil\n}", "func Forbidden(format string, args ...interface{}) error {\n\treturn New(http.StatusForbidden, format, args...)\n}", "func (c ApiWrapper) NotFound(msg string, objs ...interface{}) revel.Result {\n\treturn c.renderErrorString(404, fmt.Sprintf(msg, objs))\n}", "func NotFound(w http.ResponseWriter, r *http.Request, h *render.Renderer) {\n\taccept := strings.Split(r.Header.Get(\"Accept\"), \",\")\n\taccept = append(accept, strings.Split(r.Header.Get(\"Content-Type\"), \",\")...)\n\n\tswitch {\n\tcase prefixInList(accept, ContentTypeHTML):\n\t\tm := TemplateMapFromContext(r.Context())\n\t\tm.Title(http.StatusText(http.StatusNotFound))\n\t\th.RenderHTMLStatus(w, http.StatusNotFound, \"404\", m)\n\tcase prefixInList(accept, ContentTypeJSON):\n\t\th.RenderJSON(w, http.StatusNotFound, 
http.StatusText(http.StatusNotFound))\n\tdefault:\n\t\thttp.Error(w, http.StatusText(http.StatusNotFound), http.StatusNotFound)\n\t}\n}", "func (ace *ActiveContainerError) Forbidden() {}", "func TestAccessDeniedHandler(t *testing.T) {\n\trunTest(t, func(s *Session) {\n\t\ts.Handle(\"model\", res.Access(res.AccessDenied))\n\t}, func(s *Session) {\n\t\tinb := s.Request(\"access.test.model\", nil)\n\t\ts.GetMsg(t).\n\t\t\tAssertSubject(t, inb).\n\t\t\tAssertError(t, res.ErrAccessDenied)\n\t})\n}", "func (r *Router) Forbidden(ctx *Context) {\n\tctx.Forbidden()\n}", "func (c *Action) NotFound(message string) error {\n\treturn c.Abort(404, message)\n}", "func (ctx *GetFilterContext) NotFound() error {\n\tctx.ResponseData.WriteHeader(404)\n\treturn nil\n}", "func (ctx *Context) NotFound(err error, message string) *HTTPError {\n\treturn notFoundError(err, message)\n}", "func (ctx *GetByIDHostContext) NotFound() error {\n\tctx.ResponseData.WriteHeader(404)\n\treturn nil\n}", "func (ctx *ShowCommentContext) NotFound() error {\n\tctx.ResponseData.WriteHeader(404)\n\treturn nil\n}", "func (ctx *ListOfferContext) NotFound() error {\n\tctx.ResponseData.WriteHeader(404)\n\treturn nil\n}", "func NotFound(fn http.HandlerFunc) {\n\tinfoMutex.Lock()\n\tvestigo.CustomNotFoundHandlerFunc(fn)\n\tinfoMutex.Unlock()\n}", "func (ctx *ListFeedContext) NotFound() error {\n\tctx.ResponseData.WriteHeader(404)\n\treturn nil\n}", "func NotFound(c *routing.Context, msg string, service string) error {\n\tResponse(c, `{\"error\": true, \"msg\": \"`+msg+`\"}`, 404, service, \"application/json\")\n\treturn nil\n}", "func (ctx *GetFeedContext) NotFound() error {\n\tctx.ResponseData.WriteHeader(404)\n\treturn nil\n}", "func (c ApiWrapper) Forbidden(msg string, objs ...interface{}) revel.Result {\n\treturn c.renderErrorString(403, fmt.Sprintf(msg, objs))\n}", "func (h *Handler) NotFound(w http.ResponseWriter, r *http.Request) {\n\twriteResponse(r, w, http.StatusNotFound, &SimpleResponse{\n\t\tTraceID: 
tracing.FromContext(r.Context()),\n\t\tMessage: \"not found\",\n\t})\n}", "func (ctx *ShowWorkflowContext) NotFound() error {\n\tctx.ResponseData.WriteHeader(404)\n\treturn nil\n}", "func (b *Baa) NotFound(c *Context) {\n\tif b.notFoundHandler != nil {\n\t\tb.notFoundHandler(c)\n\t\treturn\n\t}\n\thttp.NotFound(c.Resp, c.Req)\n}", "func NotFound(w http.ResponseWriter, r *http.Request) {\n\thttp.Error(w, http.StatusText(http.StatusNotFound), http.StatusNotFound)\n\treturn\n}", "func Forbidden(w http.ResponseWriter, err error) {\n\t(Response{Error: err.Error()}).json(w, http.StatusForbidden)\n}", "func (ctx *GetAllHostContext) NotFound() error {\n\tctx.ResponseData.WriteHeader(404)\n\treturn nil\n}", "func (c *Context) NotFound() {\n\tc.Handle(http.StatusNotFound, \"\", nil)\n}", "func NotFound(message ...interface{}) Err {\n\treturn Boomify(http.StatusNotFound, message...)\n}", "func (r Response) NotFound(code string, payload Payload, header ...ResponseHeader) {\n\tr.Response(code, http.NotFound, payload, header...)\n}", "func (ctx *ListUserContext) NotFound(r error) error {\n\tctx.ResponseData.Header().Set(\"Content-Type\", \"application/vnd.goa.error\")\n\treturn ctx.ResponseData.Service.Send(ctx.Context, 404, r)\n}", "func NotFound(msg string) ErrorResponse {\n\tif msg == \"\" {\n\t\tmsg = \"The requested resource was not found.\"\n\t}\n\treturn ErrorResponse{\n\t\tStatus: http.StatusNotFound,\n\t\tMessage: msg,\n\t}\n}", "func NotFoundRoute(res http.ResponseWriter, req *http.Request) {\n res.Write([]byte(\"Oopsie woopsie this doesn't exist.\"))\n}", "func (ctx *GetDogsByHostIDHostContext) NotFound() error {\n\tctx.ResponseData.WriteHeader(404)\n\treturn nil\n}", "func NotFound(rw http.ResponseWriter) {\n\tHttpError(rw, \"not found\", 404)\n}", "func NotFoundHandler() Handler { return HandlerFunc(NotFound) }", "func NotFoundHandler() Handler { return HandlerFunc(NotFound) }", "func (ctx *DeleteFeedContext) NotFound() error 
{\n\tctx.ResponseData.WriteHeader(404)\n\treturn nil\n}", "func NotFound(w http.ResponseWriter, message ...interface{}) {\n\tboom(w, 404, message...)\n}", "func (ctx *DeleteDogContext) NotFound() error {\n\tctx.ResponseData.WriteHeader(404)\n\treturn nil\n}", "func (ctx *UpdateFilterContext) NotFound() error {\n\tctx.ResponseData.WriteHeader(404)\n\treturn nil\n}", "func NotFound(context *gin.Context) {\n\tcontext.JSON(404, gin.H{\n\t\t\"error\": \"404 not found\",\n\t\t\"url\": context.Request.URL,\n\t})\n}", "func (ctx *UpdateFeedContext) NotFound() error {\n\tctx.ResponseData.WriteHeader(404)\n\treturn nil\n}", "func NotFound(w http.ResponseWriter, err error) {\n\tError(w, http.StatusNotFound, err)\n}", "func NotFound(w http.ResponseWriter, r *http.Request) {\n\thandlerMu.RLock()\n\tf, ok := handlerMap[http.StatusNotFound]\n\thandlerMu.RUnlock()\n\tif ok {\n\t\tf.ServeHTTP(w, r)\n\t} else {\n\t\tdefaultNotFound(w, r)\n\t}\n}", "func (ctx *DeleteFilterContext) NotFound() error {\n\tctx.ResponseData.WriteHeader(404)\n\treturn nil\n}", "func Forbidden(message ...interface{}) Err {\n\treturn Boomify(http.StatusForbidden, message...)\n}", "func (c *Context) NotFound() {\n\tc.JSON(404, ResponseWriter(404, \"page not found\", nil))\n}", "func (this *Context) NotFound(message string) {\n\tthis.ResponseWriter.WriteHeader(404)\n\tthis.ResponseWriter.Write([]byte(message))\n}", "func NotFound(c *gin.Context) {\n\tresponse := types.APIErrResponse{Msg: \"Something went wrong\", Success: false, Err: \"Not found\"}\n\tc.JSON(http.StatusNotFound, response)\n}", "func Forbidden(p protocol.Instance) echo.Checker {\n\tswitch {\n\tcase p.IsGRPC():\n\t\treturn ErrorContains(\"rpc error: code = PermissionDenied\")\n\tcase p.IsTCP():\n\t\treturn ErrorContains(\"EOF\")\n\tdefault:\n\t\treturn NoErrorAndStatus(http.StatusForbidden)\n\t}\n}", "func NotFound(w http.ResponseWriter, _ error) {\n\t(Response{Error: \"resource not found\"}).json(w, http.StatusNotFound)\n}", "func Forbidden(msg 
string) error {\n\tif msg == \"\" {\n\t\tmsg = \"no está autorizado a realizar la acción solicitada.\"\n\t}\n\treturn echo.NewHTTPError(http.StatusForbidden, msg)\n}", "func NotFound(w http.ResponseWriter, r *http.Request) {\n\tresponse := response.CreateResponse()\n\tresponse.SendDataWithStatusCode(w, \"not found\", http.StatusOK)\n}", "func (ctx *AddLinkWorkflowContext) NotFound() error {\n\tctx.ResponseData.WriteHeader(404)\n\treturn nil\n}", "func (response BasicJSONResponse) NotFound(writer http.ResponseWriter) {\n\tNotFound(writer, response)\n}", "func (ctx *DeleteHostContext) NotFound() error {\n\tctx.ResponseData.WriteHeader(404)\n\treturn nil\n}", "func NotFound(w http.ResponseWriter) {\n\thttp.Error(w, \"404 not found!!!\", http.StatusNotFound)\n}", "func NotFound(w http.ResponseWriter) {\n\trenderError(w, http.StatusNotFound, nil)\n}", "func (ctx *DeleteOutputContext) NotFound() error {\n\tctx.ResponseData.WriteHeader(404)\n\treturn nil\n}", "func ForbiddenErr(err error, format string, args ...interface{}) error {\n\treturn NewError(http.StatusForbidden, err, format, args...)\n}", "func NotFound(ctx context.Context, w http.ResponseWriter, message string) {\n\tfhirError(ctx, w, http.StatusNotFound, fhir.IssueSeverityWarning, fhir.IssueTypeNotFound, message)\n}", "func NotFound(err error) error {\n\treturn APIError{\n\t\tcode: http.StatusNotFound,\n\t\tMessage: err.Error(),\n\t}\n}", "func (c DBaseController) ForbiddenResponse() revel.Result {\n\tc.Response.Status = http.StatusForbidden\n\n\treturn c.RenderJSON(serializers.ForbiddenResponse())\n}", "func (ctx *DeleteLinkWorkflowContext) NotFound() error {\n\tctx.ResponseData.WriteHeader(404)\n\treturn nil\n}", "func (h *HandleHelper) NotFound() {\n\terrResponse(http.StatusNotFound,\n\t\t\"the requested resource could not be found\",\n\t)(h.w, h.r)\n}", "func (e DiscoveryError) IsAccessDenied() bool {\n\treturn strings.Contains(e.Error(), AccessDenied)\n}", "func accessForbiddenResp() response.Response 
{\n\t//nolint:stylecheck // Grandfathered capitalization of error.\n\treturn ErrResp(http.StatusForbidden, errors.New(\"Permission denied\"), \"\")\n}", "func notFound(resource string) middleware.Responder {\n\tmessage := fmt.Sprintf(\"404 %s not found\", resource)\n\treturn operations.NewGetChartDefault(http.StatusNotFound).WithPayload(\n\t\t&models.Error{Code: helpers.Int64ToPtr(http.StatusNotFound), Message: &message},\n\t)\n}", "func (uee *UnknownEndpointError) NotFound() {}", "func (ctx *UpdateUserContext) NotFound(r error) error {\n\tctx.ResponseData.Header().Set(\"Content-Type\", \"application/vnd.goa.error\")\n\treturn ctx.ResponseData.Service.Send(ctx.Context, 404, r)\n}", "func (r *Responder) NotFound() { r.write(http.StatusNotFound) }", "func (r *Responder) Forbidden() { r.write(http.StatusForbidden) }", "func (ctx *StopFeedContext) NotFound() error {\n\tctx.ResponseData.WriteHeader(404)\n\treturn nil\n}", "func NotFoundHandler(*Context) error {\n\treturn NewHTTPError(StatusNotFound)\n}", "func ERROR_AUTH_USER_NOT_FOUND(w http.ResponseWriter, pl string) {\n\tbuildForeignError(w, http.StatusForbidden, \"ERROR_AUTH_USER_NOT_FOUND\", pl)\n}", "func (r *Route) NotFound(handler http.Handler) *Route {\n\tr.handlers[notFound] = handler\n\treturn r\n}", "func (ctx *CreateUserContext) NotFound(r error) error {\n\tctx.ResponseData.Header().Set(\"Content-Type\", \"application/vnd.goa.error\")\n\treturn ctx.ResponseData.Service.Send(ctx.Context, 404, r)\n}", "func (ctx *DeleteUserContext) NotFound(r error) error {\n\tctx.ResponseData.Header().Set(\"Content-Type\", \"application/vnd.goa.error\")\n\treturn ctx.ResponseData.Service.Send(ctx.Context, 404, r)\n}", "func WrapWithPermissionDenied(cause error, parameters ...wparams.ParamStorer) Error {\n\treturn newGenericError(cause, DefaultPermissionDenied, wparams.NewParamStorer(parameters...))\n}", "func (req *Request) NotFound(body string) {\n\treq.Reply(http.StatusNotFound, body)\n}", "func NewSecretInspectNotFound() 
*SecretInspectNotFound {\n\treturn &SecretInspectNotFound{}\n}", "func (ctx *MoveLinkWorkflowContext) NotFound() error {\n\tctx.ResponseData.WriteHeader(404)\n\treturn nil\n}", "func (ctx *ListMessageContext) NotFound() error {\n\tctx.ResponseData.WriteHeader(404)\n\treturn nil\n}" ]
[ "0.6432331", "0.60147", "0.588723", "0.5819935", "0.5743779", "0.5713874", "0.5659095", "0.5598807", "0.5597008", "0.5592161", "0.5579683", "0.5568134", "0.5550085", "0.55464023", "0.55464023", "0.5545322", "0.5542302", "0.55259365", "0.5513903", "0.5513903", "0.55054015", "0.5493526", "0.54794586", "0.5474977", "0.5471457", "0.54650295", "0.54612654", "0.5435942", "0.54350364", "0.543279", "0.5426074", "0.54157656", "0.5407503", "0.5405352", "0.5403094", "0.53887254", "0.5380413", "0.53766626", "0.53710234", "0.536787", "0.53573143", "0.5352898", "0.53499216", "0.53345394", "0.5334449", "0.5333373", "0.53188235", "0.53029233", "0.5300304", "0.52866703", "0.52790534", "0.52778614", "0.52778614", "0.5276735", "0.52758485", "0.52533525", "0.52451426", "0.5241564", "0.52391595", "0.5233154", "0.5230381", "0.52181226", "0.5215855", "0.52140534", "0.52045393", "0.51976794", "0.5192057", "0.5189199", "0.518597", "0.5185721", "0.5183919", "0.5175863", "0.5175223", "0.51446986", "0.5144459", "0.5131449", "0.51299775", "0.5127557", "0.5127268", "0.5119076", "0.51163", "0.51082784", "0.51081765", "0.5104693", "0.50972664", "0.5092853", "0.5079889", "0.5078243", "0.5076729", "0.5067516", "0.5065334", "0.5062758", "0.5055248", "0.5048371", "0.50388956", "0.50384295", "0.5034623", "0.50319886", "0.5028053", "0.5024341" ]
0.7602695
0
PopInt returns a value from the list, it panics if not enough values were allocated
PopInt возвращает значение из списка, вызывает панику, если не хватает выделенных значений
func (p *PortList) PopInt() int { i, err := strconv.Atoi(p.Pop()) if err != nil { panic(err) } return i }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (s *SliceOfInt) Pop() int {\n\tpoppedItem := s.items[len(s.items)-1]\n\ts.items = s.items[:len(s.items)-1]\n\treturn poppedItem\n}", "func (p *IntVector) Pop() int\t{ return p.Vector.Pop().(int) }", "func (s *SliceOfInt32) Pop() int32 {\n\tpoppedItem := s.items[len(s.items)-1]\n\ts.items = s.items[:len(s.items)-1]\n\treturn poppedItem\n}", "func (s *SliceOfInt64) Pop() int64 {\n\tpoppedItem := s.items[len(s.items)-1]\n\ts.items = s.items[:len(s.items)-1]\n\treturn poppedItem\n}", "func (s *SliceOfInt8) Pop() int8 {\n\tpoppedItem := s.items[len(s.items)-1]\n\ts.items = s.items[:len(s.items)-1]\n\treturn poppedItem\n}", "func (v *IntVec) Pop() int {\n\treturn v.Remove(len(*v) - 1)\n}", "func (q *MyQueue) Pop() int {\n\tq.lock.Lock()\n\tx := q.list[0] // 对空数组取值时自动panic\n\tq.list = q.list[1:]\n\tq.lock.Unlock()\n\treturn x\n}", "func (s *SliceOfUint) Pop() uint {\n\tpoppedItem := s.items[len(s.items)-1]\n\ts.items = s.items[:len(s.items)-1]\n\treturn poppedItem\n}", "func (v *Int32Vec) Pop() int32 {\n\treturn v.Remove(len(*v) - 1)\n}", "func (q *MyQueue) Pop() int {\n\tfront := q.list.Front()\n\tres := front.Value.(int)\n\tq.list.Remove(front)\n\treturn res\n}", "func (s *Int64) Pop() int64 {\n\tfor val := range s.m {\n\t\tdelete(s.m, val)\n\t\treturn val\n\t}\n\treturn 0\n}", "func (s *SliceOfInt16) Pop() int16 {\n\tpoppedItem := s.items[len(s.items)-1]\n\ts.items = s.items[:len(s.items)-1]\n\treturn poppedItem\n}", "func (s *SliceOfUint32) Pop() uint32 {\n\tpoppedItem := s.items[len(s.items)-1]\n\ts.items = s.items[:len(s.items)-1]\n\treturn poppedItem\n}", "func (this *MyStack) Pop() int {\n\ttemp := this.val[0]\n\tthis.val = this.val[1:]\n\treturn temp\n}", "func (h *IntMaxHeap) Pop() interface{} {\n\told := *h\n\tn := len(old)\n\tx := old[n-1]\n\t*h = old[0 : n-1]\n\treturn x\n}", "func (p *IntArray) Pop() int {\n\ttmp := *p\n\tlast := tmp[len(tmp)-1 : len(tmp)]\n\ttmp = tmp[0 : len(tmp)-1]\n\n\t*p = tmp\n\treturn last[0]\n}", "func (s *IntStack) Pop() 
(int, error) {\n\tif s.Size() == 0 {\n\t\treturn 0, fmt.Errorf(\"Stack is empty.\")\n\t}\n\tv := s.head.value\n\ts.head = s.head.next\n\ts.size--\n\treturn v, nil\n}", "func (list *List) Pop(idx ...int) (interface{}, error) {\n\tindex := list.getLastIndex()\n\tll := len(idx)\n\n\tif ll > 1 {\n\t\treturn nil, fmt.Errorf(\"only 1 or 0 arguments are allowed\")\n\t}\n\n\t// in case of `list.Pop()`\n\telement := list.getByIndex(index)\n\tif ll == 0 {\n\t\treturn element, list.removeByIndex(index)\n\t}\n\n\tif idx[0] > index {\n\t\treturn nil, fmt.Errorf(\"index out of range\")\n\t}\n\n\tindex = idx[0]\n\treturn element, list.removeByIndex(index)\n}", "func (h *IntHeap) Pop() interface{} {\n\told := *h\n\tn := len(old)\n\tx := old[n-1]\n\t*h = old[0 : n-1]\n\treturn x\n}", "func (iheap *IntegerHeap) Pop() interface{} {\n\tvar (\n\t\tn, x1 int\n\n\t\t// hold a reference to the\n\t\tprevious = *iheap\n\t)\n\n\tn = len(previous)\n\tx1 = previous[n-1]\n\n\t*iheap = previous[0 : n-1]\n\n\treturn x1\n\n}", "func (s *StackInt) Pop() int {\nlength := len(s.s)\nres := s.s[length-1]\ns.s = s.s[:length-1]\nreturn res\n}", "func (s *SliceOfUint64) Pop() uint64 {\n\tpoppedItem := s.items[len(s.items)-1]\n\ts.items = s.items[:len(s.items)-1]\n\treturn poppedItem\n}", "func (this *MyStack) Pop() int {\n\tres := this.v[len(this.v)-1]\n\tthis.v = this.v[:len(this.v)-1]\n\treturn res\n}", "func (arr *ArrayList) Pop() ItemType {\n if arr.length > 0 {\n // shrink by half if only a third is used - dampening resize operations\n if arr.length < arr.capacity / 3 {\n arr.resize(arr.capacity / 2)\n }\n arr.length--\n return arr.data[arr.length]\n }\n panic(\"out of bounds\")\n}", "func (this *MyStack) Pop() int {\n\ttmpQ := list.New()\n\tll := this.queue.Len() - 1\n\tfor i := 0; i < ll; i++ {\n\t\te := this.queue.Front()\n\t\ttmpQ.PushBack(e.Value)\n\t\tthis.queue.Remove(e)\n\t}\n\n\ttopE := this.queue.Front()\n\tres := topE.Value.(int)\n\tthis.queue.Remove(topE)\n\n\tfor tmpQ.Len() > 0 {\n\t\te 
:= tmpQ.Front()\n\t\tthis.queue.PushBack(e.Value)\n\t\ttmpQ.Remove(e)\n\t}\n\treturn res\n}", "func (t *topK) Pop() interface{} {\n\tn := len(t.values)\n\tx := t.values[n-1]\n\tt.values = t.values[:n-1]\n\treturn x\n}", "func (h *FixedSizeHeap) Pop() interface{} {\n\tn := len(h.data)\n\tx := h.data[n-1]\n\th.data = h.data[0 : n-1]\n\treturn x\n}", "func (h *PerformanceHeap) Pop() interface{} {\n\told := h.items\n\tn := len(old)\n\tx := old[n-1]\n\th.items = old[0 : n-1]\n\treturn x\n}", "func (s *Uint64) Pop() uint64 {\n\tfor val := range s.m {\n\t\tdelete(s.m, val)\n\t\treturn val\n\t}\n\treturn 0\n}", "func lvalPop(v *LVal, i int) *LVal {\n\tx := v.Cell[i]\n\n\tv.Cell = append(v.Cell[:i], v.Cell[i+1:]...)\n\treturn x\n}", "func (s *stack) pop() int {\n\tl := len(s.items)\n\tremovedItem := s.items[l-1]\n\ts.items = s.items[:l-1]\n\treturn removedItem\n}", "func (iheap *IntegerHeap) Pop() interface{} {\n\tvar previous IntegerHeap = *iheap\n\tn := len(previous)\n\tx1 := previous[n-1]\n\t*iheap = previous[0 : n-1]\n\treturn x1\n}", "func (h *itemHeap) Pop() interface{} {\n\tl := len(*h)\n\ti := (*h)[l-1]\n\t*h = (*h)[:l-1]\n\treturn i\n}", "func (heap *MinHeap) Pop() int {\n\tvalue := heap.Heap[0]\n\theap.Heap[0] = heap.Count - 1\n\theap.Heap[heap.Count-1] = 0\n\theap.Count--\n\theap.heapifyDown()\n\treturn value\n}", "func (o *openList) Pop() interface{} {\n\topn := *o\n\tit := opn[len(opn)-1]\n\tit.pqindex = -1\n\t*o = opn[:len(opn)-1]\n\treturn it\n}", "func (s *SliceOfFloat32) Pop() float32 {\n\tpoppedItem := s.items[len(s.items)-1]\n\ts.items = s.items[:len(s.items)-1]\n\treturn poppedItem\n}", "func (s *MyStack) Pop() int {\n\titem := s.queue[0]\n\ts.queue = s.queue[1:]\n\treturn item\n}", "func (s *MyStack) Pop() int {\n v := s.queue1[0]\n s.queue1 = s.queue1[1:]\n return v\n}", "func (this *MyStack) Pop() int {\n\treturn this.l.Remove(this.l.Back()).(int)\n}", "func (pq *MaxPQ) Pop() interface{} {\n\told := *pq\n\tn := len(old)\n\titem := old[n-1]\n\t*pq = 
old[0 : n-1]\n\treturn item\n}", "func (l *List) Pop() (v Value, err error) {\n\tif l.tail == nil {\n\t\terr = errEmpty\n\t} else {\n\t\tv = l.tail.Value\n\t\tl.tail = l.tail.prev\n\t\tif l.tail == nil {\n\t\t\tl.head = nil\n\t\t}\n\t}\n\treturn v, err\n}", "func Pop(h *PriorityQueue) *Item {\n\tn := h.Len() - 1\n\th.Swap(0, n)\n\tdown(h, 0, n)\n\treturn h.Pop()\n}", "func (this *MyQueue) Pop() int {\n\tv := this.Stack[0]\n\tthis.Stack = this.Stack[1:]\n\treturn v\n}", "func (s *Stack) Pop() int {\n\tlength := len(s.items) - 1\n\ttoRemove := s.items[length]\n\ts.items = s.items[:length]\n\treturn toRemove\n}", "func (h *Heap) Pop() interface{} {\n\tif h.size == 0 {\n\t\treturn nil\n\t}\n\tres := h.values[1]\n\th.values[1] = h.values[h.size]\n\th.values = h.values[:h.size]\n\th.size--\n\n\th.bubbleDown()\n\n\treturn res\n}", "func (this *MyQueue) Pop() int {\n\tr := this.q[len(this.q)-1]\n\tthis.q = this.q[:len(this.q)-1]\n\treturn r\n}", "func (s *MyStack) Pop() int {\n\tif s.Empty() {\n\t\treturn -1\n\t}\n\tn := len(s.Q)\n\tx := s.Q[n-1]\n\ts.Q = s.Q[:n-1]\n\treturn x\n}", "func popInt(cloneMap map[string]string, key string) (int, error) {\n\tval, err := pop(cloneMap, key)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\treturn strconv.Atoi(val)\n}", "func (t *Tower) pop() (result int) {\n\tresult = (*t)[len(*t)-1]\n\t*t = (*t)[:len(*t)-1]\n\treturn result\n}", "func Pop[T any](h Interface[T]) T {\n\tn := h.Len() - 1\n\th.Swap(0, n)\n\tdown(h, 0, n)\n\treturn h.Pop()\n}", "func (this *MyQueue) Pop() int {\n\tif this.out.Len() == 0 {\n\t\tfor v := this.in.Pop(); v != nil; v = this.in.Pop() {\n\t\t\tthis.out.Push(v)\n\t\t}\n\t}\n\treturn this.out.Pop().(int)\n}", "func (s *SliceOfUint8) Pop() uint8 {\n\tpoppedItem := s.items[len(s.items)-1]\n\ts.items = s.items[:len(s.items)-1]\n\treturn poppedItem\n}", "func (l *pqList) Pop() interface{} {\n\treturn l.Remove(len(l.Slice) - 1)\n}", "func (this *MyQueue) Pop() int {\n x := this.q[0]\n this.q = this.q[1:]\n return x\n}", 
"func (h *heap) pop() int {\n\tr := h.H[0];\n\th.V--;\n\th.H[0] = h.H[h.V];\n\tif h.V > 1 {\n\t\th.bubble_down(0);\n\t}\n\treturn r;\n}", "func (s *Stack) Pop() (int, error) {\n\tif s.size > 0 {\n\t\tval := s.top.value\n\t\ts.top = s.top.next\n\t\ts.size--\n\t\treturn val, nil\n\t}\n\treturn 0, errorEmptyStack\n}", "func (this *MyStack) Pop() int {\n\tx := this.Queue[0]\n\tthis.Queue = this.Queue[1:]\n\treturn x\n}", "func (s *Stack) Pop() int {\n\tl := len(s-item) - 1\n\ttoRemove := s.items[l]\n\ts.items = s.items[:l]\n\treturn toRemove\n}", "func (q *TaskQueue) Pop() int {\n\tif q.Count == 0 {\n\t\treturn -1\n\t}\n\tnode := q.Nodes[q.Head]\n\tq.Head = (q.Head + 1) % q.Size\n\tq.Count--\n\treturn node\n}", "func (this *MyQueue) Pop() int {\n\tthis.Peek()\n\te := this.b[len(this.b)-1]\n\tthis.b = this.b[:len(this.b)-1]\n\treturn e\n}", "func Pop(h Interface) interface{} {\n\tn := h.Len() - 1\n\th.Swap(0, n)\n\tdown(h, 0, n)\n\treturn h.Pop()\n}", "func (heap *MinHeap) Pop() (int, error) {\n\tif heap.Size() == 0 {\n\t\treturn 0, errors.New(\"heap underflow\")\n\t}\n\n\tmin := heap.elements[0]\n\theap.elements[0] = heap.elements[heap.Size()-1]\n\theap.elements = heap.elements[:heap.Size()-1]\n\theap.minHeapify(0)\n\n\treturn min, nil\n}", "func (this *MyStack) Pop() int {\n\tfor this.current.Qsize() != 1 {\n\t\tthis.backup.push(this.current.pop())\n\t}\n\tres := this.current.pop()\n\tthis.current, this.backup = this.backup, this.current\n\n\treturn res\n}", "func (p *intPool) get() *big.Int {\n\tif p.pool.len() > 0 {\n\t\treturn p.pool.pop()\n\t}\n\treturn new(big.Int)\n}", "func (heap *maxheap) Pop() interface{} {\n\told := *heap\n\tn := len(old)\n\n\titem := old[n-1]\n\told[n-1] = nil\n\titem.index = -1\n\n\t*heap = old[0 : n-1]\n\n\treturn item\n}", "func (sm *StackMax) Pop() (int, error) {\n\tif sm.Empty() {\n\t\treturn -1, ErrstackEmpty\n\t}\n\n\ttop, _ := sm.Top()\n\n\tsm.length--\n\tsm.container = sm.container[:sm.length]\n\tsm.maxer = 
sm.maxer[:sm.length]\n\treturn top, nil\n}", "func (w *MetricWindow) Pop() bitflow.Value {\n\tif w.Empty() {\n\t\treturn 0\n\t}\n\tval := w.data[w.first]\n\tw.first = w.inc(w.first)\n\tw.full = false\n\treturn val\n}", "func (d *DynamicArr) Pop() (interface{}, error) {\n\tif d.length == 0 {\n\t\treturn nil, errors.New(\"Empty array\")\n\t}\n\n\tfor d.capacity/2 > d.length-1 {\n\t\td.shrinkSize()\n\t}\n\n\ttempArr := make([]interface{}, d.capacity)\n\tfor i := 0; i < d.length-1; i++ {\n\t\ttempArr[i] = d.array[i]\n\t}\n\tval := d.array[d.length-1]\n\td.length--\n\td.array = tempArr\n\n\treturn val, nil\n}", "func(k *Stack) Pop(){\n\tl := len(k.nums)-1\n\n\tk.nums = k.nums[:l]\n\n\n}", "func (pq *askPQueue) Pop() *models.Ask {\n\tpq.Lock()\n\tdefer pq.Unlock()\n\n\tif pq.size() < 1 {\n\t\treturn nil\n\t}\n\n\tmax := pq.items[1]\n\n\tpq.exch(1, pq.size())\n\tpq.items = pq.items[0:pq.size()]\n\tpq.elemsCount--\n\tpq.sink(1)\n\n\treturn max.value\n}", "func (pq *MinPQ) Pop() interface{} {\n\told := *pq\n\tn := len(old)\n\titem := old[n-1]\n\t*pq = old[0 : n-1]\n\treturn item\n}", "func (q *BoundedQueue) Pop() int {\n\tq.Lock()\n\tdefer q.Unlock()\n\tvar res int\n\tfor len(q.queue) == 0 {\n\t\tq.hasItems.Wait()\n\t}\n\tres = q.queue[0]\n\tq.queue = q.queue[1:]\n\tif len(q.queue) < q.capacity {\n\t\tq.hasSpace.Broadcast()\n\t}\n\tfmt.Println(\"Popped\", res)\n\treturn res\n}", "func (this *MyStack) Pop() int {\n\tfor this.wareHouse.Size() > 1 {\n\t\tthis.backup.Push(this.wareHouse.Pop())\n\t}\n\tval := this.wareHouse.Pop()\n\tthis.wareHouse, this.backup = this.backup, this.wareHouse\n\treturn val\n}", "func (p Pool) Pop() interface{} {\n\tel := p[p.Len()-1]\n\tp = p[:p.Len()-2]\n\treturn el\n}", "func (this *MyStack) Pop() int {\n\tthis.top--\n\treturn this.queue[this.top+1]\n}", "func (p *PortList) Pop() string {\n\tp.Lock()\n\tdefer p.Unlock()\n\tif len(p.ports) == 0 {\n\t\tpanic(\"list is empty\")\n\t}\n\tval := p.ports[len(p.ports)-1]\n\tp.ports = 
p.ports[:len(p.ports)-1]\n\treturn val\n}", "func MaxIntHeapPop(s []int) (int, []int) {\n\tcpy := make([]int, len(s), cap(s))\n\tcopy(cpy, s)\n\tmaxVal := cpy[0]\n\tlastIndex := len(cpy) - 1\n\tcpy[0] = cpy[lastIndex]\n\tcpy = cpy[:lastIndex]\n\tMaxIntHeapify(cpy)\n\treturn maxVal, cpy\n}", "func (s *Stack) Pop() (int, error) {\n\tif s.empty() {\n\t\treturn -1, ErrStackUnderflow\n\t}\n\tresult := s.array[s.top]\n\ts.top--\n\treturn result, nil\n}", "func (this *MyStack) Pop() int {\n\tret := this.Head.Val\n\tthis.Head = this.Head.Next\n\tif this.Head != nil {\n\t\tthis.Head.Pre = nil\n\t}\n\tthis.Len--\n\treturn ret\n}", "func popCntq(uint) (ret uint)", "func pop(s stack, top int) (*element, int, error) {\n\tif top == -1 {\n\t\treturn nil, -1, fmt.Errorf(\"underflow\")\n\t}\n\tpoppedElement := s[top]\n\ttop--\n\treturn &poppedElement, top, nil\n}", "func (pq *MinPQueue) Pop() interface{} {\n\told := *pq\n\tn := len(old)\n\titem := old[n-1]\n\titem.index = -1 // for safety\n\t*pq = old[0 : n-1]\n\treturn item\n}", "func (h *MaxKeyHeap) Pop() interface{} {\n\told := *h\n\tn := len(old)\n\tx := old[n-1]\n\t*h = old[0 : n-1]\n\treturn x\n}", "func PopLastInt(x []int) (int, []int, error) {\n\tif len(x) == 0 {\n\t\treturn 0, nil, fmt.Errorf(\"no value to pop\")\n\t}\n\treturn x[len(x)-1], x[:len(x)-1], nil\n}", "func (this *MyStack) Pop() int {\n\tfor this.l.Front().Next() != nil {\n\t\tthis.t.PushBack(this.l.Remove(this.l.Front()))\n\t}\n\ttop := this.l.Remove(this.l.Front())\n\tfor this.t.Front() != nil {\n\t\tthis.l.PushBack(this.t.Remove(this.t.Front()))\n\t}\n\treturn top.(int)\n}", "func (this *MyStack) Pop() int {\n\tans := this.Ele[this.Len-1]\n\tthis.Ele = this.Ele[:this.Len-1]\n\tthis.Len--\n\treturn ans\n}", "func (c *Clac) Pop() (value.Value, error) {\n\tx, err := c.remove(0, 1)\n\tif err != nil {\n\t\treturn zero, err\n\t}\n\treturn x[0], err\n}", "func (pq *bidPQueue) Pop() *models.Bid {\n\tpq.Lock()\n\tdefer pq.Unlock()\n\n\tif pq.size() < 1 {\n\t\treturn 
nil\n\t}\n\n\tmax := pq.items[1]\n\n\tpq.exch(1, pq.size())\n\tpq.items = pq.items[0:pq.size()]\n\tpq.elemsCount--\n\tpq.sink(1)\n\n\treturn max.value\n}", "func lvalTake(v *LVal, i int) *LVal {\n\tx := lvalPop(v, i)\n\treturn x\n}", "func (h *ReqHeap) Pop() interface{} {\n\told := *h\n\tn := len(old)\n\tx := old[n-1]\n\t*h = old[0 : n-1]\n\treturn x\n}", "func (pq *priorityQueue) Pop() interface{} {\n\told := *pq\n\tn := len(old)\n\titem := old[n-1]\n\titem.index = -1 // for safety\n\t*pq = old[0 : n-1]\n\treturn item\n}", "func (s *Stack) Pop() (int, error) {\n\tif s.Empty() {\n\t\treturn 0, fmt.Errorf(\"stack is empty\")\n\t}\n\n\tv := (*s)[len(*s)-1]\n\t*s = (*s)[:len(*s)-1]\n\treturn v, nil\n}", "func (s *StackOfPlates) pop() int {\n\n\tif s.stacks[s.last].getCapacity() == 0 {\n\t\ts.last--\n\t}\n\n\t// first column\n\tif s.last < 0 {\n\t\tpanic(\"Cannot pop() from a first empty stack!\")\n\t}\n\n\treturn s.stacks[s.last].pop()\n}", "func (sll *SingleLinkedList) Pop(index int) interface{} {\n\t// Panic if index is smaller 0\n\tif index < 0 {\n\t\tpanic(\"index < 0\")\n\t}\n\n\t// Pop first element\n\tif index == 0 {\n\t\t// Result\n\t\tv := sll.first.value\n\t\t// Remove first element\n\t\tsll.first = sll.first.next\n\t\t// Decrease length\n\t\tsll.length--\n\t\treturn v\n\t}\n\n\t// Get node before the one to pop\n\tn := sll.getNode(index - 1)\n\t// Result\n\tv := n.next.value\n\t// Remove reference to remove element\n\tn.next = n.next.next\n\t// Decrease length\n\tsll.length--\n\treturn v\n}", "func (f *Float64Stack) Pop() float64 {\n\tf.mu.Lock()\n\tdefer f.mu.Unlock()\n\n\tln := len(f.items)\n\tif ln == 0 {\n\t\treturn 0\n\t}\n\n\ttail := f.items[ln-1]\n\tf.items = f.items[:ln-1]\n\n\treturn tail\n}", "func (q *queue) pop() Item {\n\ti := q.head\n\tq.head = (q.head + 1) % len(q.items)\n\tq.count--\n\treturn q.items[i]\n}", "func (s *SliceOfUint16) Pop() uint16 {\n\tpoppedItem := s.items[len(s.items)-1]\n\ts.items = s.items[:len(s.items)-1]\n\treturn 
poppedItem\n}", "func (s *orderedItems) Pop() interface{} {\n\told := *s\n\tn := len(old)\n\tx := old[n-1]\n\t*s = old[0 : n-1]\n\treturn x\n}", "func (h *Heap) Pop() interface{} {\n\told := h.slice\n\tn := len(old)\n\titem := old[n-1]\n\told[n-1] = nil // avoid memory leak\n\titem.index = -1 // for safety\n\th.slice = old[0 : n-1]\n\treturn item\n}", "func (s *SliceOfFloat64) Pop() float64 {\n\tpoppedItem := s.items[len(s.items)-1]\n\ts.items = s.items[:len(s.items)-1]\n\treturn poppedItem\n}" ]
[ "0.75718653", "0.72919536", "0.7277665", "0.724577", "0.71927625", "0.71801865", "0.713347", "0.70770997", "0.70427364", "0.69788194", "0.6975585", "0.6890257", "0.6820779", "0.6796666", "0.6782513", "0.6761821", "0.673664", "0.6729056", "0.67286617", "0.67235994", "0.6723054", "0.6703815", "0.6658389", "0.6645202", "0.65945506", "0.65889615", "0.6579576", "0.6577539", "0.65609723", "0.6546097", "0.6545244", "0.65297294", "0.652464", "0.65155226", "0.65143025", "0.6514281", "0.6509543", "0.65066904", "0.64951545", "0.6451631", "0.6449532", "0.64488375", "0.6448265", "0.644799", "0.6436029", "0.643563", "0.643331", "0.6432291", "0.64294934", "0.64285135", "0.6416184", "0.6408146", "0.6407682", "0.64029264", "0.6397758", "0.6395056", "0.63639516", "0.636237", "0.63344586", "0.6333951", "0.6330025", "0.6327194", "0.6321632", "0.63205963", "0.63067764", "0.6297744", "0.6291099", "0.6279541", "0.6279358", "0.6279184", "0.6268787", "0.62667584", "0.6264959", "0.6263308", "0.62622625", "0.6256006", "0.6254914", "0.6246175", "0.6230732", "0.62306285", "0.6227003", "0.6226629", "0.6225177", "0.6222511", "0.62216085", "0.6217921", "0.6213844", "0.61957425", "0.6188665", "0.6187534", "0.6185014", "0.618347", "0.61830187", "0.61799145", "0.6179893", "0.61781704", "0.6177073", "0.61606616", "0.6153374", "0.61521816" ]
0.7560944
1
GetFreeTCPPorts returns n ports starting from port 20000.
GetFreeTCPPorts возвращает n портов, начиная с порта 20000.
func GetFreeTCPPorts(n int, offset ...int) (PortList, error) { list := make([]string, 0, n) start := PortStartingNumber if len(offset) != 0 { start = offset[0] } for i := start; i < start+n; i++ { list = append(list, strconv.Itoa(i)) } return PortList{ports: list}, nil }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func Take(n int) (ports []int, err error) {\n\tif n <= 0 {\n\t\treturn nil, fmt.Errorf(\"freeport: cannot take %d ports\", n)\n\t}\n\n\tmu.Lock()\n\tdefer mu.Unlock()\n\n\t// Reserve a port block\n\tonce.Do(initialize)\n\n\tif n > total {\n\t\treturn nil, fmt.Errorf(\"freeport: block size too small\")\n\t}\n\n\tfor len(ports) < n {\n\t\tfor freePorts.Len() == 0 {\n\t\t\tif total == 0 {\n\t\t\t\treturn nil, fmt.Errorf(\"freeport: impossible to satisfy request; there are no actual free ports in the block anymore\")\n\t\t\t}\n\t\t\tcondNotEmpty.Wait()\n\t\t}\n\n\t\telem := freePorts.Front()\n\t\tfreePorts.Remove(elem)\n\t\tport := elem.Value.(int)\n\n\t\tif used := isPortInUse(port); used {\n\t\t\t// Something outside of the test suite has stolen this port, possibly\n\t\t\t// due to assignment to an ephemeral port, remove it completely.\n\t\t\tlogf(\"WARN\", \"leaked port %d due to theft; removing from circulation\", port)\n\t\t\ttotal--\n\t\t\tcontinue\n\t\t}\n\n\t\tports = append(ports, port)\n\t}\n\n\t// logf(\"DEBUG\", \"free ports: %v\", ports)\n\treturn ports, nil\n}", "func freePortAddrs(ip string, n int) []string {\n\tmin, max := 49152, 65535\n\tfreePortsMu.Lock()\n\tdefer freePortsMu.Unlock()\n\tports := make(map[int]net.Listener, n)\n\taddrs := make([]string, n)\n\tif lastPort < min || lastPort > max {\n\t\tlastPort = min\n\t}\n\tfor i := 0; i < n; i++ {\n\t\tp, addr, listener, err := oneFreePort(ip, lastPort, min, max)\n\t\tif err != nil {\n\t\t\tlog.Panic(err)\n\t\t}\n\t\tlastPort = p\n\t\taddrs[i] = addr\n\t\tports[p] = listener\n\t\tusedPorts[p] = struct{}{}\n\t}\n\t// Now release them all. 
It's now a race to get our desired things\n\t// listening on these addresses.\n\tfor _, l := range ports {\n\t\tif err := l.Close(); err != nil {\n\t\t\tlog.Panic(err)\n\t\t}\n\t}\n\treturn addrs\n}", "func FindFreePort() int {\n\taddr, err := net.ResolveTCPAddr(\"tcp\", \"localhost:0\")\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tl, err := net.ListenTCP(\"tcp\", addr)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tdefer l.Close()\n\treturn l.Addr().(*net.TCPAddr).Port\n}", "func GetFreePort(t *testing.T) string {\n\taddr, err := net.ResolveTCPAddr(\"tcp\", \"localhost:0\")\n\trequire.NoError(t, err)\n\tlistener, err := net.ListenTCP(\"tcp\", addr)\n\trequire.NoError(t, err)\n\tdefer listener.Close()\n\n\taddress := listener.Addr().String()\n\tcolon := strings.Index(address, \":\")\n\tport := address[colon+1:]\n\treturn port\n}", "func freePort() (uint16, error) {\n\taddr, err := net.ResolveTCPAddr(\"tcp\", \"localhost:0\")\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\n\tl, err := net.ListenTCP(\"tcp\", addr)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\tdefer l.Close()\n\n\treturn uint16(l.Addr().(*net.TCPAddr).Port), nil\n}", "func freePort() (uint16, error) {\n\taddr, err := net.ResolveTCPAddr(\"tcp\", \"localhost:0\")\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\n\tl, err := net.ListenTCP(\"tcp\", addr)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\tdefer l.Close()\n\n\treturn uint16(l.Addr().(*net.TCPAddr).Port), nil\n}", "func FreePort() int {\n\taddr, err := net.ResolveTCPAddr(\"tcp\", \"localhost:0\")\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t\treturn 0\n\t}\n\n\tl, err := net.ListenTCP(\"tcp\", addr)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t\treturn 0\n\t}\n\n\tdefer l.Close()\n\n\treturn l.Addr().(*net.TCPAddr).Port\n}", "func getFreePort() string {\n\taddr, err := net.ResolveTCPAddr(\"tcp\", \"localhost:0\")\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tl, err := net.ListenTCP(\"tcp\", addr)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tdefer 
l.Close()\n\treturn fmt.Sprintf(\"%d\", l.Addr().(*net.TCPAddr).Port)\n}", "func freeport(t *testing.T) (port int, addr string) {\n\tl, err := net.ListenTCP(\"tcp\", &net.TCPAddr{IP: net.ParseIP(\"127.0.0.1\")})\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tdefer l.Close()\n\ta := l.Addr().(*net.TCPAddr)\n\tport = a.Port\n\treturn port, a.String()\n}", "func findFreePort() int {\n\tln, _ := net.Listen(\"tcp\", \":0\")\n\tln.Close()\n\n\taddr := ln.Addr().(*net.TCPAddr)\n\treturn addr.Port\n}", "func getFreePort() int {\n\tln, err := net.Listen(\"tcp\", \"[::]:0\")\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tport := ln.Addr().(*net.TCPAddr).Port\n\n\terr = ln.Close()\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\treturn port\n}", "func getOpenPorts(n int) []string {\n\tports := []string{}\n\tfor i := 0; i < n; i++ {\n\t\tts := httptest.NewServer(http.NewServeMux())\n\t\tdefer ts.Close()\n\t\tu, err := url.Parse(ts.URL)\n\t\trtx.Must(err, \"Could not parse url to local server:\", ts.URL)\n\t\tports = append(ports, \":\"+u.Port())\n\t}\n\treturn ports\n}", "func getFreePort() (int, error) {\n\taddr, err := net.ResolveTCPAddr(\"tcp\", \"localhost:0\")\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\n\tl, err := net.ListenTCP(\"tcp\", addr)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\tdefer l.Close()\n\treturn l.Addr().(*net.TCPAddr).Port, nil\n}", "func GetFreePort() int {\n\taddr, err := net.ResolveTCPAddr(\"tcp\", \"localhost:0\")\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tlisten, err := net.ListenTCP(\"tcp\", addr)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tdefer listen.Close()\n\treturn listen.Addr().(*net.TCPAddr).Port\n}", "func getFreePort(t *testing.T) string {\n\tl, err := net.Listen(\"tcp\", \":\")\n\tif err != nil {\n\t\tt.Fatalf(\"getFreePort: could not get free port: %v\", err)\n\t}\n\tdefer l.Close()\n\treturn l.Addr().String()[strings.LastIndex(l.Addr().String(), \":\"):]\n}", "func freeport() (port int, addr string) {\n\tl, err := net.ListenTCP(\"tcp\", 
&net.TCPAddr{IP: net.ParseIP(\"127.0.0.1\")})\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tdefer l.Close()\n\ta := l.Addr().(*net.TCPAddr)\n\tport = a.Port\n\treturn port, a.String()\n}", "func availablePorts(cnt int) ([]string, error) {\n\trtn := []string{}\n\n\tfor i := 0; i < cnt; i++ {\n\t\tport, err := getPort()\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\trtn = append(rtn, strconv.Itoa(port))\n\t}\n\treturn rtn, nil\n}", "func getFreePort() (int, error) {\n\tlistener, err := net.Listen(\"tcp\", \":0\")\n\tdefer listener.Close()\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\tport := listener.Addr().(*net.TCPAddr).Port\n\treturn port, nil\n}", "func GetFreePort() (int, error) {\n\taddr, err := net.ResolveTCPAddr(\"tcp\", \"localhost:0\")\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\n\tl, err := net.ListenTCP(\"tcp\", addr)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\tdefer l.Close()\n\n\treturn l.Addr().(*net.TCPAddr).Port, nil\n}", "func FreePort() (int, error) {\n\n\taddr, err := net.ResolveTCPAddr(\"tcp\", \"localhost:0\")\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\n\tlisten, err := net.ListenTCP(\"tcp\", addr)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\n\tdefer listen.Close()\n\treturn listen.Addr().(*net.TCPAddr).Port, nil\n}", "func GetPort() (int, error) {\n\tfor i := previousPort; i < maxPort; i++ {\n\t\tif IsPortAvailable(i) {\n\t\t\t// Next previousPort is 1124 if i == 1024 now.\n\t\t\tpreviousPort = i + 100\n\t\t\treturn i, nil\n\t\t}\n\t}\n\treturn -1, errors.New(\"Not found free TCP Port\")\n}", "func FreePort() (int, error) {\n\t// Opens a TCP connection to a free port on the host\n\t// and closes the connection but getting the port from it\n\t// so the can be setted to a free\n\t// random port each time if no one is specified\n\tl, err := net.Listen(\"tcp\", \"\")\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\tl.Close()\n\tsl := strings.Split(l.Addr().String(), \":\")\n\tp, err := strconv.Atoi(sl[len(sl)-1])\n\tif err != nil 
{\n\t\treturn 0, err\n\t}\n\n\treturn p, nil\n}", "func NextN(n int) ([]int, error) {\n\tresult := make([]int, n)\n\tlisteners := make([]net.Listener, n)\n\tfor i := 0; i < n; i++ {\n\t\tlistener, err := net.Listen(\"tcp\", \"127.0.0.1:0\")\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tlisteners[i] = listener\n\t\tresult[i] = listener.Addr().(*net.TCPAddr).Port\n\t}\n\tfor _, l := range listeners {\n\t\t_ = l.Close()\n\t}\n\treturn result, nil\n}", "func GetFreePort(host string, preferredPort uint32) (int, error) {\n\taddress := host + \":\" + fmt.Sprint(preferredPort)\n\taddr, err := net.ResolveTCPAddr(\"tcp\", address)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\n\tl, err := net.ListenTCP(\"tcp\", addr)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\tdefer l.Close()\n\treturn l.Addr().(*net.TCPAddr).Port, nil\n}", "func getProbablyFreePortNumber() (int, error) {\n\tl, err := net.Listen(\"tcp\", \":0\")\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\n\tdefer l.Close()\n\n\t_, port, err := net.SplitHostPort(l.Addr().String())\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\n\tportNum, err := strconv.Atoi(port)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\n\treturn portNum, nil\n}", "func nextAvailablePort() int {\n\tservers, err := All(dockerClient())\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tusedPorts := make([]int, len(servers))\n\n\tfor i, s := range servers {\n\t\tp, err := s.Port()\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\n\t\tusedPorts[i] = p\n\t}\n\n\t// Iterate 100 ports starting with the default\nOUTER:\n\tfor p := defaultPort; p < defaultPort+100; p++ {\n\t\tfor _, up := range usedPorts {\n\t\t\tif p == up {\n\t\t\t\t// Another server is using this port\n\t\t\t\tcontinue OUTER\n\t\t\t}\n\t\t}\n\n\t\t// The port is available\n\t\treturn p\n\t}\n\n\tpanic(\"100 ports were not available\")\n}", "func (p *PortForward) getFreePort() (int, error) {\n\tlistener, err := net.Listen(\"tcp\", \"127.0.0.1:0\")\n\tif err != nil {\n\t\treturn 0, 
err\n\t}\n\n\tport := listener.Addr().(*net.TCPAddr).Port\n\terr = listener.Close()\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\n\treturn port, nil\n}", "func (p *PortForward) getFreePort() (int, error) {\n\tlistener, err := net.Listen(\"tcp\", \"127.0.0.1:0\")\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\n\tport := listener.Addr().(*net.TCPAddr).Port\n\terr = listener.Close()\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\n\treturn port, nil\n}", "func (g *Group) GetFreePort() uint16 {\n\n\tvar ports []uint16\n\t/*\n\t\tfor _, s := range g.Services {\n\t\t\tports = append(ports, s.Ports...)\n\t\t}\n\t*/\n\tfor i := g.MinPort; i < g.MaxPort; i++ {\n\t\tif !findPort(i, ports) {\n\t\t\treturn i\n\t\t}\n\t}\n\n\treturn 0\n}", "func alloc() (int, net.Listener) {\n\tfor i := 0; i < attempts; i++ {\n\t\tblock := int(rand.Int31n(int32(effectiveMaxBlocks)))\n\t\tfirstPort := lowPort + block*blockSize\n\t\tln, err := net.ListenTCP(\"tcp\", tcpAddr(\"127.0.0.1\", firstPort))\n\t\tif err != nil {\n\t\t\tcontinue\n\t\t}\n\t\t// logf(\"DEBUG\", \"allocated port block %d (%d-%d)\", block, firstPort, firstPort+blockSize-1)\n\t\treturn firstPort, ln\n\t}\n\tpanic(\"freeport: cannot allocate port block\")\n}", "func (alloc *RuntimePortAllocator) GetAvailablePorts(portNum int) (ports []int, err error) {\n\tif alloc.pa == nil {\n\t\treturn nil, errors.New(\"Runtime port allocator not setup\")\n\t}\n\n\tfor i := 0; i < portNum; i++ {\n\t\tif availPort, err := alloc.pa.AllocateNext(); err != nil {\n\t\t\talloc.log.Error(err, \"can't allocate next, all ports are in use\")\n\t\t\tbreak\n\t\t} else {\n\t\t\tports = append(ports, availPort)\n\t\t}\n\t}\n\n\t// Something unexpected happened, rollback to release allocated ports\n\tif len(ports) < portNum {\n\t\tfor _, reservedPort := range ports {\n\t\t\t_ = alloc.pa.Release(reservedPort)\n\t\t}\n\t\treturn nil, errors.Errorf(\"can't get enough available ports, only %d ports are available\", len(ports))\n\t}\n\n\talloc.log.Info(\"Successfully 
allocated ports\", \"expeceted port num\", portNum, \"allocated ports\", ports)\n\treturn ports, nil\n}", "func (p *P) Ports() gnomock.NamedPorts {\n\treturn gnomock.DefaultTCP(defaultPort)\n}", "func (s *socatManager) Reserve(n int) ([]int, error) {\n\t//get all listening tcp ports\n\ttype portInfo struct {\n\t\tNetwork string `json:\"network\"`\n\t\tPort int `json:\"port\"`\n\t}\n\tvar ports []portInfo\n\n\t/*\n\t\tlist ports from local services, we of course can't grantee\n\t\tthat a service will start listening after listing the ports\n\t\tbut zos doesn't start any more services (it shouldn't) after\n\t\tthe initial bootstrap, so we almost safe by using this returned\n\t\tlist\n\t*/\n\tif err := s.api.Internal(\"info.port\", nil, &ports); err != nil {\n\t\treturn nil, err\n\t}\n\n\tused := make(map[int]struct{})\n\n\tfor _, port := range ports {\n\t\tif port.Network == \"tcp\" {\n\t\t\tused[port.Port] = struct{}{}\n\t\t}\n\t}\n\n\ts.rm.Lock()\n\tdefer s.rm.Unlock()\n\n\tfor port := range s.rules {\n\t\tused[port] = struct{}{}\n\t}\n\n\ts.sm.Lock()\n\tdefer s.sm.Unlock()\n\n\t//used is now filled with all assigned system ports (except reserved)\n\t//we can safely find the first port that is not used, and not in reseved and add it to\n\t//the result list\n\tvar result []int\n\tp := 1024\n\tfor i := 0; i < n; i++ {\n\t\tfor ; p <= 65536; p++ { //i know last valid port is at 65535, but check code below\n\t\t\tif _, ok := used[p]; ok {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tif _, ok := s.reserved.Get(fmt.Sprint(p)); ok {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tbreak\n\t\t}\n\n\t\tif p == 65536 {\n\t\t\treturn result, fmt.Errorf(\"pool is exhausted\")\n\t\t}\n\n\t\ts.reserved.Set(fmt.Sprint(p), nil, cache.DefaultExpiration)\n\t\tresult = append(result, p)\n\t}\n\n\treturn result, nil\n}", "func getAvailablePort(t *testing.T) int {\n\taddr, err := net.ResolveTCPAddr(\"tcp\", \"localhost:0\")\n\trequire.Nil(t, err)\n\n\tlisten, err := net.ListenTCP(\"tcp\", 
addr)\n\trequire.Nil(t, err)\n\n\tdefer listen.Close()\n\treturn listen.Addr().(*net.TCPAddr).Port\n}", "func OpenFreeUDPPort(portBase int, num int) (net.PacketConn, int, error) {\n\tfor i := 0; i < num; i++ {\n\t\tport := portBase + i\n\t\tconn, err := net.ListenPacket(\"udp\", fmt.Sprint(\":\", port))\n\t\tif err != nil {\n\t\t\tcontinue\n\t\t}\n\t\treturn conn, port, nil\n\t}\n\treturn nil, 0, errors.New(\"failed to open free port\")\n}", "func FindUnusedPort() (uint16, error) {\n\t// We let the kernel to find the port for us.\n\taddr, err := net.ResolveTCPAddr(\"tcp\", \"localhost:0\")\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\tl, err := net.ListenTCP(\"tcp\", addr)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\tdefer l.Close()\n\treturn uint16(l.Addr().(*net.TCPAddr).Port), nil\n}", "func getAvailablePort(from, to int) int {\n\tfor port := from; port <= to; port++ {\n\t\tif isPortAvailable(port) {\n\t\t\treturn port\n\t\t}\n\t}\n\n\treturn 0\n}", "func getPort() int {\n\tcount := 0\n\tfor count < 1000 {\n\t\tln, err := net.Listen(\"tcp\", \"127.0.0.1:0\")\n\t\tif err == nil {\n\t\t\tp := ln.Addr().(*net.TCPAddr).Port\n\t\t\tln.Close()\n\t\t\treturn p\n\t\t}\n\t\tcount++\n\t}\n\tpanic(\"Could not find an available port\")\n}", "func (m *Manager) useFreePort() error {\n\taddr, err := net.ResolveTCPAddr(\"tcp\", \"localhost:0\")\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tl, err := net.ListenTCP(\"tcp\", addr)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer l.Close()\n\tlocalServerPort = l.Addr().(*net.TCPAddr).Port\n\treturn nil\n}", "func GetPorts(lookupPids bool) map[string]GOnetstat.Process {\n\tports := make(map[string]GOnetstat.Process)\n\tnetstat, _ := GOnetstat.Tcp(lookupPids)\n\tvar net string\n\t//netPorts := make(map[string]GOnetstat.Process)\n\t//ports[\"tcp\"] = netPorts\n\tnet = \"tcp\"\n\tfor _, entry := range netstat {\n\t\tif entry.State == \"LISTEN\" {\n\t\t\tport := strconv.FormatInt(entry.Port, 10)\n\t\t\tports[net+\":\"+port] = 
entry\n\t\t}\n\t}\n\tnetstat, _ = GOnetstat.Tcp6(lookupPids)\n\t//netPorts = make(map[string]GOnetstat.Process)\n\t//ports[\"tcp6\"] = netPorts\n\tnet = \"tcp6\"\n\tfor _, entry := range netstat {\n\t\tif entry.State == \"LISTEN\" {\n\t\t\tport := strconv.FormatInt(entry.Port, 10)\n\t\t\tports[net+\":\"+port] = entry\n\t\t}\n\t}\n\tnetstat, _ = GOnetstat.Udp(lookupPids)\n\t//netPorts = make(map[string]GOnetstat.Process)\n\t//ports[\"udp\"] = netPorts\n\tnet = \"udp\"\n\tfor _, entry := range netstat {\n\t\tport := strconv.FormatInt(entry.Port, 10)\n\t\tports[net+\":\"+port] = entry\n\t}\n\tnetstat, _ = GOnetstat.Udp6(lookupPids)\n\t//netPorts = make(map[string]GOnetstat.Process)\n\t//ports[\"udp6\"] = netPorts\n\tnet = \"udp6\"\n\tfor _, entry := range netstat {\n\t\tport := strconv.FormatInt(entry.Port, 10)\n\t\tports[net+\":\"+port] = entry\n\t}\n\treturn ports\n}", "func (s *Scan) tcpConnScan() []int {\n\tvar wg sync.WaitGroup\n\n\tvar ports []int\n\tfor i := s.minPort; i <= s.maxPort; i++ {\n\t\twg.Add(1)\n\t\tgo func(i int) {\n\t\t\tfor {\n\t\t\t\thost := net.JoinHostPort(s.raddr.String(), fmt.Sprintf(\"%d\", i))\n\t\t\t\tconn, err := net.DialTimeout(\"tcp\", host, 2*time.Second)\n\t\t\t\tif err != nil {\n\t\t\t\t\tif strings.Contains(err.Error(), \"too many open files\") {\n\t\t\t\t\t\t// random back-off\n\t\t\t\t\t\ttime.Sleep(time.Duration(10+rand.Int31n(30)) * time.Millisecond)\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t}\n\t\t\t\t\twg.Done()\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tconn.Close()\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tports = append(ports, i)\n\t\t\twg.Done()\n\t\t}(i)\n\t}\n\n\twg.Wait()\n\tsort.Ints(ports)\n\treturn ports\n}", "func RandomTCPPort() int {\n\tfor i := maxReservedTCPPort; i < maxTCPPort; i++ {\n\t\tp := tcpPortRand.Intn(maxRandTCPPort) + maxReservedTCPPort + 1\n\t\tif IsTCPPortAvailable(p) {\n\t\t\treturn p\n\t\t}\n\t}\n\treturn -1\n}", "func (server *testHTTPServerImpl) randomFreePort() int64 {\n\tmaxAttempts := 5\n\tattempt := 0\n\trandomPort 
:= server.randomPort()\n\tfor attempt < maxAttempts && server.isPortInUse(randomPort) {\n\t\tlog.Printf(\"Port %d already in use, try with new port number\", randomPort)\n\t\tattempt++\n\t\trandomPort = server.randomPort()\n\t}\n\treturn randomPort\n}", "func (test *Test) GetPorts(projectName string, ip string) ([]models.Port, error) {\n\treturn tests.NormalPorts, nil\n}", "func GetPorts(service corev1.Service) []int {\n\tif len(service.Spec.Ports) == 0 {\n\t\treturn []int{}\n\t}\n\tvar svcPorts []int\n\tfor _, port := range service.Spec.Ports {\n\t\tsvcPorts = append(svcPorts, int(port.Port))\n\t}\n\treturn svcPorts\n}", "func (ec *EvalCtx) growPorts(n int) {\n\tif len(ec.ports) >= n {\n\t\treturn\n\t}\n\tports := ec.ports\n\tec.ports = make([]*Port, n)\n\tcopy(ec.ports, ports)\n}", "func (ec *EvalCtx) growPorts(n int) {\n\tif len(ec.ports) >= n {\n\t\treturn\n\t}\n\tports := ec.ports\n\tec.ports = make([]*Port, n)\n\tcopy(ec.ports, ports)\n}", "func (c *RedfishClient) GetNetworkPorts(uri string) ([]model.NetworkPort, error) {\n\tcollection := redfish.Collection{}\n\tif err := c.Get(uri, &collection); err != nil {\n\t\treturn nil, err\n\t}\n\tvar ret []model.NetworkPort\n\tfor i := range collection.Members {\n\t\tresp := new(redfish.NetworkPort)\n\t\tif err := c.Get(collection.Members[i].Id, resp); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tret = append(ret, *redfish.CreateNetworkPortModel(resp))\n\t}\n\treturn ret, nil\n}", "func getSplunkServicePorts(instanceType InstanceType) []corev1.ServicePort {\n\tl := []corev1.ServicePort{}\n\tfor key, value := range getSplunkPorts(instanceType) {\n\t\tl = append(l, corev1.ServicePort{\n\t\t\tName: key,\n\t\t\tPort: int32(value),\n\t\t\tProtocol: \"TCP\",\n\t\t})\n\t}\n\treturn l\n}", "func nodePorts(svcPorts []utils.ServicePort) []int64 {\n\tports := []int64{}\n\tfor _, p := range uniq(svcPorts) {\n\t\tif !p.NEGEnabled {\n\t\t\tports = append(ports, p.NodePort)\n\t\t}\n\t}\n\treturn ports\n}", "func (c 
*RedfishClient) GetNetworkPorts(uri string) ([]model.NetworkPort, error) {\n\tcollection := dto.Collection{}\n\tif err := c.Get(uri, &collection); err != nil {\n\t\treturn nil, err\n\t}\n\tvar ret []model.NetworkPort\n\tfor i := range collection.Members {\n\t\tresp := new(dto.NetworkPort)\n\t\tif err := c.Get(collection.Members[i].Id, resp); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tret = append(ret, *createNetworkPortModel(resp))\n\t}\n\treturn ret, nil\n}", "func TestCometStarter_PortContention(t *testing.T) {\n\tif testing.Short() {\n\t\tt.Skip(\"skipping long test in short mode\")\n\t}\n\n\tconst nVals = 4\n\n\t// Find n+1 addresses that should be free.\n\t// Ephemeral port range should start at about 49k+\n\t// according to `sysctl net.inet.ip.portrange` on macOS,\n\t// and at about 32k+ on Linux\n\t// according to `sysctl net.ipv4.ip_local_port_range`.\n\t//\n\t// Because we attempt to find free addresses outside that range,\n\t// it is unlikely that another process will claim a port\n\t// we discover to be free, during the time this test runs.\n\tconst portSeekStart = 19000\n\treuseAddrs := make([]string, 0, nVals+1)\n\tfor i := portSeekStart; i < portSeekStart+1000; i++ {\n\t\taddr := fmt.Sprintf(\"127.0.0.1:%d\", i)\n\t\tln, err := net.Listen(\"tcp\", addr)\n\t\tif err != nil {\n\t\t\t// No need to log the failure.\n\t\t\tcontinue\n\t\t}\n\n\t\t// If the port was free, append it to our reusable addresses.\n\t\treuseAddrs = append(reuseAddrs, \"tcp://\"+addr)\n\t\t_ = ln.Close()\n\n\t\tif len(reuseAddrs) == nVals+1 {\n\t\t\tbreak\n\t\t}\n\t}\n\n\tif len(reuseAddrs) != nVals+1 {\n\t\tt.Fatalf(\"needed %d free ports but only found %d\", nVals+1, len(reuseAddrs))\n\t}\n\n\t// Now that we have one more port than the number of validators,\n\t// there is a good chance that picking a random port will conflict with a previously chosen one.\n\t// But since CometStarter retries several times,\n\t// it should eventually land on a free port.\n\n\tvalPKs := 
testnet.NewValidatorPrivKeys(nVals)\n\tcmtVals := valPKs.CometGenesisValidators()\n\tstakingVals := cmtVals.StakingValidators()\n\n\tconst chainID = \"simapp-cometstarter\"\n\n\tb := testnet.DefaultGenesisBuilderOnlyValidators(\n\t\tchainID,\n\t\tstakingVals,\n\t\tsdk.NewCoin(sdk.DefaultBondDenom, sdk.DefaultPowerReduction),\n\t)\n\n\tjGenesis := b.Encode()\n\n\t// Use an info-level logger, because the debug logs in comet are noisy\n\t// and there is a data race in comet debug logs,\n\t// due to be fixed in v0.37.1 which is not yet released:\n\t// https://github.com/cometbft/cometbft/pull/532\n\tlogger := log.NewTestLoggerInfo(t)\n\n\tconst nRuns = 4\n\tfor i := 0; i < nRuns; i++ {\n\t\tt.Run(fmt.Sprintf(\"attempt %d\", i), func(t *testing.T) {\n\t\t\tnodes, err := testnet.NewNetwork(nVals, func(idx int) *testnet.CometStarter {\n\t\t\t\trootDir := t.TempDir()\n\n\t\t\t\tapp := simapp.NewSimApp(\n\t\t\t\t\tlogger.With(\"instance\", idx),\n\t\t\t\t\tdbm.NewMemDB(),\n\t\t\t\t\tnil,\n\t\t\t\t\ttrue,\n\t\t\t\t\tsimtestutil.NewAppOptionsWithFlagHome(rootDir),\n\t\t\t\t\tbaseapp.SetChainID(chainID),\n\t\t\t\t)\n\n\t\t\t\tcfg := cmtcfg.DefaultConfig()\n\n\t\t\t\t// memdb is sufficient for this test.\n\t\t\t\tcfg.BaseConfig.DBBackend = \"memdb\"\n\n\t\t\t\treturn testnet.NewCometStarter(\n\t\t\t\t\tapp,\n\t\t\t\t\tcfg,\n\t\t\t\t\tvalPKs[idx].Val,\n\t\t\t\t\tjGenesis,\n\t\t\t\t\trootDir,\n\t\t\t\t).\n\t\t\t\t\tLogger(logger.With(\"rootmodule\", fmt.Sprintf(\"comet_node-%d\", idx))).\n\t\t\t\t\tTCPAddrChooser(func() string {\n\t\t\t\t\t\t// This chooser function is the key of this test,\n\t\t\t\t\t\t// where there is only one more available address than there are nodes.\n\t\t\t\t\t\t// Therefore it is likely that an address will already be in use,\n\t\t\t\t\t\t// thereby exercising the address-in-use retry.\n\t\t\t\t\t\treturn reuseAddrs[rand.Intn(len(reuseAddrs))]\n\t\t\t\t\t})\n\t\t\t})\n\n\t\t\t// Ensure nodes are stopped completely,\n\t\t\t// so that we don't get 
t.Cleanup errors around directories not being empty.\n\t\t\tdefer func() {\n\t\t\t\terr := nodes.StopAndWait()\n\t\t\t\tif err != nil {\n\t\t\t\t\tpanic(err)\n\t\t\t\t}\n\t\t\t}()\n\t\t\trequire.NoError(t, err)\n\n\t\t\t// Ensure that the height advances.\n\t\t\t// Looking for height 2 seems more meaningful than 1.\n\t\t\trequire.NoError(t, testnet.WaitForNodeHeight(nodes[0], 2, 10*time.Second))\n\t\t})\n\t}\n}", "func FreeTCPAddr() (addr, port string, err error) {\n\tl, err := net.Listen(\"tcp\", \"localhost:0\")\n\tif err != nil {\n\t\treturn \"\", \"\", err\n\t}\n\n\tcloser := func() {\n\t\terr := l.Close()\n\t\tif err != nil {\n\t\t\t// TODO: Handle with #870\n\t\t\tpanic(err)\n\t\t}\n\t}\n\n\tdefer closer()\n\n\tportI := l.Addr().(*net.TCPAddr).Port\n\tport = fmt.Sprintf(\"%d\", portI)\n\taddr = fmt.Sprintf(\"tcp://0.0.0.0:%s\", port)\n\treturn\n}", "func (a *cpuAccumulator) freeSockets() []int {\n\tfree := []int{}\n\tfor _, socket := range a.sortAvailableSockets() {\n\t\tif a.isSocketFree(socket) {\n\t\t\tfree = append(free, socket)\n\t\t}\n\t}\n\treturn free\n}", "func MustNextN(n int) []int {\n\tports, err := NextN(n)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn ports\n}", "func (r *portsRegistry) Reserve() (uint16, error) {\n\tr.m.Lock()\n\tdefer r.m.Unlock()\n\n\tsize := r.max - r.min + 1\n\tfor i := uint16(1); i <= size; i++ {\n\t\tport := r.min + (r.last-r.min+i)%size\n\t\tif _, ok := r.reserved[port]; ok {\n\t\t\tcontinue\n\t\t}\n\n\t\tl, err := net.Listen(\"tcp\", fmt.Sprintf(\"127.0.0.1:%d\", port))\n\t\tif l != nil {\n\t\t\t_ = l.Close()\n\t\t}\n\t\tif err != nil {\n\t\t\tcontinue\n\t\t}\n\n\t\tr.reserved[port] = struct{}{}\n\t\tr.last = port\n\t\treturn port, nil\n\t}\n\n\treturn 0, errNoFreePort\n}", "func (s *SecurityRule) Ports() []string {\n\treturn s.Ports_\n}", "func Get() int {\n\tfor i := maxReservedTCPPort; i < maxTCPPort; i++ {\n\t\tp := tcpPortRand.Intn(maxRandTCPPort) + maxReservedTCPPort + 1\n\t\tif IsAvailable(p) {\n\t\t\treturn 
p\n\t\t}\n\t}\n\treturn -1\n}", "func GetOpenPortInRange(lowerBound, upperBound int) (int, error) {\n\tif lowerBound < portRangeMin {\n\t\treturn -1, errPortMin\n\t}\n\tfor lowerBound <= portRangeMax && lowerBound <= upperBound {\n\t\tif _, err := net.Dial(\"tcp\", fmt.Sprintf(\":%d\", lowerBound)); err != nil {\n\t\t\treturn lowerBound, nil\n\t\t}\n\t\tlowerBound++\n\t}\n\tif upperBound > portRangeMax {\n\t\treturn -1, errPortMax\n\t}\n\treturn -1, errPortNotFound\n}", "func GetValidExposedPortNumber(exposedPort int) (int, error) {\n\t// exposed port number will be -1 if the user doesn't specify any port\n\tif exposedPort == -1 {\n\t\tfreePort, err := util.HTTPGetFreePort()\n\t\tif err != nil {\n\t\t\treturn -1, err\n\t\t}\n\t\treturn freePort, nil\n\t} else {\n\t\t// check if the given port is available\n\t\tlistener, err := net.Listen(\"tcp\", \":\"+strconv.Itoa(exposedPort))\n\t\tif err != nil {\n\t\t\treturn -1, errors.Wrapf(err, \"given port %d is not available, please choose another port\", exposedPort)\n\t\t}\n\t\tdefer listener.Close()\n\t\treturn exposedPort, nil\n\t}\n}", "func (p *Printer) GetPorts(f v1.Flow) (string, string) {\n\tl4 := f.GetL4()\n\tif l4 == nil {\n\t\treturn \"\", \"\"\n\t}\n\tswitch l4.Protocol.(type) {\n\tcase *pb.Layer4_TCP:\n\t\treturn p.TCPPort(layers.TCPPort(l4.GetTCP().SourcePort)), p.TCPPort(layers.TCPPort(l4.GetTCP().DestinationPort))\n\tcase *pb.Layer4_UDP:\n\t\treturn p.UDPPort(layers.UDPPort(l4.GetUDP().SourcePort)), p.UDPPort(layers.UDPPort(l4.GetUDP().DestinationPort))\n\tdefault:\n\t\treturn \"\", \"\"\n\t}\n}", "func (w Work) Ports() map[string]connector.Connector {\n\treturn w.Ports_\n}", "func (l *Libvirt) NodeGetFreePages(Pages []uint32, StartCell int32, CellCount uint32, Flags uint32) (rCounts []uint64, err error) {\n\tvar buf []byte\n\n\targs := NodeGetFreePagesArgs {\n\t\tPages: Pages,\n\t\tStartCell: StartCell,\n\t\tCellCount: CellCount,\n\t\tFlags: Flags,\n\t}\n\n\tbuf, err = encode(&args)\n\tif err != nil 
{\n\t\treturn\n\t}\n\n\tvar r response\n\tr, err = l.requestStream(340, constants.Program, buf, nil, nil)\n\tif err != nil {\n\t\treturn\n\t}\n\n\t// Return value unmarshaling\n\ttpd := typedParamDecoder{}\n\tct := map[string]xdr.TypeDecoder{\"libvirt.TypedParam\": tpd}\n\trdr := bytes.NewReader(r.Payload)\n\tdec := xdr.NewDecoderCustomTypes(rdr, 0, ct)\n\t// Counts: []uint64\n\t_, err = dec.Decode(&rCounts)\n\tif err != nil {\n\t\treturn\n\t}\n\n\treturn\n}", "func (o *Service) GetServicePortsTcp() []string {\n\tif o == nil || o.ServicePortsTcp == nil {\n\t\tvar ret []string\n\t\treturn ret\n\t}\n\treturn *o.ServicePortsTcp\n}", "func getPort() (int, error) {\n\taddr, err := net.ResolveTCPAddr(\"tcp\", \"localhost:0\")\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\n\tl, err := net.ListenTCP(\"tcp\", addr)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\tdefer l.Close()\n\n\treturn l.Addr().(*net.TCPAddr).Port, nil\n}", "func getSplunkContainerPorts(instanceType InstanceType) []corev1.ContainerPort {\n\tl := []corev1.ContainerPort{}\n\tfor key, value := range getSplunkPorts(instanceType) {\n\t\tl = append(l, corev1.ContainerPort{\n\t\t\tName: key,\n\t\t\tContainerPort: int32(value),\n\t\t\tProtocol: \"TCP\",\n\t\t})\n\t}\n\treturn l\n}", "func (rp *ResolverPool) Port() int {\n\treturn 0\n}", "func (rp *ResolverPool) Port() int {\n\treturn 0\n}", "func IsTCPPortAvailable(port int) bool {\n\tif port < minTCPPort || port > maxTCPPort {\n\t\treturn false\n\t}\n\tconn, err := net.Listen(\"tcp\", fmt.Sprintf(\"127.0.0.1:%d\", port))\n\tif err != nil {\n\t\treturn false\n\t}\n\tconn.Close()\n\treturn true\n}", "func Ports(ports ...int) Option {\n\treturn func(c *Container) {\n\t\tvar p []string\n\t\tfor _, port := range ports {\n\t\t\tp = append(p, fmt.Sprintf(\"%d\", port))\n\t\t}\n\t\tc.ports = p\n\t}\n}", "func exposedPorts(node *parser.Node) [][]string {\n\tvar allPorts [][]string\n\tvar ports []string\n\tfroms := FindAll(node, command.From)\n\texposes := FindAll(node, 
command.Expose)\n\tfor i, j := len(froms)-1, len(exposes)-1; i >= 0; i-- {\n\t\tfor ; j >= 0 && exposes[j] > froms[i]; j-- {\n\t\t\tports = append(nextValues(node.Children[exposes[j]]), ports...)\n\t\t}\n\t\tallPorts = append([][]string{ports}, allPorts...)\n\t\tports = nil\n\t}\n\treturn allPorts\n}", "func getOpenPorts() string {\n\tcmd := \"./Bash Functions/getOpenPorts.sh\"\n\n\t// Get's output of 'nmap' command\n\topenPortsByte, _ := exec.Command(cmd).Output()\n\topenPortsString := string(openPortsByte)\n\topenPortsString = strings.Trim(openPortsString, \"\\n\")\n\n\treturn openPortsString\n}", "func initialize() {\n\tvar err error\n\teffectiveMaxBlocks, err = adjustMaxBlocks()\n\tif err != nil {\n\t\tpanic(\"freeport: ephemeral port range detection failed: \" + err.Error())\n\t}\n\tif effectiveMaxBlocks < 0 {\n\t\tpanic(\"freeport: no blocks of ports available outside of ephemeral range\")\n\t}\n\tif lowPort+effectiveMaxBlocks*blockSize > 65535 {\n\t\tpanic(\"freeport: block size too big or too many blocks requested\")\n\t}\n\n\trand.Seed(time.Now().UnixNano())\n\tfirstPort, lockLn = alloc()\n\n\tcondNotEmpty = sync.NewCond(&mu)\n\tfreePorts = list.New()\n\tpendingPorts = list.New()\n\n\t// fill with all available free ports\n\tfor port := firstPort + 1; port < firstPort+blockSize; port++ {\n\t\tif used := isPortInUse(port); !used {\n\t\t\tfreePorts.PushBack(port)\n\t\t}\n\t}\n\ttotal = freePorts.Len()\n\n\tgo checkFreedPorts()\n}", "func getPort() (port uint16) {\n\taddr, err := net.ResolveTCPAddr(\"tcp\", \"localhost:0\")\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tl, err := net.ListenTCP(\"tcp\", addr)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tdefer l.Close()\n\treturn uint16(l.Addr().(*net.TCPAddr).Port)\n}", "func RandomPort() (int, error) {\n\tl, err := net.Listen(\"tcp\", \"\")\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\tif err := l.Close(); err != nil {\n\t\treturn 0, nil\n\t}\n\treturn l.Addr().(*net.TCPAddr).Port, nil\n}", "func (crMgr 
*CRManager) virtualPorts(vs *cisapiv1.VirtualServer) []portStruct {\n\n\t// TODO ==> This will change as we will support custom ports.\n\tconst DEFAULT_HTTP_PORT int32 = 80\n\t//const DEFAULT_HTTPS_PORT int32 = 443\n\tvar httpPort int32\n\t// var httpsPort int32\n\thttpPort = DEFAULT_HTTP_PORT\n\t// httpsPort = DEFAULT_HTTPS_PORT\n\n\thttp := portStruct{\n\t\tprotocol: \"http\",\n\t\tport: httpPort,\n\t}\n\t// Support TLS Type, Create both HTTP and HTTPS\n\t/**\n\thttps := portStruct{\n\t\tprotocol: \"https\",\n\t\tport: httpsPort,\n\t}**/\n\tvar ports []portStruct\n\n\t// Support TLS Type, Create both HTTP and HTTPS\n\t/**\n\tif len(vs.Spec.TLS) > 0 {\n\t\t// 2 virtual servers needed, both HTTP and HTTPS\n\t\tports = append(ports, http)\n\t\tports = append(ports, https)\n\t} else {\n\t\t// HTTP only\n\t\tports = append(ports, http)\n\t}**/\n\n\tports = append(ports, http)\n\n\treturn ports\n}", "func openPorts() {\n\tinPort, err = utils.CreateInputPort(\"bonjour/discover.options\", *inputEndpoint, nil)\n\tutils.AssertError(err)\n}", "func randomPort() (string, error) {\n\tconst (\n\t\tminPort = 1024\n\t\tmaxPort = 65535\n\t\tmaxTries = 10\n\t)\n\tfor i := 0; i < maxTries; i++ {\n\t\tport := rand.Intn(maxPort-minPort+1) + minPort\n\t\tl, err := net.Listen(\"tcp\", fmt.Sprintf(\":%d\", port))\n\t\tif err == nil {\n\t\t\t_ = l.Close()\n\t\t\treturn strconv.Itoa(port), nil\n\t\t}\n\t\tflog.Info(\"port taken: %d\", port)\n\t}\n\n\treturn \"\", xerrors.Errorf(\"max number of tries exceeded: %d\", maxTries)\n}", "func (c *ClientProxyMappingParser) GetClientProxyMappingPorts() (ports []string) {\n\tc.init()\n\treturn c.ports\n}", "func portscan(asyncCount int, host string, startPort uint32, endPort uint32, portsChecked *uint32) (chan uint32, chan bool) {\n\tportCount := endPort + 1 - startPort\n\n\tvar goroutines = make(chan bool, asyncCount) // concurrency control\n\tvar openPorts = make(chan uint32, portCount) // Store list of open ports, concurrency-safe, 
buffered\n\tvar completed = make(chan bool)\n\n\tgo func() {\n\t\t// Tasks to do at completion of scanning\n\t\tdefer func() {\n\t\t\t// Close openPorts channel since it's buffered\n\t\t\tclose(openPorts)\n\n\t\t\t// Send signal to anything waiting on buffered completion channel\n\t\t\tcompleted <- true\n\t\t}()\n\n\t\tfor port := startPort; port <= endPort; port++ {\n\t\t\tgoroutines <- true // Wait until allowed to go\n\n\t\t\tgo func(p uint32) {\n\t\t\t\tdefer func() {\n\t\t\t\t\t<-goroutines\n\t\t\t\t}() // release lock when done\n\n\t\t\t\t// Check the port\n\t\t\t\tif portOpen := scanOnePort(host, p); portOpen {\n\t\t\t\t\topenPorts <- p\n\t\t\t\t}\n\t\t\t\tatomic.AddUint32(portsChecked, 1)\n\t\t\t}(port)\n\t\t}\n\n\t}()\n\n\treturn openPorts, completed\n}", "func newServicePorts(m *influxdatav1alpha1.Influxdb) []corev1.ServicePort {\n\tvar ports []corev1.ServicePort\n\n\tports = append(ports, corev1.ServicePort{Port: 8086, Name: \"api\"},\n\t\tcorev1.ServicePort{Port: 2003, Name: \"graphite\"},\n\t\tcorev1.ServicePort{Port: 25826, Name: \"collectd\"},\n\t\tcorev1.ServicePort{Port: 8089, Name: \"udp\"},\n\t\tcorev1.ServicePort{Port: 4242, Name: \"opentsdb\"},\n\t\tcorev1.ServicePort{Port: 8088, Name: \"backup-restore\"},\n\t)\n\treturn ports\n}", "func Sequential(port string) []string {\n\tdialer := net.Dialer{Timeout: time.Millisecond * 100}\n\toutput := make([]string, 0)\n\n\tfor i := 0; i <= 255; i++ {\n\t\taddr := addr + \".\" + strconv.Itoa(i) + \":\" + port\n\t\tconn, err := dialer.Dial(\"tcp\", addr)\n\t\tif err != nil {\n\t\t\tcontinue\n\t\t}\n\t\toutput = append(output, conn.RemoteAddr().String())\n\t\tconn.Close()\n\t}\n\n\treturn output\n}", "func (rcsw *RemoteClusterServiceWatcher) getEndpointsPorts(service *corev1.Service, gatewayPort int32) []corev1.EndpointPort {\n\tvar endpointsPorts []corev1.EndpointPort\n\tfor _, remotePort := range service.Spec.Ports {\n\t\tendpointsPorts = append(endpointsPorts, corev1.EndpointPort{\n\t\t\tName: 
remotePort.Name,\n\t\t\tProtocol: remotePort.Protocol,\n\t\t\tPort: gatewayPort,\n\t\t})\n\t}\n\treturn endpointsPorts\n}", "func findPort() (int, error) {\n\taddr, err := net.ResolveTCPAddr(\"tcp\", \"localhost:0\")\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\n\tl, err := net.ListenTCP(\"tcp\", addr)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\tdefer l.Close()\n\n\treturn l.Addr().(*net.TCPAddr).Port, nil\n}", "func CreateServers(ctx context.Context, n int) ([]int, error) {\n\tvar ports []int\n\n\tlocalCtx, cancel := context.WithCancel(ctx)\n\tfor i := 0; i < n; i++ {\n\t\tport, err := ListenHTTP(localCtx)\n\t\tif err != nil {\n\t\t\tcancel()\n\t\t\treturn nil, err\n\t\t}\n\n\t\tports = append(ports, port)\n\t}\n\n\treturn ports, nil\n}", "func RandomPort() int {\n\tr := rand.New(rand.NewSource(time.Now().UnixNano()))\n\tport := r.Intn(2000) + 30000\n\tfor i := 0; i < 18000; i++ {\n\t\tif checkPortIsOpen(port) == false {\n\t\t\tbreak\n\t\t}\n\t\tport++\n\t\t// retry next port\n\t}\n\treturn port\n}", "func getContainerPorts(ports []echo.Port) model.PortList {\n\tcontainerPorts := make(model.PortList, 0, len(ports))\n\tvar healthPort *model.Port\n\tvar readyPort *model.Port\n\tfor _, p := range ports {\n\t\t// Add the port to the set of application ports.\n\t\tcport := &model.Port{\n\t\t\tName: p.Name,\n\t\t\tProtocol: p.Protocol,\n\t\t\tPort: p.InstancePort,\n\t\t}\n\t\tcontainerPorts = append(containerPorts, cport)\n\n\t\tswitch p.Protocol {\n\t\tcase model.ProtocolGRPC:\n\t\t\tcontinue\n\t\tcase model.ProtocolHTTP:\n\t\t\tif p.InstancePort == httpReadinessPort {\n\t\t\t\treadyPort = cport\n\t\t\t}\n\t\tdefault:\n\t\t\tif p.InstancePort == tcpHealthPort {\n\t\t\t\thealthPort = cport\n\t\t\t}\n\t\t}\n\t}\n\n\t// If we haven't added the readiness/health ports, do so now.\n\tif readyPort == nil {\n\t\tcontainerPorts = append(containerPorts, &model.Port{\n\t\t\tName: \"http-readiness-port\",\n\t\t\tProtocol: model.ProtocolHTTP,\n\t\t\tPort: 
httpReadinessPort,\n\t\t})\n\t}\n\tif healthPort == nil {\n\t\tcontainerPorts = append(containerPorts, &model.Port{\n\t\t\tName: \"tcp-health-port\",\n\t\t\tProtocol: model.ProtocolHTTP,\n\t\t\tPort: tcpHealthPort,\n\t\t})\n\t}\n\treturn containerPorts\n}", "func (o *NetworkElementSummaryAllOf) GetNumFcPorts() int64 {\n\tif o == nil || o.NumFcPorts == nil {\n\t\tvar ret int64\n\t\treturn ret\n\t}\n\treturn *o.NumFcPorts\n}", "func ScanPorts(host string, options ...ScanPortsOption) <-chan int {\n\topts := defaultScanPortsOptions()\n\tfor _, opt := range options {\n\t\topt.setScanPortsOption(opts)\n\t}\n\n\tallPorts := pipeline.Ints(1, maxPort)\n\tfoundPorts := make([]<-chan int, 128)\n\tfor i := 0; i < 128; i++ {\n\t\tfoundPorts[i] = scanPorts(host, allPorts, opts.EagerPrint)\n\t}\n\treturn pipeline.MergeInts(foundPorts...)\n}", "func (alloc *RuntimePortAllocator) ReleaseReservedPorts(ports []int) {\n\talloc.log.Info(\"Releasing reserved ports\", \"ports to be released\", ports)\n\tfor _, port := range ports {\n\t\tif err := alloc.pa.Release(port); err != nil {\n\t\t\talloc.log.Error(err, \"can't release port\", \"port\", port)\n\t\t}\n\t}\n}", "func (o FirewallAllowedItemOutput) Ports() pulumi.StringArrayOutput {\n\treturn o.ApplyT(func(v FirewallAllowedItem) []string { return v.Ports }).(pulumi.StringArrayOutput)\n}", "func GetUriFilteringTotalAttackConnectionPort(engine *xorm.Engine, telePreMitigationId int64) (tac []UriFilteringTotalAttackConnectionPort, err error) {\n\ttac = []UriFilteringTotalAttackConnectionPort{}\n\terr = engine.Where(\"tele_pre_mitigation_id = ?\", telePreMitigationId).OrderBy(\"id ASC\").Find(&tac)\n\treturn\n}", "func GetPort() int {\n\taddr, err := net.ResolveTCPAddr(\"tcp\", \"localhost:0\")\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tl, err := net.ListenTCP(\"tcp\", addr)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tdefer l.Close()\n\treturn l.Addr().(*net.TCPAddr).Port\n}", "func pickFreeAddr(t *testing.T) string {\n\tl, err := 
net.Listen(\"tcp\", \"127.0.0.1:0\")\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tdefer l.Close()\n\treturn l.Addr().String()\n}", "func randomPort() string {\n\tl, err := net.Listen(\"tcp\", \"localhost:0\")\n\tif err != nil {\n\t\t// listening for port 0 should never error but just in case\n\t\treturn strconv.Itoa(1024 + rand.Intn(65536-1024))\n\t}\n\n\tp := l.Addr().(*net.TCPAddr).Port\n\tl.Close()\n\treturn strconv.Itoa(p)\n}", "func (o FirewallAllowedItemResponseOutput) Ports() pulumi.StringArrayOutput {\n\treturn o.ApplyT(func(v FirewallAllowedItemResponse) []string { return v.Ports }).(pulumi.StringArrayOutput)\n}", "func (s *server) getListenerPorts() map[uint32]bool {\n\n\tlistenerPorts := map[uint32]bool{}\n\tfor _, listener := range s.dbentities.GetListeners() {\n\t\tlistenerPorts[uint32(listener.Port)] = true\n\t}\n\treturn listenerPorts\n}", "func V1GetPorts(c *gin.Context) {\n\tvar ports []m.PortCBP\n\tportPt := &ports\n\tif err := dao.GetAllPorts(&portPt); err != nil {\n\t\tutils.NotFound(c, err)\n\t\treturn\n\t}\n\tutils.Ok(c, *portPt)\n}", "func openPorts() {\n\toptionsPort, err = utils.CreateInputPort(\"distinct.options\", *optionsEndpoint, nil)\n\tutils.AssertError(err)\n\n\tinPort, err = utils.CreateInputPort(\"distinct.in\", *inputEndpoint, inCh)\n\tutils.AssertError(err)\n\n\toutPort, err = utils.CreateOutputPort(\"distinct.out\", *outputEndpoint, outCh)\n\tutils.AssertError(err)\n}", "func WaitTCPPort(ctx Ctx, addr fmt.Stringer) error {\n\tconst delay = time.Second / 20\n\tbackOff := backoff.WithContext(backoff.NewConstantBackOff(delay), ctx)\n\top := func() error {\n\t\tvar dialer net.Dialer\n\t\tconn, err := dialer.DialContext(ctx, \"tcp\", addr.String())\n\t\tif err == nil {\n\t\t\terr = conn.Close()\n\t\t}\n\t\treturn err\n\t}\n\treturn backoff.Retry(op, backOff)\n}" ]
[ "0.6888929", "0.67983717", "0.66069543", "0.65850073", "0.6558296", "0.6558296", "0.65428734", "0.6534923", "0.6523196", "0.65159756", "0.65052295", "0.64984024", "0.6488279", "0.6486295", "0.6423216", "0.64032197", "0.6374218", "0.63717896", "0.6371297", "0.63018143", "0.6290728", "0.62243843", "0.6131413", "0.6048003", "0.602278", "0.60175073", "0.594997", "0.594997", "0.58913636", "0.5886659", "0.58812517", "0.583339", "0.58161175", "0.5798913", "0.5773416", "0.57414144", "0.572828", "0.5690129", "0.5667676", "0.561003", "0.55756605", "0.55755717", "0.5511968", "0.5399512", "0.5258598", "0.5257981", "0.5257981", "0.52372044", "0.5208359", "0.5204069", "0.51661086", "0.51636267", "0.51075286", "0.5099787", "0.5081063", "0.5070362", "0.5067064", "0.50518686", "0.50508904", "0.5038434", "0.49972916", "0.49908856", "0.4957175", "0.4893593", "0.4884692", "0.48818564", "0.48740864", "0.48740864", "0.48738757", "0.4859928", "0.48560318", "0.4853574", "0.48479646", "0.4846071", "0.4845135", "0.48419297", "0.48287135", "0.48183346", "0.47942844", "0.4780374", "0.47620478", "0.47512275", "0.47439438", "0.47422034", "0.4741782", "0.47404355", "0.4727248", "0.4721838", "0.4721072", "0.4720844", "0.47015762", "0.46961495", "0.4694343", "0.46881056", "0.46860495", "0.46858385", "0.4685394", "0.4681093", "0.46760586", "0.4664238" ]
0.8380831
0
HostUUIDExistsLocally checks if dataDir/host_uuid file exists in local storage.
HostUUIDExistsLocally проверяет, существует ли файл dataDir/host_uuid в локальном хранилище.
func HostUUIDExistsLocally(dataDir string) bool { _, err := ReadHostUUID(dataDir) return err == nil }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (r *Release) localExist() error {\n\tvar (\n\t\tversion string = fmt.Sprintf(\"terraform-%s.zip\", r.Version)\n\t\terr error\n\t)\n\n\tif _, err = os.Stat(filepath.Join(r.Home, PathTmp.toString(), version)); !os.IsNotExist(err) {\n\t\tfmt.Println(\"Already in cache ...\")\n\t\treturn err\n\t}\n\n\treturn nil\n}", "func (this *DataStore) isExistUUID(uuid string) bool {\n\tif _ ,ok := this.ProcessTable[uuid]; ok {\n\t\treturn true\n\t} \n\treturn false\n}", "func (b *Binary) LocalExist() bool {\n\treturn b.file.LocalExist()\n}", "func (l *localFileSystem) Exists(prefix string, filepath string) bool {\n\tif p := strings.TrimPrefix(filepath, prefix); len(p) <= len(filepath) {\n\t\tp = path.Join(l.root, p)\n\t\t/*if !l.physfs {\n\t\t\treturn existsFile(l, p)\n\t\t} else {*/\n\t\tfmt.Println(\"Exists: \" + p)\n\t\treturn physfs.Exists(p)\n\t\t//}\n\t}\n\treturn false\n}", "func (r *Release) remoteExist() error {\n\tvar (\n\t\turl string = fmt.Sprintf(PathTerraform.toString(), r.Version, r.Version, runtime.GOOS, runtime.GOARCH)\n\t\tresp *http.Response\n\t\terr error\n\t)\n\n\tif resp, err = r.HTTPclient.Get(url); err != nil {\n\t\treturn err\n\t}\n\tdefer resp.Body.Close()\n\n\t// Verify code equal 200\n\tif resp.StatusCode == http.StatusOK {\n\t\treturn nil\n\t}\n\n\treturn nil\n}", "func (s storage) Exist(path string) bool {\n\t_, err := os.Stat(path)\n\treturn !os.IsNotExist(err)\n}", "func IsExist(err error) bool", "func exists() bool {\r\n\t_, err := ioutil.ReadFile(\"nodestore.json\")\r\n\tif os.IsNotExist(err) {\r\n\t\treturn false\r\n\t}\r\n\treturn true\r\n}", "func FilesStorageExists(exec boil.Executor, iD int) (bool, error) {\n\tvar exists bool\n\tsql := \"select exists(select 1 from `files_storages` where `id`=? 
limit 1)\"\n\n\tif boil.DebugMode {\n\t\tfmt.Fprintln(boil.DebugWriter, sql)\n\t\tfmt.Fprintln(boil.DebugWriter, iD)\n\t}\n\n\trow := exec.QueryRow(sql, iD)\n\n\terr := row.Scan(&exists)\n\tif err != nil {\n\t\treturn false, errors.Wrap(err, \"models: unable to check if files_storages exists\")\n\t}\n\n\treturn exists, nil\n}", "func (d *Driver) Exists(id string) bool {\n\td.Lock()\n\tdefer d.Unlock()\n\treturn d.filesystemsCache[d.zfsPath(id)]\n}", "func (instance *Host) Exists(ctx context.Context) (_ bool, ferr fail.Error) {\n\tdefer fail.OnPanic(&ferr)\n\n\tif valid.IsNil(instance) {\n\t\treturn false, fail.InvalidInstanceError()\n\t}\n\n\tdefer elapsed(ctx, fmt.Sprintf(\"Exist of %s\", instance.name.Load().(string)))()\n\ttheID, err := instance.GetID()\n\tif err != nil {\n\t\treturn false, fail.ConvertError(err)\n\t}\n\n\tif beta := os.Getenv(\"SAFESCALE_DETECT_CORRUPTION\"); beta != \"yes\" {\n\t\treturn true, nil\n\t}\n\n\t_, xerr := instance.Service().InspectHost(ctx, theID)\n\tif xerr != nil {\n\t\tswitch xerr.(type) {\n\t\tcase *fail.ErrNotFound:\n\t\t\treturn false, nil\n\t\tdefault:\n\t\t\treturn false, xerr\n\t\t}\n\t}\n\n\treturn true, nil\n}", "func machineExists(id string) bool {\n\tmut.Lock()\n\tlogFile.Seek(0, 0)\n\tdefer logFile.Seek(0, 2)\n\tdefer mut.Unlock()\n\tscanner := bufio.NewScanner(logFile)\n\tfor scanner.Scan() {\n\t\tif strings.Contains(scanner.Text(), id) {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func (gcs *localStorage) FileExists(bucket, fileName string) bool {\n\t_, err := os.Stat(fileName)\n\treturn err == nil\n}", "func db_check_user_exists(username string) bool {\n file_path := path.Join(\"db/users\", strings.ToLower(username) + \".json\")\n \n if _, err := os.Stat(file_path); !os.IsNotExist(err) {\n return true\n }\n return false\n}", "func (z *ZKC) identityExists(id [zkidentity.IdentitySize]byte) bool {\n\t_, err := os.Stat(path.Join(z.settings.Root, inboundDir,\n\t\thex.EncodeToString(id[:]), 
identityFilename))\n\tif err == nil {\n\t\tids := hex.EncodeToString(id[:])\n\t\tfullPath := path.Join(z.settings.Root, inboundDir, ids)\n\t\t_, err1 := os.Stat(path.Join(fullPath, ratchetFilename))\n\t\t_, err2 := os.Stat(path.Join(fullPath, halfRatchetFilename))\n\t\tif err1 == nil || err2 == nil {\n\t\t\treturn true\n\t\t}\n\n\t\t// this happens during reset condiftion\n\t\tz.Dbg(idZKC, \"identityExists: reset condition\")\n\t\treturn false\n\t}\n\n\treturn false\n}", "func (c *Local) Exists(key string) (string, error) {\n\tsum := hash(key)\n\tdirPrefix := filepath.Join(c.Root, fmt.Sprintf(\"%x\", sum[0:1]))\n\tdirEnd := fmt.Sprintf(\"%x\", sum[1:len(sum)-1])\n\tdirFull := filepath.Join(dirPrefix, dirEnd)\n\tif PathIsDir(dirFull) {\n\t\treturn dirFull, nil\n\t}\n\treturn \"\", nil\n}", "func (_UsersData *UsersDataCaller) IsUuidExist(opts *bind.CallOpts, uuid [16]byte) (bool, error) {\n\tvar (\n\t\tret0 = new(bool)\n\t)\n\tout := ret0\n\terr := _UsersData.contract.Call(opts, out, \"isUuidExist\", uuid)\n\treturn *ret0, err\n}", "func MasterFileExists(path string) bool {\n\t_, err := os.Stat(path)\n\treturn err == nil\n}", "func (_UsersData *UsersDataCallerSession) IsUuidExist(uuid [16]byte) (bool, error) {\n\treturn _UsersData.Contract.IsUuidExist(&_UsersData.CallOpts, uuid)\n}", "func exists(filePath string) (exists bool) {\n _,err := os.Stat(filePath)\n if err != nil {\n exists = false\n } else {\n exists = true\n }\n return\n}", "func Exists(uid int, address string) bool {\n\tnowTime := time.Now().Unix()\n\n\tif uCache, ok := localCache.UIDCache[uid]; ok {\n\t\t// cache未过期\n\t\tif uCache.lastModifyTime+CacheExpireTime> nowTime {\n\t\t\treturn true\n\t\t}\n\t}\n\n\tif addrCache, ok := localCache.AddressCache[address]; ok {\n\t\t// cache未过期\n\t\tif addrCache.lastModifyTime+CacheExpireTime> nowTime {\n\t\t\treturn true\n\t\t}\n\t}\n\n\treturn false\n}", "func (cs ConsulStorage) Exists(ctx context.Context, key string) bool {\n\tkv, _, err := 
cs.ConsulClient.KV().Get(cs.prefixKey(key), ConsulQueryDefaults(ctx))\n\tif kv != nil && err == nil {\n\t\treturn true\n\t}\n\treturn false\n}", "func (_UsersData *UsersDataSession) IsUuidExist(uuid [16]byte) (bool, error) {\n\treturn _UsersData.Contract.IsUuidExist(&_UsersData.CallOpts, uuid)\n}", "func (ros RealOS) Exists(p string) bool {\n\tif _, err := os.Stat(path.Clean(p)); err == nil {\n\t\treturn true\n\t}\n\treturn false\n}", "func (b *Binary) RemoteExist() bool {\n\tif !b.file.RemoteExist() {\n\t\treturn false\n\t}\n\n\tcmd := fmt.Sprintf(\"sha256sum %s | cut -d\\\" \\\" -f1\", b.file.RemotePath())\n\tremoteSHA256, err := b.file.sshClient.SudoCmd(cmd)\n\tif err != nil {\n\t\treturn false\n\t}\n\n\tif err := b.checksumList.Get(); err != nil {\n\t\treturn false\n\t}\n\n\tif remoteSHA256 != b.checksumList.Value() {\n\t\treturn false\n\t}\n\treturn true\n}", "func (m *manifestService) Exists(ctx context.Context, dgst digest.Digest) (bool, error) {\n\tcontext.GetLogger(ctx).Debugf(\"(*manifestService).Exists\")\n\n\timage, _, err := m.repo.getImageOfImageStream(dgst)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\treturn image != nil, nil\n}", "func Exists(fname string) bool {\n if _, err := os.Stat(fname); os.IsNotExist(err) {\n return false\n }\n return true\n}", "func (d *Driver) Exists(id string) bool {\n\tlogrus.Debugf(\"secureoverlay2: Exists called w. 
id: %s\", id)\n\n\t// TODO: below is implementation from overlay2 but doesn't really mesh with the function description (also from overlay2)\n\t// as this should be true as soon as layer is created using Create, regardless of mount (call of Get)?!\n\t_, err := os.Stat(d.dir(id))\n\treturn err == nil\n}", "func hostExists(host string, hosts []string) bool {\n\tfor _, entry := range hosts {\n\t\tif host == cleanup(entry) {\n\t\t\treturn true\n\t\t}\n\t}\n\n\treturn false\n}", "func (l *Location) Exists() (bool, error) {\n\n\tclient, err := l.fileSystem.Client(l.Authority)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\t// start timer once action is completed\n\tdefer l.fileSystem.connTimerStart()\n\n\tinfo, err := client.Stat(l.Path())\n\tif err != nil && err == os.ErrNotExist {\n\t\treturn false, nil\n\t} else if err != nil {\n\t\treturn false, err\n\t}\n\n\tif !info.IsDir() {\n\t\treturn false, err\n\t}\n\n\treturn true, nil\n}", "func (cli *FakeDatabaseClient) FileExists(ctx context.Context, in *dbdpb.FileExistsRequest, opts ...grpc.CallOption) (*dbdpb.FileExistsResponse, error) {\n\tpanic(\"implement me\")\n}", "func (s *fsStore) Exists(typ namespace.Type, name string) bool {\n\ttrgt := s.targetPath(name, typ)\n\tif _, err := os.Stat(trgt); err == nil {\n\t\treturn true\n\t}\n\treturn false\n}", "func (s *s3ManifestService) Exists(ctx context.Context, dgst godigest.Digest) (bool, error) {\n\treturn false, fmt.Errorf(\"unimplemented\")\n}", "func (c *PumpsClient) exist(nodeID string) bool {\n\tc.RLock()\n\t_, ok := c.Pumps.Pumps[nodeID]\n\tc.RUnlock()\n\treturn ok\n}", "func isExist(filePath string) bool {\n\t_, err := os.Stat(filePath)\n\treturn err == nil\n}", "func (a *FileStorage) Exists() bool {\n\treturn a._exists\n}", "func (sample *SampleImage) Exists() bool {\n\treturn filesystem.Exists(sample.RootDir)\n}", "func IsFileExists(filePath string) (bool, error) {\n\t//Check if file exists in cache\n\t_, err := os.Stat(filePath)\n\n\tif os.IsNotExist(err) 
{\n\t\treturn false, nil\n\t} else if err != nil {\n\t\treturn false, err\n\t}\n\treturn true, nil\n}", "func Exists(name string) bool {\n if _, err := os.Stat(name); err != nil {\n if os.IsNotExist(err) {\n return false\n }\n }\n return true\n}", "func exists(name string) bool {\n if _, err := os.Stat(name); err != nil {\n if os.IsNotExist(err) {\n return false\n }\n }\n return true\n}", "func (fs *FileStore) Exists(key string) bool {\n\t_, err := os.Stat(filepath.Join(fs.baseDir, fs.mangleKey(key, false)))\n\treturn err == nil\n}", "func (a *StorageUsage) Exists() bool {\n\treturn a._exists\n}", "func (_ElvTradableLocal *ElvTradableLocalCaller) Exists(opts *bind.CallOpts, tokenId *big.Int) (bool, error) {\n\tvar out []interface{}\n\terr := _ElvTradableLocal.contract.Call(opts, &out, \"exists\", tokenId)\n\n\tif err != nil {\n\t\treturn *new(bool), err\n\t}\n\n\tout0 := *abi.ConvertType(out[0], new(bool)).(*bool)\n\n\treturn out0, err\n\n}", "func (mcm *MinioChunkManager) Exist(ctx context.Context, filePath string) (bool, error) {\n\t_, err := mcm.statMinioObject(ctx, mcm.bucketName, filePath, minio.StatObjectOptions{})\n\tif err != nil {\n\t\terrResponse := minio.ToErrorResponse(err)\n\t\tif errResponse.Code == \"NoSuchKey\" {\n\t\t\treturn false, nil\n\t\t}\n\t\tlog.Warn(\"failed to stat object\", zap.String(\"bucket\", mcm.bucketName), zap.String(\"path\", filePath), zap.Error(err))\n\t\treturn false, err\n\t}\n\treturn true, nil\n}", "func (db StdNetDB) Exists() bool {\n\tp := db.Path()\n\t// check root directory\n\t_, err := os.Stat(p)\n\tif err == nil {\n\t\t// check subdirectories for skiplist\n\t\tfor _, c := range base64.Alphabet {\n\t\t\tif _, err = os.Stat(filepath.Join(p, fmt.Sprintf(\"r%c\", c))); err != nil {\n\t\t\t\treturn false\n\t\t\t}\n\t\t}\n\t}\n\treturn err == nil\n}", "func IsStaticIPFileExist() (bool, error) {\n\treturn utils.IsFileExist(staticipfilepath)\n}", "func (storage *FileStorage) IsExist(storedName string) bool {\n\tfor _, entry := 
range *storage {\n\t\tif entry.StoredName == storedName {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func Exists(filename string) (bool, error) {\n _, err := os.Stat(filename)\n if err == nil {\n return true, nil\n }\n if os.IsNotExist(err) {\n return false, nil\n }\n var mu bool\n return mu, err\n}", "func Exists(filePath string) (bool, error) {\n\tfilePath = strings.Replace(filePath, \"~\", HomeDir(), 1)\n\n\tif _, err := os.Stat(filePath); err == nil {\n\t\treturn true, nil\n\t} else if os.IsNotExist(err) {\n\t\treturn false, nil\n\t} else {\n\t\treturn false, err\n\t}\n}", "func (s *Storage)IsExist(key interface{})bool {\n\t_,ok :=s.data.Load(key)\n\treturn ok\n}", "func (d *Driver) Exists(walletType, dataDir string, _ map[string]string, net dex.Network) (bool, error) {\n\tif walletType != walletTypeSPV {\n\t\treturn false, fmt.Errorf(\"no Decred wallet of type %q available\", walletType)\n\t}\n\n\tchainParams, err := parseChainParams(net)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\n\treturn walletExists(filepath.Join(dataDir, chainParams.Name, \"spv\"))\n}", "func fileExists(ctx context.Context, uri span.URI, source source.FileSource) (bool, error) {\n\tfh, err := source.GetFile(ctx, uri)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\treturn fileHandleExists(fh)\n}", "func doesDeviceExist(deviceID int, meta interface{}) bool {\n\tlog.Printf(\"[Dotcom-Monitor] [DEBUG] Checking if device exists with ID: %v\", deviceID)\n\tdevice := &client.Device{\n\t\tID: deviceID,\n\t}\n\n\t// Since an empty HTTP response is a valid 200 from the API, we will determine if\n\t// the device exists by comparing the hash of the struct before and after the HTTP call.\n\t// If the has does not change, it means nothing else was added, therefore it does not exist.\n\t// If the hash changes, the API found the device and added the rest of the fields.\n\th := sha256.New()\n\tt := fmt.Sprintf(\"%v\", device)\n\tsum := h.Sum([]byte(t))\n\tlog.Printf(\"[Dotcom-Monitor] 
[DEBUG] Hash before: %x\", sum)\n\n\t// Try to get device from API\n\tapi := meta.(*client.APIClient)\n\terr := api.GetDevice(device)\n\n\tt2 := fmt.Sprintf(\"%v\", device)\n\tsum2 := h.Sum([]byte(t2))\n\tlog.Printf(\"[Dotcom-Monitor] [DEBUG] Hash after: %x\", sum2)\n\n\t// Compare the hashes, and if there was an error from the API we will assume the device exists\n\t// to be safe that we do not improperly remove an existing device from state\n\tif bytes.Equal(sum, sum2) && err == nil {\n\t\tlog.Println(\"[Dotcom-Monitor] [DEBUG] No new fields added to the device, therefore the device did not exist\")\n\t\treturn false\n\t}\n\n\t// If we get here, we can assume the device does exist\n\treturn true\n}", "func (cache *LedisCacheStorage) CheckDeviceExistence(clientID string, id string) bool {\n\tamount, err := cache.db.HGet([]byte(clientID+\":device\"), []byte(id))\n\n\tif err != nil {\n\t\t_, _ = fmt.Fprintf(os.Stderr, \"Ledis Cache: failed to check device existence %v\\n\", err)\n\t\treturn false\n\t}\n\n\treturn amount != nil\n}", "func (h *fs) Exists(file string) bool {\n\t_, err := os.Stat(file)\n\treturn err == nil\n}", "func (servers *Servers) IsExist(macAddressStr string) bool {\n\n\tserver, _ := servers.GetServer(macAddressStr)\n\n\tif server != nil {\n\t\treturn true\n\t} else {\n\t\treturn false\n\t}\n\n}", "func ImageExistsLocally(ctx context.Context, imageName string, platform string) (bool, error) {\n\treturn false, errors.New(\"Unsupported Operation\")\n}", "func blockExists(target, data string) (bool, error) {\n\tif !exists(target) {\n\t\treturn false, nil\n\t}\n\tb, err := ioutil.ReadFile(target)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\tif bytes.Contains(b, []byte(data)) {\n\t\treturn true, nil\n\t}\n\treturn false, nil\n}", "func (m *InMemoryRepository) Exists(u fyne.URI) (bool, error) {\n\tpath := u.Path()\n\tif path == \"\" {\n\t\treturn false, fmt.Errorf(\"invalid path '%s'\", path)\n\t}\n\n\t_, ok := m.Data[path]\n\treturn ok, nil\n}", 
"func Exists(path string) bool {\n\tif SunnyDay {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func (d *Driver) Exists(id string) bool {\n\tlogrus.Debugf(\"Exists - id %s\", id)\n\terr := d.ioctl(LayerStat, \"\", id)\n\treturn err == nil\n}", "func DoesExist(pth string) bool {\n\tif _, err := os.Stat(pth); os.IsNotExist(err) {\n\t\treturn false\n\t}\n\treturn true\n}", "func (p *MemProvider) Exist(sid string) bool {\n\tp.lock.RLock()\n\tdefer p.lock.RUnlock()\n\n\t_, ok := p.data[sid]\n\treturn ok\n}", "func exists(filePath string) bool {\n\t_, err := os.Stat(filePath)\n\treturn err == nil\n}", "func (client *GCSBlobstore) Exists(dest string) (exists bool, err error) {\n\tif exists, err = client.exists(client.publicGCS, dest); err == nil {\n\t\treturn exists, nil\n\t}\n\n\t// If the public client fails, try using it as an authenticated actor\n\tif client.authenticatedGCS != nil {\n\t\treturn client.exists(client.authenticatedGCS, dest)\n\t}\n\n\treturn\n}", "func (q filesStorageQuery) Exists(exec boil.Executor) (bool, error) {\n\tvar count int64\n\n\tqueries.SetSelect(q.Query, nil)\n\tqueries.SetCount(q.Query)\n\tqueries.SetLimit(q.Query, 1)\n\n\terr := q.Query.QueryRow(exec).Scan(&count)\n\tif err != nil {\n\t\treturn false, errors.Wrap(err, \"models: failed to check if files_storages exists\")\n\t}\n\n\treturn count > 0, nil\n}", "func (sds *SiaDirSet) exists(siaPath string) (bool, error) {\n\t// Check for SiaDir in Memory\n\tsiaPath = strings.Trim(siaPath, \"/\")\n\t_, exists := sds.siaDirMap[siaPath]\n\tif exists {\n\t\treturn exists, nil\n\t}\n\t// Check for SiaDir on disk\n\t_, err := os.Stat(filepath.Join(sds.rootDir, siaPath+\"/\"+SiaDirExtension))\n\tif err == nil {\n\t\treturn true, nil\n\t}\n\treturn false, err\n}", "func exists(p string) bool {\n\tif _, err := os.Stat(p); os.IsNotExist(err) {\n\t\treturn false\n\t}\n\n\treturn true\n}", "func ReadHostUUID(dataDir string) (string, error) {\n\tout, err := ReadPath(filepath.Join(dataDir, 
HostUUIDFile))\n\tif err != nil {\n\t\tif errors.Is(err, fs.ErrPermission) {\n\t\t\t//do not convert to system error as this loses the ability to compare that it is a permission error\n\t\t\treturn \"\", err\n\t\t}\n\t\treturn \"\", trace.ConvertSystemError(err)\n\t}\n\tid := strings.TrimSpace(string(out))\n\tif id == \"\" {\n\t\treturn \"\", trace.NotFound(\"host uuid is empty\")\n\t}\n\treturn id, nil\n}", "func InternalExists(opts ExistsOpts, conn connections.Connection) (*connections.FileResult, error) {\n\tinput := map[string]interface{}{\n\t\t\"path\": opts.Path,\n\t\t\"timeout\": opts.Timeout,\n\t\t\"_logger\": opts.Logger,\n\t\t\"_internal\": true,\n\t}\n\n\treturn Exists(input, opts.Connection)\n}", "func (*GuluFile) IsExist(path string) bool {\n\t_, err := os.Stat(path)\n\n\treturn err == nil || os.IsExist(err)\n}", "func (xsml *XfileServiceMetricLog) Exists() bool {\n\treturn xsml._exists\n}", "func (u *urlShortner) exists() bool {\n\tif _, err := appFs.Stat(u.shortURL); os.IsNotExist(err) {\n\t\treturn false\n\t}\n\treturn true\n}", "func fileExists(path string) bool {\n _, err := os.Stat(path)\n return err == nil\n}", "func (f *FakeFileSystem) Exists(file string) bool {\n\tf.ExistsFile = append(f.ExistsFile, file)\n\treturn f.ExistsResult[file]\n}", "func (realFS) Exists(name string) bool {\n\t_, err := os.Stat(name)\n\treturn err == nil\n}", "func (s *storager) Exists(ctx context.Context, resourceID string, options ...storage.Option) (bool, error) {\n\tresource, err := newResource(resourceID)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\tclient := s.secretManager(resource.Region)\n\t_, err = client.GetSecretValueWithContext(ctx,\n\t\t&secretsmanager.GetSecretValueInput{\n\t\t\tSecretId: &resource.Secret,\n\t\t\tVersionStage: aws.String(\"AWSCURRENT\"),\n\t\t})\n\treturn !isNotFound(err), nil\n}", "func Exists(path string) bool {\n _, err := os.Stat(path)\n if err == nil { return true }\n if os.IsNotExist(err) { return false }\n return false\n}", 
"func (z *ZKC) ratchetExists(id [zkidentity.IdentitySize]byte) bool {\n\t_, err := os.Stat(path.Join(z.settings.Root, inboundDir,\n\t\thex.EncodeToString(id[:]), ratchetFilename))\n\treturn err == nil\n}", "func (c *FakeZkConn) Exists(path string) (bool, *zk.Stat, error) {\n\tc.history.addToHistory(\"Exists\", path)\n\treturn true, nil, nil\n}", "func (fs *EmbedFs) IsFileExist(path string) bool {\n\t_, exist := fs.index[path]\n\treturn exist\n}", "func exists(path string) bool {\n\t_, err := os.Stat(path)\n\tif err != nil {\n\t\tif os.IsExist(err) {\n\t\t\treturn true\n\t\t}\n\t\treturn false\n\t}\n\treturn true\n}", "func (p *Provider) Exists(alias string) (bool, error) {\n\tp.mutex.RLock()\n\tdefer p.mutex.RUnlock()\n\n\t_, err := os.Stat(filepath.Join(p.Config.Path, path.Base(alias)))\n\treturn !os.IsNotExist(err), nil\n}", "func exists(f string) (bool, error) {\n\t_, err := os.Stat(f)\n\tif err != nil {\n\t\tif os.IsNotExist(err) {\n\t\t\treturn false, nil\n\t\t}\n\t\treturn false, fmt.Errorf(\"cannot get stats for path `%s`: %v\", f, err)\n\t}\n\treturn true, nil\n}", "func (lg *Logger) isExist(path string) bool {\n\t_, err := os.Stat(path)\n\treturn err == nil || os.IsExist(err)\n}", "func fileExists(file string) bool {\n\t//Debugf(\"checking for file existence \" + file)\n\tif _, err := os.Stat(file); os.IsNotExist(err) {\n\t\treturn false\n\t}\n\treturn true\n}", "func (o *StorageHyperFlexStorageContainer) HasUuid() bool {\n\tif o != nil && o.Uuid != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func (o *NetworkLicenseFile) HasHostId() bool {\n\tif o != nil && o.HostId != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func exist(t *testing.T, root, name string) {\n\t_, err := stat(root, name)\n\tif err != nil {\n\t\tt.Fatalf(\"exist: %v\", err)\n\t}\n}", "func isExist(n string) string {\n\tif _, err := os.Stat(n); !os.IsNotExist(err) {\n\t\tn = fmt.Sprintf(\"%s_1\", n)\n\t\treturn n\n\t}\n\treturn n\n}", "func (z *ZkPlus) Exists(path string) 
(bool, *zk.Stat, error) {\n\tz.forPath(path).Log(logkey.ZkMethod, \"Exists\")\n\treturn z.blockOnConn().Exists(z.realPath(path))\n}", "func (c *Cache) Exists(name string) bool {\n\tosPath := c.ToOSPath(name)\n\tfi, err := os.Stat(osPath)\n\tif err != nil {\n\t\treturn false\n\t}\n\t// checks for non-regular files (e.g. directories, symlinks, devices, etc.)\n\tif !fi.Mode().IsRegular() {\n\t\treturn false\n\t}\n\treturn true\n}", "func (b *Blob) IsExist() bool {\n\n\tfilepath := b.FilePath()\n\tif utils.IsFileExist(filepath) {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func (s *Store) Exists(ctx context.Context, name string) bool {\n\treturn s.storage.Exists(ctx, s.Passfile(name))\n}", "func (service *FolderServiceImpl) Exists(id int) bool {\n\t_, exists := service.folders[id]\n\n\treturn exists\n}", "func exists(path string) bool {\n\tif _, err := os.Stat(path); os.IsNotExist(err) {\n\t\treturn false\n\t}\n\treturn true\n}", "func ReadOrMakeHostUUID(dataDir string) (string, error) {\n\tid, err := ReadHostUUID(dataDir)\n\tif err == nil {\n\t\treturn id, nil\n\t}\n\tif !trace.IsNotFound(err) {\n\t\treturn \"\", trace.Wrap(err)\n\t}\n\t// Checking error instead of the usual uuid.New() in case uuid generation\n\t// fails due to not enough randomness. It's been known to happen happen when\n\t// Teleport starts very early in the node initialization cycle and /dev/urandom\n\t// isn't ready yet.\n\trawID, err := uuid.NewRandom()\n\tif err != nil {\n\t\treturn \"\", trace.BadParameter(\"\" +\n\t\t\t\"Teleport failed to generate host UUID. \" +\n\t\t\t\"This may happen if randomness source is not fully initialized when the node is starting up. 
\" +\n\t\t\t\"Please try restarting Teleport again.\")\n\t}\n\tid = rawID.String()\n\tif err = WriteHostUUID(dataDir, id); err != nil {\n\t\treturn \"\", trace.Wrap(err)\n\t}\n\treturn id, nil\n}", "func (db *Database) DoesFileExist(f File, c Client) bool {\n\tdbC := db.dbClientForClient(c)\n\tvar count uint64\n\tconst countSQL = `\n\tSELECT COUNT(id) FROM File WHERE name=$1 AND ownerId=$2`\n\tif err := db.QueryRow(countSQL, f.name, dbC.id).Scan(&count); err != nil {\n\t\tlog.Println(\"checking if file saved:\", err)\n\t\treturn false\n\t}\n\treturn count > 0\n}", "func (linux *Linux) FileExists(filePath string) bool {\n\tfile, err := os.Open(linux.applyChroot(filePath))\n\tdefer file.Close()\n\treturn err == nil\n}", "func NonZeroFileExists(filename string) bool {\n\n\tif info, err := os.Stat(filename); err == nil {\n\t\tif info.Size() > 0 {\n\t\t\treturn true\n\t\t}\n\t}\n\n\treturn false\n}" ]
[ "0.6837421", "0.6144338", "0.61178225", "0.6017075", "0.59379965", "0.59195673", "0.5889505", "0.5859016", "0.58369076", "0.5826344", "0.5795487", "0.57538235", "0.57405114", "0.5738003", "0.57183146", "0.57179755", "0.5670447", "0.56537825", "0.56479305", "0.564708", "0.56407434", "0.5610793", "0.5607607", "0.56041396", "0.5596046", "0.55446655", "0.5534099", "0.55258054", "0.55255264", "0.55228984", "0.55221367", "0.55091155", "0.5499236", "0.54862314", "0.5485776", "0.5474485", "0.5457866", "0.54446954", "0.5436129", "0.5415444", "0.54133725", "0.5405575", "0.5403257", "0.5400908", "0.53881127", "0.53865445", "0.53856826", "0.53850687", "0.5376573", "0.5367834", "0.5367548", "0.53614604", "0.5357764", "0.5348482", "0.53448886", "0.53428864", "0.5341882", "0.53330183", "0.53300077", "0.53294754", "0.53248703", "0.531093", "0.5302594", "0.52866375", "0.52864194", "0.52840936", "0.5282022", "0.52784055", "0.52775544", "0.52722764", "0.52711135", "0.5267289", "0.5261935", "0.5250576", "0.5240205", "0.5236874", "0.5234187", "0.5230421", "0.52300847", "0.5224365", "0.52231234", "0.52136385", "0.5212599", "0.5209042", "0.5204611", "0.5202703", "0.5199821", "0.5197458", "0.5196864", "0.51886266", "0.51873654", "0.5184305", "0.51819515", "0.51749367", "0.51709235", "0.5169141", "0.5169092", "0.5167395", "0.5164498", "0.5156312" ]
0.8382169
0
ReadHostUUID reads host UUID from the file in the data dir
ReadHostUUID читает UUID хоста из файла в каталоге данных
func ReadHostUUID(dataDir string) (string, error) { out, err := ReadPath(filepath.Join(dataDir, HostUUIDFile)) if err != nil { if errors.Is(err, fs.ErrPermission) { //do not convert to system error as this loses the ability to compare that it is a permission error return "", err } return "", trace.ConvertSystemError(err) } id := strings.TrimSpace(string(out)) if id == "" { return "", trace.NotFound("host uuid is empty") } return id, nil }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func ReadOrMakeHostUUID(dataDir string) (string, error) {\n\tid, err := ReadHostUUID(dataDir)\n\tif err == nil {\n\t\treturn id, nil\n\t}\n\tif !trace.IsNotFound(err) {\n\t\treturn \"\", trace.Wrap(err)\n\t}\n\t// Checking error instead of the usual uuid.New() in case uuid generation\n\t// fails due to not enough randomness. It's been known to happen happen when\n\t// Teleport starts very early in the node initialization cycle and /dev/urandom\n\t// isn't ready yet.\n\trawID, err := uuid.NewRandom()\n\tif err != nil {\n\t\treturn \"\", trace.BadParameter(\"\" +\n\t\t\t\"Teleport failed to generate host UUID. \" +\n\t\t\t\"This may happen if randomness source is not fully initialized when the node is starting up. \" +\n\t\t\t\"Please try restarting Teleport again.\")\n\t}\n\tid = rawID.String()\n\tif err = WriteHostUUID(dataDir, id); err != nil {\n\t\treturn \"\", trace.Wrap(err)\n\t}\n\treturn id, nil\n}", "func UUIDFile(fpath string) (string, error) {\n\n\t_, err := os.Stat(fpath)\n\tif err != nil && !os.IsExist(err) {\n\t\tkey := uuid.New().String()\n\t\tif err := ioutil.WriteFile(fpath, []byte(key), 0777); err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t\treturn key, nil\n\t}\n\n\tfp, err := os.Open(fpath)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tdefer fp.Close()\n\tdata, err := ioutil.ReadAll(fp)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tkey := string(data)\n\tif _, err := uuid.Parse(key); err != nil {\n\t\treturn \"\", err\n\t}\n\treturn key, nil\n}", "func Read(args ...string) (*UUID, error) {\n\tfpath := sfFilePath(args)\n\tf, err := os.Open(fpath)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer f.Close()\n\tdata := make([]byte, UUIDHexLen+8)\n\tn, err := f.Read(data)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif n < UUIDHexLen {\n\t\treturn nil, fmt.Errorf(\"File '%s' is too small\", fpath)\n\t}\n\tdata = data[:n]\n\tuuid, err := Decode(string(data))\n\tif err == nil {\n\t\tnc := &cache{uuid: *uuid, filePath: fpath, 
validationTime: time.Now().Add(ValidationTimePeriod)}\n\t\tatomic.StorePointer(&current, unsafe.Pointer(nc))\n\t}\n\treturn uuid, err\n}", "func GetUUID() string {\n\tuuid, _ := ioutil.ReadFile(AppPath.UUIDFile)\n\treturn string(bytes.TrimSpace(uuid))\n}", "func ReadUUID(buffer []byte, offset int) UUID {\n bytes := ReadBytes(buffer, offset, 16)\n return UUIDFromBytes(bytes)\n}", "func (b *Broker) readIDFromFile(home, filepath string) (id string, err error) {\n\t_filepath := fmt.Sprintf(\"%v%v%v\", home, string(os.PathSeparator), filepath)\n\t_bytes, err := ioutil.ReadFile(_filepath)\n\tif err != nil {\n\t\treturn\n\t}\n\tid = string(_bytes)\n\treturn\n}", "func readMachineID() []byte {\n\tid := make([]byte, 3)\n\tif hostname, err := os.Hostname(); err == nil {\n\t\thw := md5.New()\n\t\thw.Write([]byte(hostname))\n\t\tcopy(id, hw.Sum(nil))\n\t} else {\n\t\t// Fallback to rand number if machine id can't be gathered\n\t\tif _, randErr := rand.Reader.Read(id); randErr != nil {\n\t\t\tpanic(fmt.Errorf(\"Cannot get hostname nor generate a random number: %v; %v\", err, randErr))\n\t\t}\n\t}\n\treturn id\n}", "func readMachineID() []byte {\n\tid := make([]byte, 3)\n\thid, err := readPlatformMachineID()\n\tif err != nil || len(hid) == 0 {\n\t\thid, err = os.Hostname()\n\t}\n\tif err == nil && len(hid) != 0 {\n\t\thw := md5.New()\n\t\thw.Write([]byte(hid))\n\t\tcopy(id, hw.Sum(nil))\n\t} else {\n\t\t// Fallback to rand number if machine id can't be gathered\n\t\tif _, randErr := rand.Reader.Read(id); randErr != nil {\n\t\t\tpanic(fmt.Errorf(\"xid: cannot get hostname nor generate a random number: %v; %v\", err, randErr))\n\t\t}\n\t}\n\treturn id\n}", "func WriteHostUUID(dataDir string, id string) error {\n\terr := os.WriteFile(filepath.Join(dataDir, HostUUIDFile), []byte(id), os.ModeExclusive|0400)\n\tif err != nil {\n\t\tif errors.Is(err, fs.ErrPermission) {\n\t\t\t//do not convert to system error as this loses the ability to compare that it is a permission 
error\n\t\t\treturn err\n\t\t}\n\t\treturn trace.ConvertSystemError(err)\n\t}\n\treturn nil\n}", "func readMachineId() []byte {\n\tvar sum [3]byte\n\tid := sum[:]\n\thostname, err1 := os.Hostname()\n\tif err1 != nil {\n\t\tn := uint32(time.Now().UnixNano())\n\t\tsum[0] = byte(n >> 0)\n\t\tsum[1] = byte(n >> 8)\n\t\tsum[2] = byte(n >> 16)\n\t\treturn id\n\t}\n\thw := md5.New()\n\thw.Write([]byte(hostname))\n\tcopy(id, hw.Sum(nil))\n\treturn id\n}", "func readInstanceID() string {\n\tconst instanceIDFile = \"/var/lib/cloud/data/instance-id\"\n\tidBytes, err := ioutil.ReadFile(instanceIDFile)\n\tif err != nil {\n\t\tglog.Infof(\"Failed to get instance id from file: %v\", err)\n\t\treturn \"\"\n\t} else {\n\t\tinstanceID := string(idBytes)\n\t\tinstanceID = strings.TrimSpace(instanceID)\n\t\tglog.Infof(\"Get instance id from file: %s\", instanceID)\n\t\treturn instanceID\n\t}\n}", "func removeUuidFromFilepath(path string) string {\n\t// UUID has 4 hyphens, so we split into 6 parts. \n\treturn strings.SplitN(filepath.Base(path), \"-\", 6)[5]\n}", "func UUID() (string, error) {\n\tb := make([]byte, 2)\n\n\t_, err := crand.Read(b)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\treturn fmt.Sprintf(\"%X\", b[0:2]), nil\n}", "func HostUUIDExistsLocally(dataDir string) bool {\n\t_, err := ReadHostUUID(dataDir)\n\treturn err == nil\n}", "func (s *Store) readID() error {\n\tb, err := ioutil.ReadFile(s.IDPath())\n\tif os.IsNotExist(err) {\n\t\ts.id = 0\n\t\treturn nil\n\t} else if err != nil {\n\t\treturn fmt.Errorf(\"read file: %s\", err)\n\t}\n\n\tid, err := strconv.ParseUint(string(b), 10, 64)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"parse id: %s\", err)\n\t}\n\ts.id = id\n\n\ts.Logger.Printf(\"read local node id: %d\", s.id)\n\n\treturn nil\n}", "func (this *Actions) generateUid(config *Config, hostname string) string{\n uuidNew := uuid.NewV5(uuid.NewV1(), hostname).String()\n uuid_path := config.GetValue(\"basic\", \"uuid_path\")\n log.Info(\"The new uuid is : \" + 
uuidNew)\n file, error := os.OpenFile(uuid_path, os.O_RDWR|os.O_CREATE, 0622)\n if error != nil {\n log.Error(\"Open uuid file in \"+ uuid_path +\" failed.\" + error.Error())\n }\n _,err := file.WriteString(uuidNew)\n if err != nil {\n log.Error(\"Save uuid file in \"+ uuid_path +\" failed.\" + err.Error())\n }\n file.Close()\n return uuidNew\n}", "func GetHostUUID(nbmaster string, httpClient *http.Client, jwt string, host string) string {\r\n fmt.Printf(\"\\nGet the UUID of host %s...\\n\", host)\r\n\r\n uri := \"https://\" + nbmaster + \":\" + port + \"/netbackup/config/hosts\";\r\n\r\n request, _ := http.NewRequest(http.MethodGet, uri, nil)\r\n query := request.URL.Query()\r\n query.Add(\"filter\", \"hostName eq '\" + host + \"'\")\r\n request.URL.RawQuery = query.Encode()\r\n\r\n request.Header.Add(\"Authorization\", jwt);\r\n request.Header.Add(\"Accept\", contentTypeV3);\r\n\r\n response, err := httpClient.Do(request)\r\n\r\n hostUuid := \"\"\r\n if err != nil {\r\n fmt.Printf(\"The HTTP request failed with error: %s\\n\", err)\r\n panic(\"Unable to get the host UUID\")\r\n } else {\r\n if response.StatusCode == 200 {\r\n data, _ := ioutil.ReadAll(response.Body)\r\n var obj interface{}\r\n json.Unmarshal(data, &obj)\r\n response := obj.(map[string]interface{})\r\n hosts := response[\"hosts\"].([]interface{})\r\n hostUuid = ((hosts[0].(map[string]interface{}))[\"uuid\"]).(string)\r\n fmt.Printf(\"Host UUID: %s\\n\", hostUuid);\r\n } else {\r\n printErrorResponse(response)\r\n }\r\n }\r\n\r\n return hostUuid\r\n}", "func getHostFromUUID(id string) (*model.Host, error) {\n\thosts, err := driver.GetHosts()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tfor _, host := range *hosts {\n\t\tif host.UUID == id {\n\t\t\t// Host Matches\n\t\t\tlog.Tracef(\"current host matches with id=%s\", id)\n\t\t\treturn host, nil\n\t\t}\n\t}\n\treturn nil, fmt.Errorf(\"no host found with id %s\", id)\n}", "func (c *Config) getRandomId() (string, error) {\n\tb, err := 
ioutil.ReadFile(c.ProcBootId)\n\tif err != nil {\n\t\tglog.Errorf(\"fail to open %s: %q\", c.ProcBootId, err)\n\t\treturn \"\", err\n\t}\n\trandomId := string(b)\n\trandomId = strings.Trim(randomId, \"\\n\")\n\tglog.V(2).Infof(\"RandomId: %q\", randomId)\n\treturn randomId, nil\n\n}", "func UDID() string {\n\tf, err := os.Open(\"/dev/urandom\")\n\tif err != nil {\n\t\tpanic(fmt.Sprintf(\"failed to get /dev/urandom! %s\", err))\n\t}\n\tb := make([]byte, 16)\n\t_, err = f.Read(b)\n\tif err != nil {\n\t\tpanic(fmt.Sprintf(\"failed to read 16 bytes from /dev/urandom! %s\", err))\n\t}\n\tf.Close()\n\treturn fmt.Sprintf(\"%x-%x-%x-%x-%x\", b[0:4], b[4:6], b[6:8], b[8:10], b[10:])\n}", "func uuid() []byte {\n\tuuid := make([]byte, 16)\n\t_, err := rand.Read(uuid)\n\tif err != nil {\n\t\tpanic(\"cue/hosted: uuid() failed to read random bytes\")\n\t}\n\n\t// The following bit twiddling is outlined in RFC 4122. In short, it\n\t// identifies the UUID as a v4 random UUID.\n\tuuid[6] = (4 << 4) | (0xf & uuid[6])\n\tuuid[8] = (8 << 4) | (0x3f & uuid[8])\n\treturn uuid\n}", "func pid(instance int) (pid string, err error) {\n file, err := os.Open(pidFileName(instance))\n if err != nil {\n return\n }\n\n defer file.Close()\n\n scanner := bufio.NewScanner(file)\n scanner.Scan()\n pid = scanner.Text()\n return\n}", "func HardwareUUID() (string, error) {\n\t/*\n\t\tSample output of 'wmic path Win32_ComputerSystemProduct get uuid'\n\n\t\tUUID\n\t\t4219B2F5-C25F-6AF2-573C-35B0DF557236\n\t*/\n\tresult, err := readAndParseFromCommandLine(hardwareUUIDCmd)\n\tif err != nil {\n\t\treturn \"-1\", err\n\t}\n\thardwareUUID := \"\"\n\tif len(result) > 1 {\n\t\t// remove all spaces from the second line as that line consists hardware uuid\n\t\tre := regexp.MustCompile(\"\\\\s|\\\\r\")\n\t\thardwareUUID = re.ReplaceAllString(result[1], \"\")\n\t}\n\treturn hardwareUUID, nil\n}", "func (h *Harness) UUID(id string) string { return h.uuidG.Get(id) }", "func parseUUID(src string) (dst [16]byte, err 
error) {\n\tswitch len(src) {\n\tcase 36:\n\t\tsrc = src[0:8] + src[9:13] + src[14:18] + src[19:23] + src[24:]\n\tcase 32:\n\t\t// dashes already stripped, assume valid\n\tdefault:\n\t\t// assume invalid.\n\t\treturn dst, fmt.Errorf(\"cannot parse UUID %v\", src)\n\t}\n\n\tbuf, err := hex.DecodeString(src)\n\tif err != nil {\n\t\treturn dst, err\n\t}\n\n\tcopy(dst[:], buf)\n\treturn dst, err\n}", "func (device *DCV2Bricklet) ReadUID() (uid uint32, err error) {\n\tvar buf bytes.Buffer\n\n\tresultBytes, err := device.device.Get(uint8(FunctionReadUID), buf.Bytes())\n\tif err != nil {\n\t\treturn uid, err\n\t}\n\tif len(resultBytes) > 0 {\n\t\tvar header PacketHeader\n\n\t\theader.FillFromBytes(resultBytes)\n\n\t\tif header.Length != 12 {\n\t\t\treturn uid, fmt.Errorf(\"Received packet of unexpected size %d, instead of %d\", header.Length, 12)\n\t\t}\n\n\t\tif header.ErrorCode != 0 {\n\t\t\treturn uid, DeviceError(header.ErrorCode)\n\t\t}\n\n\t\tresultBuf := bytes.NewBuffer(resultBytes[8:])\n\t\tbinary.Read(resultBuf, binary.LittleEndian, &uid)\n\n\t}\n\n\treturn uid, nil\n}", "func (dev VMVolumeDevice) UUID() string {\n\treturn utils.NewUUID5(blockVolumeNsUUID, dev.HostPath)\n}", "func GetVendorIDByCPUInfo(path string) (string, error) {\n\tvendorID := \"unknown\"\n\tf, err := os.Open(path)\n\tif err != nil {\n\t\treturn vendorID, err\n\t}\n\tdefer f.Close()\n\n\ts := bufio.NewScanner(f)\n\tfor s.Scan() {\n\t\tif err := s.Err(); err != nil {\n\t\t\treturn vendorID, err\n\t\t}\n\n\t\tline := s.Text()\n\n\t\t// get \"vendor_id\" from first line\n\t\tif strings.Contains(line, \"vendor_id\") {\n\t\t\tattrs := strings.Split(line, \":\")\n\t\t\tif len(attrs) >= 2 {\n\t\t\t\tvendorID = strings.TrimSpace(attrs[1])\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t}\n\treturn vendorID, nil\n}", "func (device *IndustrialDigitalIn4V2Bricklet) ReadUID() (uid uint32, err error) {\n\tvar buf bytes.Buffer\n\n\tresultBytes, err := device.device.Get(uint8(FunctionReadUID), buf.Bytes())\n\tif err != 
nil {\n\t\treturn uid, err\n\t}\n\tif len(resultBytes) > 0 {\n\t\tvar header PacketHeader\n\n\t\theader.FillFromBytes(resultBytes)\n\n\t\tif header.Length != 12 {\n\t\t\treturn uid, fmt.Errorf(\"Received packet of unexpected size %d, instead of %d\", header.Length, 12)\n\t\t}\n\n\t\tif header.ErrorCode != 0 {\n\t\t\treturn uid, DeviceError(header.ErrorCode)\n\t\t}\n\n\t\tresultBuf := bytes.NewBuffer(resultBytes[8:])\n\t\tbinary.Read(resultBuf, binary.LittleEndian, &uid)\n\n\t}\n\n\treturn uid, nil\n}", "func hostRead(d *schema.ResourceData, m interface{}, params zabbix.Params) error {\n\tapi := m.(*zabbix.API)\n\n\tlog.Debug(\"Lookup of host with params %#v\", params)\n\n\thosts, err := api.HostsGet(params)\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif len(hosts) < 1 {\n\t\td.SetId(\"\")\n\t\treturn nil\n\t}\n\tif len(hosts) > 1 {\n\t\treturn errors.New(\"multiple hosts found\")\n\t}\n\thost := hosts[0]\n\n\tlog.Debug(\"Got host: %+v\", host)\n\n\td.SetId(host.HostID)\n\td.Set(\"name\", host.Name)\n\td.Set(\"host\", host.Host)\n\td.Set(\"proxyid\", host.ProxyID)\n\td.Set(\"enabled\", host.Status == 0)\n\td.Set(\"inventory_mode\", HINV_LOOKUP_REV[host.InventoryMode])\n\n\td.Set(\"interface\", flattenHostInterfaces(host, d, m))\n\td.Set(\"templates\", flattenTemplateIds(host.ParentTemplateIDs))\n\td.Set(\"inventory\", flattenInventory(host))\n\td.Set(\"groups\", flattenHostGroupIds(host.GroupIds))\n\td.Set(\"macro\", flattenMacros(host.UserMacros))\n\td.Set(\"tag\", flattenTags(host.Tags))\n\n\treturn nil\n}", "func GetClientID() (string, error) {\n\tfn := \"clientid\" // File Name\n\tif _, err := os.Stat(fn); os.IsNotExist(err) {\n\t\t// File does not exists, create a new uuid\n\t\tuuid := uuid.NewV4()\n\t\tuuidStr := uuid.String()\n\t\tlog.Println(\"Created new Client ID.\", uuidStr)\n\t\terr = ioutil.WriteFile(fn, []byte(uuidStr), 0666)\n\t\tif err != nil {\n\t\t\treturn uuidStr, err\n\t\t}\n\t\treturn uuidStr, nil\n\t}\n\t// Read the uuid from the 
file\n\tdata, err := ioutil.ReadFile(fn)\n\tif err != nil {\n\t\tlog.Println(\"Failed to read the Client ID file. Attempting to recreate it.\", err)\n\t\tuuid := uuid.NewV4()\n\t\tuuidStr := uuid.String()\n\t\tlog.Println(\"Created new Client ID.\", uuidStr)\n\t\terr = ioutil.WriteFile(fn, []byte(uuidStr), 0666)\n\t\tif err != nil {\n\t\t\treturn uuidStr, err\n\t\t}\n\t\treturn uuidStr, nil\n\t}\n\treturn string(data), nil\n}", "func extractUuid(input string) string {\n\treGetID := regexp.MustCompile(`([a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12})`)\n\tmatchListId := reGetID.FindAllStringSubmatch(input, -1)\n\tif len(matchListId) > 0 && len(matchListId[0]) > 0 {\n\t\treturn matchListId[len(matchListId)-1][1]\n\t}\n\treturn \"\"\n}", "func loadHostString() (string, error) {\n\tif hostFile, err := getHostFile(); err == nil {\n\t\tbytes, err := ioutil.ReadFile(hostFile)\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t\treturn string(bytes), nil\n\t} else {\n\t\treturn \"\", err\n\t}\n\n}", "func ReadPidFile(name string) (pid int, err error) {\n\tvar file *os.File\n\tif file, err = os.OpenFile(name, os.O_RDONLY, 0640); err != nil {\n\t\treturn\n\t}\n\tdefer file.Close()\n\n\tlock := &LockFile{file}\n\tpid, err = lock.ReadPid()\n\treturn\n}", "func getUUID() string{\n\tresponse,_ := http.Get(BaseUrl+\"/_uuids\")\n\tdefer response.Body.Close()\n\tdecoder := json.NewDecoder(response.Body)\n\terr := decoder.Decode(&uniqueid)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn uniqueid.Uuids[0]\n}", "func (o *Partition) GetUUID(ctx context.Context) (uUID string, err error) {\n\terr = o.object.CallWithContext(ctx, \"org.freedesktop.DBus.Properties.Get\", 0, InterfacePartition, \"UUID\").Store(&uUID)\n\treturn\n}", "func PartitionUUIDs(r io.Reader) []string {\n\tparts, err := readPartitionEntries(r)\n\tif err != nil {\n\t\treturn nil\n\t}\n\treturn []string{\n\t\tGUIDFromBytes(parts[0].GUID[:]),\n\t}\n}", "func (c *Config) ReadNodeID() (string, error) 
{\n\tdata, err := ioutil.ReadFile(filepath.Join(c.Chdir, \"node.id\"))\n\tif err != nil || len(data) == 0 {\n\t\treturn \"\", errNodeIDEmpty\n\t}\n\n\t// Trim all leading and trailing whitespace\n\tnodeIDStr := strings.TrimSpace(string(data))\n\tif len(nodeIDStr) == 0 {\n\t\treturn \"\", errNodeIDEmpty\n\t}\n\n\t// Make sure that there is no whitespace inside of the nodeid\n\tfields := strings.Fields(nodeIDStr)\n\tif len(fields) > 1 {\n\t\treturn \"\", errNodeIDMalformed\n\t}\n\n\treturn nodeIDStr, nil\n}", "func readFile(file *os.File, offset int) uint32 {\n\tbytes := make([]byte, UINT32_LENGTH)\n\n\tfile.Seek(int64(offset), 0)\n\n\tdata, err := file.Read(bytes)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\treturn binary.LittleEndian.Uint32(bytes[:data])\n}", "func CreateUuidForMonitorData(md MonitorData) string {\n serial := SerializeMonitorData(md)\n h := sha256.New()\n h.Write(serial)\n return fmt.Sprintf(\"%x\", h.Sum(nil))\n}", "func uuid() string {\n\tout, err := exec.Command(\"/usr/bin/uuidgen\").Output()\n\tif err != nil {\n\t\tlog.Fatal().\n\t\t\tStr(\"command\", \"/usr/bin/uuidgen\").\n\t\t\tMsg(\"There was an error generating the uuid.\")\n\t}\n\n\t//n := bytes.IndexByte(out, 0)\n\ts := string(out)\n\ts = strings.TrimSpace(s)\n\treturn s\n}", "func idOfFile(filename string) string {\n\treturn fmt.Sprintf(`{\"$oid\":\"%s\"}`, testFiles[filename].Hex())\n}", "func (o *MDRaid) GetUUID(ctx context.Context) (uUID string, err error) {\n\terr = o.object.CallWithContext(ctx, \"org.freedesktop.DBus.Properties.Get\", 0, InterfaceMDRaid, \"UUID\").Store(&uUID)\n\treturn\n}", "func (gpu *Device) UUID() (string, error) {\n\treturn gpu.textProperty(\"UUID\")\n}", "func GUIDFromBytes(b []byte) string {\n\t// See Intel EFI specification, Appendix A: GUID and Time Formats\n\t// https://www.intel.de/content/dam/doc/product-specification/efi-v1-10-specification.pdf\n\tvar (\n\t\ttimeLow uint32\n\t\ttimeMid uint16\n\t\ttimeHighAndVersion uint16\n\t\tclockSeqHighAndReserved 
uint8\n\t\tclockSeqLow uint8\n\t\tnode [6]byte\n\t)\n\ttimeLow = binary.LittleEndian.Uint32(b[0:4])\n\ttimeMid = binary.LittleEndian.Uint16(b[4:6])\n\ttimeHighAndVersion = binary.LittleEndian.Uint16(b[6:8])\n\tclockSeqHighAndReserved = b[8]\n\tclockSeqLow = b[9]\n\tcopy(node[:], b[10:])\n\treturn fmt.Sprintf(\"%08X-%04X-%04X-%02X%02X-%012X\",\n\t\ttimeLow,\n\t\ttimeMid,\n\t\ttimeHighAndVersion,\n\t\tclockSeqHighAndReserved,\n\t\tclockSeqLow,\n\t\tnode)\n}", "func getDiskUUID() string {\n\treturn vboxmanage.GetVMInfoByRegexp(boxName, \"\\\"SATA Controller-ImageUUID-0-0\\\"=\\\"(.*?)\\\"\")\n}", "func resourceHostRead(d *schema.ResourceData, m interface{}) error {\n\tlog.Debug(\"Lookup of hostgroup with id %s\", d.Id())\n\n\treturn hostRead(d, m, zabbix.Params{\n\t\t\"selectInterfaces\": \"extend\",\n\t\t\"selectParentTemplates\": \"extend\",\n\t\t\"selectGroups\": \"extend\",\n\t\t\"selectMacros\": \"extend\",\n\t\t\"selectTags\": \"extend\",\n\t\t\"selectInventory\": \"extend\",\n\t\t\"hostids\": d.Id(),\n\t})\n}", "func (o *NetworkLicenseFile) GetHostId() string {\n\tif o == nil || o.HostId == nil {\n\t\tvar ret string\n\t\treturn ret\n\t}\n\treturn *o.HostId\n}", "func libc_getuid() int32", "func GetHostID() string {\n\tif cachedHostID != \"\" {\n\t\treturn cachedHostID\n\t}\n\n\tecsMetadataURI := os.Getenv(\"ECS_CONTAINER_METADATA_URI_V4\")\n\tif ecsMetadataURI != \"\" {\n\t\thostID, err := getHostIDFromECS(ecsMetadataURI + \"/task\")\n\t\tif err == nil {\n\t\t\tcachedHostID = hostID\n\t\t\treturn cachedHostID\n\t\t}\n\n\t\tfmt.Fprintf(os.Stderr, \"Failed to get task ARN from ECS metadata v4 endpoint: %v\\n\", err)\n\t}\n\n\tecsMetadataURI = os.Getenv(\"ECS_CONTAINER_METADATA_URI\")\n\tif ecsMetadataURI != \"\" {\n\t\thostID, err := getHostIDFromECS(ecsMetadataURI + \"/task\")\n\t\tif err == nil {\n\t\t\tcachedHostID = hostID\n\t\t\treturn cachedHostID\n\t\t}\n\n\t\tfmt.Fprintf(os.Stderr, \"Failed to get task ARN from ECS metadata v3 endpoint: %v\\n\", 
err)\n\t}\n\n\thostID, errECS := getHostIDFromECS(\"http://169.254.170.2/v2/metadata\")\n\tif errECS == nil {\n\t\tcachedHostID = hostID\n\t\treturn cachedHostID\n\t}\n\n\thostID, errEC2 := getHostIDFromEC2()\n\tif errEC2 == nil {\n\t\tcachedHostID = hostID\n\t\treturn cachedHostID\n\t}\n\n\thostID, errIF := getHostIDFromInterfaces()\n\tif errIF == nil {\n\t\tcachedHostID = hostID\n\t\treturn cachedHostID\n\t}\n\n\thostID, errRand := getRandomHostID()\n\tif errRand == nil {\n\t\tcachedHostID = hostID\n\t\treturn cachedHostID\n\t}\n\n\tfmt.Fprintf(os.Stderr, \"Failed to get task ARN from ECS metadata v2 endpoint: %v\\n\", errECS)\n\tfmt.Fprintf(os.Stderr, \"Failed to get instance ID from EC2 metadata endpoint: %v\\n\", errEC2)\n\tfmt.Fprintf(os.Stderr, \"Failed to get IP address from network interface: %v\\n\", errIF)\n\tfmt.Fprintf(os.Stderr, \"Failed to get random host ID: %v\\n\", errRand)\n\tpanic(\"Unable to obtain a valid host ID\")\n}", "func Getuid() int", "func getHostId() (uint64, error) {\n\ta := getLocalIP()\n\tip := (uint64(a[0]) << 24) + (uint64(a[1]) << 16) + (uint64(a[2]) << 8) + uint64(a[3])\n\treturn ip % MaxHostId, nil\n}", "func dataHostRead(d *schema.ResourceData, m interface{}) error {\n\tparams := zabbix.Params{\n\t\t\"selectInterfaces\": \"extend\",\n\t\t\"selectParentTemplates\": \"extend\",\n\t\t\"selectGroups\": \"extend\",\n\t\t\"selectMacros\": \"extend\",\n\t\t\"selectTags\": \"extend\",\n\t\t\"selectInventory\": \"extend\",\n\t\t\"filter\": map[string]interface{}{},\n\t}\n\n\tlookups := []string{\"host\", \"hostid\", \"name\"}\n\tfor _, k := range lookups {\n\t\tif v, ok := d.GetOk(k); ok {\n\t\t\tparams[\"filter\"].(map[string]interface{})[k] = v\n\t\t}\n\t}\n\n\tif len(params[\"filter\"].(map[string]interface{})) < 1 {\n\t\treturn errors.New(\"no host lookup attribute\")\n\t}\n\tlog.Debug(\"performing data lookup with params: %#v\", params)\n\n\treturn hostRead(d, m, params)\n}", "func (conf *Configuration) UUID(name string) (string, 
error) {\n\tctx := context.NewContext(conf.Timeout)\n\tdefer ctx.Cancel()\n\n\treturn conf.UUIDWithContext(ctx, name)\n}", "func (pe *ProgramExt) UUID() string {\n\treturn fmt.Sprintf(\"%s_%s\", pe.Manager, pe.Config)\n}", "func getNodeUUID(client clientset.Interface, nodeName string) string {\n\tnode, err := client.CoreV1().Nodes().Get(nodeName, metav1.GetOptions{})\n\tgomega.Expect(err).NotTo(gomega.HaveOccurred())\n\treturn strings.TrimPrefix(node.Spec.ProviderID, providerPrefix)\n}", "func readUserFriendlyFilePath(path string) ([]byte, error) {\n\tpath, err := homedir.Expand(path)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to retrieve key path: %v\", err)\n\t}\n\treturn ioutil.ReadFile(path)\n}", "func readIdentifier(path string, fileSuffix string, idtype string) (string, error) {\n\tidentifiers, err := readIdentifiers(path)\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"error reading identifier: %v\", err)\n\t}\n\n\tfor _, item := range identifiers {\n\t\tif !strings.HasSuffix(strings.ToLower(item.File), strings.ToLower(fileSuffix)) {\n\t\t\tcontinue\n\t\t}\n\t\tfor _, pair := range item.Identifiers {\n\t\t\tif pair.Type == idtype {\n\t\t\t\treturn pair.Value, nil\n\t\t\t}\n\t\t}\n\t}\n\n\treturn \"\", errors.New(\"error reading identifier: not found\")\n}", "func (mariadbFlavor) serverUUID(c *Conn) (string, error) {\n\treturn \"\", nil\n}", "func (e *EPub) UUID() string {\n\treturn strings.TrimPrefix(\"urn:uuid:\", e.uuid)\n}", "func getUID(lib utils.PathIdentifier) string {\n\treturn lib.Key()[:5]\n}", "func chatUUIDstring(idstr string) (idbytes []byte, err error) {\n\tif idstr == \"\" {\n\t\tidbytes, err = db.DB.Read(db.CHAT, db.LastCB)\n\t} else {\n\t\tvar id uuid.UUID\n\t\tid, err = uuid.FromString(idstr)\n\t\tidbytes = id.Bytes()\n\t\treturn\n\t}\n\treturn\n}", "func parseFormatted(b []byte) (UUID, error) {\n\tvar u UUID\n\tvar iu, ib int\n\tfor idx, cnt := range uuidHexLengths {\n\t\tn, err := hex.Decode(u[iu:], b[ib:ib+cnt])\n\t\tif err 
!= nil {\n\t\t\treturn u, ErrInvalidUUID\n\t\t}\n\t\tif idx < 4 && b[ib+cnt] != dash {\n\t\t\treturn u, ErrInvalidUUID\n\t\t}\n\t\tiu += n\n\t\tib += cnt + 1\n\t}\n\treturn u, nil\n}", "func ExtractUUID(r *http.Request) (uuid.UUID, error) {\n\ts := r.Header.Get(\"Authorization\")\n\tif s == \"\" {\n\t\tpretty.Printf(\"fatal error: Authorization Header empty \\n\")\n\t\treturn uuid.Nil, errors.New(\"Authorization Header empty\")\n\t}\n\n\tinUUID, err := uuid.FromString(s)\n\tif err != nil {\n\t\tpretty.Printf(\"fatal error: %s \\n\", err)\n\t\treturn uuid.Nil, err\n\t}\n\n\tif _, err := datastructures.GetEntry(inUUID); err != nil {\n\t\tpretty.Printf(\"fatal error: %s \\n\", err)\n\t\treturn uuid.Nil, errors.New(\"sorry, UUID is not correct, please access /welcome to receive an UUID\")\n\t}\n\n\treturn inUUID, nil\n}", "func (c parser) GetHexDeviceID(command []byte) (string, error) {\n\treturn extractByteRangeAndReturnHex(command, 5, 25, \"Identificador do device não encontrado\")\n}", "func (ps *PS) UUID() uint64 {\n\tif ps.uuid != 0 {\n\t\treturn ps.uuid\n\t}\n\t// assume the uuid is derived from boot ID and process start time\n\tps.uuid = (bootid.Read() << 30) + uint64(ps.PID) | uint64(ps.StartTime.UnixNano())\n\tmaj, _, patch := windows.RtlGetNtVersionNumbers()\n\tif maj >= 10 && patch >= 1507 {\n\t\tseqNum := querySequenceNumber(ps.PID)\n\t\t// prefer the most robust variant of the uuid which uses the\n\t\t// process sequence number obtained from the process object\n\t\tif seqNum != 0 {\n\t\t\tps.uuid = (bootid.Read() << 30) | seqNum\n\t\t}\n\t}\n\treturn ps.uuid\n}", "func ParseUUID(s string) (u UUID, err error) {\n\tif len(s) != UUIDStringLen {\n\t\terr = errors.New(\"invalid UUID string length\")\n\t\treturn\n\t}\n\n\tif s[sDelim0At] != uuidDelim || s[sDelim1At] != uuidDelim || s[sDelim2At] != uuidDelim || s[sDelim3At] != uuidDelim {\n\t\terr = errors.New(\"invalid UUID string delimiters\")\n\t\treturn\n\t}\n\n\tb := []byte(s)\n\n\tif l, e := 
hex.Decode(u[part0From:part1From], b[sPart0From:sPart0To]); l != part0Len || e != nil {\n\t\terr = errors.New(\"invalid UUID part 1\")\n\t\treturn\n\t}\n\n\tif l, e := hex.Decode(u[part1From:part2From], b[sPart1From:sPart1To]); l != part1Len || e != nil {\n\t\terr = errors.New(\"invalid UUID part 2\")\n\t\treturn\n\t}\n\n\tif l, e := hex.Decode(u[part2From:part3From], b[sPart2From:sPart2To]); l != part2Len || e != nil {\n\t\terr = errors.New(\"invalid UUID part 3\")\n\t\treturn\n\t}\n\n\tif l, e := hex.Decode(u[part3From:part4From], b[sPart3From:sPart3To]); l != part3Len || e != nil {\n\t\terr = errors.New(\"invalid UUID part 4\")\n\t\treturn\n\t}\n\n\tif l, e := hex.Decode(u[part4From:], b[sPart4From:sPart4To]); l != part4Len || e != nil {\n\t\terr = errors.New(\"invalid UUID part 5\")\n\t\treturn\n\t}\n\treturn\n}", "func generateUUID() string {\n\tbuf := make([]byte, 16)\n\tif _, err := cr.Read(buf); err != nil {\n\t\tpanic(fmt.Errorf(\"failed to read random bytes: %w\", err))\n\t}\n\n\treturn fmt.Sprintf(\"%08x-%04x-%04x-%04x-%12x\",\n\t\tbuf[0:4],\n\t\tbuf[4:6],\n\t\tbuf[6:8],\n\t\tbuf[8:10],\n\t\tbuf[10:16])\n}", "func generateUUID(bd blockdevice.BlockDevice) (string, bool) {\n\tvar ok bool\n\tvar uuidField, uuid string\n\n\t// select the field which is to be used for generating UUID\n\t//\n\t// Serial number is not used directly for UUID generation. This is because serial number is not\n\t// unique in some cloud environments. For example, in GCP the serial number is\n\t// configurable by the --device-name flag while attaching the disk.\n\t// If this flag is not provided, GCP automatically assigns the serial number\n\t// which is unique only to the node. Therefore Serial number is used only in cases\n\t// where the disk has a WWN.\n\t//\n\t// If disk has WWN, a combination of WWN+Serial will be used. This is done because there are cases\n\t// where the disks has same WWN but different serial. 
It is seen in some storage arrays.\n\t// All the LUNs will have same WWN, but different serial.\n\t//\n\t// PartitionTableUUID is not used for UUID generation in NDM. The only case where the disk has a PartitionTable\n\t// and not partition is when, the user has manually created a partition table without writing any actual partitions.\n\t// This means NDM will have to give its consumers the entire disk, i.e consumers will have access to the sectors\n\t// where partition table is written. If consumers decide to reformat or erase the disk completely the partition\n\t// table UUID is also lost, making NDM unable to identify the disk. Hence, even if a partition table is present\n\t// NDM will rewrite it and create a new GPT table and a single partition. Thus consumers will have access only to\n\t// the partition and the unique data will be stored in sectors where consumers do not have access.\n\n\tswitch {\n\tcase bd.DeviceAttributes.DeviceType == blockdevice.BlockDeviceTypePartition:\n\t\t// The partition entry UUID is used when a partition (/dev/sda1) is processed. 
The partition UUID should be used\n\t\t// if available, other than the partition table UUID, because multiple partitions can have the same partition table\n\t\t// UUID, but each partition will have a different UUID.\n\t\tklog.Infof(\"device(%s) is a partition, using partition UUID: %s\", bd.DevPath, bd.PartitionInfo.PartitionEntryUUID)\n\t\tuuidField = bd.PartitionInfo.PartitionEntryUUID\n\t\tok = true\n\tcase len(bd.DeviceAttributes.WWN) > 0:\n\t\t// if device has WWN, both WWN and Serial will be used for UUID generation.\n\t\tklog.Infof(\"device(%s) has a WWN, using WWN: %s and Serial: %s\",\n\t\t\tbd.DevPath,\n\t\t\tbd.DeviceAttributes.WWN, bd.DeviceAttributes.Serial)\n\t\tuuidField = bd.DeviceAttributes.WWN +\n\t\t\tbd.DeviceAttributes.Serial\n\t\tok = true\n\tcase len(bd.FSInfo.FileSystemUUID) > 0:\n\t\tklog.Infof(\"device(%s) has a filesystem, using filesystem UUID: %s\", bd.DevPath, bd.FSInfo.FileSystemUUID)\n\t\tuuidField = bd.FSInfo.FileSystemUUID\n\t\tok = true\n\t}\n\n\tif ok {\n\t\tuuid = blockdevice.BlockDevicePrefix + util.Hash(uuidField)\n\t\tklog.Infof(\"generated uuid: %s for device: %s\", uuid, bd.DevPath)\n\t}\n\n\treturn uuid, ok\n}", "func (hof *Heap) uuid() http.Arrow {\n\treturn http.GET(\n\t\tø.URI(\"https://httpbin.org/uuid\"),\n\t\tø.Accept.JSON,\n\n\t\tƒ.Status.OK,\n\t\tƒ.ContentType.JSON,\n\t\tƒ.Body(&hof.ID),\n\t)\n}", "func readRandomUint32() uint32 {\n\t// We've found systems hanging in this function due to lack of entropy.\n\t// The randomness of these bytes is just preventing nearby clashes, so\n\t// just look at the time.\n\treturn uint32(time.Now().UnixNano())\n}", "func UCDReader(file string) (io.Reader, error) {\n\tdata, err := os.ReadFile(UCDPath(file))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn bytes.NewReader(data), nil\n}", "func readPinFile(pinFile string, mac hash.Hash) (*treeHead, error) {\n\tdata, err := ioutil.ReadFile(pinFile)\n\tif os.IsNotExist(err) {\n\t\tlog.Println(\"integrity: local pin file not 
found, will accept whatever remote storage returns\")\n\t\treturn &treeHead{}, nil\n\t} else if err != nil {\n\t\treturn nil, err\n\t}\n\treturn unmarshalTreeHead(data, mac)\n}", "func Parse(b []byte) (UUID, error) {\n\tswitch len(b) {\n\tcase 16:\n\t\tvar u UUID\n\t\tcopy(u[:], b)\n\t\treturn u, nil\n\tcase 32:\n\t\tvar u UUID\n\t\t_, err := hex.Decode(u[:], b)\n\t\tif err != nil {\n\t\t\treturn u, ErrInvalidUUID\n\t\t}\n\t\treturn u, nil\n\tcase 36:\n\t\treturn parseFormatted(b)\n\tdefault:\n\t\treturn UUID{}, ErrInvalidUUID\n\t}\n}", "func readOuiFile(ouiPath string) (radix.Readonly, error) {\n\tfh, err := os.Open(ouiPath)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer fh.Close()\n\tgzreader, err := gzip.NewReader(fh)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer gzreader.Close()\n\n\ttree := radix.New()\n\n\tcommentre := regexp.MustCompile(\"\\\\s*#.*\")\n\tre := regexp.MustCompile(\"^([0-9a-fA-F:/-]+)\\\\s+(\\\\S+)\\\\s+(.*?)\\\\s*$\")\n\tprefixmatch := regexp.MustCompile(\"^([0-9a-fA-F:-]+)/(\\\\d+)$\")\n\tstripre := regexp.MustCompile(\"[:-]\")\n\n\tscanner := bufio.NewScanner(gzreader)\n\tfor scanner.Scan() {\n\t\tline := commentre.ReplaceAllString(scanner.Text(), \"\")\n\t\tsubs := re.FindStringSubmatch(line)\n\t\tif subs == nil {\n\t\t\tcontinue\n\t\t}\n\n\t\toui := &OUIDescr{\n\t\t\tPrefix: subs[1],\n\t\t\tVendor: subs[2],\n\t\t\tComments: subs[3],\n\t\t}\n\n\t\tprefix := oui.Prefix\n\t\tplen := 24\n\n\t\tpmatch := prefixmatch.FindStringSubmatch(prefix)\n\t\tif pmatch != nil {\n\t\t\tprefix = pmatch[1]\n\t\t\tif len, err := strconv.Atoi(pmatch[2]); err == nil {\n\t\t\t\tplen = len\n\t\t\t} else {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t}\n\n\t\tprefix = strings.ToLower(stripre.ReplaceAllString(prefix, \"\"))\n\t\tif plen%4 != 0 {\n\t\t\treturn nil, fmt.Errorf(\"Prefix length not multiple of 4\")\n\t\t}\n\n\t\tprefix = prefix[:plen/4]\n\n\t\ttree.Add(prefix, oui)\n\t}\n\tif err := scanner.Err(); err != nil {\n\t\treturn nil, 
err\n\t}\n\n\treturn tree.Readonly(), nil\n}", "func ReadOrInitSessionId(bD *BaseData) (string, error) {\n _, ok := sessions[bD.SessionId]; if !ok {\n bytes := make([]byte, 16)\n if _, err := rand.Read(bytes); err != nil {\n return \"\", err\n }\n sessionId := hex.EncodeToString(bytes)\n sessions[sessionId] = &Data{SessionId: sessionId, CopyAndPaste: make(map[string]bool)}\n return sessionId, nil\n }\n return bD.SessionId, nil\n}", "func (store Storage) UUID() string {\n\treturn \"\"\n}", "func ReadHostsFile() ([]byte, error) {\n\tbs, err := ioutil.ReadFile(HostsPath)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn bs, nil\n}", "func (d *Descriptor) UUID() UUID {\n\treturn d.uuid\n}", "func (sys *Sys) GetUUID() []byte {\n\tmyUUID := uuid.NewV1()\n\tlog.Debugln(\"UUID Generated:\", myUUID.String())\n\treturn myUUID.Bytes()\n}", "func Parse(value string) (UUID, error) {\n\tvar uuid UUID\n\tif len(value) != 36 && len(value) != 38 {\n\t\treturn uuid, fmt.Errorf(\"string is not the correct length\")\n\t}\n\n\tif len(value) == 38 {\n\t\tif value[0] != '{' && value[37] != '}' {\n\t\t\treturn uuid, fmt.Errorf(\"invalid UUID string format\")\n\t\t}\n\t\tvalue = value[1:37]\n\t}\n\tif value[8] != '-' ||\n\t\tvalue[13] != '-' ||\n\t\tvalue[18] != '-' ||\n\t\tvalue[23] != '-' {\n\t\treturn uuid, fmt.Errorf(\"invalid UUID string format\")\n\t}\n\n\tif _, err := hex.Decode(uuid[0:], []byte(value[0:8])); err != nil {\n\t\treturn uuid, fmt.Errorf(\"invalid UUID : %v\", err)\n\t}\n\tif _, err := hex.Decode(uuid[4:], []byte(value[9:13])); err != nil {\n\t\treturn uuid, fmt.Errorf(\"invalid UUID : %v\", err)\n\t}\n\tif _, err := hex.Decode(uuid[6:], []byte(value[14:18])); err != nil {\n\t\treturn uuid, fmt.Errorf(\"invalid UUID : %v\", err)\n\t}\n\tif _, err := hex.Decode(uuid[8:], []byte(value[19:23])); err != nil {\n\t\treturn uuid, fmt.Errorf(\"invalid UUID : %v\", err)\n\t}\n\tif _, err := hex.Decode(uuid[10:], []byte(value[24:36])); err != nil {\n\t\treturn uuid, 
fmt.Errorf(\"invalid UUID : %v\", err)\n\t}\n\n\treturn uuid, nil\n}", "func (o FioSpecVolumeVolumeSourceFlockerOutput) DatasetUUID() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v FioSpecVolumeVolumeSourceFlocker) *string { return v.DatasetUUID }).(pulumi.StringPtrOutput)\n}", "func readIdentifiers(path string) (TransferIdentifiers, error) {\n\tidentifiers := TransferIdentifiers([]TransferIdentifier{})\n\n\tblob, err := os.ReadFile(filepath.Join(path, \"metadata\", \"identifiers.json\"))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif err := json.Unmarshal(blob, &identifiers); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn identifiers, nil\n}", "func GetFileID(path string) (string, error) {\n\n\tdat, err := ioutil.ReadFile(path)\n\tif err != nil {\n\t\treturn \"\", errors.New(\"Reading File: \" + error.Error(err))\n\t}\n\n\treturn string(dat), nil\n}", "func blake2HashFromFileUUID(fileUUID []string) ([64]byte, error) {\n\tvar buffer bytes.Buffer\n\tencoder := gob.NewEncoder(&buffer)\n\n\terr := encoder.Encode(fileUUID)\n\tif err != nil {\n\t\treturn [64]byte{}, err\n\t}\n\treturn blake2b.Sum512(buffer.Bytes()), nil\n}", "func (g *Gateway) parseUUID(c *gin.Context, param string) (parsed string) {\n\tid, err := uuid.Parse(param)\n\tif err != nil {\n\t\tg.render404(c)\n\t\treturn\n\t}\n\treturn id.String()\n}", "func (j *juicefs) GetJfsVolUUID(ctx context.Context, name string) (string, error) {\n\tcmdCtx, cmdCancel := context.WithTimeout(ctx, 8*defaultCheckTimeout)\n\tdefer cmdCancel()\n\tstdout, err := j.Exec.CommandContext(cmdCtx, config.CeCliPath, \"status\", name).CombinedOutput()\n\tif err != nil {\n\t\tre := string(stdout)\n\t\tif strings.Contains(re, \"database is not formatted\") {\n\t\t\tklog.V(6).Infof(\"juicefs %s not formatted.\", name)\n\t\t\treturn \"\", nil\n\t\t}\n\t\tklog.Infof(\"juicefs status error: %v, output: '%s'\", err, re)\n\t\tif cmdCtx.Err() == context.DeadlineExceeded {\n\t\t\tre = fmt.Sprintf(\"juicefs status %s timed out\", 
8*defaultCheckTimeout)\n\t\t\treturn \"\", errors.New(re)\n\t\t}\n\t\treturn \"\", errors.Wrap(err, re)\n\t}\n\n\tmatchExp := regexp.MustCompile(`\"UUID\": \"(.*)\"`)\n\tidStr := matchExp.FindString(string(stdout))\n\tidStrs := strings.Split(idStr, \"\\\"\")\n\tif len(idStrs) < 4 {\n\t\treturn \"\", fmt.Errorf(\"get uuid of %s error\", name)\n\t}\n\n\treturn idStrs[3], nil\n}", "func readUsername() string {\n\tusername := \"bob\"\n\n\tdata, err := ioutil.ReadFile(usernameFile)\n\tif err != nil {\n\t\treturn username\n\t}\n\n\tvar userData struct {\n\t\tUsername string `yaml:\"username\"`\n\t}\n\terr = yaml.Unmarshal(data, &userData)\n\tif err != nil {\n\t\treturn username\n\t}\n\treturn userData.Username\n}", "func loadIdentity(userName, identity string) ([]byte, error) {\n\tif filepath.Dir(identity) == \".\" {\n\t\tu, err := user.Current()\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tidentity = filepath.Join(u.HomeDir, \".ssh\", identity)\n\t}\n\n\treturn ioutil.ReadFile(identity)\n}", "func DecodeHash(hash string) string {\n\tbyteArray := []byte(hash)\n\n\tfor i := 0; i < 19; i++ {\n\t\tif (string(byteArray[i*2]) == \"0\") && (string(byteArray[(i*2)+1]) == \"3\") {\n\t\t\tfileName, _ := hex.DecodeString(string(byteArray[:(i)*2]))\n\t\t\treturn string(fileName)\n\t\t}\n\t}\n\treturn \"Error when decoding dataID\"\n}", "func (*UserUUID) Descriptor() ([]byte, []int) {\n\treturn file_user_proto_rawDescGZIP(), []int{2}\n}", "func Readfile(conf *goini.Config) {\n\tfilename := conf.GetValue(\"hqmodule\", \"sh_filename\")\n\tinterval, _ := strconv.Atoi(conf.GetValue(\"hqmodule\", \"sh_readfileinterval\"))\n\tquotes := conf.GetStr(helper.ConfigHQSessionName, \"sh\")\n\tquotemap := make(map[string]bool, 3)\n\tfor _, q := range strings.Split(quotes, \"|\") {\n\t\tquotemap[q] = true\n\t}\n\t_, md001ok := quotemap[\"md001\"]\n\t_, md002ok := quotemap[\"md002\"]\n\t_, md004ok := quotemap[\"md004\"]\n\n\tvar fd []byte\n\tvar l int\n\ti := 0\n\tpauseinter := 
time.Duration(interval) * time.Millisecond\n\tfor {\n\t\tfd, _ = ioutil.ReadFile(filename)\n\t\tl = len(fd) - 11\n\t\tfor i = 0; i < l; i++ {\n\t\t\tif fd[i] == 0x0A {\n\t\t\t\tif fd[i+5] == 0x33 {\n\t\t\t\t\ti += 399\n\t\t\t\t\tcontinue\n\t\t\t\t} else if fd[i+5] == 0x31 {\n\t\t\t\t\tif md001ok {\n\t\t\t\t\t\tif updateHs(&fd, &i, 150) {\n\t\t\t\t\t\t\trbmd001map.Put(fd[i+7 : i+150])\n\t\t\t\t\t\t\ti += 149\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t} else if fd[i+5] == 0x32 {\n\t\t\t\t\tif md002ok {\n\t\t\t\t\t\tif updateHs(&fd, &i, 400) {\n\t\t\t\t\t\t\trbmd002map.Put(fd[i+7 : i+400])\n\t\t\t\t\t\t\tif string(fd[i+7:i+7+6]) == \"600000\" {\n\t\t\t\t\t\t\t\tlog.Info(\"ReadFile: %d\", time.Now().UnixNano()/1e6)\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\ti += 399\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t} else if fd[i+5] == 0x34 {\n\t\t\t\t\tif md004ok {\n\t\t\t\t\t\tif updateHs(&fd, &i, 424) {\n\t\t\t\t\t\t\trbmd004map.Put(fd[i+7 : i+424])\n\t\t\t\t\t\t\ti += 423\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\ttime.Sleep(pauseinter)\n\t}\n}", "func (c *Client) ReadHost(host string) (*Host, error) {\n\tvar hostdetail StructGetHostResult\n\ts := \"request={\\\"hostname\\\": \\\"\" + host + \"\\\"}\"\n\tbody := strings.NewReader(s)\n\trespBody, respErr := c.NewAPIRequest(\"POST\", \"get_host\", body)\n\tif respErr != nil {\n\t\tfmt.Printf(\"API Request for get_host failed. Error: %s\\n\", respErr)\n\t\treturn nil, respErr\n\t}\n\trespUnmarshalErr := json.Unmarshal(respBody, &hostdetail)\n\tif respUnmarshalErr != nil {\n\t\tfmt.Printf(\"Error Decoding the API response. 
Error: %s\\n\", respUnmarshalErr)\n\t\treturn nil, respUnmarshalErr\n\t}\n\thostname := hostdetail.Result.Hostname\n\tfolder := hostdetail.Result.Path\n\talias := hostdetail.Result.Attributes.Alias\n\ttagAgent := hostdetail.Result.Attributes.TagAgent\n\ttagCriticality := hostdetail.Result.Attributes.TagCriticality\n\tipaddress := hostdetail.Result.Attributes.Ipaddress\n\thoststruct := &Host{Attributes{alias, tagAgent, tagCriticality, ipaddress}, hostname, folder}\n\treturn hoststruct, nil\n}", "func (*FileUUID) Descriptor() ([]byte, []int) {\n\treturn file_github_com_Ultimate_Super_WebDev_Corp_gateway_services_file_file_proto_rawDescGZIP(), []int{0}\n}", "func GetUID() string {\n\twd, err := os.Getwd()\n\n\tvar data map[string]interface{}\n\n\tbuff, err := ioutil.ReadFile(wd + \"/package.json\")\n\n\tcheck(err)\n\n\tif err := json.Unmarshal(buff, &data); err != nil {\n\t\tpanic(err)\n\t}\n\n\tuser, err := GetStoredUser()\n\n\tGuard(user)\n\n\tcheck(err)\n\n\tname := data[\"name\"].(string)\n\n\tuid := CreateUID(name, user.Email)\n\n\treturn uid\n}", "func UnhexUuid(uuid uuid.UUID) ([]byte, error) {\n\ts := strings.Replace(uuid.String(), \"-\", \"\", -1)\n\treturn hex.DecodeString(s)\n}", "func newNameUUIDFromBytes(bytes []byte) *UUID {\n\tmd5Hash := md5.Sum(bytes)\n\tmd5Hash[6] &= 0x0f /* clear version */\n\tmd5Hash[6] |= 0x30 /* set to version 3 */\n\tmd5Hash[8] &= 0x3f /* clear variant */\n\tmd5Hash[8] |= 0x80 /* set to IETF variant */\n\n\tvar msb uint64\n\tvar lsb uint64\n\n\tfor i := 0; i < 8; i++ {\n\t\tmsb = (msb << 8) | (uint64(md5Hash[i]) & 0xff)\n\t}\n\tfor i := 8; i < 16; i++ {\n\t\tlsb = (lsb << 8) | (uint64(md5Hash[i]) & 0xff)\n\t}\n\n\treturn &UUID{msb, lsb}\n}", "func (TiText) ReadHex(r io.Reader) (Hexfile, os.Error) {\n\tresp := RecordSequence{}\n\n\t// 16 bytes per line ought to be enough for anybody.\n\tline_reader := line.NewReader(r, 64)\n\taddr := 0\n\n\tfor {\n\t\tline, is_prefix, err := line_reader.ReadLine()\n\t\tif line == nil && err == 
os.EOF {\n\t\t\tbreak\n\t\t} else if err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tfor is_prefix {\n\t\t\tvar line_part []byte\n\t\t\tline_part, is_prefix, err = line_reader.ReadLine()\n\t\t\tif line == nil && err == os.EOF {\n\t\t\t\tbreak\n\t\t\t} else if err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tline = append(line, line_part...)\n\t\t}\n\t\tline_s := string(line)\n\t\treader := strings.NewReader(line_s)\n\t\tswitch line_s[0] {\n\t\tcase '@':\n\t\t\treader.ReadByte()\n\t\t\tif len(line) < 2 {\n\t\t\t\treturn nil, StrError(\"Format error: short address\")\n\t\t\t}\n\t\t\taddr, err = decodeInt(reader, -1)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\t// TODO(thequux): Check for trailing junk\n\t\tcase 'q':\n\t\t\treturn resp, nil\n\t\tcase '0', '1', '2', '3', '4', '5', '6', '7', '8', '9':\n\t\t\tfallthrough\n\t\tcase 'a', 'b', 'c', 'd', 'e', 'f':\n\t\t\tfallthrough\n\t\tcase 'A', 'B', 'C', 'D', 'E', 'F':\n\t\t\tbuf := make([]byte, 0, len(line)/3)\n\t\t\tfor {\n\n\t\t\t\tif n, err := decodeInt(reader, -1); err != nil {\n\t\t\t\t\tif err == os.EOF {\n\t\t\t\t\t\tbreak\n\t\t\t\t\t}\n\t\t\t\t\treturn nil, err\n\t\t\t\t} else {\n\t\t\t\t\tbuf = append(buf, byte(n))\n\t\t\t\t}\n\t\t\t}\n\t\t\tresp = append(resp, Record{addr, buf})\n\t\t\taddr += len(buf)\n\t\tdefault:\n\t\t\treturn nil, StrError(\"Invalid format\")\n\t\t}\n\t}\n\treturn resp, nil\n}", "func uniqueHandle(client interfaces.Client) (interfaces.Client, error) {\n\tfile, err := os.Open(\"users.txt\")\n\tif err != nil {\n\t\treturn client, err\n\t}\n\tdefer file.Close()\n\treader := bufio.NewReader(file)\n\tfor {\n\t\tline, err := reader.ReadString('\\n')\n\t\tif err != nil && err != io.EOF {\n\t\t\treturn client, err\n\t\t}\n\t\thandle, _ := helpers.SplitOnFirstDelim(',', line)\n\t\tif client.GetHandle() == handle {\n\t\t\treturn client, errors.New(\"Handle is not unique\")\n\t\t}\n\t\tif err == io.EOF {\n\t\t\tbreak\n\t\t}\n\t}\n\treturn client, err\n}", "func 
lookupUsername(file string) (string, error) {\n\tfileInfo, err := os.Stat(file)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tuid := fileInfo.Sys().(*syscall.Stat_t).Uid\n\tif ucache[uid] != \"\" {\n\t\treturn ucache[uid], nil\n\t}\n\tu, err := user.LookupId(strconv.FormatUint(uint64(uid), 10))\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tucache[uid] = u.Username\n\treturn u.Username, nil\n}" ]
[ "0.69632655", "0.6446444", "0.60150903", "0.59953517", "0.5983552", "0.59579515", "0.58662516", "0.5794862", "0.5774995", "0.57337934", "0.57194346", "0.56959444", "0.5645361", "0.5586416", "0.554383", "0.55005544", "0.53939855", "0.53913856", "0.5279676", "0.5251984", "0.5185961", "0.5163046", "0.51620716", "0.5162019", "0.5161038", "0.5160165", "0.51465535", "0.5131792", "0.51190853", "0.51169676", "0.5112542", "0.50973433", "0.50754976", "0.5031062", "0.5018129", "0.5014143", "0.5007482", "0.49971324", "0.49307427", "0.49180737", "0.49170688", "0.4913092", "0.4905663", "0.4900515", "0.48987964", "0.48901162", "0.48837683", "0.48796737", "0.48489857", "0.48304746", "0.48303434", "0.48263764", "0.48166877", "0.47918335", "0.47893786", "0.47828728", "0.4781566", "0.47805685", "0.4771621", "0.47484154", "0.4724709", "0.47210962", "0.47095472", "0.46963507", "0.46925047", "0.4690793", "0.46879882", "0.46758112", "0.4675701", "0.467455", "0.46569797", "0.46559978", "0.46557954", "0.46545693", "0.4653859", "0.46469694", "0.4641024", "0.46406263", "0.4639627", "0.4628563", "0.46231022", "0.46203563", "0.46183893", "0.46093425", "0.46045282", "0.45931858", "0.45878783", "0.45829403", "0.45806572", "0.45776314", "0.45727795", "0.4572598", "0.45669642", "0.4566248", "0.45639655", "0.45593965", "0.45592088", "0.45562103", "0.455411", "0.4554011" ]
0.8575717
0
WriteHostUUID writes host UUID into a file
WriteHostUUID записывает UUID хоста в файл
func WriteHostUUID(dataDir string, id string) error { err := os.WriteFile(filepath.Join(dataDir, HostUUIDFile), []byte(id), os.ModeExclusive|0400) if err != nil { if errors.Is(err, fs.ErrPermission) { //do not convert to system error as this loses the ability to compare that it is a permission error return err } return trace.ConvertSystemError(err) } return nil }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func WriteUUID(buffer []byte, offset int, value UUID) {\n bytes, _ := value.MarshalBinary()\n WriteBytes(buffer, offset, bytes)\n}", "func (this *Actions) generateUid(config *Config, hostname string) string{\n uuidNew := uuid.NewV5(uuid.NewV1(), hostname).String()\n uuid_path := config.GetValue(\"basic\", \"uuid_path\")\n log.Info(\"The new uuid is : \" + uuidNew)\n file, error := os.OpenFile(uuid_path, os.O_RDWR|os.O_CREATE, 0622)\n if error != nil {\n log.Error(\"Open uuid file in \"+ uuid_path +\" failed.\" + error.Error())\n }\n _,err := file.WriteString(uuidNew)\n if err != nil {\n log.Error(\"Save uuid file in \"+ uuid_path +\" failed.\" + err.Error())\n }\n file.Close()\n return uuidNew\n}", "func ReadOrMakeHostUUID(dataDir string) (string, error) {\n\tid, err := ReadHostUUID(dataDir)\n\tif err == nil {\n\t\treturn id, nil\n\t}\n\tif !trace.IsNotFound(err) {\n\t\treturn \"\", trace.Wrap(err)\n\t}\n\t// Checking error instead of the usual uuid.New() in case uuid generation\n\t// fails due to not enough randomness. It's been known to happen happen when\n\t// Teleport starts very early in the node initialization cycle and /dev/urandom\n\t// isn't ready yet.\n\trawID, err := uuid.NewRandom()\n\tif err != nil {\n\t\treturn \"\", trace.BadParameter(\"\" +\n\t\t\t\"Teleport failed to generate host UUID. \" +\n\t\t\t\"This may happen if randomness source is not fully initialized when the node is starting up. 
\" +\n\t\t\t\"Please try restarting Teleport again.\")\n\t}\n\tid = rawID.String()\n\tif err = WriteHostUUID(dataDir, id); err != nil {\n\t\treturn \"\", trace.Wrap(err)\n\t}\n\treturn id, nil\n}", "func UUIDFile(fpath string) (string, error) {\n\n\t_, err := os.Stat(fpath)\n\tif err != nil && !os.IsExist(err) {\n\t\tkey := uuid.New().String()\n\t\tif err := ioutil.WriteFile(fpath, []byte(key), 0777); err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t\treturn key, nil\n\t}\n\n\tfp, err := os.Open(fpath)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tdefer fp.Close()\n\tdata, err := ioutil.ReadAll(fp)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tkey := string(data)\n\tif _, err := uuid.Parse(key); err != nil {\n\t\treturn \"\", err\n\t}\n\treturn key, nil\n}", "func ReadHostUUID(dataDir string) (string, error) {\n\tout, err := ReadPath(filepath.Join(dataDir, HostUUIDFile))\n\tif err != nil {\n\t\tif errors.Is(err, fs.ErrPermission) {\n\t\t\t//do not convert to system error as this loses the ability to compare that it is a permission error\n\t\t\treturn \"\", err\n\t\t}\n\t\treturn \"\", trace.ConvertSystemError(err)\n\t}\n\tid := strings.TrimSpace(string(out))\n\tif id == \"\" {\n\t\treturn \"\", trace.NotFound(\"host uuid is empty\")\n\t}\n\treturn id, nil\n}", "func post(w http.ResponseWriter,r *http.Request) {\n\toutput, _ := exec.Command(\"dbus-uuidgen\").Output()\n\tuuid := strings.TrimSuffix(string(output), \"\\n\") //注意生成的uuid包含\\n后缀,而在url中该字符别翻译为%OA,造成无法删除临时问题\n\tname := strings.Split(r.URL.EscapedPath(), \"/\")[2]\n\tsize, e := strconv.ParseInt(r.Header.Get(\"size\"), 0, 64)\n\tif e != nil{\n\t\tlog.Errorf(\"Temp/<hash> post parse_size error %v\",e)\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\treturn\n\t}\n\tt := tempinfo{Uuid:uuid,Name:name,Size:size}\n\te = t.writeToFile()\n\tif e!= nil{\n\t\tlog.Errorf(\"Temp/<hash> post write to file error 
%v\",e)\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\treturn\n\t}\n\tos.Create(os.Getenv(\"STORAGE_ROOT\")+\"/temp/\"+t.Uuid+\".dat\")\n\tw.Write([]byte(t.Uuid))\n}", "func writeHostMap(hostMap map[string]string) {\n\tif host_list_file == \"\" {\n\t\treturn\n\t}\n\tf, err := os.Create(host_list_file)\n\tif err != nil {\n\t\tlogr.LogLine(logr.Lerror, ltagsrc, err.Error())\n\t}\n\tdefer f.Close()\n\n\tfor host := range hostMap {\n\t\tf.WriteString(fmt.Sprintf(\"%s\\n\", host))\n\t}\n}", "func (b *Broker) createIDFile(home string, filepath string, id string) (err error) {\n\t_filepath := fmt.Sprintf(\"%v%v%v\", home, string(os.PathSeparator), filepath)\n\terr = ioutil.WriteFile(_filepath, []byte(id), 0644)\n\n\treturn\n}", "func WritePidFile(path string, pid int) error {\n\tlog.WithField(\"pid\", pid).Debug(\"writing pid file\")\n\tpidFile, err := os.Create(path)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to write pid file: %v\", err)\n\t}\n\tdefer pidFile.Close()\n\tpidFile.WriteString(strconv.Itoa(pid))\n\treturn nil\n}", "func (e *EPub) SetUUID(uu string) error {\n\tu, err := uuid.FromString(uu)\n\tif err != nil {\n\t\treturn err\n\t}\n\te.uuid = \"urn:uuid:\" + u.String()\n\tlog.Printf(\"Setting uuid, theoretically %q\", e.uuid)\n\tfor i, m := range e.metadata {\n\t\tif m.kind == \"dc:identifier\" {\n\t\t\tlog.Printf(\"Set id to %q\", e.uuid)\n\t\t\te.metadata[i].value = e.uuid\n\t\t}\n\t}\n\treturn nil\n}", "func (device *DCV2Bricklet) WriteUID(uid uint32) (err error) {\n\tvar buf bytes.Buffer\n\tbinary.Write(&buf, binary.LittleEndian, uid)\n\n\tresultBytes, err := device.device.Set(uint8(FunctionWriteUID), buf.Bytes())\n\tif err != nil {\n\t\treturn err\n\t}\n\tif len(resultBytes) > 0 {\n\t\tvar header PacketHeader\n\n\t\theader.FillFromBytes(resultBytes)\n\n\t\tif header.Length != 8 {\n\t\t\treturn fmt.Errorf(\"Received packet of unexpected size %d, instead of %d\", header.Length, 8)\n\t\t}\n\n\t\tif header.ErrorCode != 0 {\n\t\t\treturn 
DeviceError(header.ErrorCode)\n\t\t}\n\n\t\tbytes.NewBuffer(resultBytes[8:])\n\n\t}\n\n\treturn nil\n}", "func (device *IndustrialDigitalIn4V2Bricklet) WriteUID(uid uint32) (err error) {\n\tvar buf bytes.Buffer\n\tbinary.Write(&buf, binary.LittleEndian, uid)\n\n\tresultBytes, err := device.device.Set(uint8(FunctionWriteUID), buf.Bytes())\n\tif err != nil {\n\t\treturn err\n\t}\n\tif len(resultBytes) > 0 {\n\t\tvar header PacketHeader\n\n\t\theader.FillFromBytes(resultBytes)\n\n\t\tif header.Length != 8 {\n\t\t\treturn fmt.Errorf(\"Received packet of unexpected size %d, instead of %d\", header.Length, 8)\n\t\t}\n\n\t\tif header.ErrorCode != 0 {\n\t\t\treturn DeviceError(header.ErrorCode)\n\t\t}\n\n\t\tbytes.NewBuffer(resultBytes[8:])\n\n\t}\n\n\treturn nil\n}", "func WritePidFile(componentName string) error {\n\tpidFile := fmt.Sprintf(\"%s/%s-%d.pid\", KtHome, componentName, os.Getpid())\n\treturn ioutil.WriteFile(pidFile, []byte(fmt.Sprintf(\"%d\", os.Getpid())), 0644)\n}", "func writeUint24(b *bytes.Buffer, value uint24) {\n\tb.WriteByte(byte(value))\n\tb.WriteByte(byte(value >> 8))\n\tb.WriteByte(byte(value >> 16))\n}", "func encodeUUID(src [16]byte) string {\n\treturn fmt.Sprintf(\"%x-%x-%x-%x-%x\", src[0:4], src[4:6], src[6:8], src[8:10], src[10:16])\n}", "func writeFile(dir, file, uid, gid string, data []byte) error {\n\tfnlog := log.\n\t\tWithField(\"dir\", dir).\n\t\tWithField(\"file\", file)\n\n\ttmpfile, err := ioutil.TempFile(dir, \"systemk.*.tmp\")\n\tif err != nil {\n\t\treturn err\n\t}\n\tfnlog.Debugf(\"chowning %q to %s.%s\", tmpfile.Name(), uid, gid)\n\tif err := chown(tmpfile.Name(), uid, gid); err != nil {\n\t\treturn err\n\t}\n\n\tx := 10\n\tif len(data) < 10 {\n\t\tx = len(data)\n\t}\n\tfnlog.Debugf(\"writing data %q to path %q\", data[:x], tmpfile.Name())\n\tif err := ioutil.WriteFile(tmpfile.Name(), data, 0640); err != nil {\n\t\treturn err\n\t}\n\tpath := filepath.Join(dir, file)\n\tfnlog.Debugf(\"renaming %q to %q\", tmpfile.Name(), 
path)\n\n\treturn os.Rename(tmpfile.Name(), path)\n}", "func writePid() {\n\tif *pid_file_path == \"\" {\n\t\treturn\n\t}\n\tf, err := os.OpenFile(*pid_file_path, os.O_WRONLY|os.O_TRUNC|os.O_CREATE, 0644)\n\tif err != nil {\n\t\tlog.Printf(\"ERROR unable to open pidfile: %v\", err)\n\t\treturn\n\t}\n\tfmt.Fprintln(f, os.Getpid())\n\tonShutdown(rmPidfile)\n}", "func GenUUID(account string) string {\n h1 := md5.New()\n io.WriteString(h1, account)\n io.WriteString(h1, UUIDkey)\n h2 := md5.New()\n io.WriteString(h2, account)\n io.WriteString(h2, MD5key)\n return fmt.Sprintf(\"%x%x\", h1.Sum(nil), h2.Sum(nil))\n}", "func CreateUuidForMonitorData(md MonitorData) string {\n serial := SerializeMonitorData(md)\n h := sha256.New()\n h.Write(serial)\n return fmt.Sprintf(\"%x\", h.Sum(nil))\n}", "func (w *FormSerializationWriter) WriteUUIDValue(key string, value *uuid.UUID) error {\n\tif key != \"\" && value != nil {\n\t\tw.writePropertyName(key)\n\t}\n\tif value != nil {\n\t\tw.writeStringValue((*value).String())\n\t}\n\tif key != \"\" && value != nil {\n\t\tw.writePropertySeparator()\n\t}\n\treturn nil\n}", "func encodeHex(dst []byte, uuid Uuid) {\n\t// hex.Encode takes an input byte array and returns the bytes of the string\n\t// of the hex encoding of each input byte\n\t// example: [63,127] ==> ['3', 'f', '7', 'f']\n\thex.Encode(dst[:], uuid[:4])\n\tdst[8] = '-'\n\thex.Encode(dst[9:13], uuid[4:6])\n\tdst[13] = '-'\n\thex.Encode(dst[14:18], uuid[6:8])\n\tdst[18] = '-'\n\thex.Encode(dst[19:23], uuid[8:10])\n\tdst[23] = '-'\n\thex.Encode(dst[24:], uuid[10:])\n}", "func createFakeDHCP() error{\n\n\n dhcpData := []byte(`lease 192.168.50.63 {\n starts 4 2019/08/08 22:32:49;\n ends 4 2019/08/08 23:52:49;\n cltt 4 2019/08/08 22:32:49;\n binding state active;\n next binding state free;\n rewind binding state free;\n hardware ethernet 08:00:27:00:ab:2c;\n client-hostname \"fake-test-bmh\"\";\n}`)\n err := ioutil.WriteFile(\"/var/lib/dhcp/dhcpd.leases\", dhcpData, 0777)\n\n if (err 
!= nil) {\n return err\n }\n\n return nil\n}", "func (ins *EC2RemoteClient) WriteBytesToFile(source []byte, destination string) error {\n\terr := ins.cmdClient.WriteBytesToFile(source, destination)\n\treturn err\n}", "func writePid() {\n\tpid := os.Getpid()\n\tpidfile := os.ExpandEnv(\"$PIDFILE\")\n\tlog.Printf(\"Opening pidfile %s: %d\", pidfile, pid)\n\tif pidfile != \"\" {\n\t\tfile, err := os.Create(pidfile)\n\t\tif err != nil {\n\t\t\tlog.Fatal(\"Couldn't open pidfile \" + pidfile)\n\t\t}\n\t\tio.WriteString(file, strconv.Itoa(pid))\n\t\tdefer func() {\n\t\t\tif err = file.Close(); err != nil {\n\t\t\t\tlog.Fatal(\"Couldn't close pidfile \" + pidfile + \". \" + err.Error())\n\t\t\t}\n\t\t}()\n\t}\n}", "func (hof *Heap) uuid() http.Arrow {\n\treturn http.GET(\n\t\tø.URI(\"https://httpbin.org/uuid\"),\n\t\tø.Accept.JSON,\n\n\t\tƒ.Status.OK,\n\t\tƒ.ContentType.JSON,\n\t\tƒ.Body(&hof.ID),\n\t)\n}", "func pgFormatUUID(arr [16]byte) (out []byte) {\n\tout = make([]byte, 36)\n\n\t_ = hex.Encode(out[0:8], arr[0:4])\n\t_ = hex.Encode(out[9:13], arr[4:6])\n\t_ = hex.Encode(out[14:18], arr[6:8])\n\t_ = hex.Encode(out[19:23], arr[8:10])\n\t_ = hex.Encode(out[24:], arr[10:])\n\n\tout[8] = '-'\n\tout[13] = '-'\n\tout[18] = '-'\n\tout[23] = '-'\n\n\treturn out\n}", "func writeToFile(file *os.File, data uint32, offset int) {\n\tbuffer := make([]byte, UINT32_LENGTH)\n\tbinary.LittleEndian.PutUint32(buffer, data)\n\tfile.WriteAt(buffer, int64(offset))\n}", "func WriteTempVaultIDFile(t *testing.T, password string) string {\n\ttempVaultIDFile, err := ioutil.TempFile(\"\", \".temp-vault-id\")\n\tif err != nil {\n\t\tt.Fatal(\"Expected a temp vault id file to be crated\", err)\n\t}\n\ttempVaultIDFileToWrite, err := os.OpenFile(tempVaultIDFile.Name(), os.O_RDWR, 0644)\n\tif err != nil {\n\t\tt.Fatal(\"Expected a temp vault id file to be writable\", err)\n\t}\n\ttempVaultIDFileToWrite.WriteString(password)\n\ttempVaultIDFileToWrite.Close()\n\treturn tempVaultIDFile.Name()\n}", "func 
(mariadbFlavor) serverUUID(c *Conn) (string, error) {\n\treturn \"\", nil\n}", "func (h *Host) ID() string {\n\tif h.id == \"\" {\n\t\thash := md5.New()\n\t\t_, _ = io.WriteString(hash, h.IP+h.MAC)\n\t\th.id = fmt.Sprintf(\"%x\", hash.Sum(nil))\n\t}\n\n\treturn h.id\n}", "func uuid() string {\n\tout, err := exec.Command(\"/usr/bin/uuidgen\").Output()\n\tif err != nil {\n\t\tlog.Fatal().\n\t\t\tStr(\"command\", \"/usr/bin/uuidgen\").\n\t\t\tMsg(\"There was an error generating the uuid.\")\n\t}\n\n\t//n := bytes.IndexByte(out, 0)\n\ts := string(out)\n\ts = strings.TrimSpace(s)\n\treturn s\n}", "func Write(idFile *IdentityFile, path string) error {\n\tbuf := new(bytes.Buffer)\n\tif err := encodeIdentityFile(buf, idFile); err != nil {\n\t\treturn trace.Wrap(err)\n\t}\n\tif err := os.WriteFile(path, buf.Bytes(), FilePermissions); err != nil {\n\t\treturn trace.ConvertSystemError(err)\n\t}\n\treturn nil\n}", "func (k TimeKey) UUID() string {\n\tbuf := make([]byte, 36)\n\n\thex.Encode(buf[0:8], k[0:4])\n\tbuf[8] = '-'\n\thex.Encode(buf[9:13], k[4:6])\n\tbuf[13] = '-'\n\thex.Encode(buf[14:18], k[6:8])\n\tbuf[18] = '-'\n\thex.Encode(buf[19:23], k[8:10])\n\tbuf[23] = '-'\n\thex.Encode(buf[24:], k[10:])\n\n\treturn string(buf)\n}", "func (h *Harness) UUID(id string) string { return h.uuidG.Get(id) }", "func writePidFile(pidFile string) error {\n\t// Read in the pid file as a slice of bytes.\n\tpiddata, err := ioutil.ReadFile(pidFile)\n\tif err == nil {\n\t\t// Convert the file contents to an integer.\n\t\tpid, err := strconv.Atoi(string(piddata))\n\t\tif err == nil {\n\t\t\t// Look for the pid in the process list.\n\t\t\tprocess, err := os.FindProcess(pid)\n\t\t\tif err == nil {\n\t\t\t\t// Send the process a signal zero kill.\n\t\t\t\terr := process.Signal(syscall.Signal(0))\n\t\t\t\tif err == nil {\n\t\t\t\t\t// We only get an error if the pid isn't running,\n\t\t\t\t\t// or it's not ours.\n\t\t\t\t\treturn fmt.Errorf(\"pid already running: %d\", 
pid)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\t// If we get here, then the pidfile didn't exist,\n\t// or the pid in it doesn't belong to the user running this app.\n\treturn ioutil.WriteFile(pidFile,\n\t\t[]byte(fmt.Sprintf(\"%d\", os.Getpid())), 0664)\n}", "func sendUUIDToPlayer(id int64, client *Client) {\n\tmsg := protocol.CreatePlayerUUIDMessage(id)\n\tsendMessageToClient(msg, id)\n}", "func (w *Writer) SetUUID(u uuid.UUID) {\n\tcopy(w.blk[uuidStart:uuidEnd], u[:])\n\tcopy(w.blk[uuidCopyStart:uuidCopyEnd], u[:])\n}", "func (h *HAProxyManager) write(b []byte) error {\n\tf, err := os.OpenFile(h.filename(), os.O_CREATE|os.O_RDWR|os.O_TRUNC, 0644)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer f.Close()\n\n\t_, err = f.Write(b)\n\treturn err\n}", "func writeFile(v string) {\n\t// 打开文件\n\tfilePtr, err := os.OpenFile(\"mqtt.json\", os.O_CREATE|os.O_WRONLY, os.ModePerm)\n\tif err != nil {\n\t\tmqtt.ERROR.Println(err)\n\t}\n\n\tdefer filePtr.Close()\n\n\ttype Data struct {\n\t\tDeviceID string `JSON:\"deviceID\"` //设备id\n\t\tTimestamp string `JSON:\"timestamp\"` //时间戳\n\t\tFields map[string]string `JSON:\"fields\"` //标签\n\t}\n\tvar data Data\n\tif err := json.Unmarshal([]byte(v), &data); err == nil {\n\n\t\t// 创建Json编码器\n\t\tencoder := json.NewEncoder(filePtr)\n\t\terr = encoder.Encode(data)\n\t\tif err != nil {\n\t\t\tmqtt.ERROR.Println(\"writeFile failed\", err.Error())\n\t\t} else {\n\t\t\tmqtt.ERROR.Println(\"writeFile success\")\n\t\t}\n\t} else {\n\t\tmqtt.ERROR.Println(err)\n\t}\n\n}", "func UUID() (string, error) {\n\tb := make([]byte, 2)\n\n\t_, err := crand.Read(b)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\treturn fmt.Sprintf(\"%X\", b[0:2]), nil\n}", "func HardwareUUID() (string, error) {\n\t/*\n\t\tSample output of 'wmic path Win32_ComputerSystemProduct get uuid'\n\n\t\tUUID\n\t\t4219B2F5-C25F-6AF2-573C-35B0DF557236\n\t*/\n\tresult, err := readAndParseFromCommandLine(hardwareUUIDCmd)\n\tif err != nil {\n\t\treturn \"-1\", err\n\t}\n\thardwareUUID := 
\"\"\n\tif len(result) > 1 {\n\t\t// remove all spaces from the second line as that line consists hardware uuid\n\t\tre := regexp.MustCompile(\"\\\\s|\\\\r\")\n\t\thardwareUUID = re.ReplaceAllString(result[1], \"\")\n\t}\n\treturn hardwareUUID, nil\n}", "func generateUniqueId() string {\n\tcmd := exec.Command(\"/usr/bin/uuidgen\")\n\tvar out bytes.Buffer\n\tcmd.Stdout = &out\n\terr := cmd.Run()\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tuuid := out.String()\n\tuuid = strings.Replace(uuid, \"\\n\", \"\", 1)\n\treturn uuid\n}", "func MakeCustomizedUuid(port, nodeNum int) (string, error) {\n\treDigit := regexp.MustCompile(`\\d`)\n\tgroup1 := fmt.Sprintf(\"%08d\", port)\n\tgroup2 := fmt.Sprintf(\"%04d-%04d-%04d\", nodeNum, nodeNum, nodeNum)\n\tgroup3 := fmt.Sprintf(\"%012d\", port)\n\t// 12345678 1234 1234 1234 123456789012\n\t// new_uuid=\"00000000-0000-0000-0000-000000000000\"\n\tswitch {\n\tcase nodeNum > 0 && nodeNum <= 9:\n\t\tgroup2 = reDigit.ReplaceAllString(group2, fmt.Sprintf(\"%d\", nodeNum))\n\t\tgroup3 = reDigit.ReplaceAllString(group3, fmt.Sprintf(\"%d\", nodeNum))\n\t// Number greater than 10 make little sense for this purpose.\n\t// But we keep the rule so that a valid UUID will be formatted in any case.\n\tcase nodeNum >= 10000 && nodeNum <= 99999:\n\t\tgroup2 = fmt.Sprintf(\"%04d-%04d-%04d\", 0, int(nodeNum/10000), nodeNum-10000*int(nodeNum/10000))\n\tcase nodeNum >= 100000 && nodeNum < 1000000:\n\t\tgroup2 = fmt.Sprintf(\"%04d-%04d-%04d\", int(nodeNum/10000), 0, 0)\n\tcase nodeNum >= 1000000:\n\t\treturn \"\", fmt.Errorf(\"node num out of boundaries: %d\", nodeNum)\n\t}\n\treturn fmt.Sprintf(\"%s-%s-%s\", group1, group2, group3), nil\n}", "func writePidFile(pidFile string) error {\n\t// Read in the pid file as a slice of bytes.\n\tif piddata, err := ioutil.ReadFile(pidFile); err == nil {\n\t\t// Convert the file contents to an integer.\n\t\tif pid, err := strconv.Atoi(string(piddata)); err == nil {\n\t\t\t// Look for the pid in the process 
list.\n\t\t\tif process, err := os.FindProcess(pid); err == nil {\n\t\t\t\t// Send the process a signal zero kill.\n\t\t\t\tif err := process.Signal(syscall.Signal(0)); err == nil {\n\t\t\t\t\t// We only get an error if the pid isn't running, or it's not ours.\n\t\t\t\t\treturn fmt.Errorf(\"pid already running: %d\", pid)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\t// If we get here, then the pidfile didn't exist,\n\t// or the pid in it doesn't belong to the user running this app.\n\treturn ioutil.WriteFile(pidFile, []byte(fmt.Sprintf(\"%d\", os.Getpid())), 0664)\n}", "func writePidFile(pidFile string) error {\n\t// Read in the pid file as a slice of bytes.\n\tif piddata, err := ioutil.ReadFile(pidFile); err == nil {\n\t\t// Convert the file contents to an integer.\n\t\tif pid, err := strconv.Atoi(string(piddata)); err == nil {\n\t\t\t// Look for the pid in the process list.\n\t\t\tif process, err := os.FindProcess(pid); err == nil {\n\t\t\t\t// Send the process a signal zero kill.\n\t\t\t\tif err := process.Signal(syscall.Signal(0)); err == nil {\n\t\t\t\t\t// We only get an error if the pid isn't running, or it's not ours.\n\t\t\t\t\treturn fmt.Errorf(\"pid already running: %d\", pid)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\t// If we get here, then the pidfile didn't exist,\n\t// or the pid in it doesn't belong to the user running this app.\n\treturn ioutil.WriteFile(pidFile, []byte(fmt.Sprintf(\"%d\", os.Getpid())), 0664)\n}", "func (m *Attachment) GenerateUUID() (string, error) {\n\tout, err := exec.Command(\"uuidgen\").Output()\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\treturn strings.Replace(strings.Trim(string(out), \"\\n\"), \"-\", \"_\", -1), nil\n}", "func (a *accountManager) writeToFile(addrHex string, secretVersion int64, conf config.NewAccount) (config.AccountFile, error) {\n\tnow := time.Now().UTC()\n\tnowISO8601 := now.Format(\"2006-01-02T15-04-05.000000000Z\")\n\tfilename := fmt.Sprintf(\"UTC--%v--%v\", nowISO8601, addrHex)\n\n\tfullpath, err := 
a.client.accountDirectory.Parse(filename)\n\tif err != nil {\n\t\treturn config.AccountFile{}, err\n\t}\n\tfilePath := fullpath.Host + \"/\" + fullpath.Path\n\tlog.Printf(\"[DEBUG] writing to file %v\", filePath)\n\n\tfileData := conf.AccountFile(fullpath.String(), addrHex, secretVersion)\n\n\tlog.Printf(\"[DEBUG] marshalling file contents: %v\", fileData)\n\tcontents, err := json.Marshal(fileData.Contents)\n\tif err != nil {\n\t\treturn config.AccountFile{}, err\n\t}\n\tlog.Printf(\"[DEBUG] marshalled file contents: %v\", contents)\n\n\tlog.Printf(\"[DEBUG] Creating temp file %v/%v\", filepath.Dir(filePath), fmt.Sprintf(\".%v*.tmp\", filepath.Base(fullpath.String())))\n\tf, err := ioutil.TempFile(filepath.Dir(filePath), fmt.Sprintf(\".%v*.tmp\", filepath.Base(fullpath.String())))\n\tif err != nil {\n\t\treturn config.AccountFile{}, err\n\t}\n\tif _, err := f.Write(contents); err != nil {\n\t\tf.Close()\n\t\tos.Remove(f.Name())\n\t\treturn config.AccountFile{}, err\n\t}\n\tf.Close()\n\n\tlog.Println(\"[DEBUG] Renaming temp file\")\n\tif err := os.Rename(f.Name(), filePath); err != nil {\n\t\treturn config.AccountFile{}, err\n\t}\n\treturn fileData, nil\n}", "func SaveHash(hash string) {\n\tfile, err := os.Create(fileName)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tdefer file.Close()\n\n\tfile.WriteString(hash)\n}", "func SetupEncode(uid string, w io.Writer) error {\n\tif !uuidRE.MatchString(uid) {\n\t\treturn fmt.Errorf(\"name must be a UUIDv4 identifier\")\n\t}\n\tw.Write([]byte(uid))\n\n\treturn nil\n}", "func WriteUInt16(buffer []byte, offset int, value uint16) {\n buffer[offset + 0] = byte(value >> 0)\n buffer[offset + 1] = byte(value >> 8)\n}", "func (u uuid) string() string {\n\tbuf := make([]byte, 36)\n\n\thex.Encode(buf[0:8], u[0:4])\n\tbuf[8] = dash\n\thex.Encode(buf[9:13], u[4:6])\n\tbuf[13] = dash\n\thex.Encode(buf[14:18], u[6:8])\n\tbuf[18] = dash\n\thex.Encode(buf[19:23], u[8:10])\n\tbuf[23] = dash\n\thex.Encode(buf[24:], u[10:])\n\n\treturn 
string(buf)\n}", "func (file *LockFile) WritePid() (err error) {\n\tif _, err = file.Seek(0, os.SEEK_SET); err != nil {\n\t\treturn\n\t}\n\tvar fileLen int\n\tif fileLen, err = fmt.Fprint(file, os.Getpid()); err != nil {\n\t\treturn\n\t}\n\tif err = file.Truncate(int64(fileLen)); err != nil {\n\t\treturn\n\t}\n\terr = file.Sync()\n\treturn\n}", "func (this UUID) Hex() string {\n\tx := [16]byte(this)\n\treturn fmt.Sprintf(\"%02x%02x%02x%02x-%02x%02x-%02x%02x-%02x%02x-%02x%02x%02x%02x%02x%02x\",\n\t\tx[0], x[1], x[2], x[3], x[4],\n\t\tx[5], x[6],\n\t\tx[7], x[8],\n\t\tx[9], x[10], x[11], x[12], x[13], x[14], x[15])\n\n}", "func Format(uuid UUID, style Style) string {\n\tif len(uuid) != 16 {\n\t\tpanic(\"uuid: UUID is invalid\")\n\t}\n\n\tbuffer := []byte(uuid)\n\tswitch style {\n\tcase StyleStandard:\n\t\treturn fmt.Sprintf(\"%08x-%04x-%04x-%04x-%012x\", buffer[:4], buffer[4:6], buffer[6:8], buffer[8:10], buffer[10:])\n\tcase StyleWithoutDash:\n\t\treturn fmt.Sprintf(\"%x\", buffer[:])\n\tdefault:\n\t\tpanic(\"uuid: style of UUID is invalid\")\n\t}\n}", "func uuid() []byte {\n\tuuid := make([]byte, 16)\n\t_, err := rand.Read(uuid)\n\tif err != nil {\n\t\tpanic(\"cue/hosted: uuid() failed to read random bytes\")\n\t}\n\n\t// The following bit twiddling is outlined in RFC 4122. 
In short, it\n\t// identifies the UUID as a v4 random UUID.\n\tuuid[6] = (4 << 4) | (0xf & uuid[6])\n\tuuid[8] = (8 << 4) | (0x3f & uuid[8])\n\treturn uuid\n}", "func (dev *HidDevice) Write(b []byte) (int, error) {\n\treturn 0, ErrUnsupportedPlatform\n}", "func writeInt16ToFile(input int16, fp *os.File) {\n\tbuff := new(bytes.Buffer)\n\terr := binary.Write(buff, binary.LittleEndian, input)\n\tif err != nil {\n\t\tfmt.Println(err)\n\t}\n\tintByteArray := buff.Bytes()\n\tfp.Write(intByteArray)\n}", "func createV4UUID() string {\n\tu := make([]byte, 16)\n\trand.Read(u)\n\t// 13th char must be 4 and 17th must be in [89AB]\n\tu[8] = (u[8] | 0x80) & 0xBF\n\tu[6] = (u[6] | 0x40) & 0x4F\n\treturn fmt.Sprintf(\"%X-%X-%X-%X-%X\", u[0:4], u[4:6], u[6:8], u[8:10], u[10:])\n}", "func HostUUIDExistsLocally(dataDir string) bool {\n\t_, err := ReadHostUUID(dataDir)\n\treturn err == nil\n}", "func writeToken(filePath string, token string) {\n\t// Check if file exists\n\tif _, err := os.Stat(filePath); os.IsNotExist(err) {\n\t\t// Doesn't exist; lets create it\n\t\terr = os.MkdirAll(filepath.Dir(filePath), 0700)\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\t}\n\tb := []byte(token)\n\tif err := ioutil.WriteFile(filePath, b, 0600); err != nil {\n\t\treturn\n\t}\n}", "func putUuid(log log.T, byteArray []byte, offset int, input uuid.UUID) (err error) {\n\tif input == nil {\n\t\tlog.Error(\"putUuid failed: input is null.\")\n\t\treturn errors.New(\"putUuid failed: input is null.\")\n\t}\n\n\tbyteArrayLength := len(byteArray)\n\tif offset > byteArrayLength-1 || offset+16-1 > byteArrayLength-1 || offset < 0 {\n\t\tlog.Error(\"putUuid failed: Offset is invalid.\")\n\t\treturn errors.New(\"Offset is outside the byte array.\")\n\t}\n\n\tleastSignificantLong, err := bytesToLong(log, input.Bytes()[8:16])\n\tif err != nil {\n\t\tlog.Error(\"putUuid failed: Failed to get leastSignificant Long value.\")\n\t\treturn errors.New(\"Failed to get leastSignificant Long 
value.\")\n\t}\n\n\tmostSignificantLong, err := bytesToLong(log, input.Bytes()[0:8])\n\tif err != nil {\n\t\tlog.Error(\"putUuid failed: Failed to get mostSignificantLong Long value.\")\n\t\treturn errors.New(\"Failed to get mostSignificantLong Long value.\")\n\t}\n\n\terr = putLong(log, byteArray, offset, leastSignificantLong)\n\tif err != nil {\n\t\tlog.Error(\"putUuid failed: Failed to put leastSignificantLong Long value.\")\n\t\treturn errors.New(\"Failed to put leastSignificantLong Long value.\")\n\t}\n\n\terr = putLong(log, byteArray, offset+8, mostSignificantLong)\n\tif err != nil {\n\t\tlog.Error(\"putUuid failed: Failed to put mostSignificantLong Long value.\")\n\t\treturn errors.New(\"Failed to put mostSignificantLong Long value.\")\n\t}\n\n\treturn nil\n}", "func uuid() string {\n\treturn fmt.Sprintf(\"%s\", guid.NewV4())\n}", "func (c *Config) WriteNodeID() error {\n\tnodeIDFile := \"node.id\"\n\terr := ioutil.WriteFile(nodeIDFile, []byte(c.NodeID), 0666)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"cannot write file [%s]: %v\", nodeIDFile, err)\n\t}\n\treturn nil\n}", "func ToHyphenUUID(uuid string) string {\n\t// 8 - 4 - 4 - 4 - 12\n\treturn fmt.Sprintf(\"%v-%v-%v-%v-%v\", uuid[:8], uuid[8:12], uuid[12:16], uuid[16:20], uuid[20:])\n}", "func saveHostMetadata(metadata Metadata) error {\n\tdataBytes, err := json.Marshal(metadata)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"[Telemetry] marshal data failed with err %+v\", err)\n\t}\n\n\tif err = ioutil.WriteFile(metadataFile, dataBytes, 0644); err != nil {\n\t\ttelemetryLogger.Printf(\"[Telemetry] Writing metadata to file failed: %v\", err)\n\t}\n\n\treturn err\n}", "func GenerateUUID(device string) error {\n\t// for mounting the cloned volume for btrfs, a new UUID has to be generated\n\tcmd := exec.Command(\"btrfstune\", \"-f\", \"-u\", device)\n\tout, err := cmd.CombinedOutput()\n\tif err != nil {\n\t\tklog.Errorf(\"btrfs: uuid generate failed for device %s error: %s\", device, string(out))\n\t\treturn 
err\n\t}\n\tklog.Infof(\"btrfs: generated UUID for the device %s \\n %v\", device, string(out))\n\treturn nil\n}", "func (rng *Tunafish) WriteSeed(filename string) error {\n\tif !rng.Initialised() {\n\t\treturn ErrNotInitialised\n\t}\n\n\tseed, err := rng.Seed()\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn ioutil.WriteFile(filename, seed, 0600)\n}", "func (dm *dataManager) writeUint(address uint, u uint) (err ProcessException) {\n\tdata := make([]byte, 4)\n\tbinary.LittleEndian.PutUint32(data, uint32(u))\n\n\terr = dm.process.WriteBytes(address, data)\n\n\treturn\n}", "func (dev VMVolumeDevice) UUID() string {\n\treturn utils.NewUUID5(blockVolumeNsUUID, dev.HostPath)\n}", "func appendHexUint16(dst []byte, src uint16) []byte {\n\tdst = append(dst, \"0000\"[1+(bits.Len16(src)-1)/4:]...)\n\tdst = strconv.AppendUint(dst, uint64(src), 16)\n\treturn dst\n}", "func (h *Hostman) Write() error {\n\tvar final string\n\n\tfor _, entry := range h.entries {\n\t\tif entry.Disabled {\n\t\t\tfinal += \"#\"\n\t\t}\n\n\t\tfinal += entry.Raw + \"\\n\"\n\t}\n\n\treturn ioutil.WriteFile(h.filename, []byte(final), 0644)\n}", "func saveHostMetadata(metadata Metadata) error {\n\tdataBytes, err := json.Marshal(metadata)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"[Telemetry] marshal data failed with err %+v\", err)\n\t}\n\n\tif err = ioutil.WriteFile(metadataFile, dataBytes, 0644); err != nil {\n\t\tlog.Logf(\"[Telemetry] Writing metadata to file failed: %v\", err)\n\t}\n\n\treturn err\n}", "func TransportFile(uuid string, owlh map[string]string, file string) {\n logs.Info(\"Get file \" + owlh[\"local_pcap_path\"] + \" from \" + owlh[\"name\"] + \" - \" + owlh[\"ip\"])\n TransportFileSSH(uuid, owlh, file)\n}", "func (pm *procMan) WritePID(t *testing.T, pid int) {\n\tpm.Lock()\n\tdefer pm.Unlock()\n\tpsProc, err := ps.FindProcess(pid)\n\tif err != nil {\n\t\tt.Fatalf(\"cannot inspect proc %d: %s\", pid, err)\n\t}\n\tif psProc == nil {\n\t\ttime.Sleep(time.Second)\n\t\tpsProc, err := 
ps.FindProcess(pid)\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"cannot inspect proc %d: %s\", pid, err)\n\t\t}\n\t\tif psProc == nil {\n\t\t\tt.Logf(\"Warning! Possibly orphaned PID: %d\", pid)\n\t\t\treturn\n\t\t}\n\t}\n\n\tpidFile := pm.PIDFile\n\n\tvar f *os.File\n\tif s, err := os.Stat(pidFile); err != nil {\n\t\tif !isNotExist(err) {\n\t\t\tt.Fatalf(\"could not stat %q: %s\", pidFile, err)\n\t\t\treturn\n\t\t}\n\t\tif s != nil && s.IsDir() {\n\t\t\tt.Fatalf(\"cannot write to file %q: it's a directory\", pidFile)\n\t\t}\n\t\tf, err = os.Create(pidFile)\n\t\tdefer closeFiles(f)\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"could not create %q: %s\", pidFile, err)\n\t\t}\n\t}\n\tif f == nil {\n\t\tvar err error\n\t\tf, err = os.OpenFile(pidFile, os.O_APPEND|os.O_WRONLY, os.ModeAppend)\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"could not open %q: %s\", pidFile, err)\n\t\t\treturn\n\t\t}\n\t\tdefer closeFiles(f)\n\t}\n\tif psProc == nil {\n\t\tt.Logf(\"Warning! Unable to write PID %d to pidfile: psProc became nil all of a sudden\", pid)\n\t\treturn\n\n\t}\n\tif _, err := fmt.Fprintf(f, \"%d\\t%s\\n\", pid, psProc.Executable()); err != nil {\n\t\tt.Fatalf(\"could not write PID %d (exe %s) to file %q: %s\",\n\t\t\tpid, psProc.Executable(), pidFile, err)\n\t}\n}", "func writeFile(iptvline string){\n\n\t//check if file exists\n\tif _, err := os.Stat(path); os.IsNotExist(err) {\n\t\t// create file if not exists\n\t\tif os.IsNotExist(err) {\n\t\t\tvar file, err = os.Create(path)\n\t\t\tif isError(err) { return }\n\t\t\tdefer file.Close()\n\t\t}\n\t}\n\n\n\tfileHandle, _ := os.OpenFile(path, os.O_APPEND, 0666)\n\twriter := bufio.NewWriter(fileHandle)\n\tdefer fileHandle.Close()\n\n\tfmt.Fprintln(writer, iptvline)\n\twriter.Flush()\n}", "func UUID() string {\n\treturn strings.Replace(UUID4(), \"-\", \"\", -1)\n}", "func updateHostString(hosts string) error {\n\tif hostFile, err := getHostFile(); err == nil {\n\t\treturn ioutil.WriteFile(hostFile, []byte(hosts), 0777)\n\t} else 
{\n\t\treturn err\n\t}\n}", "func BytesToUUIDFormat(bytes []byte) string {\n\treturn codec.ToByteArray(bytes[0:4]).Hex() + \"-\" +\n\t\tcodec.ToByteArray(bytes[4:6]).Hex() + \"-\" +\n\t\tcodec.ToByteArray(bytes[6:8]).Hex() + \"-\" +\n\t\tcodec.ToByteArray(bytes[8:10]).Hex() + \"-\" +\n\t\tcodec.ToByteArray(bytes[10:]).Hex()\n}", "func WriteUInt32(buffer []byte, offset int, value uint32) {\n buffer[offset + 0] = byte(value >> 0)\n buffer[offset + 1] = byte(value >> 8)\n buffer[offset + 2] = byte(value >> 16)\n buffer[offset + 3] = byte(value >> 24)\n}", "func createID(rec *OutputRecord) string {\n\tstr := rec.Type + rec.Path + rec.Datetime + rec.IPAddress + rec.UserID\n\tsum := sha1.Sum([]byte(str))\n\treturn hex.EncodeToString(sum[:])\n}", "func writeHeapToFile(h *minHeap, filename string) {\n\tfile, err := os.OpenFile(filename, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, os.ModePerm)\n\tdefer file.Close()\n\tif err != nil {\n\t\tlog.Fatal(err.Error())\n\t}\n\turlCntMap := make(map[string]int)\n\tfor i := 0; i < h.cap; i++ {\n\t\tpair, err := h.getNode(i)\n\t\tif err != nil {\n\t\t\tcontinue\n\t\t}\n\t\turlCntMap[pair.url] = pair.cnt\n\t}\n\tgenResult(filename, urlCntMap, h.cap)\n}", "func TestWriteTagHeader(t *testing.T) {\n\tt.Parallel()\n\n\tbuf := new(bytes.Buffer)\n\tbw := bufio.NewWriter(buf)\n\tdst := make([]byte, 4)\n\n\tif err := writeTagHeader(bw, dst, 15351, 4); err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif err := bw.Flush(); err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif !bytes.Equal(thb, buf.Bytes()) {\n\t\tt.Fatalf(\"Expected %v, got %v\", thb, buf.Bytes())\n\t}\n}", "func write_log(nombre string, partes int, nodos []string) {\n f, err := os.OpenFile(\"log.txt\",os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644)\n if err != nil {\n log.Println(err)\n }\n defer f.Close()\n \n var buffer string = nombre+\" \"+strconv.Itoa(partes)+\"\\n\"\n \n if _, err := f.WriteString(buffer); err != nil {\n log.Println(err)\n }\n \n for i := 0; i < partes; i++ {\n index, err := 
strconv.Atoi(nodos[i])\n if err != nil {\n log.Fatalf(\"fail: %s\", err)\n }\n \n buffer = nombre+\" Parte_\"+strconv.Itoa(i+1)+\" \"+addresses[index]+\"\\n\"\n \n if _, err := f.WriteString(buffer); err != nil {\n log.Println(err)\n }\n } \n\n}", "func (d *DivMaster) WriteDivMaster(n string) {\n\tf, err := os.Create(n)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\t//w := bufio.NewReader(f)\n\tfor _, v := range d.divName {\n\t\t_, err := fmt.Fprintf(f, \"%v\\n\", v)\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\t}\n}", "func removeUuidFromFilepath(path string) string {\n\t// UUID has 4 hyphens, so we split into 6 parts. \n\treturn strings.SplitN(filepath.Base(path), \"-\", 6)[5]\n}", "func TestUUID(t *testing.T) {\n\ttext := utl.GeneredUUID()\n\tt.Logf(\"text:[%s]\", text)\n}", "func (m MessageDescriptorMap) WriteFile(filename string) error {\n\tbytes, err := json.MarshalIndent(m, \"\", \" \")\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn os.WriteFile(filename, append(bytes, '\\n'), 0o644) //nolint:gas\n}", "func Encode(w io.Writer, h Hostsfile) error {\n\tfor _, record := range h.records {\n\t\tvar toWrite string\n\t\tif record.isBlank {\n\t\t\ttoWrite = \"\"\n\t\t} else if len(record.comment) > 0 {\n\t\t\ttoWrite = record.comment\n\t\t} else {\n\t\t\tout := make([]string, len(record.Hostnames))\n\t\t\ti := 0\n\t\t\tfor name := range record.Hostnames {\n\t\t\t\tout[i] = name\n\t\t\t\ti++\n\t\t\t}\n\t\t\tsort.Strings(out)\n\t\t\tout = append([]string{record.IpAddress.String()}, out...)\n\t\t\ttoWrite = strings.Join(out, \" \")\n\t\t}\n\t\ttoWrite += eol\n\t\t_, err := w.Write([]byte(toWrite))\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}", "func genUTF16(path, s string, order binary.ByteOrder) {\n\tbs := utf16.Encode([]rune(s))\n\tbuf := &bytes.Buffer{}\n\tif err := binary.Write(buf, order, bs); err != nil {\n\t\tpanic(err)\n\t}\n\tmustWriteFile(path, buf.Bytes())\n}", "func (kv *DisKV) filePut(shard int, key string, content string) 
error {\n\tfullname := kv.shardDir(shard) + \"/key-\" + kv.encodeKey(key)\n\ttempname := kv.shardDir(shard) + \"/temp-\" + kv.encodeKey(key)\n\tif err := ioutil.WriteFile(tempname, []byte(content), 0666); err != nil {\n\t\treturn err\n\t}\n\tif err := os.Rename(tempname, fullname); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}", "func GetHostUUID(nbmaster string, httpClient *http.Client, jwt string, host string) string {\r\n fmt.Printf(\"\\nGet the UUID of host %s...\\n\", host)\r\n\r\n uri := \"https://\" + nbmaster + \":\" + port + \"/netbackup/config/hosts\";\r\n\r\n request, _ := http.NewRequest(http.MethodGet, uri, nil)\r\n query := request.URL.Query()\r\n query.Add(\"filter\", \"hostName eq '\" + host + \"'\")\r\n request.URL.RawQuery = query.Encode()\r\n\r\n request.Header.Add(\"Authorization\", jwt);\r\n request.Header.Add(\"Accept\", contentTypeV3);\r\n\r\n response, err := httpClient.Do(request)\r\n\r\n hostUuid := \"\"\r\n if err != nil {\r\n fmt.Printf(\"The HTTP request failed with error: %s\\n\", err)\r\n panic(\"Unable to get the host UUID\")\r\n } else {\r\n if response.StatusCode == 200 {\r\n data, _ := ioutil.ReadAll(response.Body)\r\n var obj interface{}\r\n json.Unmarshal(data, &obj)\r\n response := obj.(map[string]interface{})\r\n hosts := response[\"hosts\"].([]interface{})\r\n hostUuid = ((hosts[0].(map[string]interface{}))[\"uuid\"]).(string)\r\n fmt.Printf(\"Host UUID: %s\\n\", hostUuid);\r\n } else {\r\n printErrorResponse(response)\r\n }\r\n }\r\n\r\n return hostUuid\r\n}", "func (n *PaxosNode) writeAtSlot(slotNumber int, buf []byte) error {\n\twriteLog, _ := os.OpenFile(n.logFileName, os.O_RDWR, 0666)\n\toffset := int64(slotNumber * MAX_SLOT_BYTES)\n\twriteLog.Seek(offset, 0) // from origin of file go to current offset\n\tnbytes, err := writeLog.WriteString(string(buf) + \"\\n\")\n\tif err != nil {\n\t\tLOGE.Printf(\"Error in writing to log\")\n\t\treturn err\n\t}\n\tLOGV.Printf(\"wrote %d bytes\\n\", 
nbytes)\n\twriteLog.Sync()\n\twriteLog.Close()\n\n\tn.MessageAvailable <- true\n\n\treturn nil\n}", "func (wmid *WzMachineIDUtil) setupMachineId() {\n\tsystemdMidFPath := \"/etc/machine-id\"\n\tif wmid.filePath == \"\" {\n\t\twmid.filePath = systemdMidFPath\n\t}\n\tmid, err := ioutil.ReadFile(wmid.filePath)\n\tif err != nil {\n\t\twmid.GetLogger().Debugf(\"File %s was not found\", wmid.filePath)\n\t\tmid, err = ioutil.ReadFile(systemdMidFPath)\n\t\tif err != nil {\n\t\t\twmid.GetLogger().Debugf(\"This system has no /etc/machine-id file, creating a replacement.\")\n\n\t\t\thasher := md5.New()\n\t\t\t_, err := io.WriteString(hasher, wzlib.MakeJid())\n\t\t\tif err != nil {\n\t\t\t\tpanic(err)\n\t\t\t}\n\t\t\tmid = []byte(fmt.Sprintf(\"%x\", hasher.Sum(nil)))\n\t\t}\n\t\tif wmid.filePath != systemdMidFPath {\n\t\t\tif err := ioutil.WriteFile(wmid.filePath, mid, 0644); err != nil {\n\t\t\t\twmid.GetLogger().Errorf(\"Unable to duplicate machine id: %s\", err.Error())\n\t\t\t}\n\t\t}\n\t}\n\twmid.machineid = strings.TrimSpace(string(mid))\n}", "func writeDumpIndex(filepath string, dumpInfo *blockDumpInfo) error {\n\tdumpInfoData, err := json.Marshal(dumpInfo)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn ioutil.WriteFile(filepath, dumpInfoData, 0666)\n}", "func (m *Metadata) SetUUID(id string) {\n\tif id == \"\" {\n\t\tid = NewUUID() // generate random UUID if not defined\n\t}\n\tm.Identifier = []Element{{Value: id, ID: \"uuid\"}}\n}", "func (devices *DeviceSet) writeMetaFile(jsonData []byte, filePath string) error {\n\ttmpFile, err := os.CreateTemp(devices.metadataDir(), \".tmp\")\n\tif err != nil {\n\t\treturn fmt.Errorf(\"devmapper: Error creating metadata file: %s\", err)\n\t}\n\n\tn, err := tmpFile.Write(jsonData)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"devmapper: Error writing metadata to %s: %s\", tmpFile.Name(), err)\n\t}\n\tif n < len(jsonData) {\n\t\treturn io.ErrShortWrite\n\t}\n\tif err := tmpFile.Sync(); err != nil {\n\t\treturn 
fmt.Errorf(\"devmapper: Error syncing metadata file %s: %s\", tmpFile.Name(), err)\n\t}\n\tif err := tmpFile.Close(); err != nil {\n\t\treturn fmt.Errorf(\"devmapper: Error closing metadata file %s: %s\", tmpFile.Name(), err)\n\t}\n\tif err := os.Rename(tmpFile.Name(), filePath); err != nil {\n\t\treturn fmt.Errorf(\"devmapper: Error committing metadata file %s: %s\", tmpFile.Name(), err)\n\t}\n\n\treturn nil\n}", "func (e *EPub) UUID() string {\n\treturn strings.TrimPrefix(\"urn:uuid:\", e.uuid)\n}", "func writeEtcHostnameForContainer(globalOptions types.GlobalCommandOptions, hostname string, containerID string) ([]oci.SpecOpts, error) {\n\tif containerID == \"\" {\n\t\treturn nil, fmt.Errorf(\"container ID is required for setting up hostname file\")\n\t}\n\n\tdataStore, err := clientutil.DataStore(globalOptions.DataRoot, globalOptions.Address)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tstateDir, err := ContainerStateDirPath(globalOptions.Namespace, dataStore, containerID)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\thostnamePath := filepath.Join(stateDir, \"hostname\")\n\tif err := os.WriteFile(hostnamePath, []byte(hostname+\"\\n\"), 0644); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn []oci.SpecOpts{oci.WithHostname(hostname), withCustomEtcHostname(hostnamePath)}, nil\n}", "func (e *EndToEndTest) WriteFileSsh(path string, content string) error {\n\treturn exec.Command(\"docker\", \"exec\", e.GetContainer(\"ssh\"), \"sh\", \"-c\",\n\t\tfmt.Sprintf(\"echo \\\"%s\\\" > %s\", content, path)).Run()\n}", "func writeKeyToFile(keyBytes []byte, saveFileTo string) error {\n\terr := ioutil.WriteFile(saveFileTo, keyBytes, 0666)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}" ]
[ "0.64471084", "0.6248215", "0.5742111", "0.55889857", "0.54731405", "0.54549134", "0.53703684", "0.5361488", "0.5359973", "0.5352255", "0.5332432", "0.5296048", "0.52657807", "0.5221564", "0.5199073", "0.518144", "0.5132775", "0.5102348", "0.50953376", "0.5062497", "0.50417596", "0.5040314", "0.49691027", "0.49506995", "0.49506453", "0.49401084", "0.492673", "0.49043378", "0.48911652", "0.4877534", "0.48622662", "0.48574185", "0.4842028", "0.47974378", "0.47790903", "0.47774404", "0.47758725", "0.47717625", "0.47479907", "0.47433522", "0.47383502", "0.47366178", "0.4733186", "0.4732963", "0.4732963", "0.47329295", "0.47280547", "0.47145176", "0.4709323", "0.46981215", "0.46954164", "0.46918654", "0.46745187", "0.46608406", "0.46596268", "0.46595505", "0.46509847", "0.46362096", "0.46295586", "0.4622732", "0.46210712", "0.46190825", "0.46104044", "0.46097323", "0.46088728", "0.46019995", "0.4600836", "0.45983732", "0.45982197", "0.45971125", "0.4591601", "0.4587983", "0.4576737", "0.4565739", "0.45629475", "0.4551056", "0.45503482", "0.45452726", "0.45409393", "0.45243233", "0.45155007", "0.45105013", "0.45099956", "0.4509101", "0.45063233", "0.45045635", "0.44934952", "0.44932306", "0.4487627", "0.4485864", "0.4485051", "0.4480689", "0.44781882", "0.44692045", "0.44598013", "0.4455939", "0.4454889", "0.44541255", "0.444965", "0.4449336" ]
0.8289592
0
ReadOrMakeHostUUID looks for a hostid file in the data dir. If present, returns the UUID from it, otherwise generates one
ReadOrMakeHostUUID ищет файл hostid в каталоге данных. Если он найден, возвращает UUID из него, иначе генерирует один
func ReadOrMakeHostUUID(dataDir string) (string, error) { id, err := ReadHostUUID(dataDir) if err == nil { return id, nil } if !trace.IsNotFound(err) { return "", trace.Wrap(err) } // Checking error instead of the usual uuid.New() in case uuid generation // fails due to not enough randomness. It's been known to happen happen when // Teleport starts very early in the node initialization cycle and /dev/urandom // isn't ready yet. rawID, err := uuid.NewRandom() if err != nil { return "", trace.BadParameter("" + "Teleport failed to generate host UUID. " + "This may happen if randomness source is not fully initialized when the node is starting up. " + "Please try restarting Teleport again.") } id = rawID.String() if err = WriteHostUUID(dataDir, id); err != nil { return "", trace.Wrap(err) } return id, nil }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func ReadHostUUID(dataDir string) (string, error) {\n\tout, err := ReadPath(filepath.Join(dataDir, HostUUIDFile))\n\tif err != nil {\n\t\tif errors.Is(err, fs.ErrPermission) {\n\t\t\t//do not convert to system error as this loses the ability to compare that it is a permission error\n\t\t\treturn \"\", err\n\t\t}\n\t\treturn \"\", trace.ConvertSystemError(err)\n\t}\n\tid := strings.TrimSpace(string(out))\n\tif id == \"\" {\n\t\treturn \"\", trace.NotFound(\"host uuid is empty\")\n\t}\n\treturn id, nil\n}", "func UUIDFile(fpath string) (string, error) {\n\n\t_, err := os.Stat(fpath)\n\tif err != nil && !os.IsExist(err) {\n\t\tkey := uuid.New().String()\n\t\tif err := ioutil.WriteFile(fpath, []byte(key), 0777); err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t\treturn key, nil\n\t}\n\n\tfp, err := os.Open(fpath)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tdefer fp.Close()\n\tdata, err := ioutil.ReadAll(fp)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tkey := string(data)\n\tif _, err := uuid.Parse(key); err != nil {\n\t\treturn \"\", err\n\t}\n\treturn key, nil\n}", "func HostUUIDExistsLocally(dataDir string) bool {\n\t_, err := ReadHostUUID(dataDir)\n\treturn err == nil\n}", "func WriteHostUUID(dataDir string, id string) error {\n\terr := os.WriteFile(filepath.Join(dataDir, HostUUIDFile), []byte(id), os.ModeExclusive|0400)\n\tif err != nil {\n\t\tif errors.Is(err, fs.ErrPermission) {\n\t\t\t//do not convert to system error as this loses the ability to compare that it is a permission error\n\t\t\treturn err\n\t\t}\n\t\treturn trace.ConvertSystemError(err)\n\t}\n\treturn nil\n}", "func readMachineID() []byte {\n\tid := make([]byte, 3)\n\tif hostname, err := os.Hostname(); err == nil {\n\t\thw := md5.New()\n\t\thw.Write([]byte(hostname))\n\t\tcopy(id, hw.Sum(nil))\n\t} else {\n\t\t// Fallback to rand number if machine id can't be gathered\n\t\tif _, randErr := rand.Reader.Read(id); randErr != nil {\n\t\t\tpanic(fmt.Errorf(\"Cannot get hostname nor generate 
a random number: %v; %v\", err, randErr))\n\t\t}\n\t}\n\treturn id\n}", "func GetUUID() string {\n\tuuid, _ := ioutil.ReadFile(AppPath.UUIDFile)\n\treturn string(bytes.TrimSpace(uuid))\n}", "func (this *Actions) generateUid(config *Config, hostname string) string{\n uuidNew := uuid.NewV5(uuid.NewV1(), hostname).String()\n uuid_path := config.GetValue(\"basic\", \"uuid_path\")\n log.Info(\"The new uuid is : \" + uuidNew)\n file, error := os.OpenFile(uuid_path, os.O_RDWR|os.O_CREATE, 0622)\n if error != nil {\n log.Error(\"Open uuid file in \"+ uuid_path +\" failed.\" + error.Error())\n }\n _,err := file.WriteString(uuidNew)\n if err != nil {\n log.Error(\"Save uuid file in \"+ uuid_path +\" failed.\" + err.Error())\n }\n file.Close()\n return uuidNew\n}", "func getHostFromUUID(id string) (*model.Host, error) {\n\thosts, err := driver.GetHosts()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tfor _, host := range *hosts {\n\t\tif host.UUID == id {\n\t\t\t// Host Matches\n\t\t\tlog.Tracef(\"current host matches with id=%s\", id)\n\t\t\treturn host, nil\n\t\t}\n\t}\n\treturn nil, fmt.Errorf(\"no host found with id %s\", id)\n}", "func GetHostID() string {\n\tif cachedHostID != \"\" {\n\t\treturn cachedHostID\n\t}\n\n\tecsMetadataURI := os.Getenv(\"ECS_CONTAINER_METADATA_URI_V4\")\n\tif ecsMetadataURI != \"\" {\n\t\thostID, err := getHostIDFromECS(ecsMetadataURI + \"/task\")\n\t\tif err == nil {\n\t\t\tcachedHostID = hostID\n\t\t\treturn cachedHostID\n\t\t}\n\n\t\tfmt.Fprintf(os.Stderr, \"Failed to get task ARN from ECS metadata v4 endpoint: %v\\n\", err)\n\t}\n\n\tecsMetadataURI = os.Getenv(\"ECS_CONTAINER_METADATA_URI\")\n\tif ecsMetadataURI != \"\" {\n\t\thostID, err := getHostIDFromECS(ecsMetadataURI + \"/task\")\n\t\tif err == nil {\n\t\t\tcachedHostID = hostID\n\t\t\treturn cachedHostID\n\t\t}\n\n\t\tfmt.Fprintf(os.Stderr, \"Failed to get task ARN from ECS metadata v3 endpoint: %v\\n\", err)\n\t}\n\n\thostID, errECS := 
getHostIDFromECS(\"http://169.254.170.2/v2/metadata\")\n\tif errECS == nil {\n\t\tcachedHostID = hostID\n\t\treturn cachedHostID\n\t}\n\n\thostID, errEC2 := getHostIDFromEC2()\n\tif errEC2 == nil {\n\t\tcachedHostID = hostID\n\t\treturn cachedHostID\n\t}\n\n\thostID, errIF := getHostIDFromInterfaces()\n\tif errIF == nil {\n\t\tcachedHostID = hostID\n\t\treturn cachedHostID\n\t}\n\n\thostID, errRand := getRandomHostID()\n\tif errRand == nil {\n\t\tcachedHostID = hostID\n\t\treturn cachedHostID\n\t}\n\n\tfmt.Fprintf(os.Stderr, \"Failed to get task ARN from ECS metadata v2 endpoint: %v\\n\", errECS)\n\tfmt.Fprintf(os.Stderr, \"Failed to get instance ID from EC2 metadata endpoint: %v\\n\", errEC2)\n\tfmt.Fprintf(os.Stderr, \"Failed to get IP address from network interface: %v\\n\", errIF)\n\tfmt.Fprintf(os.Stderr, \"Failed to get random host ID: %v\\n\", errRand)\n\tpanic(\"Unable to obtain a valid host ID\")\n}", "func readMachineID() []byte {\n\tid := make([]byte, 3)\n\thid, err := readPlatformMachineID()\n\tif err != nil || len(hid) == 0 {\n\t\thid, err = os.Hostname()\n\t}\n\tif err == nil && len(hid) != 0 {\n\t\thw := md5.New()\n\t\thw.Write([]byte(hid))\n\t\tcopy(id, hw.Sum(nil))\n\t} else {\n\t\t// Fallback to rand number if machine id can't be gathered\n\t\tif _, randErr := rand.Reader.Read(id); randErr != nil {\n\t\t\tpanic(fmt.Errorf(\"xid: cannot get hostname nor generate a random number: %v; %v\", err, randErr))\n\t\t}\n\t}\n\treturn id\n}", "func readMachineId() []byte {\n\tvar sum [3]byte\n\tid := sum[:]\n\thostname, err1 := os.Hostname()\n\tif err1 != nil {\n\t\tn := uint32(time.Now().UnixNano())\n\t\tsum[0] = byte(n >> 0)\n\t\tsum[1] = byte(n >> 8)\n\t\tsum[2] = byte(n >> 16)\n\t\treturn id\n\t}\n\thw := md5.New()\n\thw.Write([]byte(hostname))\n\tcopy(id, hw.Sum(nil))\n\treturn id\n}", "func (c *Config) getRandomId() (string, error) {\n\tb, err := ioutil.ReadFile(c.ProcBootId)\n\tif err != nil {\n\t\tglog.Errorf(\"fail to open %s: %q\", c.ProcBootId, 
err)\n\t\treturn \"\", err\n\t}\n\trandomId := string(b)\n\trandomId = strings.Trim(randomId, \"\\n\")\n\tglog.V(2).Infof(\"RandomId: %q\", randomId)\n\treturn randomId, nil\n\n}", "func HardwareUUID() (string, error) {\n\t/*\n\t\tSample output of 'wmic path Win32_ComputerSystemProduct get uuid'\n\n\t\tUUID\n\t\t4219B2F5-C25F-6AF2-573C-35B0DF557236\n\t*/\n\tresult, err := readAndParseFromCommandLine(hardwareUUIDCmd)\n\tif err != nil {\n\t\treturn \"-1\", err\n\t}\n\thardwareUUID := \"\"\n\tif len(result) > 1 {\n\t\t// remove all spaces from the second line as that line consists hardware uuid\n\t\tre := regexp.MustCompile(\"\\\\s|\\\\r\")\n\t\thardwareUUID = re.ReplaceAllString(result[1], \"\")\n\t}\n\treturn hardwareUUID, nil\n}", "func getHostId() (uint64, error) {\n\ta := getLocalIP()\n\tip := (uint64(a[0]) << 24) + (uint64(a[1]) << 16) + (uint64(a[2]) << 8) + uint64(a[3])\n\treturn ip % MaxHostId, nil\n}", "func (o *NetworkLicenseFile) GetHostId() string {\n\tif o == nil || o.HostId == nil {\n\t\tvar ret string\n\t\treturn ret\n\t}\n\treturn *o.HostId\n}", "func GetHostFile() (*hostess.Hostfile, []error) {\n\n\t// prep for refactor\n\t// capture duplicate localhost here\n\t// TODO need a better solution, this is a hack\n\thf, errs := hostess.LoadHostfile()\n\n\tfor _, err := range errs {\n\n\t\t// auto-fixing hostfile problems.\n\t\tif err.Error() == \"Duplicate hostname entry for localhost -> ::1\" {\n\t\t\t_, err = BackupHostFile(hf)\n\t\t\tif err != nil {\n\t\t\t\treturn hf, []error{errors.New(\"Could not back up hostfile.\")}\n\t\t\t}\n\n\t\t\t// fix the duplicate\n\t\t\tinput, err := ioutil.ReadFile(hf.Path)\n\t\t\tif err != nil {\n\t\t\t\treturn hf, []error{err}\n\t\t\t}\n\n\t\t\tlines := strings.Split(string(input), \"\\n\")\n\t\t\tfor i, line := range lines {\n\t\t\t\t// if the line looks something like this then it's\n\t\t\t\t// probably the fault of hostess on a previous run and\n\t\t\t\t// safe to fix.\n\t\t\t\tif strings.Contains(line, \"::1 
localhost localhost\") {\n\t\t\t\t\tlines[i] = \"::1 localhost\"\n\t\t\t\t}\n\t\t\t}\n\n\t\t\toutput := strings.Join(lines, \"\\n\")\n\t\t\terr = ioutil.WriteFile(hf.Path, []byte(output), 0644)\n\t\t\tif err != nil {\n\t\t\t\treturn hf, []error{err}\n\t\t\t}\n\n\t\t\treturn hostess.LoadHostfile()\n\t\t}\n\n\t}\n\n\treturn hf, errs\n}", "func UDID() string {\n\tf, err := os.Open(\"/dev/urandom\")\n\tif err != nil {\n\t\tpanic(fmt.Sprintf(\"failed to get /dev/urandom! %s\", err))\n\t}\n\tb := make([]byte, 16)\n\t_, err = f.Read(b)\n\tif err != nil {\n\t\tpanic(fmt.Sprintf(\"failed to read 16 bytes from /dev/urandom! %s\", err))\n\t}\n\tf.Close()\n\treturn fmt.Sprintf(\"%x-%x-%x-%x-%x\", b[0:4], b[4:6], b[6:8], b[8:10], b[10:])\n}", "func (wmid *WzMachineIDUtil) setupMachineId() {\n\tsystemdMidFPath := \"/etc/machine-id\"\n\tif wmid.filePath == \"\" {\n\t\twmid.filePath = systemdMidFPath\n\t}\n\tmid, err := ioutil.ReadFile(wmid.filePath)\n\tif err != nil {\n\t\twmid.GetLogger().Debugf(\"File %s was not found\", wmid.filePath)\n\t\tmid, err = ioutil.ReadFile(systemdMidFPath)\n\t\tif err != nil {\n\t\t\twmid.GetLogger().Debugf(\"This system has no /etc/machine-id file, creating a replacement.\")\n\n\t\t\thasher := md5.New()\n\t\t\t_, err := io.WriteString(hasher, wzlib.MakeJid())\n\t\t\tif err != nil {\n\t\t\t\tpanic(err)\n\t\t\t}\n\t\t\tmid = []byte(fmt.Sprintf(\"%x\", hasher.Sum(nil)))\n\t\t}\n\t\tif wmid.filePath != systemdMidFPath {\n\t\t\tif err := ioutil.WriteFile(wmid.filePath, mid, 0644); err != nil {\n\t\t\t\twmid.GetLogger().Errorf(\"Unable to duplicate machine id: %s\", err.Error())\n\t\t\t}\n\t\t}\n\t}\n\twmid.machineid = strings.TrimSpace(string(mid))\n}", "func GetClientID() (string, error) {\n\tfn := \"clientid\" // File Name\n\tif _, err := os.Stat(fn); os.IsNotExist(err) {\n\t\t// File does not exists, create a new uuid\n\t\tuuid := uuid.NewV4()\n\t\tuuidStr := uuid.String()\n\t\tlog.Println(\"Created new Client ID.\", uuidStr)\n\t\terr = 
ioutil.WriteFile(fn, []byte(uuidStr), 0666)\n\t\tif err != nil {\n\t\t\treturn uuidStr, err\n\t\t}\n\t\treturn uuidStr, nil\n\t}\n\t// Read the uuid from the file\n\tdata, err := ioutil.ReadFile(fn)\n\tif err != nil {\n\t\tlog.Println(\"Failed to read the Client ID file. Attempting to recreate it.\", err)\n\t\tuuid := uuid.NewV4()\n\t\tuuidStr := uuid.String()\n\t\tlog.Println(\"Created new Client ID.\", uuidStr)\n\t\terr = ioutil.WriteFile(fn, []byte(uuidStr), 0666)\n\t\tif err != nil {\n\t\t\treturn uuidStr, err\n\t\t}\n\t\treturn uuidStr, nil\n\t}\n\treturn string(data), nil\n}", "func defaultHostId(p peer.ID, prefix string) string {\n\tif os.Getenv(\"HOST_ID\") == \"\" {\n\t\treturn fmt.Sprintf(\"%s-%s\", prefix, shortID(p))\n\t}\n\treturn fmt.Sprintf(\"%s-%s-%s\", prefix, os.Getenv(\"HOST_ID\"), shortID(p))\n}", "func (a *Agent) makeNodeID() (string, error) {\n\t// If they've disabled host-based IDs then just make a random one.\n\tif a.config.DisableHostNodeID {\n\t\treturn a.makeRandomID()\n\t}\n\n\t// Try to get a stable ID associated with the host itself.\n\tinfo, err := host.Info()\n\tif err != nil {\n\t\ta.logger.Printf(\"[DEBUG] agent: Couldn't get a unique ID from the host: %v\", err)\n\t\treturn a.makeRandomID()\n\t}\n\n\t// Make sure the host ID parses as a UUID, since we don't have complete\n\t// control over this process.\n\tid := strings.ToLower(info.HostID)\n\tif _, err := uuid.ParseUUID(id); err != nil {\n\t\ta.logger.Printf(\"[DEBUG] agent: Unique ID %q from host isn't formatted as a UUID: %v\",\n\t\t\tid, err)\n\t\treturn a.makeRandomID()\n\t}\n\n\t// Hash the input to make it well distributed. 
The reported Host UUID may be\n\t// similar across nodes if they are on a cloud provider or on motherboards\n\t// created from the same batch.\n\tbuf := sha512.Sum512([]byte(id))\n\tid = fmt.Sprintf(\"%08x-%04x-%04x-%04x-%12x\",\n\t\tbuf[0:4],\n\t\tbuf[4:6],\n\t\tbuf[6:8],\n\t\tbuf[8:10],\n\t\tbuf[10:16])\n\n\ta.logger.Printf(\"[DEBUG] agent: Using unique ID %q from host as node ID\", id)\n\treturn id, nil\n}", "func Read(args ...string) (*UUID, error) {\n\tfpath := sfFilePath(args)\n\tf, err := os.Open(fpath)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer f.Close()\n\tdata := make([]byte, UUIDHexLen+8)\n\tn, err := f.Read(data)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif n < UUIDHexLen {\n\t\treturn nil, fmt.Errorf(\"File '%s' is too small\", fpath)\n\t}\n\tdata = data[:n]\n\tuuid, err := Decode(string(data))\n\tif err == nil {\n\t\tnc := &cache{uuid: *uuid, filePath: fpath, validationTime: time.Now().Add(ValidationTimePeriod)}\n\t\tatomic.StorePointer(&current, unsafe.Pointer(nc))\n\t}\n\treturn uuid, err\n}", "func (b *Broker) readIDFromFile(home, filepath string) (id string, err error) {\n\t_filepath := fmt.Sprintf(\"%v%v%v\", home, string(os.PathSeparator), filepath)\n\t_bytes, err := ioutil.ReadFile(_filepath)\n\tif err != nil {\n\t\treturn\n\t}\n\tid = string(_bytes)\n\treturn\n}", "func GetHostUUID(nbmaster string, httpClient *http.Client, jwt string, host string) string {\r\n fmt.Printf(\"\\nGet the UUID of host %s...\\n\", host)\r\n\r\n uri := \"https://\" + nbmaster + \":\" + port + \"/netbackup/config/hosts\";\r\n\r\n request, _ := http.NewRequest(http.MethodGet, uri, nil)\r\n query := request.URL.Query()\r\n query.Add(\"filter\", \"hostName eq '\" + host + \"'\")\r\n request.URL.RawQuery = query.Encode()\r\n\r\n request.Header.Add(\"Authorization\", jwt);\r\n request.Header.Add(\"Accept\", contentTypeV3);\r\n\r\n response, err := httpClient.Do(request)\r\n\r\n hostUuid := \"\"\r\n if err != nil {\r\n fmt.Printf(\"The HTTP request failed 
with error: %s\\n\", err)\r\n panic(\"Unable to get the host UUID\")\r\n } else {\r\n if response.StatusCode == 200 {\r\n data, _ := ioutil.ReadAll(response.Body)\r\n var obj interface{}\r\n json.Unmarshal(data, &obj)\r\n response := obj.(map[string]interface{})\r\n hosts := response[\"hosts\"].([]interface{})\r\n hostUuid = ((hosts[0].(map[string]interface{}))[\"uuid\"]).(string)\r\n fmt.Printf(\"Host UUID: %s\\n\", hostUuid);\r\n } else {\r\n printErrorResponse(response)\r\n }\r\n }\r\n\r\n return hostUuid\r\n}", "func getHostFile() (string, error) {\n\tpaltform := runtime.GOOS\n\tif hostFile, ok := pathMap[paltform]; ok {\n\t\treturn hostFile, nil\n\t} else {\n\t\treturn \"\", errors.New(\"unsupported PLATFORM!\")\n\t}\n}", "func GetVendorIDByCPUInfo(path string) (string, error) {\n\tvendorID := \"unknown\"\n\tf, err := os.Open(path)\n\tif err != nil {\n\t\treturn vendorID, err\n\t}\n\tdefer f.Close()\n\n\ts := bufio.NewScanner(f)\n\tfor s.Scan() {\n\t\tif err := s.Err(); err != nil {\n\t\t\treturn vendorID, err\n\t\t}\n\n\t\tline := s.Text()\n\n\t\t// get \"vendor_id\" from first line\n\t\tif strings.Contains(line, \"vendor_id\") {\n\t\t\tattrs := strings.Split(line, \":\")\n\t\t\tif len(attrs) >= 2 {\n\t\t\t\tvendorID = strings.TrimSpace(attrs[1])\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t}\n\treturn vendorID, nil\n}", "func Make(args ...string) (*UUID, error) {\n\tfpath := sfFilePath(args)\n\tu, err := Get(fpath)\n\tif err == nil {\n\t\treturn u, nil\n\t}\n\tperr, ok := err.(*os.PathError)\n\tif ok && perr != nil && perr.Op == \"open\" {\n\t\tif err = WriteNew(fpath); err == nil {\n\t\t\treturn Get(fpath)\n\t\t}\n\t}\n\treturn nil, err\n}", "func loadHostString() (string, error) {\n\tif hostFile, err := getHostFile(); err == nil {\n\t\tbytes, err := ioutil.ReadFile(hostFile)\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t\treturn string(bytes), nil\n\t} else {\n\t\treturn \"\", err\n\t}\n\n}", "func ReadUUID(buffer []byte, offset int) UUID {\n bytes := 
ReadBytes(buffer, offset, 16)\n return UUIDFromBytes(bytes)\n}", "func CreateUuidForMonitorData(md MonitorData) string {\n serial := SerializeMonitorData(md)\n h := sha256.New()\n h.Write(serial)\n return fmt.Sprintf(\"%x\", h.Sum(nil))\n}", "func UUID() (string, error) {\n\tb := make([]byte, 2)\n\n\t_, err := crand.Read(b)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\treturn fmt.Sprintf(\"%X\", b[0:2]), nil\n}", "func removeUuidFromFilepath(path string) string {\n\t// UUID has 4 hyphens, so we split into 6 parts. \n\treturn strings.SplitN(filepath.Base(path), \"-\", 6)[5]\n}", "func ResourceToHostID(res pcommon.Resource) (HostID, bool) {\n\tvar cloudAccount, hostID, provider string\n\n\tattrs := res.Attributes()\n\n\tif attrs.Len() == 0 {\n\t\treturn HostID{}, false\n\t}\n\n\tif attr, ok := attrs.Get(conventions.AttributeCloudAccountID); ok {\n\t\tcloudAccount = attr.Str()\n\t}\n\n\tif attr, ok := attrs.Get(conventions.AttributeHostID); ok {\n\t\thostID = attr.Str()\n\t}\n\n\tif attr, ok := attrs.Get(conventions.AttributeCloudProvider); ok {\n\t\tprovider = attr.Str()\n\t}\n\n\tswitch provider {\n\tcase conventions.AttributeCloudProviderAWS:\n\t\tvar region string\n\t\tif attr, ok := attrs.Get(conventions.AttributeCloudRegion); ok {\n\t\t\tregion = attr.Str()\n\t\t}\n\t\tif hostID == \"\" || region == \"\" || cloudAccount == \"\" {\n\t\t\tbreak\n\t\t}\n\t\treturn HostID{\n\t\t\tKey: HostIDKeyAWS,\n\t\t\tID: fmt.Sprintf(\"%s_%s_%s\", hostID, region, cloudAccount),\n\t\t}, true\n\tcase conventions.AttributeCloudProviderGCP:\n\t\tif cloudAccount == \"\" || hostID == \"\" {\n\t\t\tbreak\n\t\t}\n\t\treturn HostID{\n\t\t\tKey: HostIDKeyGCP,\n\t\t\tID: fmt.Sprintf(\"%s_%s\", cloudAccount, hostID),\n\t\t}, true\n\tcase conventions.AttributeCloudProviderAzure:\n\t\tif cloudAccount == \"\" {\n\t\t\tbreak\n\t\t}\n\t\tid := azureID(attrs, cloudAccount)\n\t\tif id == \"\" {\n\t\t\tbreak\n\t\t}\n\t\treturn HostID{\n\t\t\tKey: HostIDKeyAzure,\n\t\t\tID: id,\n\t\t}, 
true\n\t}\n\n\tif attr, ok := attrs.Get(conventions.AttributeHostName); ok {\n\t\treturn HostID{\n\t\t\tKey: HostIDKeyHost,\n\t\t\tID: attr.Str(),\n\t\t}, true\n\t}\n\n\treturn HostID{}, false\n}", "func generateUUID(bd blockdevice.BlockDevice) (string, bool) {\n\tvar ok bool\n\tvar uuidField, uuid string\n\n\t// select the field which is to be used for generating UUID\n\t//\n\t// Serial number is not used directly for UUID generation. This is because serial number is not\n\t// unique in some cloud environments. For example, in GCP the serial number is\n\t// configurable by the --device-name flag while attaching the disk.\n\t// If this flag is not provided, GCP automatically assigns the serial number\n\t// which is unique only to the node. Therefore Serial number is used only in cases\n\t// where the disk has a WWN.\n\t//\n\t// If disk has WWN, a combination of WWN+Serial will be used. This is done because there are cases\n\t// where the disks has same WWN but different serial. It is seen in some storage arrays.\n\t// All the LUNs will have same WWN, but different serial.\n\t//\n\t// PartitionTableUUID is not used for UUID generation in NDM. The only case where the disk has a PartitionTable\n\t// and not partition is when, the user has manually created a partition table without writing any actual partitions.\n\t// This means NDM will have to give its consumers the entire disk, i.e consumers will have access to the sectors\n\t// where partition table is written. If consumers decide to reformat or erase the disk completely the partition\n\t// table UUID is also lost, making NDM unable to identify the disk. Hence, even if a partition table is present\n\t// NDM will rewrite it and create a new GPT table and a single partition. 
Thus consumers will have access only to\n\t// the partition and the unique data will be stored in sectors where consumers do not have access.\n\n\tswitch {\n\tcase bd.DeviceAttributes.DeviceType == blockdevice.BlockDeviceTypePartition:\n\t\t// The partition entry UUID is used when a partition (/dev/sda1) is processed. The partition UUID should be used\n\t\t// if available, other than the partition table UUID, because multiple partitions can have the same partition table\n\t\t// UUID, but each partition will have a different UUID.\n\t\tklog.Infof(\"device(%s) is a partition, using partition UUID: %s\", bd.DevPath, bd.PartitionInfo.PartitionEntryUUID)\n\t\tuuidField = bd.PartitionInfo.PartitionEntryUUID\n\t\tok = true\n\tcase len(bd.DeviceAttributes.WWN) > 0:\n\t\t// if device has WWN, both WWN and Serial will be used for UUID generation.\n\t\tklog.Infof(\"device(%s) has a WWN, using WWN: %s and Serial: %s\",\n\t\t\tbd.DevPath,\n\t\t\tbd.DeviceAttributes.WWN, bd.DeviceAttributes.Serial)\n\t\tuuidField = bd.DeviceAttributes.WWN +\n\t\t\tbd.DeviceAttributes.Serial\n\t\tok = true\n\tcase len(bd.FSInfo.FileSystemUUID) > 0:\n\t\tklog.Infof(\"device(%s) has a filesystem, using filesystem UUID: %s\", bd.DevPath, bd.FSInfo.FileSystemUUID)\n\t\tuuidField = bd.FSInfo.FileSystemUUID\n\t\tok = true\n\t}\n\n\tif ok {\n\t\tuuid = blockdevice.BlockDevicePrefix + util.Hash(uuidField)\n\t\tklog.Infof(\"generated uuid: %s for device: %s\", uuid, bd.DevPath)\n\t}\n\n\treturn uuid, ok\n}", "func HostID(nomad *NomadServer, hostname *string) (*Host, error) {\n\thosts, _, err := Hosts(nomad)\n\tif err != nil {\n\t\treturn &Host{}, err\n\t}\n\tfor _, host := range hosts {\n\t\tif *hostname == host.Name {\n\t\t\treturn &host, nil\n\t\t}\n\t}\n\tbuf := log(\"event\", \"node_not_found\", \"hostname\", hostname)\n\treturn &Host{}, errors.New(buf.String())\n\n}", "func makeRandomHost() (host.Host, *kaddht.IpfsDHT) {\n\tctx := context.Background()\n\tport := 10000 + rand.Intn(10000)\n\n\thost, 
err := libp2p.New(ctx,\n\t\tlibp2p.ListenAddrStrings(fmt.Sprintf(\"/ip4/0.0.0.0/tcp/%d\", port)),\n\t\tlibp2p.EnableRelay(circuit.OptHop, circuit.OptDiscovery))\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\t// Bootstrap the DHT. In the default configuration, this spawns a Background\n\t// thread that will refresh the peer table every five minutes.\n\tdht, err := kaddht.New(ctx, host)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\terr = dht.Bootstrap(ctx)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\treturn host, dht\n}", "func GenerateMonitorID(s string) (string, error) {\n\tvar errEdidCorrupted = errors.New(\"corrupt EDID: \" + s)\n\tif len(s) < 32 || s[:16] != \"00ffffffffffff00\" {\n\t\treturn \"\", errEdidCorrupted\n\t}\n\n\tedid, err := hex.DecodeString(s)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\t// we only parse EDID 1.3 and 1.4\n\tif edid[18] != 1 || (edid[19] < 3 || edid[19] > 4) {\n\t\treturn \"\", fmt.Errorf(\"unknown EDID version %d.%d\", edid[18], edid[19])\n\t}\n\n\tmanuf := binary.BigEndian.Uint16(edid[8:10])\n\n\t// The first bit is resevered and needs to be zero\n\tif manuf&0x8000 != 0x0000 {\n\t\treturn \"\", errEdidCorrupted\n\t}\n\n\t// Decode the manufacturer 'A' = 0b00001, 'B' = 0b00010, ..., 'Z' = 0b11010\n\tvar manufacturer string\n\tmask := uint16(0x7C00) // 0b0111110000000000\n\tfor i := uint(0); i <= 10; i += 5 {\n\t\tnumber := ((manuf & (mask >> i)) >> (10 - i))\n\t\tmanufacturer += string(byte(number + 'A' - 1))\n\t}\n\n\t// Decode the product and serial number\n\tproduct := binary.LittleEndian.Uint16(edid[10:12])\n\tserial := binary.LittleEndian.Uint32(edid[12:16])\n\n\t// Decode four descriptor blocks\n\tvar displayName, displaySerialNumber string\n\tfor i := 0; i < 4; i++ {\n\t\td := edid[54+i*18 : 54+18+i*18]\n\n\t\t// interesting descriptors start with three zeroes\n\t\tif d[0] != 0 || d[1] != 0 || d[2] != 0 {\n\t\t\tcontinue\n\t\t}\n\n\t\tswitch d[3] {\n\t\tcase 0xff: // display serial 
number\n\t\t\tdisplaySerialNumber = strings.TrimSpace(string(d[5:]))\n\t\tcase 0xfc: // display name\n\t\t\tdisplayName = strings.TrimSpace(string(d[5:]))\n\t\t}\n\t}\n\n\tstr := fmt.Sprintf(\"%s-%d-%d-%v-%v\", manufacturer, product, serial, displayName, displaySerialNumber)\n\treturn str, nil\n}", "func (h *Host) ID() string {\n\tif h.id == \"\" {\n\t\thash := md5.New()\n\t\t_, _ = io.WriteString(hash, h.IP+h.MAC)\n\t\th.id = fmt.Sprintf(\"%x\", hash.Sum(nil))\n\t}\n\n\treturn h.id\n}", "func defaultID() string {\n\tvar id string\n\n\t/* If we have one statically set, use that plus four random bytes */\n\tif \"\" != staticID {\n\t\tn := strconv.FormatInt(int64(time.Now().Nanosecond()),36)\n\t\tif len(n) > staticIDB36Max {\n\t\t\tn = n[:staticIDB36Max]\n\t\t\t}\n\t\treturn staticID +\"-\"+ n\n\t}\n\n\t/* Look through all the interfaces for one we like */\n\tis, err := net.Interfaces()\n\tif nil != err {\n\t\tlog.Printf(\"Unable to list interfaces: %v\", err)\n\t}\n\tfor _, i := range is {\n\t\t/* Skip docker interfaces */\n\t\t/* TODO: Unhardcode this */\n\t\tif \"docker0\" == i.Name {\n\t\t\tcontinue\n\t\t}\n\t\t/* Skip loopback interfaces */\n\t\tif 0 != (net.FlagLoopback & i.Flags) {\n\t\t\tcontinue\n\t\t}\n\t\t/* Get the addresses for this interface */\n\t\tas, err := i.Addrs()\n\t\tif nil != err {\n\t\t\tlog.Printf(\n\t\t\t\t\"Unable to get addresses for %v: %v\",\n\t\t\t\ti.Name,\n\t\t\t\terr,\n\t\t\t)\n\t\t\tcontinue\n\t\t}\n\t\t/* Use the first address we find */\n\t\tif 0 == len(as) {\n\t\t\tcontinue\n\t\t}\n\t\tid = as[0].String()\n\t}\n\t/* Clean up the address a bit, to make DNS-friendly */\n\tparts := strings.SplitN(id, \"/\", 2)\n\tif 0 == len(parts) { /* Probably didn't find one */\n\t\treturn randomID()\n\t}\n\n\t/* Remove all non-hex characters */\n\tid = strings.Map(\n\t\tfunc(r rune) rune {\n\t\t\t/* Turn all non-hex characters into hyphens */\n\t\t\tif !strings.ContainsRune(\"abcdefABCDEF0123456789\", r) {\n\t\t\t\treturn 
'-'\n\t\t\t}\n\t\t\treturn r\n\t\t},\n\t\tparts[0],\n\t)\n\t/* Trim leading and trailing -'s, which can happen with IPv6\n\taddresses */\n\treturn strings.Trim(id, \"-\")\n}", "func (dev VMVolumeDevice) UUID() string {\n\treturn utils.NewUUID5(blockVolumeNsUUID, dev.HostPath)\n}", "func (s *Store) readID() error {\n\tb, err := ioutil.ReadFile(s.IDPath())\n\tif os.IsNotExist(err) {\n\t\ts.id = 0\n\t\treturn nil\n\t} else if err != nil {\n\t\treturn fmt.Errorf(\"read file: %s\", err)\n\t}\n\n\tid, err := strconv.ParseUint(string(b), 10, 64)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"parse id: %s\", err)\n\t}\n\ts.id = id\n\n\ts.Logger.Printf(\"read local node id: %d\", s.id)\n\n\treturn nil\n}", "func generateUniqueId() string {\n\tcmd := exec.Command(\"/usr/bin/uuidgen\")\n\tvar out bytes.Buffer\n\tcmd.Stdout = &out\n\terr := cmd.Run()\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tuuid := out.String()\n\tuuid = strings.Replace(uuid, \"\\n\", \"\", 1)\n\treturn uuid\n}", "func readInstanceID() string {\n\tconst instanceIDFile = \"/var/lib/cloud/data/instance-id\"\n\tidBytes, err := ioutil.ReadFile(instanceIDFile)\n\tif err != nil {\n\t\tglog.Infof(\"Failed to get instance id from file: %v\", err)\n\t\treturn \"\"\n\t} else {\n\t\tinstanceID := string(idBytes)\n\t\tinstanceID = strings.TrimSpace(instanceID)\n\t\tglog.Infof(\"Get instance id from file: %s\", instanceID)\n\t\treturn instanceID\n\t}\n}", "func extractUuid(input string) string {\n\treGetID := regexp.MustCompile(`([a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12})`)\n\tmatchListId := reGetID.FindAllStringSubmatch(input, -1)\n\tif len(matchListId) > 0 && len(matchListId[0]) > 0 {\n\t\treturn matchListId[len(matchListId)-1][1]\n\t}\n\treturn \"\"\n}", "func GetHostOSDistro() (string, error) {\n\tinitiatorNSPath := iscsiutil.GetHostNamespacePath(HostProcPath)\n\tmountPath := fmt.Sprintf(\"--mount=%s/mnt\", initiatorNSPath)\n\toutput, err := Execute([]string{}, \"nsenter\", mountPath, \"cat\", 
OsReleasePath)\n\tif err != nil {\n\t\treturn \"\", errors.Wrapf(err, \"failed to read %v on host\", OsReleasePath)\n\t}\n\n\tscanner := bufio.NewScanner(strings.NewReader(string(output)))\n\tfor scanner.Scan() {\n\t\tline := scanner.Text()\n\t\tif strings.HasPrefix(line, \"ID=\") {\n\t\t\tosDistro := RemoveNewlines(strings.TrimPrefix(line, \"ID=\"))\n\t\t\treturn strings.ReplaceAll(osDistro, `\"`, \"\"), nil\n\t\t}\n\t}\n\treturn \"\", fmt.Errorf(\"failed to find ID field in %v\", OsReleasePath)\n}", "func (h *Harness) UUID(id string) string { return h.uuidG.Get(id) }", "func makeID() (string, error) {\n\tdata := make([]byte, 32)\n\t_, err := rand.Read(data)\n\tx := sha256.Sum256(data)\n\treturn hex.EncodeToString(x[:]), err\n}", "func ReadOrInitSessionId(bD *BaseData) (string, error) {\n _, ok := sessions[bD.SessionId]; if !ok {\n bytes := make([]byte, 16)\n if _, err := rand.Read(bytes); err != nil {\n return \"\", err\n }\n sessionId := hex.EncodeToString(bytes)\n sessions[sessionId] = &Data{SessionId: sessionId, CopyAndPaste: make(map[string]bool)}\n return sessionId, nil\n }\n return bD.SessionId, nil\n}", "func GUIDFromBytes(b []byte) string {\n\t// See Intel EFI specification, Appendix A: GUID and Time Formats\n\t// https://www.intel.de/content/dam/doc/product-specification/efi-v1-10-specification.pdf\n\tvar (\n\t\ttimeLow uint32\n\t\ttimeMid uint16\n\t\ttimeHighAndVersion uint16\n\t\tclockSeqHighAndReserved uint8\n\t\tclockSeqLow uint8\n\t\tnode [6]byte\n\t)\n\ttimeLow = binary.LittleEndian.Uint32(b[0:4])\n\ttimeMid = binary.LittleEndian.Uint16(b[4:6])\n\ttimeHighAndVersion = binary.LittleEndian.Uint16(b[6:8])\n\tclockSeqHighAndReserved = b[8]\n\tclockSeqLow = b[9]\n\tcopy(node[:], b[10:])\n\treturn fmt.Sprintf(\"%08X-%04X-%04X-%02X%02X-%012X\",\n\t\ttimeLow,\n\t\ttimeMid,\n\t\ttimeHighAndVersion,\n\t\tclockSeqHighAndReserved,\n\t\tclockSeqLow,\n\t\tnode)\n}", "func hostRead(d *schema.ResourceData, m interface{}, params zabbix.Params) error {\n\tapi := 
m.(*zabbix.API)\n\n\tlog.Debug(\"Lookup of host with params %#v\", params)\n\n\thosts, err := api.HostsGet(params)\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif len(hosts) < 1 {\n\t\td.SetId(\"\")\n\t\treturn nil\n\t}\n\tif len(hosts) > 1 {\n\t\treturn errors.New(\"multiple hosts found\")\n\t}\n\thost := hosts[0]\n\n\tlog.Debug(\"Got host: %+v\", host)\n\n\td.SetId(host.HostID)\n\td.Set(\"name\", host.Name)\n\td.Set(\"host\", host.Host)\n\td.Set(\"proxyid\", host.ProxyID)\n\td.Set(\"enabled\", host.Status == 0)\n\td.Set(\"inventory_mode\", HINV_LOOKUP_REV[host.InventoryMode])\n\n\td.Set(\"interface\", flattenHostInterfaces(host, d, m))\n\td.Set(\"templates\", flattenTemplateIds(host.ParentTemplateIDs))\n\td.Set(\"inventory\", flattenInventory(host))\n\td.Set(\"groups\", flattenHostGroupIds(host.GroupIds))\n\td.Set(\"macro\", flattenMacros(host.UserMacros))\n\td.Set(\"tag\", flattenTags(host.Tags))\n\n\treturn nil\n}", "func mkSeed(t *testing.T, d Distro, sshKey, hostURL, tdir string, port int) {\n\tt.Helper()\n\n\tdir := filepath.Join(tdir, d.Name, \"seed\")\n\tos.MkdirAll(dir, 0700)\n\n\t// make meta-data\n\t{\n\t\tfout, err := os.Create(filepath.Join(dir, \"meta-data\"))\n\t\tif err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\n\t\terr = metaDataTempl.Execute(fout, struct {\n\t\t\tID string\n\t\t\tHostname string\n\t\t}{\n\t\t\tID: \"31337\",\n\t\t\tHostname: d.Name,\n\t\t})\n\t\tif err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\n\t\terr = fout.Close()\n\t\tif err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t}\n\n\t// make user-data\n\t{\n\t\tfout, err := os.Create(filepath.Join(dir, \"user-data\"))\n\t\tif err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\n\t\terr = userDataTempl.Execute(fout, struct {\n\t\t\tSSHKey string\n\t\t\tHostURL string\n\t\t\tHostname string\n\t\t\tPort int\n\t\t\tInstallPre string\n\t\t\tPassword string\n\t\t}{\n\t\t\tSSHKey: strings.TrimSpace(sshKey),\n\t\t\tHostURL: hostURL,\n\t\t\tHostname: d.Name,\n\t\t\tPort: port,\n\t\t\tInstallPre: 
d.InstallPre(),\n\t\t\tPassword: securePassword,\n\t\t})\n\t\tif err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\n\t\terr = fout.Close()\n\t\tif err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t}\n\n\targs := []string{\n\t\t\"-output\", filepath.Join(dir, \"seed.iso\"),\n\t\t\"-volid\", \"cidata\", \"-joliet\", \"-rock\",\n\t\tfilepath.Join(dir, \"meta-data\"),\n\t\tfilepath.Join(dir, \"user-data\"),\n\t}\n\n\tif hackOpenSUSE151UserData(t, d, dir) {\n\t\targs = append(args, filepath.Join(dir, \"openstack\"))\n\t}\n\n\trun(t, tdir, \"genisoimage\", args...)\n}", "func (d *WindowsDesktopV3) GetHostID() string {\n\treturn d.Spec.HostID\n}", "func GenerateClientID(confDir string) {\n\tmachineID, err := ioutil.ReadFile(confDir + \"/.machine_id\")\n\tif err != nil {\n\t\tfmt.Println(\"error reading machine id\")\n\t\tmachineID = generateMachineID(confDir)\n\t}\n\tClientID = string(machineID[:len(machineID)-1])\n\tfmt.Println(\"generated ClientID\", ClientID)\n}", "func uuid() string {\n\tout, err := exec.Command(\"/usr/bin/uuidgen\").Output()\n\tif err != nil {\n\t\tlog.Fatal().\n\t\t\tStr(\"command\", \"/usr/bin/uuidgen\").\n\t\t\tMsg(\"There was an error generating the uuid.\")\n\t}\n\n\t//n := bytes.IndexByte(out, 0)\n\ts := string(out)\n\ts = strings.TrimSpace(s)\n\treturn s\n}", "func Get() (*HostID, error) {\n\tvar id HostID\n\tvar addrs []string\n\tifs, err := net.Interfaces()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tfor _, v := range ifs {\n\t\th := v.HardwareAddr.String()\n\t\tif len(h) > 0 {\n\t\t\taddrs = append(addrs, h)\n\t\t}\n\t}\n\tsort.Strings(addrs) // sort host IDs\n\tif len(addrs) > 0 { // make host IDs unique\n\t\tid.HostID = append(id.HostID, addrs[0])\n\t\tlast := addrs[0]\n\t\tfor i := 1; i < len(addrs); i++ {\n\t\t\tif addrs[i] != last {\n\t\t\t\tid.HostID = append(id.HostID, addrs[i])\n\t\t\t\tlast = addrs[i]\n\t\t\t}\n\t\t}\n\t}\n\tid.OS = GetOS()\n\tid.CPU = GetCPU()\n\treturn &id, nil\n}", "func GenerateUUID(device string) error {\n\t// for 
mounting the cloned volume for btrfs, a new UUID has to be generated\n\tcmd := exec.Command(\"btrfstune\", \"-f\", \"-u\", device)\n\tout, err := cmd.CombinedOutput()\n\tif err != nil {\n\t\tklog.Errorf(\"btrfs: uuid generate failed for device %s error: %s\", device, string(out))\n\t\treturn err\n\t}\n\tklog.Infof(\"btrfs: generated UUID for the device %s \\n %v\", device, string(out))\n\treturn nil\n}", "func (b *Broker) createIDFile(home string, filepath string, id string) (err error) {\n\t_filepath := fmt.Sprintf(\"%v%v%v\", home, string(os.PathSeparator), filepath)\n\terr = ioutil.WriteFile(_filepath, []byte(id), 0644)\n\n\treturn\n}", "func GetUID() string {\n\twd, err := os.Getwd()\n\n\tvar data map[string]interface{}\n\n\tbuff, err := ioutil.ReadFile(wd + \"/package.json\")\n\n\tcheck(err)\n\n\tif err := json.Unmarshal(buff, &data); err != nil {\n\t\tpanic(err)\n\t}\n\n\tuser, err := GetStoredUser()\n\n\tGuard(user)\n\n\tcheck(err)\n\n\tname := data[\"name\"].(string)\n\n\tuid := CreateUID(name, user.Email)\n\n\treturn uid\n}", "func makeInodeID(path string) uint64 {\n hash := fnv.New64a()\n hash.Write([]byte(path))\n return hash.Sum64()\n}", "func getHardwareID() string {\n\tif hardwareID != \"\" {\n\t\treturn hardwareID\n\t}\n\taddress := \"\"\n\tinters, err := net.Interfaces()\n\tif err == nil {\n\t\tfor _, inter := range inters {\n\t\t\tif inter.HardwareAddr.String() != \"\" {\n\t\t\t\taddress = inter.HardwareAddr.String()\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t}\n\tif address == \"\" {\n\t\taddress = \"0\"\n\t}\n\tcheck32 := crc32.ChecksumIEEE([]byte(address))\n\tid58 := base58.EncodeBig(nil, big.NewInt(int64(check32)))\n\thardwareID = string(id58)\n\treturn hardwareID\n}", "func getUUID() string{\n\tresponse,_ := http.Get(BaseUrl+\"/_uuids\")\n\tdefer response.Body.Close()\n\tdecoder := json.NewDecoder(response.Body)\n\terr := decoder.Decode(&uniqueid)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn uniqueid.Uuids[0]\n}", "func (cli *BaseClient) 
GetHostLunId(ctx context.Context, hostID, lunID string) (string, error) {\n\thostLunId := \"1\"\n\turl := fmt.Sprintf(\"/lun/associate?TYPE=11&ASSOCIATEOBJTYPE=21&ASSOCIATEOBJID=%s\", hostID)\n\tresp, err := cli.Get(ctx, url, nil)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tcode := int64(resp.Error[\"code\"].(float64))\n\tif code != 0 {\n\t\treturn \"\", fmt.Errorf(\"Get hostLunId of host %s, lun %s error: %d\", hostID, lunID, code)\n\t}\n\n\trespData := resp.Data.([]interface{})\n\tfor _, i := range respData {\n\t\thostLunInfo := i.(map[string]interface{})\n\t\tif hostLunInfo[\"ID\"].(string) == lunID {\n\t\t\tvar associateData map[string]interface{}\n\t\t\tassociateDataBytes := []byte(hostLunInfo[\"ASSOCIATEMETADATA\"].(string))\n\t\t\terr := json.Unmarshal(associateDataBytes, &associateData)\n\t\t\tif err != nil {\n\t\t\t\treturn \"\", nil\n\t\t\t}\n\t\t\thostLunIdFloat, ok := associateData[\"HostLUNID\"].(float64)\n\t\t\tif ok {\n\t\t\t\thostLunId = strconv.FormatInt(int64(hostLunIdFloat), 10)\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t}\n\n\treturn hostLunId, nil\n}", "func parseUUID(src string) (dst [16]byte, err error) {\n\tswitch len(src) {\n\tcase 36:\n\t\tsrc = src[0:8] + src[9:13] + src[14:18] + src[19:23] + src[24:]\n\tcase 32:\n\t\t// dashes already stripped, assume valid\n\tdefault:\n\t\t// assume invalid.\n\t\treturn dst, fmt.Errorf(\"cannot parse UUID %v\", src)\n\t}\n\n\tbuf, err := hex.DecodeString(src)\n\tif err != nil {\n\t\treturn dst, err\n\t}\n\n\tcopy(dst[:], buf)\n\treturn dst, err\n}", "func DbFindHost(id int) Host {\n\tfor _, h := range hosts {\n\t\tif h.Id == id {\n\t\t\treturn h\n\t\t}\n\t}\n\t// empty\n\treturn Host{}\n}", "func (o *NetworkLicenseFile) GetHostIdOk() (*string, bool) {\n\tif o == nil || o.HostId == nil {\n\t\treturn nil, false\n\t}\n\treturn o.HostId, true\n}", "func randomHost() (host string, original string) {\n\ts := rand.NewSource(time.Now().UnixNano())\n\tr := rand.New(s)\n\tv := r.Intn(len(hosts))\n\thost = 
keys[v]\n\toriginal = hosts[host]\n\treturn\n}", "func makeUserHost(listenPort int, target string, randseed int64) (host.Host, error) {\n\n\t// seed == 0, real cryptographic randomness\n\t// else, deterministic randomness source to make generated keys stay the same across multiple runs\n\tvar r io.Reader\n\tif randseed == 0 {\n\t\tr = rand.Reader\n\t} else {\n\t\tr = mrand.New(mrand.NewSource(randseed))\n\t}\n\n\t// Generate a key pair for this host. We will use it to obtain a valid host ID.\n\tpriv, _, err := crypto.GenerateKeyPairWithReader(crypto.RSA, 2048, r)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Generate the libp2p host\n\tbasicHost, err := libp2p.New(\n\t\tcontext.Background(),\n\t\tlibp2p.ListenAddrStrings(fmt.Sprintf(\"/ip4/127.0.0.1/tcp/%d\", listenPort)),\n\t\tlibp2p.Identity(priv),\n\t)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tfmt.Printf(\"I am user node %s\\n\", basicHost.ID().Pretty())\n\tfmt.Printf(\"\\nNow run this on a different terminal in the user directory in order to connect to the same region node:\\ngo run *.go -port %d -peer %s\\n\\n\", listenPort+1, target)\n\n\treturn basicHost, nil\n}", "func newUID() ([]byte, error) {\n\t// uuid := make([]byte, 16)\n\t// n, err := io.ReadFull(rand.Reader, uuid)\n\t// if n != len(uuid) || err != nil {\n\t// \treturn nil, err\n\t// }\n\t// // variant bits; see section 4.1.1\n\t// uuid[8] = uuid[8]&^0xc0 | 0x80\n\t// // version 4 (pseudo-random); see section 4.1.3\n\t// uuid[6] = uuid[6]&^0xf0 | 0x40\n\t// return []byte(fmt.Sprintf(\"%x-%x-%x-%x-%x\", uuid[0:4], uuid[4:6], uuid[6:8], uuid[8:10], uuid[10:])), nil\n\treturn []byte(uniuri.New()), nil\n}", "func generateUUID() string {\n\tbuf := make([]byte, 16)\n\tif _, err := cr.Read(buf); err != nil {\n\t\tpanic(fmt.Errorf(\"failed to read random bytes: %w\", err))\n\t}\n\n\treturn fmt.Sprintf(\"%08x-%04x-%04x-%04x-%12x\",\n\t\tbuf[0:4],\n\t\tbuf[4:6],\n\t\tbuf[6:8],\n\t\tbuf[8:10],\n\t\tbuf[10:16])\n}", "func GUID() (guid string) 
{\n\ttm := time.Now().UTC()\n\tt := tm.UnixNano() / 1000000\n\tfileDate := strconv.Itoa(tm.Year()) + \".\" + tm.Month().String() + \".\" + strconv.Itoa(tm.Day()) + \".\" + strconv.Itoa(tm.Hour()) + \".\" + strconv.Itoa(tm.Minute()) + \".\" + strconv.Itoa(tm.Second()) + \".\" + strconv.FormatInt(t, 10)\n\tguid = fileDate\n\treturn\n}", "func (m *Attachment) GenerateUUID() (string, error) {\n\tout, err := exec.Command(\"uuidgen\").Output()\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\treturn strings.Replace(strings.Trim(string(out), \"\\n\"), \"-\", \"_\", -1), nil\n}", "func GetXenIdFromCloudInit() (string, error) {\n\tif runtime.GOOS == \"windows\" {\n\t\treturn \"\", errors.New(\"cloud init is not supported on windows\")\n\t}\n\tinstanceIdPath := \"/var/lib/cloud/data/instance-id\"\n\tdata, err := ioutil.ReadFile(instanceIdPath)\n\tif err != nil {\n\t\treturn \"\", errors.Wrap(err, \"failed to read from instance id path\")\n\t}\n\t// remove new line characters\n\txenId := strings.TrimSpace(string(data))\n\txenId = strings.ToLower(xenId)\n\t// the fallback datasource is iid-datasource-none when it does not exist\n\t// https://cloudinit.readthedocs.io/en/latest/topics/datasources/fallback.html\n\tif xenId == \"iid-datasource-none\" || xenId == \"nocloud\" {\n\t\treturn \"\", errors.New(\"invalid instance id found\")\n\t}\n\treturn xenId, nil\n}", "func uuid() []byte {\n\tuuid := make([]byte, 16)\n\t_, err := rand.Read(uuid)\n\tif err != nil {\n\t\tpanic(\"cue/hosted: uuid() failed to read random bytes\")\n\t}\n\n\t// The following bit twiddling is outlined in RFC 4122. 
In short, it\n\t// identifies the UUID as a v4 random UUID.\n\tuuid[6] = (4 << 4) | (0xf & uuid[6])\n\tuuid[8] = (8 << 4) | (0x3f & uuid[8])\n\treturn uuid\n}", "func GetUuidForDB() string {\n\treturn ulid.Make().String()\n}", "func HostFromDir(hostdir string) (*Host, error) {\n\tconfPath := path.Join(hostdir, hostConfFile)\n\n\th := &Host{}\n\terr := loadJson(h, confPath)\n\tif err != nil {\n\t\treturn nil, microerror.Mask(err)\n\t}\n\n\th.hostDir, err = os.Open(hostdir)\n\tif err != nil {\n\t\treturn nil, microerror.Mask(err)\n\t}\n\n\tfi, err := os.Stat(confPath)\n\tif err != nil {\n\t\treturn nil, microerror.Mask(err)\n\t}\n\th.lastModTime = fi.ModTime()\n\n\treturn h, nil\n}", "func createCidFile(ctx context.Context, tempDir string, repoSlug string) (string, func(), error) {\n\t// Find a location that we can use for a cidfile, which will contain the\n\t// container ID that is used below. We can then use this to remove the\n\t// container on a successful run, rather than leaving it dangling.\n\tcidFile, err := os.CreateTemp(tempDir, repoSlug+\"-container-id\")\n\tif err != nil {\n\t\treturn \"\", nil, errors.Wrap(err, \"Creating a CID file failed\")\n\t}\n\n\t// However, Docker will fail if the cidfile actually exists, so we need\n\t// to remove it. 
Because Windows can't remove open files, we'll first\n\t// close it, even though that's unnecessary elsewhere.\n\tcidFile.Close()\n\tif err = os.Remove(cidFile.Name()); err != nil {\n\t\treturn \"\", nil, errors.Wrap(err, \"removing cidfile\")\n\t}\n\n\t// Since we went to all that effort, we can now defer a function that\n\t// uses the cidfile to clean up after this function is done.\n\tcleanup := func() {\n\t\tcid, err := os.ReadFile(cidFile.Name())\n\t\t_ = os.Remove(cidFile.Name())\n\t\tif err == nil {\n\t\t\tctx, cancel := context.WithTimeout(ctx, 2*time.Second)\n\t\t\tdefer cancel()\n\t\t\t_ = exec.CommandContext(ctx, \"docker\", \"rm\", \"-f\", \"--\", string(cid)).Run()\n\t\t}\n\t}\n\n\treturn cidFile.Name(), cleanup, nil\n}", "func (fs *FS) GetId() (int64, error) {\n\tpath := fmt.Sprintf(\"%s/id\", fs.path)\n\tvar now int64 = 0\n\tif _, err := os.Stat(path); err == nil {\n\t\t// file exists\n\t\tf, err := os.OpenFile(path, os.O_RDWR, 0644)\n\t\tdefer f.Close()\n\t\tif err != nil { return -1, err }\n\n\t\tbyt, err := ioutil.ReadAll(f)\n\t\tif err != nil { return -1, err }\n\t\tstr := string(byt)\n\n\t\twas, err := strconv.ParseInt(str, 16, 64)\n\t\tif err != nil { return -1, err }\n\n\t\tnow = was + 1\n\t}\n\n\terr := os.MkdirAll(filepath.Dir(path), 0777)\n\tf, err := os.Create(path)\n\tdefer f.Close()\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tout := fmt.Sprintf(\"%x\", now)\n\td := []byte(out)\n\t_, err = f.Write(d)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\treturn now, nil\n}", "func libc_getuid() int32", "func hostsFile() string {\n\tu, err := user.Current()\n\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tdir := fmt.Sprintf(\"%s/Applications/nogame/\", u.HomeDir)\n\terr = os.MkdirAll(dir, 0777)\n\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tfilename := fmt.Sprintf(\"%shosts.txt\", dir)\n\tfile, err := os.Open(filename)\n\n\tif err != nil {\n\t\tfile, err = os.Create(filename)\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\t}\n\n\treturn 
file.Name()\n}", "func (pe *ProgramExt) UUID() string {\n\treturn fmt.Sprintf(\"%s_%s\", pe.Manager, pe.Config)\n}", "func GetSeedFromFile(seedFile string) (string, error) {\n\tf, err := os.Open(seedFile)\n\tif err != nil {\n\t\treturn \"\", ErrCombind(ErrorOpenSeedFile, err)\n\t}\n\tdefer f.Close()\n\n\tvar buf bytes.Buffer\n\t_, err = io.Copy(&buf, f)\n\tif err != nil {\n\t\treturn \"\", ErrCombind(ErrorToReadSeedFile, err)\n\t}\n\tseed := strings.Trim(strings.Split(buf.String(), \":\")[1], \"\\n\")\n\treturn seed, nil\n}", "func uniqueHandle(client interfaces.Client) (interfaces.Client, error) {\n\tfile, err := os.Open(\"users.txt\")\n\tif err != nil {\n\t\treturn client, err\n\t}\n\tdefer file.Close()\n\treader := bufio.NewReader(file)\n\tfor {\n\t\tline, err := reader.ReadString('\\n')\n\t\tif err != nil && err != io.EOF {\n\t\t\treturn client, err\n\t\t}\n\t\thandle, _ := helpers.SplitOnFirstDelim(',', line)\n\t\tif client.GetHandle() == handle {\n\t\t\treturn client, errors.New(\"Handle is not unique\")\n\t\t}\n\t\tif err == io.EOF {\n\t\t\tbreak\n\t\t}\n\t}\n\treturn client, err\n}", "func UUID(db *sql.DB) (domain.ID, error) {\n\tvar id domain.ID\n\trow := db.QueryRow(`SELECT uuid_generate_v4()`)\n\tif err := row.Scan(&id); err != nil {\n\t\treturn id, errors.Database(errors.ServerErrorMessage, err, \"trying to populate UUID\")\n\t}\n\treturn id, nil\n}", "func getBootID() (string, error) {\n\tcurrentBootIDBytes, err := ioutil.ReadFile(\"/proc/sys/kernel/random/boot_id\")\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn strings.TrimSpace(string(currentBootIDBytes)), nil\n}", "func MakeIdentifier() string {\n\tb := make([]byte, 12)\n\t_, err := io.ReadFull(rand.Reader, b)\n\tif err != nil {\n\t\treturn \"\"\n\t}\n\treturn fmt.Sprintf(\"%x\", b)\n}", "func (o *NetworkLicenseFile) HasHostId() bool {\n\tif o != nil && o.HostId != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func generateUUID() string {\n\tbuf := make([]byte, 16)\n\tif _, err := 
crand.Read(buf); err != nil {\n\t\tpanic(fmt.Errorf(\"failed to read random bytes: %v\", err))\n\t}\n\n\treturn fmt.Sprintf(\"%08x-%04x-%04x-%04x-%12x\",\n\t\tbuf[0:4],\n\t\tbuf[4:6],\n\t\tbuf[6:8],\n\t\tbuf[8:10],\n\t\tbuf[10:16])\n}", "func (ps *PS) UUID() uint64 {\n\tif ps.uuid != 0 {\n\t\treturn ps.uuid\n\t}\n\t// assume the uuid is derived from boot ID and process start time\n\tps.uuid = (bootid.Read() << 30) + uint64(ps.PID) | uint64(ps.StartTime.UnixNano())\n\tmaj, _, patch := windows.RtlGetNtVersionNumbers()\n\tif maj >= 10 && patch >= 1507 {\n\t\tseqNum := querySequenceNumber(ps.PID)\n\t\t// prefer the most robust variant of the uuid which uses the\n\t\t// process sequence number obtained from the process object\n\t\tif seqNum != 0 {\n\t\t\tps.uuid = (bootid.Read() << 30) | seqNum\n\t\t}\n\t}\n\treturn ps.uuid\n}", "func (d *Device) GetHost(ctx context.Context, hostID int) (*Host, error) {\n\tspath := fmt.Sprintf(\"/host/%d\", hostID)\n\n\treq, err := d.newRequest(ctx, \"GET\", spath, nil)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(ErrCreateRequest+\": %w\", err)\n\t}\n\n\thost := &Host{}\n\tif err = d.requestWithRetry(req, host, DefaultHTTPRetryCount); err != nil {\n\t\treturn nil, fmt.Errorf(ErrRequestWithRetry+\": %w\", err)\n\t}\n\n\treturn host, nil\n}", "func (s *DatabaseServerV3) GetHostID() string {\n\treturn s.Spec.HostID\n}", "func pid(instance int) (pid string, err error) {\n file, err := os.Open(pidFileName(instance))\n if err != nil {\n return\n }\n\n defer file.Close()\n\n scanner := bufio.NewScanner(file)\n scanner.Scan()\n pid = scanner.Text()\n return\n}", "func GetPidFrom(pidFilePath string) (pid int, err error) {\n\n\tif pidFilePath == \"\" {\n\t\tpidFilePath = types.MosnPidDefaultFileName\n\t}\n\n\tvar pf io.Reader\n\tif pf, err = os.Open(pidFilePath); err != nil {\n\t\treturn\n\t}\n\n\tvar bs []byte\n\tif bs, err = ioutil.ReadAll(pf); err != nil {\n\t\treturn\n\t}\n\n\tpid, err = strconv.Atoi(strings.TrimRight(string(bs), 
\"\\n\"))\n\treturn\n}", "func (rhelpf LinuxPlatformFlavor) getHostUniqueFlavor() ([]cm.Flavor, error) {\n\tlog.Trace(\"flavor/types/linux_platform_flavor:getHostUniqueFlavor() Entering\")\n\tdefer log.Trace(\"flavor/types/linux_platform_flavor:getHostUniqueFlavor() Leaving\")\n\n\tvar errorMessage = \"Error during creation of HOST_UNIQUE flavor\"\n\tvar err error\n\tvar hostUniquePcrs = rhelpf.getPcrList(cf.FlavorPartHostUnique)\n\tvar includeEventLog = rhelpf.eventLogRequired(cf.FlavorPartHostUnique)\n\tvar allPcrDetails = pfutil.GetPcrDetails(\n\t\trhelpf.HostManifest.PcrManifest, hostUniquePcrs, includeEventLog)\n\tvar filteredPcrDetails = pfutil.IncludeModulesToEventLog(\n\t\tallPcrDetails, hostUniqueModules)\n\n\tnewMeta, err := pfutil.GetMetaSectionDetails(rhelpf.HostInfo, rhelpf.TagCertificate, \"\", cf.FlavorPartHostUnique,\n\t\thcConstants.VendorIntel)\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, errorMessage+\" Failure in Meta section details\")\n\t}\n\tlog.Debugf(\"flavor/types/linux_platform_flavor:getHostUniqueFlavor() New Meta Section: %v\", *newMeta)\n\n\tnewBios := pfutil.GetBiosSectionDetails(rhelpf.HostInfo)\n\tif newBios == nil {\n\t\treturn nil, errors.Wrap(err, errorMessage+\" Failure in Bios section details\")\n\t}\n\tlog.Debugf(\"flavor/types/linux_platform_flavor:getHostUniqueFlavor() New Bios Section: %v\", *newBios)\n\n\t// Assemble the Host Unique Flavor\n\thostUniqueFlavor := cm.NewFlavor(newMeta, newBios, nil, filteredPcrDetails, nil, nil)\n\n\tlog.Debugf(\"flavor/types/esx_platform_flavor:getHostUniqueFlavor() New PlatformFlavor: %v\", hostUniqueFlavor)\n\n\treturn []cm.Flavor{*hostUniqueFlavor}, nil\n}", "func GetDeviceUUID(deviceID int32) string {\n\tuuid := C.FSEventsCopyUUIDForDevice(C.dev_t(deviceID))\n\tif uuid == nullCFUUIDRef {\n\t\treturn \"\"\n\t}\n\treturn cfStringToGoString(C.CFUUIDCreateString(nullCFAllocatorRef, uuid))\n}", "func (db GAEDatabase) LoadUUIDFromHumanTrainerName(ctx context.Context, name string) 
(string, error) {\n\tvar trainers []GAETrainer\n\n\t_, err := datastore.NewQuery(trainerKindName).\n\t\tFilter(\"Name =\", name).\n\t\tFilter(\"Type =\", pkmn.HumanTrainerType).\n\t\tGetAll(ctx, &trainers)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tif len(trainers) == 0 {\n\t\treturn \"\", errors.Wrap(database.ErrNoResults, \"loading UUID from human trainer name\")\n\t}\n\tif len(trainers) > 1 {\n\t\treturn \"\", errors.Errorf(\"multiple human trainers share the same name '%s'\", name)\n\t}\n\n\treturn trainers[0].GetTrainer().UUID, nil\n}", "func GetGuestUUID(firstName, lastName string) (string, error) {\n\tapp.InitDB()\n\n\tquery := \"SELECT uuid FROM guest WHERE first_name LIKE ? AND last_name LIKE ?\"\n\trevel.INFO.Printf(\"Query -> %s\", query)\n\trow := app.DB.QueryRow(query, firstName, lastName)\n\n\tvar guestUUID string\n\terr := row.Scan(&guestUUID)\n\tif err != nil {\n\t\trevel.ERROR.Printf(\"Query error -> %s\", err)\n\t\treturn \"\", err\n\t}\n\n\trevel.INFO.Printf(\"Query result -> guestUUID: %s\", guestUUID)\n\treturn guestUUID, nil\n}", "func generateClientID(groupID string) string {\n\thostName, err := os.Hostname()\n\tif err != nil || len(hostName) == 0 {\n\t\tnow := time.Now().UnixNano()\n\t\thostName = strconv.FormatInt(now, 10)\n\t}\n\treturn fmt.Sprintf(\"%s-%s\", groupID, hostName)\n}", "func getDiskUUID() string {\n\treturn vboxmanage.GetVMInfoByRegexp(boxName, \"\\\"SATA Controller-ImageUUID-0-0\\\"=\\\"(.*?)\\\"\")\n}", "func getUID(lib utils.PathIdentifier) string {\n\treturn lib.Key()[:5]\n}", "func MakeCustomizedUuid(port, nodeNum int) (string, error) {\n\treDigit := regexp.MustCompile(`\\d`)\n\tgroup1 := fmt.Sprintf(\"%08d\", port)\n\tgroup2 := fmt.Sprintf(\"%04d-%04d-%04d\", nodeNum, nodeNum, nodeNum)\n\tgroup3 := fmt.Sprintf(\"%012d\", port)\n\t// 12345678 1234 1234 1234 123456789012\n\t// new_uuid=\"00000000-0000-0000-0000-000000000000\"\n\tswitch {\n\tcase nodeNum > 0 && nodeNum <= 9:\n\t\tgroup2 = 
reDigit.ReplaceAllString(group2, fmt.Sprintf(\"%d\", nodeNum))\n\t\tgroup3 = reDigit.ReplaceAllString(group3, fmt.Sprintf(\"%d\", nodeNum))\n\t// Number greater than 10 make little sense for this purpose.\n\t// But we keep the rule so that a valid UUID will be formatted in any case.\n\tcase nodeNum >= 10000 && nodeNum <= 99999:\n\t\tgroup2 = fmt.Sprintf(\"%04d-%04d-%04d\", 0, int(nodeNum/10000), nodeNum-10000*int(nodeNum/10000))\n\tcase nodeNum >= 100000 && nodeNum < 1000000:\n\t\tgroup2 = fmt.Sprintf(\"%04d-%04d-%04d\", int(nodeNum/10000), 0, 0)\n\tcase nodeNum >= 1000000:\n\t\treturn \"\", fmt.Errorf(\"node num out of boundaries: %d\", nodeNum)\n\t}\n\treturn fmt.Sprintf(\"%s-%s-%s\", group1, group2, group3), nil\n}" ]
[ "0.77543086", "0.6042349", "0.588953", "0.5722182", "0.5662293", "0.5657924", "0.56159735", "0.5602016", "0.55612344", "0.5539282", "0.5483126", "0.5434467", "0.53669393", "0.5341538", "0.5333852", "0.5304593", "0.5253771", "0.5247491", "0.52331644", "0.5198665", "0.5154344", "0.5145973", "0.5134886", "0.51346993", "0.51130307", "0.5107882", "0.5035245", "0.5032972", "0.499788", "0.4992474", "0.49821556", "0.4977723", "0.49386126", "0.49287057", "0.4919873", "0.48615667", "0.48454717", "0.48411492", "0.48381948", "0.48100737", "0.47995898", "0.47994405", "0.47898623", "0.4773801", "0.47702017", "0.4761265", "0.47596917", "0.47209916", "0.47203493", "0.47079706", "0.47031465", "0.46960726", "0.4695416", "0.46910235", "0.46901464", "0.46834", "0.46738753", "0.46690494", "0.46686134", "0.4668527", "0.4652621", "0.46406546", "0.46314973", "0.46208405", "0.4612477", "0.4611449", "0.46097255", "0.46044987", "0.45978886", "0.45973936", "0.45921338", "0.45876697", "0.45866162", "0.45676672", "0.45667773", "0.4556541", "0.4555783", "0.45526117", "0.45502415", "0.45364282", "0.4533201", "0.45231634", "0.4515219", "0.45089698", "0.44936025", "0.4485255", "0.4484306", "0.44770324", "0.4472675", "0.44713482", "0.44458622", "0.44454214", "0.44371265", "0.44319305", "0.44150296", "0.4407078", "0.43942776", "0.4393268", "0.43924087", "0.43918592" ]
0.8242125
0
StringSliceSubset returns true if b is a subset of a.
StringSliceSubset возвращает true, если b является подмножеством a.
func StringSliceSubset(a []string, b []string) error { aset := make(map[string]bool) for _, v := range a { aset[v] = true } for _, v := range b { _, ok := aset[v] if !ok { return trace.BadParameter("%v not in set", v) } } return nil }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (s String) IsSubset(other String) bool {\n\tif len(s) > len(other) {\n\t\treturn false\n\t}\n\n\tfor k := range s {\n\t\tif _, ok := other[k]; !ok {\n\t\t\treturn false\n\t\t}\n\n\t}\n\treturn true\n}", "func StringsSliceContains(a []string, b string) bool {\n\tif !sort.StringsAreSorted(a) {\n\t\tsort.Strings(a)\n\t}\n\ti := sort.SearchStrings(a, b)\n\treturn i < len(a) && a[i] == b\n}", "func sliceSubset(a, b []string) []string {\n\tresults := []string{}\n\n\tfor _, aValue := range a {\n\t\tif !existsInList(b, aValue) {\n\t\t\tresults = append(results, aValue)\n\t\t}\n\t}\n\n\treturn results\n}", "func EqualsSliceOfString(a, b []string) bool {\n\tif len(a) != len(b) {\n\t\treturn false\n\t}\n\tfor i := 0; i < len(a); i++ {\n\t\tif a[i] != b[i] {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}", "func SliceStringEqual(a, b []string) bool {\n\tif len(a) != len(b) {\n\t\treturn false\n\t}\n\taCopy := make([]string, len(a))\n\tbCopy := make([]string, len(a))\n\tfor x, aVal := range a {\n\t\taCopy[x] = aVal\n\t\tbCopy[x] = b[x]\n\t}\n\tsort.Strings(aCopy)\n\tsort.Strings(bCopy)\n\treturn sortedStringSliceEqual(aCopy, bCopy)\n}", "func sliceEqual(a, b []string) bool {\n\tif len(a) != len(b) {\n\t\treturn false\n\t}\n\tfor _, v := range a {\n\t\tif !stringInSlice(v, b) {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}", "func SliceIncludeSlice(a, b []string) bool {\n\tif EqualSlice(a, b) {\n\t\treturn true\n\t}\n\tfor _, item := range b {\n\t\tif !StringsContain(a, item) {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}", "func SliceStringPEqual(a, b []*string) bool {\n\tif len(a) != len(b) {\n\t\treturn false\n\t}\n\tsa := make([]string, len(a))\n\tsb := make([]string, len(a))\n\tfor x, aPtr := range a {\n\t\tsa[x] = *aPtr\n\t\tsb[x] = *b[x]\n\t}\n\tsort.Strings(sa)\n\tsort.Strings(sb)\n\treturn sortedStringSliceEqual(sa, sb)\n}", "func SliceStringPEqual(a, b []*string) bool {\n\tif len(a) != len(b) {\n\t\treturn false\n\t}\n\tsa := make([]string, 
len(a))\n\tsb := make([]string, len(a))\n\tfor x, aPtr := range a {\n\t\tsa[x] = *aPtr\n\t\tsb[x] = *b[x]\n\t}\n\tsort.Strings(sa)\n\tsort.Strings(sb)\n\tfor x, aVal := range sa {\n\t\tbVal := sb[x]\n\t\tif aVal != bVal {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}", "func StringInSlice(a string, slice []string) bool {\n\tfor _, b := range slice {\n\t\tif b == a {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func (s StringSet) Subset(t StringSet) bool {\n\tfor k := range s {\n\t\tif _, ok := t[k]; !ok {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}", "func SliceContainsSlice(old, new []string) bool {\n\tfor _, newElement := range new {\n\t\tin := false\n\t\tfor _, oldElement := range old {\n\t\t\tif newElement == oldElement {\n\t\t\t\tin = true\n\t\t\t}\n\t\t}\n\t\tif !in {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}", "func (s String) IsProperSubset(other String) bool {\n\treturn len(s) < len(other) && s.IsSubset(other)\n}", "func StringInSlice(a string, slice []string) bool {\n\tfor _, b := range slice {\n\t\tif a == b {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func SliceContainsString(a string, list []string) bool {\n\tfor _, b := range list {\n\t\tif b == a {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func SliceContainsString(list []string, a string) bool {\n\tsort.Strings(list)\n\ti := sort.SearchStrings(list, a)\n\treturn (i < len(list) && list[i] == a)\n}", "func substringContainedInSlice(str string, substrs []string) bool {\n\tfor _, s := range substrs {\n\t\tif strings.Contains(str, s) {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func isSubset(setA, setB []string) bool {\n\tset := make(map[string]bool)\n\tfor _, v := range setB {\n\t\tset[v] = true\n\t}\n\tfor _, v := range setA {\n\t\tif !set[v] {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}", "func StringSlicesIntersection(a, b []string) (c []string) {\n\tm := make(map[string]bool)\n\n\tfor _, item := range a {\n\t\tm[item] = true\n\t}\n\n\tfor 
_, item := range b {\n\t\tif _, ok := m[item]; ok {\n\t\t\tc = append(c, item)\n\t\t}\n\t}\n\treturn\n}", "func stringSliceEqual(a, b []string) bool {\n\tif len(a) != len(b) {\n\t\treturn false\n\t}\n\tfor i := range a {\n\t\tif a[i] != b[i] {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}", "func SliceStringsEq(a []string, b []string) bool {\n\n\tvar aa []string\n\tvar bb []string\n\n\tif a == nil {\n\t\taa = make([]string, 0)\n\t} else {\n\t\taa = a\n\t}\n\n\tif b == nil {\n\t\tbb = make([]string, 0)\n\t} else {\n\t\tbb = b\n\t}\n\n\tif len(aa) != len(bb) {\n\t\treturn false\n\t}\n\n\tfor i := range aa {\n\t\tif aa[i] != bb[i] {\n\t\t\treturn false\n\t\t}\n\t}\n\n\treturn true\n}", "func equalStringSlice(a, b []string) bool {\n\tif len(a) != len(b) {\n\t\treturn false\n\t}\n\n\tfor i := 0; i < len(a); i++ {\n\t\tif a[i] != b[i] {\n\t\t\treturn false\n\t\t}\n\t}\n\n\treturn true\n}", "func StringInSlice(a string, l []string) bool {\n\tfor _, b := range l {\n\t\tif b == a {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func InSlice(a string, slice []string) bool {\n\tfor _, b := range slice {\n\t\tif a == b {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func StringInSlice(a string, list []string) bool {\n\tfor _, b := range list {\n\t\tif b == a {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func StringInSlice(a string, list []string) bool {\n\tfor _, b := range list {\n\t\tif b == a {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func StringInSlice(a string, list []string) bool {\n\tfor _, b := range list {\n\t\tif b == a {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func StringInSlice(a string, list []string) bool {\n\tfor _, b := range list {\n\t\tif b == a {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func StringInSlice(a string, list []string) bool {\n\tfor _, b := range list {\n\t\tif b == a {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func StringInSlice(a string, list []string) bool {\n\tfor _, b 
:= range list {\n\t\tif b == a {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func StringInSlice(a string, list []string) bool {\n\tfor _, b := range list {\n\t\tif b == a {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func StringInSlice(a string, list []string) bool {\n\tfor _, b := range list {\n\t\tif b == a {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func StringInSlice(a string, list []string) bool {\n\tfor _, b := range list {\n\t\tif b == a {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func Subset(first, second []string) bool {\n\tset := make(map[string]bool)\n\tfor _, value := range second {\n\t\tset[value] = true\n\t}\n\n\tfor _, value := range first {\n\t\tif !set[value] {\n\t\t\treturn false\n\t\t}\n\t}\n\n\treturn true\n}", "func SliceEqualsString(x, y []string) bool {\n\tif len(x) != len(y) {\n\t\treturn false\n\t}\n\tdiff := make(map[string]int, len(x))\n\tfor _, ix := range x {\n\t\tdiff[ix]++\n\t}\n\tfor _, iy := range y {\n\t\tif _, ok := diff[iy]; !ok {\n\t\t\treturn false\n\t\t}\n\t\tdiff[iy]--\n\t\tif diff[iy] == 0 {\n\t\t\tdelete(diff, iy)\n\t\t}\n\t}\n\n\treturn len(diff) == 0\n}", "func SliceContainsString(sl []string, st string) bool {\n\tfor _, s := range sl {\n\t\tif s == st {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func stringSliceOverlaps(left []string, right []string) bool {\n\tfor _, s := range left {\n\t\tfor _, t := range right {\n\t\t\tif s == t {\n\t\t\t\treturn true\n\t\t\t}\n\t\t}\n\t}\n\treturn false\n}", "func StringsSliceEqual(a, b []string) bool {\n\tif !sort.StringsAreSorted(a) {\n\t\tsort.Strings(a)\n\t}\n\tif !sort.StringsAreSorted(b) {\n\t\tsort.Strings(b)\n\t}\n\tfor i := range b {\n\t\tif !StringsSliceContains(a, b[i]) {\n\t\t\treturn false\n\t\t}\n\t}\n\tfor i := range a {\n\t\tif !StringsSliceContains(b, a[i]) {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}", "func InStringSlice(ss []string, str string) bool {\n\tfor _, s := range ss {\n\t\tif strings.EqualFold(s, 
str) {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func sliceContainsString(s string, sl []string) bool {\n\tfor _, v := range sl {\n\t\tif v == s {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func inStringSlice(ss []string, str string) bool {\n\tfor _, s := range ss {\n\t\tif strings.EqualFold(s, str) {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func Subset(first, second []string) bool {\n\tset := make(map[string]int)\n\tfor _, value := range second {\n\t\tset[value]++\n\t}\n\n\tfor _, value := range first {\n\t\tif count, found := set[value]; !found {\n\t\t\treturn false\n\t\t} else if count < 1 {\n\t\t\treturn false\n\t\t}\n\t}\n\n\treturn true\n}", "func InSliceString(x string, a []string) bool {\n\tl := len(a)\n\n\tif l == 0 {\n\t\treturn false\n\t}\n\n\tsort.Strings(a)\n\n\ti := sort.SearchStrings(a, x)\n\n\tif i < l && a[i] == x {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func TestEqStringSlice(t *testing.T) {\n\tt.Parallel()\n\tvar tests = []struct {\n\t\ta []string\n\t\tb []string\n\t\texpected bool\n\t}{\n\t\t{[]string{\"foo\", \"bar\"}, []string{\"foo\", \"bar\"}, true},\n\t\t{[]string{\"foo\", \"bar\"}, []string{\"bar\", \"foo\"}, false},\n\t\t{[]string{\"foo\", \"bar\"}, []string{\"bar\"}, false},\n\t\t{[]string{\"foo\", \"bar\"}, []string{\"\\x66\\x6f\\x6f\", \"bar\"}, true},\n\t}\n\tfor _, test := range tests {\n\t\tactual := primitives.EqSlices(&test.a, &test.b)\n\t\tassert.Equal(t, test.expected, actual, \"expected value '%v' | actual : '%v'\", test.expected, actual)\n\t}\n}", "func stringInSlice(a string, list []string) bool {\n\tfor _, b := range list {\n\t\tif b == a {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func stringInSlice(a string, list []string) bool {\n\tfor _, b := range list {\n\t\tif b == a {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func stringInSlice(a string, list []string) bool {\n\tfor _, b := range list {\n\t\tif b == a {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", 
"func stringInSlice(a string, list []string) bool {\n\tfor _, b := range list {\n\t\tif b == a {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func stringInSlice(a string, list []string) bool {\n\tfor _, b := range list {\n\t\tif b == a {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func stringInSlice(a string, list []string) bool {\n\tfor _, b := range list {\n\t\tif b == a {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func stringInSlice(a string, list []string) bool {\n\tfor _, b := range list {\n\t\tif b == a {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func stringInSlice(a string, list []string) bool {\n\tfor _, b := range list {\n\t\tif b == a {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func SliceSubset(slice1, slice2 interface{}) (bool, error) {\n\n\tswitch x := slice1.(type) {\n\tcase []DRAState:\n\t\tstateSlice1, ok1 := slice1.([]DRAState)\n\t\tstateSlice2, ok2 := slice2.([]DRAState)\n\n\t\tif (!ok1) || (!ok2) {\n\t\t\treturn false, fmt.Errorf(\"Error converting slice1 (%v) or slice2 (%v).\", ok1, ok2)\n\t\t}\n\n\t\t//Iterate through all TransitionSystemState in stateSlice1 and make sure that they are in 2.\n\t\tfor _, stateFrom1 := range stateSlice1 {\n\t\t\tif !(stateFrom1.In(stateSlice2)) {\n\t\t\t\treturn false, nil\n\t\t\t}\n\t\t}\n\t\t// If all elements of slice1 are in slice2 then return true!\n\t\treturn true, nil\n\tcase []TransitionSystemState:\n\t\tstateSlice1, ok1 := slice1.([]TransitionSystemState)\n\t\tstateSlice2, ok2 := slice2.([]TransitionSystemState)\n\n\t\tif (!ok1) || (!ok2) {\n\t\t\treturn false, fmt.Errorf(\"Error converting slice1 (%v) or slice2 (%v).\", ok1, ok2)\n\t\t}\n\n\t\t//Iterate through all TransitionSystemState in stateSlice1 and make sure that they are in 2.\n\t\tfor _, stateFrom1 := range stateSlice1 {\n\t\t\tif !(stateFrom1.In(stateSlice2)) {\n\t\t\t\treturn false, nil\n\t\t\t}\n\t\t}\n\t\t// If all elements of slice1 are in slice2 then return true!\n\t\treturn true, 
nil\n\n\tcase []mc.AtomicProposition:\n\t\tapSlice1, ok1 := slice1.([]mc.AtomicProposition)\n\t\tapSlice2, ok2 := slice2.([]mc.AtomicProposition)\n\n\t\tif (!ok1) || (!ok2) {\n\t\t\treturn false, fmt.Errorf(\"Error converting slice1 (%v) or slice2 (%v).\", ok1, ok2)\n\t\t}\n\n\t\t//Iterate through all TransitionSystemState in stateSlice1 and make sure that they are in 2.\n\t\tfor _, apFrom1 := range apSlice1 {\n\t\t\tif !(apFrom1.In(apSlice2)) {\n\t\t\t\treturn false, nil\n\t\t\t}\n\t\t}\n\t\t// If all elements of slice1 are in slice2 then return true!\n\t\treturn true, nil\n\n\tdefault:\n\t\treturn false, fmt.Errorf(\"Unexpected type given to SliceSubset(): %v\", x)\n\t}\n\n}", "func EqualSlice(a, b []string) bool {\n\tsort.Strings(a)\n\tsort.Strings(b)\n\treturn reflect.DeepEqual(a, b)\n}", "func (a Attributes) IsSubset(b Attributes) bool {\n\tm := map[string]struct{}{}\n\tfor _, s := range []string(b) {\n\t\tm[s] = struct{}{}\n\t}\n\tfor _, s := range []string(a) {\n\t\tif _, ok := m[s]; !ok {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}", "func str_is_in_slice(slice []string, str string) bool {\n\tfor _, s := range slice {\n\t\tif s == str {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func isSubsetMatch(tokens []string, test string) bool {\n\ttsa := [32]string{}\n\ttts := tokenizeSubjectIntoSlice(tsa[:0], test)\n\treturn isSubsetMatchTokenized(tokens, tts)\n}", "func StringInSlice(str string, slc []string) bool {\n\tfor _, s := range slc {\n\t\tif strings.EqualFold(s, str) {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func stringInSlice(a string, list []string) bool {\n\tif list == nil {\n\t\treturn false\n\t}\n\n\tfor _, b := range list {\n\t\tif b == a {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func StringSliceContains(slice []string, str string) bool {\n\tfor _, s := range slice {\n\t\tif s == str {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func sliceContainsSlice(smallSlice []core.VarId, bigSlice [][]core.VarId) 
bool {\n\tfor _, slice := range bigSlice {\n\t\tif slicesIdentical(slice, smallSlice) {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func StringSlicesEqual(a, b []string) bool {\n\tif len(a) != len(b) {\n\t\treturn false\n\t}\n\tfor idx, v := range a {\n\t\tif v != b[idx] {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}", "func StringSliceContains(s []string, e string) bool {\n\tfor _, a := range s {\n\t\tif a == e {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func equalSlice(a, b []string) bool {\n\tif len(a) != len(b) {\n\t\treturn false\n\t}\n\tfor i, v := range a {\n\t\tif v != b[i] {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}", "func subset(first, second []string) bool {\n\tset := make(map[string]int)\n\tfor _, value := range second {\n\t\tset[value]++\n\t}\n\n\tfor _, value := range first {\n\t\tif count, found := set[value]; !found {\n\t\t\treturn false\n\t\t} else if count < 1 {\n\t\t\treturn false\n\t\t} else {\n\t\t\tset[value] = count - 1\n\t\t}\n\t}\n\n\treturn true\n}", "func Subset(s1, s2 Set) bool {\n\tif s1.Len() > s2.Len() {\n\t\treturn false\n\t}\n\tfor k := range s1 {\n\t\tif _, ok := s2[k]; !ok {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}", "func StringSlicesEqual(a []string, b []string) bool {\n\tif len(a) != len(b) {\n\t\treturn false\n\t}\n\tfor k := range a {\n\t\tif a[k] != b[k] {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}", "func stringSliceIntersect(s, t []string) []string {\n\tvar res []string\n\tm := make(map[string]bool, len(s))\n\tfor _, x := range s {\n\t\tm[x] = true\n\t}\n\tfor _, y := range t {\n\t\tif m[y] {\n\t\t\tres = append(res, y)\n\t\t}\n\t}\n\treturn res\n}", "func SlicesEqual(a, b []string) bool {\n\tif len(a) != len(b) {\n\t\treturn false\n\t}\n\tfor i, v := range a {\n\t\tif v != b[i] {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}", "func SliceContains(s []string, value string) bool {\n\tfor _, v := range s {\n\t\tif v == value {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn 
false\n}", "func StringInSlice(str string, slice []string) bool {\n\tfor _, s := range slice {\n\t\tif s == str {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func SliceIntersects(a, b interface{}) bool {\n\taValue, bValue := reflect.ValueOf(a), reflect.ValueOf(b)\n\taValueKind, bValueKind := aValue.Kind(), bValue.Kind()\n\n\tif aValueKind != reflect.Slice || bValueKind != reflect.Slice {\n\t\tpanic(fmt.Sprintf(\"one of parameters is not a slice: (%v, %v)\", aValueKind, bValueKind))\n\t}\n\tfor i := 0; i < bValue.Len(); i++ {\n\t\tfor j := 0; j < aValue.Len(); j++ {\n\t\t\tif bValue.Index(i).Interface() == aValue.Index(j).Interface() {\n\t\t\t\treturn true\n\t\t\t}\n\t\t}\n\t}\n\n\treturn false\n}", "func stringSliceContains(ss []string, s string) bool {\n\tfor _, v := range ss {\n\t\tif v == s {\n\t\t\treturn true\n\t\t}\n\t}\n\n\treturn false\n}", "func isSubset(lhs, rhs ref.Val) ref.Val {\n\ta, ok := lhs.(traits.Lister)\n\tif !ok {\n\t\treturn types.ValOrErr(a, \"no such overload\")\n\t}\n\n\tb, ok := rhs.(traits.Lister)\n\tif !ok {\n\t\treturn types.ValOrErr(b, \"no such overload\")\n\t}\n\n\tm := convertToMap(b)\n\n\tfor ai := a.Iterator(); ai.HasNext() == types.True; {\n\t\tva := ai.Next()\n\t\tif m != nil {\n\t\t\tif _, ok := m[va]; !ok {\n\t\t\t\treturn types.False\n\t\t\t}\n\t\t} else {\n\t\t\tif !find(b.Iterator(), va) {\n\t\t\t\treturn types.False\n\t\t\t}\n\t\t}\n\t}\n\n\treturn types.True\n}", "func StringSliceContains(slice []string, elem string) bool {\n\tfor _, v := range slice {\n\t\tif v == elem {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func IsSubString(reads []string, genome string, len int) bool {\n kmers := MakeKMerSet(genome, len)\n for _, read := range reads{\n _, found := kmers[read]\n if (!found) {\n return false\n }\n }\n return true\n}", "func InStringSlice(haystack []string, needle string) bool {\n\tfor _, str := range haystack {\n\t\tif needle == str {\n\t\t\treturn true\n\t\t}\n\t}\n\n\treturn false\n}", "func 
InStringSlice(s string, arr []string) bool {\n\tfor _, v := range arr {\n\t\tif v == s {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func StringInSlice(s string, sl []string) bool {\n\tfor _, val := range sl {\n\t\tif s == val {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func AllStringsInSlice(strings []string, slice []string) bool {\n\tfor _, s := range strings {\n\t\tif !StringInSlice(s, slice) {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}", "func isInStringSlice(x string, elements []string) bool {\n\tfor _, elem := range elements {\n\t\tif elem == x {\n\t\t\treturn true\n\t\t}\n\t}\n\n\treturn false\n}", "func IsInStringSlice(slice []string, search string) bool {\n\tfor _, v := range slice {\n\t\tif v == search {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func (gdb *generalDatabase) IsStringInSlice(needle string, haystack []string) bool {\n\tfor _, s := range haystack {\n\t\tif s == needle {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func StringSliceContains(haystack []string, needle string) bool {\n\tfor _, str := range haystack {\n\t\tif str == needle {\n\t\t\treturn true\n\t\t}\n\t}\n\n\treturn false\n}", "func doSlicesIntersect(s1, s2 []string) bool {\n if s1 == nil || s2 == nil {\n return false\n }\n for _, str := range s1 {\n if isElementInSlice(str, s2) {\n return true\n }\n }\n return false\n}", "func StringSliceContains(slice []string, value string) bool {\n\tinterfaceSlice := make([]interface{}, len(slice))\n\tfor _, item := range slice {\n\t\tvar interfaceItem interface{} = item\n\t\tinterfaceSlice = append(interfaceSlice, interfaceItem)\n\t}\n\tvar interfaceValue interface{} = value\n\treturn InterfaceSliceContains(interfaceSlice, interfaceValue)\n}", "func stringInSlice(s string, sl []string) bool {\n\tfor _, v := range sl {\n\t\tif v == s {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func (s String) IsSuperset(other String) bool {\n\treturn other.IsSubset(s)\n}", "func stringInSlice(a string, 
list []string) bool {\n for _, b := range list {\n if b == a {\n return true\n }\n }\n return false\n}", "func SliceContains(s string, properties []string) (contain bool, prop string) {\n\tfor _, p := range properties {\n\t\tif strings.Contains(s, p) {\n\t\t\treturn true, p\n\t\t}\n\t}\n\treturn false, \"\"\n}", "func StringInSliceCS(a string, list []string) bool {\n\tfor _, b := range list {\n\t\tif a == b {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func ContainsInSlice(s []string, str string) bool {\n\tfor _, val := range s {\n\t\tif val == str {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func sliceContains(haystack []string, needle string) bool {\n\tfor _, s := range haystack {\n\t\tif s == needle {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func sliceContains(slice []string, values ...string) bool {\n\tfor _, s := range slice {\n\t\tfor _, v := range values {\n\t\t\tif strings.EqualFold(strings.TrimSpace(s), strings.TrimSpace(v)) {\n\t\t\t\treturn true\n\t\t\t}\n\t\t}\n\t}\n\n\treturn false\n}", "func StringSliceContains(list []string, s string) bool {\n\tfor _, v := range list {\n\t\tif v == s {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func StringSliceContains(list []string, s string) bool {\n\tfor _, v := range list {\n\t\tif v == s {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func Fsubset(lista, listb []string) bool {\n\tfound := 0\n\tFcompress(&lista)\n\tFcompress(&listb)\n\tfor _, i := range lista {\n\t\tif Fmember(listb, i) {\n\t\t\tfound++\n\t\t}\n\t}\n\tif found < len(lista) {\n\t\treturn false\n\t} else {\n\t\treturn true\n\t}\n}", "func InSlice(v string, sl []string) bool {\n\tfor _, vv := range sl {\n\t\tif vv == v {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func InStringSlice(str string, strSli []string) bool {\n\tfor _, v := range strSli {\n\t\tif str == v {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func SliceContains(slice []string, needle string) bool {\n\tfor _, s := 
range slice {\n\t\tif s == needle {\n\t\t\treturn true\n\t\t}\n\t}\n\n\treturn false\n}" ]
[ "0.72063106", "0.6842188", "0.6821371", "0.6786121", "0.6751392", "0.67450565", "0.6714515", "0.6702434", "0.66884464", "0.6672785", "0.66710854", "0.6625813", "0.6613793", "0.66135323", "0.6598829", "0.65849054", "0.6550833", "0.6545979", "0.6528951", "0.65145385", "0.65053153", "0.6499472", "0.6480875", "0.64218384", "0.6402446", "0.6402446", "0.6402446", "0.6402446", "0.6402446", "0.6402446", "0.6402446", "0.6402446", "0.6402446", "0.63731784", "0.6362486", "0.63611865", "0.63506305", "0.6349333", "0.63462496", "0.63289106", "0.632398", "0.6294954", "0.62924653", "0.6270303", "0.62625664", "0.62625664", "0.62625664", "0.62625664", "0.62625664", "0.62625664", "0.62625664", "0.62625664", "0.62625396", "0.625351", "0.6246504", "0.62450135", "0.62419134", "0.6240835", "0.6237596", "0.6234671", "0.6225487", "0.6220364", "0.6193443", "0.6133501", "0.61300474", "0.6102156", "0.60986376", "0.6094159", "0.60940903", "0.6090623", "0.60892373", "0.6066791", "0.6055128", "0.6050912", "0.6049116", "0.6045325", "0.6028459", "0.6015364", "0.60153127", "0.6010669", "0.60092133", "0.6004618", "0.59971607", "0.59834975", "0.59827524", "0.5971274", "0.59609497", "0.5959726", "0.59588665", "0.59453344", "0.5933795", "0.5911407", "0.5910734", "0.589611", "0.58958054", "0.58958054", "0.5886982", "0.5886703", "0.5882388", "0.58772045" ]
0.8096038
0
UintSliceSubset returns true if b is a subset of a.
UintSliceSubset возвращает true, если b является подмножеством a.
func UintSliceSubset(a []uint16, b []uint16) error { aset := make(map[uint16]bool) for _, v := range a { aset[v] = true } for _, v := range b { _, ok := aset[v] if !ok { return trace.BadParameter("%v not in set", v) } } return nil }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func StringSliceSubset(a []string, b []string) error {\n\taset := make(map[string]bool)\n\tfor _, v := range a {\n\t\taset[v] = true\n\t}\n\n\tfor _, v := range b {\n\t\t_, ok := aset[v]\n\t\tif !ok {\n\t\t\treturn trace.BadParameter(\"%v not in set\", v)\n\t\t}\n\n\t}\n\treturn nil\n}", "func SliceSubset(slice1, slice2 interface{}) (bool, error) {\n\n\tswitch x := slice1.(type) {\n\tcase []DRAState:\n\t\tstateSlice1, ok1 := slice1.([]DRAState)\n\t\tstateSlice2, ok2 := slice2.([]DRAState)\n\n\t\tif (!ok1) || (!ok2) {\n\t\t\treturn false, fmt.Errorf(\"Error converting slice1 (%v) or slice2 (%v).\", ok1, ok2)\n\t\t}\n\n\t\t//Iterate through all TransitionSystemState in stateSlice1 and make sure that they are in 2.\n\t\tfor _, stateFrom1 := range stateSlice1 {\n\t\t\tif !(stateFrom1.In(stateSlice2)) {\n\t\t\t\treturn false, nil\n\t\t\t}\n\t\t}\n\t\t// If all elements of slice1 are in slice2 then return true!\n\t\treturn true, nil\n\tcase []TransitionSystemState:\n\t\tstateSlice1, ok1 := slice1.([]TransitionSystemState)\n\t\tstateSlice2, ok2 := slice2.([]TransitionSystemState)\n\n\t\tif (!ok1) || (!ok2) {\n\t\t\treturn false, fmt.Errorf(\"Error converting slice1 (%v) or slice2 (%v).\", ok1, ok2)\n\t\t}\n\n\t\t//Iterate through all TransitionSystemState in stateSlice1 and make sure that they are in 2.\n\t\tfor _, stateFrom1 := range stateSlice1 {\n\t\t\tif !(stateFrom1.In(stateSlice2)) {\n\t\t\t\treturn false, nil\n\t\t\t}\n\t\t}\n\t\t// If all elements of slice1 are in slice2 then return true!\n\t\treturn true, nil\n\n\tcase []mc.AtomicProposition:\n\t\tapSlice1, ok1 := slice1.([]mc.AtomicProposition)\n\t\tapSlice2, ok2 := slice2.([]mc.AtomicProposition)\n\n\t\tif (!ok1) || (!ok2) {\n\t\t\treturn false, fmt.Errorf(\"Error converting slice1 (%v) or slice2 (%v).\", ok1, ok2)\n\t\t}\n\n\t\t//Iterate through all TransitionSystemState in stateSlice1 and make sure that they are in 2.\n\t\tfor _, apFrom1 := range apSlice1 {\n\t\t\tif !(apFrom1.In(apSlice2)) 
{\n\t\t\t\treturn false, nil\n\t\t\t}\n\t\t}\n\t\t// If all elements of slice1 are in slice2 then return true!\n\t\treturn true, nil\n\n\tdefault:\n\t\treturn false, fmt.Errorf(\"Unexpected type given to SliceSubset(): %v\", x)\n\t}\n\n}", "func sliceSubset(a, b []string) []string {\n\tresults := []string{}\n\n\tfor _, aValue := range a {\n\t\tif !existsInList(b, aValue) {\n\t\t\tresults = append(results, aValue)\n\t\t}\n\t}\n\n\treturn results\n}", "func (ids IDSlice) IsSubsetOf(o IDSlice) bool {\n\tfor _, id := range ids {\n\t\tif !o.Contains(id) {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}", "func sliceContainsSlice(smallSlice []core.VarId, bigSlice [][]core.VarId) bool {\n\tfor _, slice := range bigSlice {\n\t\tif slicesIdentical(slice, smallSlice) {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func isSubset(setA, setB []string) bool {\n\tset := make(map[string]bool)\n\tfor _, v := range setB {\n\t\tset[v] = true\n\t}\n\tfor _, v := range setA {\n\t\tif !set[v] {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}", "func isSubset(lhs, rhs ref.Val) ref.Val {\n\ta, ok := lhs.(traits.Lister)\n\tif !ok {\n\t\treturn types.ValOrErr(a, \"no such overload\")\n\t}\n\n\tb, ok := rhs.(traits.Lister)\n\tif !ok {\n\t\treturn types.ValOrErr(b, \"no such overload\")\n\t}\n\n\tm := convertToMap(b)\n\n\tfor ai := a.Iterator(); ai.HasNext() == types.True; {\n\t\tva := ai.Next()\n\t\tif m != nil {\n\t\t\tif _, ok := m[va]; !ok {\n\t\t\t\treturn types.False\n\t\t\t}\n\t\t} else {\n\t\t\tif !find(b.Iterator(), va) {\n\t\t\t\treturn types.False\n\t\t\t}\n\t\t}\n\t}\n\n\treturn types.True\n}", "func SliceIntersects(a, b interface{}) bool {\n\taValue, bValue := reflect.ValueOf(a), reflect.ValueOf(b)\n\taValueKind, bValueKind := aValue.Kind(), bValue.Kind()\n\n\tif aValueKind != reflect.Slice || bValueKind != reflect.Slice {\n\t\tpanic(fmt.Sprintf(\"one of parameters is not a slice: (%v, %v)\", aValueKind, bValueKind))\n\t}\n\tfor i := 0; i < bValue.Len(); i++ {\n\t\tfor 
j := 0; j < aValue.Len(); j++ {\n\t\t\tif bValue.Index(i).Interface() == aValue.Index(j).Interface() {\n\t\t\t\treturn true\n\t\t\t}\n\t\t}\n\t}\n\n\treturn false\n}", "func IntSliceIntersects(a, b []int) (rb bool) {\n\trb = false\n\tfor _, k := range a {\n\t\tfor _, l := range b {\n\t\t\tif k == l {\n\t\t\t\trb = true\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}\n\treturn\n}", "func Subset(first, second []string) bool {\n\tset := make(map[string]bool)\n\tfor _, value := range second {\n\t\tset[value] = true\n\t}\n\n\tfor _, value := range first {\n\t\tif !set[value] {\n\t\t\treturn false\n\t\t}\n\t}\n\n\treturn true\n}", "func IsSubset(s, t Interface) bool {\n\tfor _, x := range t.Members() {\n\t\tif !s.Contains(x) {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}", "func (b *Builder) IsSubsetOf(rhs interface{}) *predicate.Predicate {\n\tb.p.RegisterPredicate(impl.IsSubsetOf(rhs))\n\tif b.t != nil {\n\t\tb.t.Helper()\n\t\tEvaluate(b)\n\t}\n\treturn &b.p\n}", "func (set *AppleSet) IsSubset(other *AppleSet) bool {\n\tif set.IsEmpty() {\n\t\treturn !other.IsEmpty()\n\t}\n\n\tif other.IsEmpty() {\n\t\treturn false\n\t}\n\n\tset.s.RLock()\n\tother.s.RLock()\n\tdefer set.s.RUnlock()\n\tdefer other.s.RUnlock()\n\n\tfor v := range set.m {\n\t\tif !other.Contains(v) {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}", "func (set Int64Set) IsSubset(other Int64Set) bool {\n\tfor v := range set {\n\t\tif !other.Contains(v) {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}", "func Subset(first, second []string) bool {\n\tset := make(map[string]int)\n\tfor _, value := range second {\n\t\tset[value]++\n\t}\n\n\tfor _, value := range first {\n\t\tif count, found := set[value]; !found {\n\t\t\treturn false\n\t\t} else if count < 1 {\n\t\t\treturn false\n\t\t}\n\t}\n\n\treturn true\n}", "func (s Slice) Sub(b Slice) Slice {\n\tlut := map[uuid.UUID]struct{}{}\n\tfor _, id := range b {\n\t\tlut[id] = struct{}{}\n\t}\n\n\tsub := []uuid.UUID{}\n\tfor _, id := range s {\n\t\tif _, foundInB 
:= lut[id]; !foundInB {\n\t\t\tsub = append(sub, id)\n\t\t}\n\t}\n\treturn sub\n}", "func SliceIncludeSlice(a, b []string) bool {\n\tif EqualSlice(a, b) {\n\t\treturn true\n\t}\n\tfor _, item := range b {\n\t\tif !StringsContain(a, item) {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}", "func (s *IntSet) Subset(y *IntSet) bool {\n\n\tfor _, m := range s.Members() {\n\t\tif !y.Contains(m) {\n\t\t\treturn false\n\t\t}\n\t}\n\n\treturn true\n}", "func EqualsSliceOfCharacteristic(a, b []Characteristic) bool {\n\tif len(a) != len(b) {\n\t\treturn false\n\t}\n\tfor i := 0; i < len(a); i++ {\n\t\tif !EqualsCharacteristic(a[i], b[i]) {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}", "func isSubset(query, subject asciiset.ASCIISet) bool {\n\t// A ⊆ B iff (A ∪ B) = B\n\tunion := query.Union(subject)\n\treturn union.Equals(subject)\n}", "func subset(first, second []string) bool {\n\tset := make(map[string]int)\n\tfor _, value := range second {\n\t\tset[value]++\n\t}\n\n\tfor _, value := range first {\n\t\tif count, found := set[value]; !found {\n\t\t\treturn false\n\t\t} else if count < 1 {\n\t\t\treturn false\n\t\t} else {\n\t\t\tset[value] = count - 1\n\t\t}\n\t}\n\n\treturn true\n}", "func Subset(s1, s2 Set) bool {\n\tif s1.Len() > s2.Len() {\n\t\treturn false\n\t}\n\tfor k := range s1 {\n\t\tif _, ok := s2[k]; !ok {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}", "func (a Attributes) IsSubset(b Attributes) bool {\n\tm := map[string]struct{}{}\n\tfor _, s := range []string(b) {\n\t\tm[s] = struct{}{}\n\t}\n\tfor _, s := range []string(a) {\n\t\tif _, ok := m[s]; !ok {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}", "func SliceContainsSlice(old, new []string) bool {\n\tfor _, newElement := range new {\n\t\tin := false\n\t\tfor _, oldElement := range old {\n\t\t\tif newElement == oldElement {\n\t\t\t\tin = true\n\t\t\t}\n\t\t}\n\t\tif !in {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}", "func ExampleIntSet_IsSubsetOf() {\n\ts1 := 
gset.NewIntSet()\n\ts1.Add([]int{1, 2, 3, 4}...)\n\tvar s2 gset.IntSet\n\ts2.Add([]int{1, 2, 4}...)\n\tfmt.Println(s2.IsSubsetOf(s1))\n\n\t// Output:\n\t// true\n}", "func subSlice(out, a, b []float64)", "func sliceEqual(a, b []string) bool {\n\tif len(a) != len(b) {\n\t\treturn false\n\t}\n\tfor _, v := range a {\n\t\tif !stringInSlice(v, b) {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}", "func isSubsetMatch(tokens []string, test string) bool {\n\ttsa := [32]string{}\n\ttts := tokenizeSubjectIntoSlice(tsa[:0], test)\n\treturn isSubsetMatchTokenized(tokens, tts)\n}", "func (s *Set) IsSubset(strict bool, other *Set) bool {\n\tif strict && len(s.m) >= len(other.m) {\n\t\treturn false\n\t}\nA:\n\tfor v := range s.m {\n\t\tfor i := range other.m {\n\t\t\tif v == i {\n\t\t\t\tcontinue A\n\t\t\t}\n\t\t}\n\t\treturn false\n\t}\n\treturn true\n}", "func (s *ConcurrentSet) IsSubset(other Set) bool {\n\tif s.Len() > other.Len() {\n\t\treturn false\n\t}\n\n\tisSubset := true\n\ts.hash.Range(func(k, v interface{}) bool {\n\t\tif !other.Contains(k) {\n\t\t\tisSubset = false\n\t\t\treturn false\n\t\t}\n\t\treturn true\n\t})\n\treturn isSubset\n}", "func SliceUnion(a, b []interface{}) []interface{} {\n\tm := make(map[interface{}]bool)\n\n\t// iterate through slice a, adding values as\n\t// keys in m\n\tfor _, v := range a {\n\t\tm[v] = true\n\t}\n\n\t// iterate through slice b, adding values not\n\t// in map m to slice a\n\tfor _, v := range b {\n\t\tif _, ok := m[v]; !ok {\n\t\t\ta = append(a, v)\n\t\t}\n\t}\n\n\t// return union of slices a and b\n\treturn a\n}", "func (bb *ByteSliceBuffer) PopSlice(b [][]byte) (n int, ok bool) {\n\tfor wpos := range b {\n\t\tif pos, ok := bb.Buffer.GetReadPos(); ok {\n\t\t\tb[wpos] = bb.data[pos]\n\t\t\tn++\n\t\t} else {\n\t\t\treturn n, false\n\t\t}\n\t}\n\treturn n, true\n}", "func InSliceSlice(qSlice [][]int, x []int) bool {\n\tfor _, v := range qSlice {\n\t\tif len(v) != len(x) {\n\t\t\tcontinue\n\t\t}\n\t\tvar count int\n\t\tfor i := 
0; i < len(v); i++ {\n\n\t\t\tif v[i] != x[i] {\n\t\t\t\tbreak\n\t\t\t} else {\n\t\t\t\tcount++\n\t\t\t\tif count == len(v) {\n\t\t\t\t\treturn true\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t}\n\n\treturn false\n}", "func TestEqIntSlice(t *testing.T) {\n\tt.Parallel()\n\tvar tests = []struct {\n\t\ta []int\n\t\tb []int\n\t\texpected bool\n\t}{\n\t\t{[]int{1, 2}, []int{1, 2}, true},\n\t\t{[]int{1, 2}, []int{2, 1}, false},\n\t\t{[]int{1, 2}, []int{1}, false},\n\t\t{[]int{1, 2}, []int{1, 2, 1}, false},\n\t}\n\tfor _, test := range tests {\n\t\tactual := primitives.EqSlices(&test.a, &test.b)\n\t\tassert.Equal(t, test.expected, actual, \"expected value '%v' | actual : '%v'\", test.expected, actual)\n\t}\n}", "func Fsubset(lista, listb []string) bool {\n\tfound := 0\n\tFcompress(&lista)\n\tFcompress(&listb)\n\tfor _, i := range lista {\n\t\tif Fmember(listb, i) {\n\t\t\tfound++\n\t\t}\n\t}\n\tif found < len(lista) {\n\t\treturn false\n\t} else {\n\t\treturn true\n\t}\n}", "func (s *Set) IsSubset(other *Set) bool {\n\tif other.Len() < s.Len() {\n\t\treturn false\n\t}\n\n\tisSubset := true\n\ts.Range(func(item Value) bool {\n\t\tif !other.Contains(item) {\n\t\t\tisSubset = false\n\t\t}\n\n\t\treturn isSubset\n\t})\n\n\treturn isSubset\n}", "func (s StringSet) Subset(t StringSet) bool {\n\tfor k := range s {\n\t\tif _, ok := t[k]; !ok {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}", "func (s String) IsSubset(other String) bool {\n\tif len(s) > len(other) {\n\t\treturn false\n\t}\n\n\tfor k := range s {\n\t\tif _, ok := other[k]; !ok {\n\t\t\treturn false\n\t\t}\n\n\t}\n\treturn true\n}", "func Uint64SliceEqual(a []uint64, b []uint64) bool {\n\tif len(a) != len(b) {\n\t\treturn false\n\t}\n\tfor i, v := range a {\n\t\tif v != b[i] {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}", "func (s PointBuffer) SubSlice(low int, high int) PointBuffer {\n\tinBounds := low >= 0 && low <= high && high <= s.cap\n\tif !inBounds {\n\t\tpanic(fmt.Errorf(\n\t\t\t\"runtime error: slice bounds 
out of range [%d:%d] with capacity %d\",\n\t\t\tlow, high, s.cap,\n\t\t))\n\t}\n\tvar tVar Point\n\ttSize := unsafe.Sizeof(tVar)\n\ttype internalPtr struct {\n\t\toffset uintptr\n\t\tbucketIdx uint8\n\t\tarenaMask uint16\n\t}\n\tcurrentPtr := *(*internalPtr)(unsafe.Pointer(&s.data))\n\tnewPtr := internalPtr{\n\t\toffset: currentPtr.offset + uintptr(low*int(tSize)),\n\t\tbucketIdx: currentPtr.bucketIdx,\n\t\tarenaMask: currentPtr.arenaMask,\n\t}\n\treturn PointBuffer{\n\t\tdata: *(*arena.Ptr)(unsafe.Pointer(&newPtr)),\n\t\tlen: high - low,\n\t\tcap: s.cap - low,\n\t}\n}", "func IntSliceContains(is []int, s int) (rb bool) {\n\trb = false\n\tfor _, a := range is {\n\t\tif a == s {\n\t\t\trb = true\n\t\t\treturn\n\t\t}\n\t}\n\treturn\n}", "func compareOnIntSlice(a, b []int) bool {\n\tif len(a) != len(b) {\n\t\treturn false\n\t}\n\n\tisEqual := true\n\tfor i := range a {\n\t\tif a[i] != b[i] {\n\t\t\tisEqual = false\n\t\t\tbreak\n\t\t}\n\t}\n\n\treturn isEqual\n}", "func InSlice(a string, slice []string) bool {\n\tfor _, b := range slice {\n\t\tif a == b {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func SliceEquals(slice1, slice2 interface{}) (bool, error) {\n\t//Determine if both slices are of the same type.\n\t// if slice1.(type) != slice2.(type) {\n\t// \tfmt.Println(\"Types of the two slices are different!\")\n\t// \treturn false\n\t// }\n\n\toneSubsetTwo, err := SliceSubset(slice1, slice2)\n\tif err != nil {\n\t\treturn false, fmt.Errorf(\"There was an issue computing SliceSubset(slice1,slice2): %v\", err)\n\t}\n\n\ttwoSubsetOne, err := SliceSubset(slice2, slice1)\n\tif err != nil {\n\t\treturn false, fmt.Errorf(\"There was an issue computing SliceSubset(slice2,slice1): %v\", err)\n\t}\n\n\treturn oneSubsetTwo && twoSubsetOne, nil\n\n}", "func EqualSlice(dst, src []byte) bool {\n\tif len(dst) != len(src) {\n\t\treturn false\n\t}\n\tfor idx, b := range dst {\n\t\tif b != src[idx] {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}", "func 
EqualsSliceOfRefOfUnionSelect(a, b []*UnionSelect) bool {\n\tif len(a) != len(b) {\n\t\treturn false\n\t}\n\tfor i := 0; i < len(a); i++ {\n\t\tif !EqualsRefOfUnionSelect(a[i], b[i]) {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}", "func (el Elements) Subset(sIdx, fIdx int) Elements {\n\tres := Elements{}\n\tswitch el.Type {\n\tcase part3.Int32:\n\t\tres.I32 = el.I32[sIdx:fIdx]\n\tcase part3.Float32:\n\t\tres.F32 = el.F32[sIdx:fIdx]\n\tcase part3.Float64:\n\t\tres.F64 = el.F64[sIdx:fIdx]\n\t}\n\tres.Type = el.Type\n\treturn res\n}", "func sliceContains(s []*dag.Vertex, v *dag.Vertex) bool {\n\tfor _, i := range s {\n\t\tif i == v {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func equalSlice(a, b []string) bool {\n\tif len(a) != len(b) {\n\t\treturn false\n\t}\n\tfor i, v := range a {\n\t\tif v != b[i] {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}", "func CheckSubset(src, trg *Item) bool {\n\ttype obj struct {\n\t\tsrc Attribute\n\t\ttrg Attribute\n\t}\n\tfor _, v := range []obj{\n\t\t{src.part, trg.part},\n\t\t{src.vendor, trg.vendor},\n\t\t{src.product, trg.product},\n\t\t{src.version, trg.version},\n\t\t{src.update, trg.update},\n\t\t{src.edition, trg.edition},\n\t\t{src.language, trg.language},\n\t\t{src.sw_edition, trg.sw_edition},\n\t\t{src.target_sw, trg.target_sw},\n\t\t{src.target_hw, trg.target_hw},\n\t\t{src.other, trg.other},\n\t} {\n\t\tswitch v.src.Comparison(v.trg) {\n\t\tcase Subset, Equal:\n\t\tdefault:\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}", "func (as AllSlice) InSlice(qSlice []Any, x Any) bool {\n\tfor _, v := range qSlice {\n\t\tif v == x {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func intSliceIncludesOther(a, b []int) bool {\n\tif len(b) > len(a) {\n\t\treturn false\n\t}\n\tfor _, n := range b {\n\t\tvar isMatch bool\n\t\tfor _, m := range a {\n\t\t\tif n == m {\n\t\t\t\tisMatch = true\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\tif !isMatch {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}", "func 
InIntSlice(a int, list []int) bool {\n\tfor _, v := range list {\n\t\tif a == v {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func IntInSlice(a uint64, list []uint64) bool {\n\tfor _, b := range list {\n\t\tif b == a {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func sliceContains(container []*html.Node, contained *html.Node) bool {\n\tfor _, n := range container {\n\t\tif nodeContains(n, contained) {\n\t\t\treturn true\n\t\t}\n\t}\n\n\treturn false\n}", "func SlicesEqual(a, b []string) bool {\n\tif len(a) != len(b) {\n\t\treturn false\n\t}\n\tfor i, v := range a {\n\t\tif v != b[i] {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}", "func Subset[T any](collection []T, offset int, length uint) []T {\n\tsize := len(collection)\n\n\tif offset < 0 {\n\t\toffset = size + offset\n\t\tif offset < 0 {\n\t\t\toffset = 0\n\t\t}\n\t}\n\n\tif offset > size {\n\t\treturn []T{}\n\t}\n\n\tif length > uint(size)-uint(offset) {\n\t\tlength = uint(size - offset)\n\t}\n\n\treturn collection[offset : offset+int(length)]\n}", "func Test_AreEqualSlices_unequal(t *testing.T) {\n // create two equal slices\n a := []byte{ 0xDE, 0xAD, 0xBE, 0xEF }\n b := []byte{ 0xCA, 0xFE, 0xBA, 0xBE }\n // make test -> log failure but continue testing\n if AreEqualSlices(a,b) { t.Error(\"unequal slices determined equal\") }\n}", "func Sub(a []string, b []string) []string {\n\tbMap := ToSet(b)\n\tout := []string{}\n\tfor _, v := range a {\n\t\tif !bMap[v] {\n\t\t\tout = append(out, v)\n\t\t}\n\t}\n\treturn out\n}", "func (s *ConcurrentSet) IsProperSubset(other Set) bool {\n\treturn s.Len() < other.Len() && s.IsSubset(other)\n}", "func EqualSlice(a, b []string) bool {\n\tsort.Strings(a)\n\tsort.Strings(b)\n\treturn reflect.DeepEqual(a, b)\n}", "func SliceContains(slice, elem interface{}) (bool, error) {\n\n\tsv := reflect.ValueOf(slice)\n\n\t// Check that slice is actually a slice/array.\n\tif sv.Kind() != reflect.Slice && sv.Kind() != reflect.Array {\n\t\treturn false, errors.New(\"not 
an array or slice\")\n\t}\n\n\t// iterate the slice\n\tfor i := 0; i < sv.Len(); i++ {\n\n\t\t// compare elem to the current slice element\n\t\tif elem == sv.Index(i).Interface() {\n\t\t\treturn true, nil\n\t\t}\n\t}\n\n\t// nothing found\n\treturn false, nil\n\n}", "func TestEqStringSlice(t *testing.T) {\n\tt.Parallel()\n\tvar tests = []struct {\n\t\ta []string\n\t\tb []string\n\t\texpected bool\n\t}{\n\t\t{[]string{\"foo\", \"bar\"}, []string{\"foo\", \"bar\"}, true},\n\t\t{[]string{\"foo\", \"bar\"}, []string{\"bar\", \"foo\"}, false},\n\t\t{[]string{\"foo\", \"bar\"}, []string{\"bar\"}, false},\n\t\t{[]string{\"foo\", \"bar\"}, []string{\"\\x66\\x6f\\x6f\", \"bar\"}, true},\n\t}\n\tfor _, test := range tests {\n\t\tactual := primitives.EqSlices(&test.a, &test.b)\n\t\tassert.Equal(t, test.expected, actual, \"expected value '%v' | actual : '%v'\", test.expected, actual)\n\t}\n}", "func StringsSliceContains(a []string, b string) bool {\n\tif !sort.StringsAreSorted(a) {\n\t\tsort.Strings(a)\n\t}\n\ti := sort.SearchStrings(a, b)\n\treturn i < len(a) && a[i] == b\n}", "func BoolSlicesEqual(a, b []bool) bool {\n\tif len(a) != len(b) {\n\t\treturn false\n\t}\n\tfor idx, v := range a {\n\t\tif v != b[idx] {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}", "func findSubsets(firstSubset, secondSubset, thirdSubset int, n int, slice []int) bool {\n\n\tif firstSubset == 0 && secondSubset == 0 && thirdSubset == 0 {\n\t\treturn true\n\t}\n\n\tif n < 0 {\n\t\treturn false\n\t}\n\n\tif findSubsets(firstSubset-slice[n], secondSubset, thirdSubset, n-1, slice) {\n\t\treturn true\n\t} else if findSubsets(firstSubset, secondSubset-slice[n], thirdSubset, n-1, slice) {\n\t\treturn true\n\t} else if findSubsets(firstSubset, secondSubset, thirdSubset-slice[n], n-1, slice) {\n\t\treturn true\n\t}\n\treturn false\n}", "func IntInSlice(a int, list []int) bool {\n\tfor _, b := range list {\n\t\tif b == a {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func UintSlice(src []*uint) 
[]uint {\n\tdst := make([]uint, len(src))\n\tfor i := 0; i < len(src); i++ {\n\t\tif src[i] != nil {\n\t\t\tdst[i] = *(src[i])\n\t\t}\n\t}\n\treturn dst\n}", "func SliceContains(slice []string, needle string) bool {\n\tfor _, s := range slice {\n\t\tif s == needle {\n\t\t\treturn true\n\t\t}\n\t}\n\n\treturn false\n}", "func equalSlice(a, b []string) bool {\n\tif a == nil && b == nil {\n\t\treturn true\n\t}\n\n\tif a == nil || b == nil {\n\t\treturn false\n\t}\n\n\tif len(a) != len(b) {\n\t\treturn false\n\t}\n\n\tfor i := range a {\n\t\tif a[i] != b[i] {\n\t\t\treturn false\n\t\t}\n\t}\n\n\treturn true\n}", "func Test_AreEqualSlices_one_shorter(t *testing.T) {\n // create two equal slices\n a := []byte{ 0xDE, 0xAD, 0xBE, 0xEF }\n b := []byte{ 0xDE, 0xAD, 0xBE }\n //\tmake test -> log failure but continue testing\n if AreEqualSlices(a,b) { t.Error(\"different length slices determined equal\") }\n}", "func isInSlice(slice []*html.Node, node *html.Node) bool {\n\treturn indexInSlice(slice, node) > -1\n}", "func SliceContains(s []string, value string) bool {\n\tfor _, v := range s {\n\t\tif v == value {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func (o Op) IsSlice3() bool", "func substringContainedInSlice(str string, substrs []string) bool {\n\tfor _, s := range substrs {\n\t\tif strings.Contains(str, s) {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func (w *viewBoxWriter) startsWith(slice []byte, subSlice []byte) bool {\n\tfor key, value := range subSlice {\n\t\tif slice[key] != value {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}", "func EqualsSliceOfRefOfPartitionDefinition(a, b []*PartitionDefinition) bool {\n\tif len(a) != len(b) {\n\t\treturn false\n\t}\n\tfor i := 0; i < len(a); i++ {\n\t\tif !EqualsRefOfPartitionDefinition(a[i], b[i]) {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}", "func isSubsetMatchTokenized(tokens, test []string) bool {\n\t// Walk the target tokens\n\tfor i, t2 := range test {\n\t\tif i >= len(tokens) 
{\n\t\t\treturn false\n\t\t}\n\t\tl := len(t2)\n\t\tif l == 0 {\n\t\t\treturn false\n\t\t}\n\t\tif t2[0] == fwc && l == 1 {\n\t\t\treturn true\n\t\t}\n\t\tt1 := tokens[i]\n\n\t\tl = len(t1)\n\t\tif l == 0 || t1[0] == fwc && l == 1 {\n\t\t\treturn false\n\t\t}\n\n\t\tif t1[0] == pwc && len(t1) == 1 {\n\t\t\tm := t2[0] == pwc && len(t2) == 1\n\t\t\tif !m {\n\t\t\t\treturn false\n\t\t\t}\n\t\t\tif i >= len(test) {\n\t\t\t\treturn true\n\t\t\t}\n\t\t\tcontinue\n\t\t}\n\t\tif t2[0] != pwc && strings.Compare(t1, t2) != 0 {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn len(tokens) == len(test)\n}", "func isOrderedSubset(first, second *callstack) bool {\n\tif len(*first) > len(*second) {\n\t\treturn false\n\t}\n\tset := make(map[string]int)\n\tfor _, value := range *second {\n\t\tset[value] += 1\n\t}\n\n\tfor _, value := range *first {\n\t\tif count, found := set[value]; !found {\n\t\t\treturn false\n\t\t} else if count < 1 {\n\t\t\treturn false\n\t\t} else {\n\t\t\tset[value] = count - 1\n\t\t}\n\t}\n\treturn checkSequence(*first, *second)\n}", "func (c Collection) HasSubsetOf(that Instance) bool {\n\tif len(c) == 0 {\n\t\treturn true\n\t}\n\t// prevent panic when that is nil\n\tif len(that) == 0 {\n\t\treturn false\n\t}\n\tfor _, this := range c {\n\t\tif this.SubsetOf(that) {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func compareSlices(A, B []int) bool {\n\tif len(A) != len(B) {\n\t\treturn false\n\t}\n\n\tsort.Ints(A)\n\tsort.Ints(B)\n\n\tfor i, a := range A {\n\t\tif a != B[i] {\n\t\t\treturn false\n\t\t}\n\t}\n\n\treturn true\n}", "func (s String) IsProperSubset(other String) bool {\n\treturn len(s) < len(other) && s.IsSubset(other)\n}", "func Intersection(a, b AnySlice) AnySlice {\n\tmustBeSlice(a)\n\tmustBeSlice(b)\n\n\taVal := reflect.ValueOf(a)\n\tbVal := reflect.ValueOf(b)\n\taCount := aVal.Len()\n\tbCount := bVal.Len()\n\toutput := makeFilterSlice(a, 0, aCount+bCount)\n\tkeys := make(map[interface{}]bool)\n\n\tfor i := 0; i < aCount; i++ 
{\n\t\tkeys[aVal.Index(i).Interface()] = true\n\t}\n\tfor i := 0; i < bCount; i++ {\n\t\tkey := bVal.Index(i)\n\t\tif _, present := keys[key.Interface()]; present {\n\t\t\toutput = reflect.Append(output, key)\n\t\t}\n\t}\n\treturn output.Interface()\n}", "func SubjectIsSubsetMatch(subject, test string) bool {\n\ttsa := [32]string{}\n\ttts := tokenizeSubjectIntoSlice(tsa[:0], subject)\n\treturn isSubsetMatch(tts, test)\n}", "func EqualInt8Slice(a, b []int8) bool {\n\tif len(a) != len(b) {\n\t\treturn false\n\t}\n\tfor i, v := range a {\n\t\tif v != b[i] {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}", "func RandomArraySubset(inslice []float64, percsample int) []float64 {\n\tsampsizefloat := float64(len(inslice)) * float64(percsample) / 100\n\tif sampsizefloat < 1 {\n\t\terr := errors.New(\"Input array has too few elements\")\n\t\tpanic(err)\n\t}\n\tsampsize := int(sampsizefloat)\n\trandtarget := rangen.RandIntegerInRange(0, len(inslice)-1)\n\tfmt.Println(len(inslice), randtarget, sampsize)\n\tif randtarget+sampsize > len(inslice) {\n\t\tdiff := (randtarget + sampsize) - len(inslice)\n\t\tarrEnd := inslice[randtarget:len(inslice)]\n\t\tarrStart := inslice[0:diff]\n\t\treturn append(arrEnd, arrStart...)\n\t}\n\treturn inslice[randtarget : randtarget+sampsize]\n}", "func (bm ByteMap) Slice(includeKeys map[string]bool) ByteMap {\n\tresult, _ := bm.doSplit(false, includeKeys)\n\treturn result\n}", "func InIntSlice(id int, idSli []int) bool {\n\tfor _, v := range idSli {\n\t\tif id == v {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func (c *SingleItemExpCache) GetUintSlice() (data []uint, ok bool) {\n\tvar itf interface{}\n\tif itf, ok = c.Get(); !ok {\n\t\treturn nil, false\n\t}\n\n\treturn itf.([]uint), true\n}", "func IntersectionSlice(slice1, slice2 []string) []string {\n\tvar result []string\n\tfor _, s1 := range slice1 {\n\t\tinSlice2 := false\n\t\tfor _, s2 := range slice2 {\n\t\t\tif s2 == s1 {\n\t\t\t\tinSlice2 = true\n\t\t\t}\n\t\t}\n\t\tif 
inSlice2 {\n\t\t\tresult = append(result, s1)\n\t\t}\n\t}\n\n\treturn result\n}", "func EqualsSliceOfString(a, b []string) bool {\n\tif len(a) != len(b) {\n\t\treturn false\n\t}\n\tfor i := 0; i < len(a); i++ {\n\t\tif a[i] != b[i] {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}", "func SliceContains(needle interface{}, haystack interface{}) bool {\n\thaystackValue := reflect.ValueOf(haystack)\n\thaystackValueKind := haystackValue.Kind()\n\n\tif haystackValueKind != reflect.Slice {\n\t\tpanic(fmt.Sprintf(\"haystackValue.Kind() should be reflect.Slice, detected: %v\", haystackValueKind))\n\t}\n\n\tfor i := 0; i < haystackValue.Len(); i++ {\n\t\t// panics if slice element points to an unexported struct field\n\t\t// see https://golang.org/pkg/reflect/#Value.Interface\n\t\tif haystackValue.Index(i).Interface() == needle {\n\t\t\treturn true\n\t\t}\n\t}\n\n\treturn false\n}", "func doSlicesIntersect(s1, s2 []string) bool {\n if s1 == nil || s2 == nil {\n return false\n }\n for _, str := range s1 {\n if isElementInSlice(str, s2) {\n return true\n }\n }\n return false\n}", "func Uint64SlicesEqual(a, b []uint64) bool {\n\tif len(a) != len(b) {\n\t\treturn false\n\t}\n\tfor idx, v := range a {\n\t\tif v != b[idx] {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}", "func ByteSlicesEqual(a, b []byte) bool {\n\tif len(a) != len(b) {\n\t\treturn false\n\t}\n\tfor idx, v := range a {\n\t\tif v != b[idx] {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}", "func (b Bits) Slice() (s []int) {\n\tfor x, w := range b.Bits {\n\t\tif w == 0 {\n\t\t\tcontinue\n\t\t}\n\t\tt := mb.TrailingZeros64(w)\n\t\ti := t // index in w of next 1 bit\n\t\tfor {\n\t\t\tn := x<<6 | i\n\t\t\tif n >= b.Num {\n\t\t\t\tbreak\n\t\t\t}\n\t\t\ts = append(s, n)\n\t\t\tw >>= uint(t + 1)\n\t\t\tif w == 0 {\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tt = mb.TrailingZeros64(w)\n\t\t\ti += 1 + t\n\t\t}\n\t}\n\treturn\n}", "func EqualSlice(a, b []float64) bool {\n\tif len(a) != len(b) {\n\t\treturn false\n\t}\n\tfor i := 
0; i < len(a); i++ {\n\t\tif EpsilonEqual(a[0], b[0]) != true {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}", "func InSlice(v string, sl []string) bool {\n\tfor _, vv := range sl {\n\t\tif vv == v {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func sliceContains(haystack []string, needle string) bool {\n\tfor _, s := range haystack {\n\t\tif s == needle {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func contains(a, b []string) bool {\n\tlena, lenb := len(a), len(b)\n\tif lena != lenb {\n\t\treturn false\n\t}\n\n\tfor i := 0; i < lena; i++ {\n\t\tj := 0\n\t\tfor ; j < lenb; j++ {\n\t\t\tif a[i] == b[j] {\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\tif j >= lenb {\n\t\t\treturn false\n\t\t}\n\t}\n\n\treturn true\n}" ]
[ "0.6548862", "0.63709605", "0.6341045", "0.6319706", "0.6259678", "0.6141531", "0.61243564", "0.61081415", "0.6035182", "0.5946461", "0.59458554", "0.59438497", "0.59422666", "0.59412664", "0.59306276", "0.59273285", "0.59237146", "0.5862433", "0.58578426", "0.58344585", "0.58241963", "0.58127195", "0.58062154", "0.58002824", "0.57952744", "0.57418346", "0.5738892", "0.5728441", "0.57275414", "0.57078594", "0.57046425", "0.5703514", "0.569385", "0.569301", "0.56897646", "0.56661737", "0.56307954", "0.5622447", "0.5546814", "0.5514091", "0.550023", "0.54763067", "0.5470725", "0.5465868", "0.5436261", "0.5429419", "0.54113543", "0.5399974", "0.53868926", "0.5333423", "0.53009707", "0.5246746", "0.5245536", "0.5244501", "0.5228069", "0.5216665", "0.5197393", "0.5196998", "0.5187854", "0.51734567", "0.51568633", "0.5146479", "0.5137859", "0.5122403", "0.5122152", "0.5114315", "0.5114083", "0.5113593", "0.51018417", "0.5097926", "0.5093901", "0.5093819", "0.50897145", "0.5086162", "0.50842327", "0.5081607", "0.5065324", "0.50548905", "0.5044117", "0.5041841", "0.5041668", "0.5040351", "0.50333685", "0.5027297", "0.5021661", "0.5017718", "0.5012949", "0.5011614", "0.5009824", "0.49932614", "0.49923214", "0.49911222", "0.4989593", "0.49854794", "0.49847248", "0.49720013", "0.4970265", "0.49692166", "0.4963689", "0.4961032" ]
0.76196134
0
RemoveFromSlice makes a copy of the slice and removes the passed in values from the copy.
RemoveFromSlice создает копию слайса и удаляет переданные значения из копии.
func RemoveFromSlice(slice []string, values ...string) []string { output := make([]string, 0, len(slice)) remove := make(map[string]bool) for _, value := range values { remove[value] = true } for _, s := range slice { _, ok := remove[s] if ok { continue } output = append(output, s) } return output }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func RemoveFromSlice(slice []int, s int) []int {\n\treturn append(slice[:s], slice[s+1:]...)\n}", "func RemoveFromSlice(slice []string, item string) []string {\n\tfor i, value := range slice {\n\t\tif value == item {\n\t\t\treturn append(slice[:i], slice[i+1:]...)\n\t\t}\n\t}\n\treturn slice\n}", "func RemoveFromSlice(fullSlice []int, indexToRemove int) []int {\n\tfullSlice[indexToRemove] = fullSlice[len(fullSlice)-1]\n\treturn fullSlice[:len(fullSlice)-1]\n}", "func (v *Data) RemoveSlice(start, end int) {\n\tdv := *v\n\n\t*v = append(dv[:start], dv[end:]...)\n\n\tv.Truncate(len(dv) - (end - start))\n}", "func (v *IntVec) RemoveSlice(start, end int) {\n\tdv := *v\n\n\t*v = append(dv[:start], dv[end:]...)\n\n\tv.Truncate(len(dv) - (end - start))\n}", "func (k *MutableKey) RemoveSlice(vals []uint64) {\n\tfor _, val := range vals {\n\t\tdelete(k.vals, val)\n\t\tk.synced = false\n\t}\n}", "func removeFromSlice(s []string, toRemove string) ([]string, error) {\n\ti := -1\n\tfor index, item := range s {\n\t\tif item == toRemove {\n\t\t\ti = index\n\t\t}\n\t}\n\n\tif i == -1 {\n\t\treturn nil, fmt.Errorf(\"%v not found in list\", toRemove)\n\t}\n\n\ts[i] = s[len(s)-1]\n\treturn s[:len(s)-1], nil\n}", "func (v *Int32Vec) RemoveSlice(start, end int) {\n\tdv := *v\n\n\t*v = append(dv[:start], dv[end:]...)\n\n\tv.Truncate(len(dv) - (end - start))\n}", "func removeFromSlice(rrs []dns.RR, i int) []dns.RR {\n\tif i >= len(rrs) {\n\t\treturn rrs\n\t}\n\trrs = append(rrs[:i], rrs[i+1:]...)\n\treturn rrs\n}", "func removeFromSlice(array []string, item string) []string {\n\tfor ind, val := range array {\n\t\tif val == item {\n\t\t\tarray[ind] = array[len(array)-1]\n\t\t\treturn array[:len(array)-1]\n\t\t}\n\t}\n\treturn array\n}", "func remove(slice []int, i int) []int {\n // copy(dst, src)\n copy(slice[i:], slice[i+1:]) // over writes the slice from i to end with slice from i+1 to end\n return slice[:len(slice)-1]\n}", "func DeleteInSlice(s interface{}, index int) interface{} 
{\n\tvalue := reflect.ValueOf(s)\n\tif value.Kind() == reflect.Slice {\n\t\t// || value.Kind() == reflect.Array {\n\t\tresult := reflect.AppendSlice(value.Slice(0, index), value.Slice(index+1, value.Len()))\n\t\treturn result.Interface()\n\t}\n\n\tklog.Errorf(\"Only a slice can be passed into this method for deleting an element of it.\")\n\treturn s\n}", "func RemoveFromArray(slice []string, input string) []string {\n\tvar output []string\n\tfor i, item := range slice {\n\t\tif item == input {\n\t\t\toutput = append(slice[:i], slice[i+1:]...)\n\t\t\tbreak\n\t\t}\n\t}\n\treturn output\n}", "func RemoveStringSliceCopy(slice []string, start, end int) []string {\n\tresult := make([]string, len(slice)-(end-start))\n\tat := copy(result, slice[:start])\n\tcopy(result[at:], slice[end:])\n\treturn result\n\n}", "func RemoveItemFromSlice() {\n\tslice := []int{0, 1, 2, 3, 4, 5, 6}\n\tslice = append(slice[:2], slice[3:]...)\n\tfor _, val := range slice {\n\t\tfmt.Println(val)\n\t}\n}", "func StringSliceRemove(list []string, s string) []string {\n\tfor i, v := range list {\n\t\tif v == s {\n\t\t\tlist = append(list[:i], list[i+1:]...)\n\t\t}\n\t}\n\treturn list\n}", "func StringSliceRemove(list []string, s string) []string {\n\tfor i, v := range list {\n\t\tif v == s {\n\t\t\tlist = append(list[:i], list[i+1:]...)\n\t\t}\n\t}\n\treturn list\n}", "func Remove(slice []int, s int) []int {\n\treturn append(slice[:s], slice[s+1:]...)\n}", "func remove(slice []string, s int) []string {\n\treturn append(slice[:s], slice[s+1:]...)\n}", "func remove(slice []string, s int) []string {\n\treturn append(slice[:s], slice[s+1:]...)\n}", "func remove(slice []int, s int) []int {\n\treturn append(slice[:s], slice[s+1:]...)\n}", "func RemoveValues[T comparable](slice, values []T) []T {\n\tif len(slice) == 0 {\n\t\treturn slice\n\t}\n\tkeys := make(map[T]struct{}, len(slice))\n\tfor _, v := range values {\n\t\tkeys[v] = struct{}{}\n\t}\n\n\tvar i int\n\tfor _, v := range slice {\n\t\tif _, ok := 
keys[v]; !ok {\n\t\t\tslice[i] = v\n\t\t\ti++\n\t\t}\n\t}\n\treturn slice[:i]\n}", "func remove(slice []int, i int) []int{\n\tcopy(slice[i:],slice[i+1:])\n\treturn slice[:len(slice)-1]\n}", "func RemoveWithKeepOrder(slice []string, s int) []string {\n\treturn append(slice[:s], slice[s+1:]...)\n}", "func remove(slice []int16, s int) []int16 {\n\treturn append(slice[:s], slice[s+1:]...)\n}", "func Remove(slice []string, value string) []string {\n\tfor i, s := range slice {\n\t\tif s == value {\n\t\t\tslice = append(slice[:i], slice[i+1:]...)\n\t\t\tbreak\n\t\t}\n\t}\n\treturn slice\n}", "func RemoveIf(slice []string, p func(s string) bool) []string {\n\tif IsEmpty(slice) {\n\t\treturn slice\n\t}\n\n\tresult := make([]string, 0)\n\tfor _, s := range slice {\n\t\tif !p(s) {\n\t\t\tresult = append(result, s)\n\t\t}\n\t}\n\n\treturn result\n}", "func RemoveAtIndex(slice []string, s int) []string {\n\treturn append(slice[:s], slice[s+1:]...)\n}", "func removeSliceElements(txOuts []*apitypes.AddressTxnOutput, inds []int) []*apitypes.AddressTxnOutput {\n\t// Remove entries from the end to the beginning of the slice.\n\tsort.Slice(inds, func(i, j int) bool { return inds[i] > inds[j] }) // descending indexes\n\tfor _, g := range inds {\n\t\tif g > len(txOuts)-1 {\n\t\t\tcontinue\n\t\t}\n\t\ttxOuts[g] = txOuts[len(txOuts)-1] // overwrite element g with last element\n\t\ttxOuts[len(txOuts)-1] = nil // nil out last element\n\t\ttxOuts = txOuts[:len(txOuts)-1]\n\t}\n\treturn txOuts\n}", "func ExcludeFromSlice(sl []string, exclude map[string]string) []string {\n\tres := make([]string, len(sl))\n\ti := 0\n\tfor k, v := range sl {\n\t\t_, isExcluded := exclude[v]\n\t\tif isExcluded {\n\t\t\tcontinue\n\t\t}\n\t\tres[k] = v\n\t\ti++\n\t}\n\treturn res[:i]\n}", "func Remove(slice interface{}, i int) {\n\tneogointernal.Opcode2NoReturn(\"REMOVE\", slice, i)\n}", "func deleteRecordFromSlice(slice []Record, id int) []Record {\n return append(slice[:id], slice[id+1:]...)\n}", "func (es 
Slice) RemoveIf(f func(Value) bool) {\n\tnewLen := 0\n\tfor i := 0; i < len(*es.getOrig()); i++ {\n\t\tif f(es.At(i)) {\n\t\t\tcontinue\n\t\t}\n\t\tif newLen == i {\n\t\t\t// Nothing to move, element is at the right place.\n\t\t\tnewLen++\n\t\t\tcontinue\n\t\t}\n\t\t(*es.getOrig())[newLen] = (*es.getOrig())[i]\n\t\tnewLen++\n\t}\n\t// TODO: Prevent memory leak by erasing truncated values.\n\t*es.getOrig() = (*es.getOrig())[:newLen]\n}", "func removeElementFromStringSlice(list []string, elem string) []string {\n\tfor i, e := range list {\n\t\tif e == elem {\n\t\t\treturn append(list[:i], list[i+1:]...)\n\t\t}\n\t}\n\treturn list\n}", "func DeleteSlice(source []*Instance, index int) []*Instance {\n\tif len(source) == 1 {\n\t\treturn make([]*Instance, 0)\n\t}\n\tif index == 0 {\n\t\treturn source[1:]\n\t}\n\tif index == len(source)-1 {\n\t\treturn source[:len(source)-2]\n\t}\n\treturn append(source[0:index-1], source[index+1:]...)\n}", "func FilterSlice[S any](s []S, keep func(S) bool) []S {\n\tvar result []S\n\tfor _, e := range s {\n\t\tif keep(e) {\n\t\t\tresult = append(result, e)\n\t\t}\n\t}\n\treturn result\n}", "func RemoveElements(slice []string, drop []string) []string {\n\tres := []string{}\n\tfor _, s := range slice {\n\t\tkeep := true\n\t\tfor _, d := range drop {\n\t\t\tif s == d {\n\t\t\t\tkeep = false\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\tif keep {\n\t\t\tres = append(res, s)\n\t\t}\n\t}\n\treturn res\n}", "func DeleteFromSlicePtr(parentSlice interface{}, index int) error {\n\tscope.Debugf(\"DeleteFromSlicePtr index=%d, slice=\\n%s\", index, pretty.Sprint(parentSlice))\n\tpv := reflect.ValueOf(parentSlice)\n\n\tif !IsSliceInterfacePtr(parentSlice) {\n\t\treturn fmt.Errorf(\"deleteFromSlicePtr parent type is %T, must be *[]interface{}\", parentSlice)\n\t}\n\n\tpvv := pv.Elem()\n\tif pvv.Kind() == reflect.Interface {\n\t\tpvv = pvv.Elem()\n\t}\n\n\tpv.Elem().Set(reflect.AppendSlice(pvv.Slice(0, index), pvv.Slice(index+1, pvv.Len())))\n\n\treturn nil\n}", 
"func removeStringFromSlice(str string, slice []string) []string {\n\tfor i, v := range slice {\n\t\tif v == str {\n\t\t\t//append the subslice of all elements after this one, to the sublice of all elements before this one\n\t\t\treturn append(slice[:i], slice[i+1:]...)\n\t\t}\n\t}\n\n\t//if the string was not present, just return the slice back\n\treturn slice\n}", "func subtractSlice(x, y []string) []string {\n\tm := make(map[string]bool)\n\n\tfor _, y := range y {\n\t\tm[y] = true\n\t}\n\n\tvar ret []string\n\tfor _, x := range x {\n\t\tif m[x] {\n\t\t\tcontinue\n\t\t}\n\t\tret = append(ret, x)\n\t}\n\n\treturn ret\n}", "func Remove(series interface{}, removes ...interface{}) (interface{}, error) {\n\tst := reflect.TypeOf(series)\n\tsv := reflect.ValueOf(series)\n\n\tswitch {\n\tcase st.Kind() != reflect.Array && st.Kind() != reflect.Slice:\n\t\treturn nil, ErrNotArrayOrSlice\n\tcase st.Elem().Kind() == reflect.Func:\n\t\treturn nil, ErrNotSupported\n\tcase len(removes) == 0:\n\t\treturn series, nil\n\tcase st.Elem().Kind() != reflect.TypeOf(removes[0]).Kind():\n\t\treturn nil, ErrNotCompatible\n\t}\n\n\tremoved := reflect.MakeSlice(reflect.SliceOf(st.Elem()), 0, 0)\n\tswitch st.Elem().Kind() {\n\tcase reflect.Map, reflect.Slice:\n\t\tfor i := 0; i < sv.Len(); i++ {\n\t\t\tfound := false\n\t\t\tfor _, r := range removes {\n\t\t\t\tif reflect.DeepEqual(sv.Index(i).Interface(), r) {\n\t\t\t\t\tfound = true\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\t\t\tif !found {\n\t\t\t\tremoved = reflect.Append(removed, sv.Index(i))\n\t\t\t}\n\t\t}\n\t\treturn removed.Interface(), nil\n\tdefault:\n\t\tfilter := reflect.MakeMapWithSize(reflect.MapOf(st.Elem(), reflect.TypeOf(true)), len(removes))\n\t\tfor _, r := range removes {\n\t\t\tfilter.SetMapIndex(reflect.ValueOf(r), reflect.ValueOf(true))\n\t\t}\n\t\tfor i := 0; i < sv.Len(); i++ {\n\t\t\tif !filter.MapIndex(sv.Index(i)).IsValid() {\n\t\t\t\tremoved = reflect.Append(removed, sv.Index(i))\n\t\t\t}\n\t\t}\n\t\treturn 
removed.Interface(), nil\n\t}\n}", "func (p *SliceOfMap) Drop(indices ...int) ISlice {\n\tif p == nil || len(*p) == 0 {\n\t\treturn p\n\t}\n\n\t// Handle index manipulation\n\ti, j, err := absIndices(len(*p), indices...)\n\tif err != nil {\n\t\treturn p\n\t}\n\n\t// Execute\n\tn := j - i\n\tif i+n < len(*p) {\n\t\t*p = append((*p)[:i], (*p)[i+n:]...)\n\t} else {\n\t\t*p = (*p)[:i]\n\t}\n\treturn p\n}", "func remove2(slice []int, i int) []int{\n\tslice[i] = slice[len(slice)-1]\n return slice[:len(slice)-1]\n}", "func remove(slice []string, i int) []string {\n\treturn append(slice[:i], slice[i+1:]...)\n}", "func SliceDeleteElement(slice interface{}, removalIndex int) (resultSlice interface{}) {\n\tsliceObj := reflect.ValueOf(slice)\n\n\tif sliceObj.Kind() == reflect.Ptr {\n\t\tsliceObj = sliceObj.Elem()\n\t}\n\n\tif sliceObj.Kind() != reflect.Slice {\n\t\treturn nil\n\t}\n\n\tif removalIndex < 0 {\n\t\tremovalIndex = sliceObj.Len() - AbsInt(removalIndex)\n\n\t\tif removalIndex < 0 {\n\t\t\treturn slice\n\t\t}\n\t}\n\n\tif removalIndex > sliceObj.Len()-1 {\n\t\treturn slice\n\t}\n\n\trm := sliceObj.Index(removalIndex)\n\tlast := sliceObj.Index(sliceObj.Len() - 1)\n\n\tif rm.CanSet() {\n\t\trm.Set(last)\n\t} else {\n\t\treturn slice\n\t}\n\n\treturn sliceObj.Slice(0, sliceObj.Len()-1).Interface()\n}", "func RemoveStringInSlice(a string, l []string) ([]string, bool) {\n\ti, in := IndexStringInSlice(a, l)\n\n\tif in {\n\t\tl = append(l[:i], l[i+1:]...)\n\t}\n\treturn l, in\n}", "func FilterSlice(in []int) ([]int, error) {\n\tif in == nil {\n\t\treturn nil, errors.New(\"input slice is nil\")\n\t}\n\n\tfilterList := map[int]bool{\n\t\t1: true,\n\t\t3: true,\n\t}\n\n\tinLen := len(in)\n\tfilteredSlice := make([]int, 0, inLen)\n\n\tfor _, value := range in {\n\t\tif _, ok := filterList[value]; ok {\n\t\t\tfilteredSlice = append(filteredSlice, value)\n\t\t}\n\t}\n\n\treturn filteredSlice, nil\n}", "func removeItemFromEquals(slice []Equal, index int) []Equal 
{\n\tcopy(slice[index:], slice[index+1:])\n\tslice[len(slice)-1] = Equal{}\n\tslice = slice[:len(slice)-1]\n\n\treturn slice\n}", "func remove(list []*IPRange, index int) []*IPRange {\n\tfor i := index + 1; i < len(list); i++ {\n\t\tlist[i-1] = list[i]\n\t}\n\treturn list[:len(list)-1]\n}", "func RemoveAtIndex(slice []string, index int) ([]string, bool) {\n\tif index < 0 || IsEmpty(slice) || index > len(slice) {\n\t\treturn slice, false\n\t}\n\n\treturn append(slice[:index], slice[index+1:]...), true\n}", "func removeAtIndex(source []int, index int) []int {\n\tlastIndex := len(source) - 1\n\tsource[index], source[lastIndex] = source[lastIndex], source[index]\n\treturn source[:lastIndex]\n}", "func removeString(slice []string, s string) (result []string) {\n\tfor _, item := range slice {\n\t\tif item == s {\n\t\t\tcontinue\n\t\t}\n\t\tresult = append(result, item)\n\t}\n\treturn\n}", "func RemoveFromBooks (index int) {\n\tn := len(books)\n\tbooks [index] = books [n - 1]\n\tbooks = books[:n - 1]\n}", "func (obj *Object) RemoveValueFromArray(field string, value interface{}) *Object {\n\tobj.changedData[field] = map[string]interface{}{\"__op\": \"Remove\", \"objects\": []interface{}{value}}\n\treturn obj\n}", "func (obj *Object) RemoveValueFromArrayFromList(field string, value []interface{}) *Object {\n\tobj.changedData[field] = map[string]interface{}{\"__op\": \"Remove\", \"objects\": value}\n\treturn obj\n}", "func removeBlockNodeFromSlice(nodes []*BlockNode, node *BlockNode) []*BlockNode {\n\tfor i := range nodes {\n\t\tif nodes[i].Hash.IsEqual(node.Hash) {\n\t\t\tcopy(nodes[i:], nodes[i+1:])\n\t\t\tnodes[len(nodes)-1] = nil\n\t\t\treturn nodes[:len(nodes)-1]\n\t\t}\n\t}\n\treturn nodes\n}", "func (es ExemplarSlice) RemoveIf(f func(Exemplar) bool) {\n\tnewLen := 0\n\tfor i := 0; i < len(*es.orig); i++ {\n\t\tif f(es.At(i)) {\n\t\t\tcontinue\n\t\t}\n\t\tif newLen == i {\n\t\t\t// Nothing to move, element is at the right 
place.\n\t\t\tnewLen++\n\t\t\tcontinue\n\t\t}\n\t\t(*es.orig)[newLen] = (*es.orig)[i]\n\t\tnewLen++\n\t}\n\t// TODO: Prevent memory leak by erasing truncated values.\n\t*es.orig = (*es.orig)[:newLen]\n}", "func RemoveElements(originalSlice []string, removeElementSlice []string) []string {\n\n\tfor _, elem := range removeElementSlice {\n\t\t// search is linear but can be improved\n\t\tfor i := 0; i < len(originalSlice); i++ {\n\t\t\tif originalSlice[i] == elem {\n\t\t\t\toriginalSlice = append(originalSlice[:i], originalSlice[i+1:]...)\n\t\t\t\ti--\n\t\t\t}\n\t\t}\n\t}\n\treturn originalSlice\n}", "func RemoveStringFromStringSlice(str string, elements []string) []string {\n\tresult := []string{}\n\tfor _, el := range elements {\n\t\tif str != el {\n\t\t\tresult = append(result, el)\n\t\t}\n\t}\n\treturn result\n}", "func removeAll(source []byte, remove []byte) []byte {\n for bytes.Index(source, remove) > -1 {\n pnt := bytes.Index(source, remove)\n source = append(source[:pnt], source[pnt+12:]...)\n }\n return source\n}", "func RemoveItem(slice []int, index int) []int {\n\tsliceLength := len(slice)\n\tif checkOutOfBounds(index, sliceLength) {\n\t\treturn slice\n\t}\n\tremovedSlice := slice[:index]\n\t// If the removed index is not at the end ...\n\tif index+1 < sliceLength {\n\t\t// tack on the rest of the slice after the removed index.\n\t\tremovedSlice = append(removedSlice, slice[index+1:]...)\n\t}\n\treturn removedSlice\n}", "func (s *SegmentChangesWrapper) RemoveFromSegment(segmentName string, keys []string) error {\n\treturn errSegmentStorageNotImplementedMethod\n}", "func (s *ConcurrentSlice) Remove(e int64) {\n\ts.Lock()\n\tdefer s.Unlock()\n\n\ti := s.index(e)\n\ts.items = append(s.items[0:i], s.items[i+1:]...)\n}", "func (a Slice[T]) DeleteAt(index int) Slice[T] {\n\treturn append(a[:index], a[index+1:]...)\n}", "func Delete(slice []int, index int) []int {\n\treturn append(slice[:index], slice[index+1:]...)\n}", "func (list *TList) Trim(start, stop int) 
{\n\tlist.mux.Lock()\n\n\tstart = list.convertPos(start)\n\tstop = list.convertPos(stop)\n\n\tif start > list.Len()-1 {\n\t\tlist.mux.Unlock()\n\t\treturn\n\t}\n\n\tif start < 0 {\n\t\tstart = 0\n\t}\n\n\tif stop > list.Len() {\n\t\tstop = list.Len() - 1\n\t}\n\n\tif stop < start {\n\t\tlist.mux.Unlock()\n\t\treturn\n\t}\n\n\titemsForRemoveFromHead := start\n\titemsForRemoveFromTail := list.Len() - 1 - stop\n\n\tlist.mux.Unlock()\n\n\t// TODO We need a more optimized method\n\tfor i := itemsForRemoveFromHead; i > 0; i-- {\n\t\tlist.HPop()\n\t}\n\n\tfor i := itemsForRemoveFromTail; i > 0; i-- {\n\t\tlist.TPop()\n\t}\n}", "func (cs *Set) Remove(exclude Set) {\n\tif exclude.MatchesAny() {\n\t\tif cs.MatchesAny() {\n\t\t\t*cs = Set{}\n\t\t} else {\n\t\t\t*cs = (*cs)[:0]\n\t\t}\n\t\treturn\n\t}\n\tif len(exclude) == 0 {\n\t\treturn\n\t}\n\ts := *cs\n\tremoved := 0\n\tfor i, cc := range s {\n\t\tif exclude.Contains(cc) {\n\t\t\tremoved++\n\t\t} else {\n\t\t\tif removed > 0 { // shift\n\t\t\t\ts[i-removed] = s[i]\n\t\t\t}\n\t\t}\n\t}\n\tif removed > 0 {\n\t\t*cs = s[:len(s)-removed]\n\t}\n}", "func SubtractStringSlice(ss []string, str string) []string {\n\tvar res []string\n\tfor _, s := range ss {\n\t\tif strings.EqualFold(s, str) {\n\t\t\tcontinue\n\t\t}\n\t\tres = append(res, s)\n\t}\n\treturn res\n}", "func ExampleIntSet_Remove() {\n\ts1 := gset.NewIntSet()\n\ts1.Add([]int{1, 2, 3, 4}...)\n\ts1.Remove(1)\n\tfmt.Println(s1.Slice())\n\n\t// May Output:\n\t// [3 4 2]\n}", "func (list *List) RemoveAll(slice *Slice) {\n\tfor _, e := range slice.Slice() {\n\t\tlist.Remove(e)\n\t}\n}", "func removeIndex(slice []int, index int) []int {\n\tret := make([]int, 0)\n\tret = append(ret, slice[:index]...)\n\treturn append(ret, slice[index+1:]...)\n}", "func (r *Repo) RemoveFromSet(field string, value interface{}, i interface{}) error {\n\treturn r.toggleInSet(\"$pull\", field, value, i)\n}", "func RemoveString(slice []string, s string) (bool, []string) {\n\tremoved := 
false\n\tresult := []string{}\n\tfor _, item := range slice {\n\t\tif item == s {\n\t\t\tremoved = true\n\t\t\tcontinue\n\t\t}\n\t\tresult = append(result, item)\n\t}\n\treturn removed, result\n}", "func SliceDelFirstVal(a interface{}, val interface{}) interface{} {\n\tswitch a.(type) {\n\tcase []int:\n\t\treturn SliceDelFirstValInt(a.([]int), val.(int))\n\tdefault:\n\t\tpanic(\"not support type\")\n\t}\n}", "func (c *Collection) Removing() *Slice {\n\treturn c.unregister\n}", "func remove[T any](s []T, i int) []T {\n\ts[i] = s[len(s)-1]\n\treturn s[:len(s)-1]\n}", "func delete(slice []string, el string) (a []string) {\n\ti := -1\n\tfor j, s := range slice {\n\t\tif s == el {\n\t\t\ti = j\n\t\t}\n\t}\n\ta = append(slice[:i], slice[i+1:]...)\n\treturn a\n}", "func CopySlice(slice []byte) []byte {\n\tcopy := append(slice[:0:0], slice...)\n\treturn copy\n}", "func (s S) SetSlice(key, value string, before, after int) (slice []string, err error) {\n\tvar vv SortedString\n\terr = s.ReadModify(key, &vv, func(_ interface{}) (r bool) {\n\t\tslice = vv.Slice(value, before, after)\n\t\treturn\n\t})\n\treturn\n}", "func (a myArray) splice(start int, data ...string) myArray {\n\tcopy(a[start:], data)\n\treturn a\n}", "func (s strings) Remove(in []string, remove ...string) []string {\n\treturn s.Filter(in, func(item string) bool {\n\t\tfound := false\n\t\tfor _, removeItem := range remove {\n\t\t\tif removeItem == item {\n\t\t\t\tfound = true\n\t\t\t}\n\t\t}\n\t\treturn !found\n\t})\n}", "func (a *Attribute) RemoveValues(v interface{}) {\n\tif !a.IncludeValues(v) {\n\t\treturn\n\t}\n\n\t_i := a.IndexValues(v)\n\n\ta.Values = append(a.Values[:_i], a.Values[_i+1:]...)\n}", "func (s Strings) Remove(a string) []string {\n\ti := sort.SearchStrings(s, a)\n\tif s[i] != a {\n\t\treturn s\n\t}\n\treturn append(s[:i], s[i+1:]...)\n}", "func (_m *MockSegmentManager) RemoveBy(filters ...SegmentFilter) {\n\t_va := make([]interface{}, len(filters))\n\tfor _i := range filters {\n\t\t_va[_i] = 
filters[_i]\n\t}\n\tvar _ca []interface{}\n\t_ca = append(_ca, _va...)\n\t_m.Called(_ca...)\n}", "func (s StringSet) Remove(values ...string) {\n\tfor _, value := range values {\n\t\tdelete(s, value)\n\t}\n}", "func ModifySlice(sliceptr interface{}, eq func(i, j int) bool) {\n\trvp := reflect.ValueOf(sliceptr)\n\tif rvp.Type().Kind() != reflect.Ptr {\n\t\tpanic(badTypeError{rvp.Type()})\n\t}\n\trv := rvp.Elem()\n\tif rv.Type().Kind() != reflect.Slice {\n\t\tpanic(badTypeError{rvp.Type()})\n\t}\n\n\tlength := rv.Len()\n\tdst := 0\n\tfor i := 1; i < length; i++ {\n\t\tif eq(dst, i) {\n\t\t\tcontinue\n\t\t}\n\t\tdst++\n\t\t// slice[dst] = slice[i]\n\t\trv.Index(dst).Set(rv.Index(i))\n\t}\n\n\tend := dst + 1\n\tvar zero reflect.Value\n\tif end < length {\n\t\tzero = reflect.Zero(rv.Type().Elem())\n\t}\n\n\t// for i := range slice[end:] {\n\t// size[i] = 0/nil/{}\n\t// }\n\tfor i := end; i < length; i++ {\n\t\t// slice[i] = 0/nil/{}\n\t\trv.Index(i).Set(zero)\n\t}\n\n\t// slice = slice[:end]\n\tif end < length {\n\t\trv.SetLen(end)\n\t}\n}", "func (vector *Vector) Cut(i int, j int) {\n\t//a = append(a[:i], a[j:]...)\n\t// NOTE If the type of the element is a pointer or a struct with pointer fields,\n\t// which need to be garbage collected, the above implementation of Cut has a potential\n\t// memory leak problem: some elements with values are still referenced by slice a and\n\t// thus can not be collected. 
The following code can fix this problem:\n\n\tcopy((*vector)[i:], (*vector)[j:])\n\tfor k, n := len(*vector)-j+i, len(*vector); k < n; k++ {\n\t\t(*vector)[k] = nil // or the zero value of T\n\t}\n\t*vector = (*vector)[:len(*vector)-j+i]\n}", "func (s *IntSlicer) Clear() {\n\ts.slice = []int{}\n}", "func BenchmarkSliceDel(b *testing.B) {\n\tfor i := 0; i < b.N; i++ {\n\t\tsliceDel([]string{\"a\", \"b\", \"c\"}, \"c\")\n\t}\n}", "func (t *StringSlice) RemoveAt(i int) bool {\n\tif i >= 0 && i < len(t.items) {\n\t\tt.items = append(t.items[:i], t.items[i+1:]...)\n\t\treturn true\n\t}\n\treturn false\n}", "func RemoveAtIndex(data interface{}, index int) (interface{}, error) {\n\t// Get concrete value of data\n\tvalue := reflect.ValueOf(data)\n\n\t// Get the type of value\n\tvalueType := value.Type()\n\n\tif valueType.Kind() != reflect.Array && valueType.Kind() != reflect.Slice {\n\t\terr := errors.New(\"Data parameter is not an array or slice\")\n\t\treturn nil, err\n\t}\n\n\tif index >= value.Len() {\n\t\terr := errors.New(\"Index is greater than data length\")\n\t\treturn nil, err\n\t}\n\n\t// Create slice from value\n\tresultSlice := reflect.AppendSlice(value.Slice(0, index), value.Slice(index+1, value.Len()))\n\n\treturn resultSlice.Interface(), nil\n}", "func removeItemByIndex(slice []string, idx int) []string {\n\n\tcopy(slice[idx:], slice[idx+1:]) // Shift slice[idx+1:] left one index.\n\tslice[len(slice)-1] = \"\" // Erase last element (write zero value).\n\treturn slice[:len(slice)-1] // Truncate slice.\n}", "func RemoveDuplicates(slice []string) []string {\n\treturn MapToSlice(SliceToMap(slice))\n}", "func RemoveFromTaskArray(arr []*Task, ndx int) []*Task {\n\tif ndx < 0 || ndx >= len(arr) {\n\t\treturn arr\n\t}\n\treturn append(arr[0:ndx], arr[ndx+1:]...)\n}", "func remove(s []int, i int) []int {\n\ts[i] = s[len(s)-1]\n\treturn s[:len(s)-1]\n}", "func removeIfEquals(slice *[]string, match string) {\n\ti := 0\n\tp := *slice\n\tfor _, entry := range p 
{\n\t\tif strings.TrimSpace(entry) != strings.TrimSpace(match) {\n\t\t\tp[i] = entry\n\t\t\ti++\n\t\t}\n\t}\n\t*slice = p[0:i]\n}", "func RemoveOne(target interface{}, src []interface{}) []interface{} {\n\ttndx := -1\n\tfor ndx, val := range src {\n\t\tif val == target {\n\t\t\ttndx = ndx\n\t\t\tbreak\n\t\t}\n\t}\n\n\tif tndx > -1 {\n\t\tsrc[tndx] = src[len(src)-1]\n\t\tsrc = src[:len(src)-1]\n\t}\n\n\treturn src\n}", "func (s *Set) Remove(items ...uint32) *Set {\n\tfor _, item := range items {\n\t\tdelete(s.items, item)\n\t}\n\treturn s\n}", "func main() {\n\t//cretaing slice using make buildin function type, len, capacity\n\ti := make([]int, 5, 5)\n\tfmt.Println(i)\n\n\tfruits := []string{\"apple\", \"grape\", \"banana\", \"melon\"}\n\tfmt.Println(fruits)\n\tvar aFruits = fruits[0:3]\n\tfmt.Println(aFruits)\n\tvar bFruits = fruits[1:4]\n\tfmt.Println(bFruits)\n\tvar cfruits = append(fruits, \"papaya\")\n\tfmt.Println(cfruits)\n\n\tstudents := []string{\"ara\", \"fanta\", \"jevan\"}\n\tfmt.Println(students)\n\t//remove slice\n\tstudents = students[0:2]\n\tfmt.Println(students)\n\tstudents = append(students, \"misa\")\n\tfmt.Println(students)\n\tfmt.Println(len(students))\n\tfor index := 0; index < len(students); index++ {\n\t\tfmt.Println(\"ok\")\n\t}\n\ttestSliceNotPointer(students)\n\tfmt.Println(students)\n\ttestSlice(&students)\n\tfmt.Println(students)\n}", "func (v values) Remove(keys ...string) {\n\tfor _, key := range keys {\n\t\tdelete(v, key)\n\t}\n}" ]
[ "0.72074586", "0.716618", "0.7101833", "0.7082994", "0.70630497", "0.6932562", "0.6897101", "0.68811226", "0.6855913", "0.6505445", "0.63579625", "0.63161755", "0.6234457", "0.62126327", "0.61018604", "0.59863406", "0.59863406", "0.59863025", "0.5968467", "0.5968467", "0.5958366", "0.5935262", "0.5926449", "0.59181917", "0.5912026", "0.59089", "0.584173", "0.5821563", "0.581494", "0.576812", "0.5757476", "0.57540053", "0.57474166", "0.57435644", "0.56565726", "0.56502724", "0.56491965", "0.5587767", "0.55721563", "0.5528682", "0.55229205", "0.5520467", "0.5491185", "0.5447094", "0.53916746", "0.5391634", "0.5371571", "0.53422344", "0.5294048", "0.5266688", "0.5265465", "0.5255518", "0.5233026", "0.52301276", "0.5201701", "0.5201284", "0.51872045", "0.51512194", "0.5133709", "0.5133515", "0.50843376", "0.5079699", "0.50658554", "0.50403106", "0.50268364", "0.50254935", "0.5015741", "0.50074464", "0.4999378", "0.4996946", "0.4995773", "0.4988785", "0.49701327", "0.4955219", "0.49416986", "0.4928585", "0.4922169", "0.4901508", "0.49011314", "0.48892424", "0.48877373", "0.48825845", "0.48801306", "0.48761123", "0.48732552", "0.4872013", "0.48683402", "0.4865019", "0.48544756", "0.48516953", "0.48516732", "0.4842482", "0.48337054", "0.4825584", "0.48250613", "0.48172286", "0.48158532", "0.48158246", "0.4811272", "0.47978652" ]
0.76177776
0
ChooseRandomString returns a random string from the given slice.
ChooseRandomString возвращает случайную строку из заданного слайса.
func ChooseRandomString(slice []string) string { switch len(slice) { case 0: return "" case 1: return slice[0] default: return slice[rand.Intn(len(slice))] } }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func ChooseString(l []string) string {\n\tif len(l) == 0 {\n\t\treturn \"\"\n\t}\n\trand.Seed(time.Now().UnixNano())\n\treturn l[rand.Intn(len(l))]\n}", "func (h *Random) StringFromSlice(in []string) string {\n\trandomIndex := rand.Intn(len(in))\n\treturn in[randomIndex]\n}", "func RandomString(values ...string) string {\n\tif len(values) == 0 {\n\t\treturn \"\"\n\t}\n\tif len(values) == 1 {\n\t\treturn values[0]\n\t}\n\treturn values[provider.Intn(len(values))]\n}", "func RandomString(len int) string {\n\tstr := make([]byte, len)\n\trand.Seed(time.Now().UTC().UnixNano())\n\n\tfor i := range str {\n\t\tstr[i] = randomPool[rand.Intn(poolLength)]\n\t}\n\n\treturn string(str)\n}", "func (c combinatorics) RandomString(values []string) string {\n\tif len(values) == 0 {\n\t\treturn \"\"\n\t}\n\tif len(values) == 1 {\n\t\treturn values[0]\n\t}\n\treturn values[RandomProvider().Intn(len(values))]\n}", "func (ur UnicodeRanges) randString(r *rand.Rand) string {\n\tn := r.Intn(20)\n\tsb := strings.Builder{}\n\tsb.Grow(n)\n\tfor i := 0; i < n; i++ {\n\t\tsb.WriteRune(ur[r.Intn(len(ur))].choose(r))\n\t}\n\treturn sb.String()\n}", "func (ur UnicodeRange) randString(r *rand.Rand) string {\n\tn := r.Intn(20)\n\tsb := strings.Builder{}\n\tsb.Grow(n)\n\tfor i := 0; i < n; i++ {\n\t\tsb.WriteRune(ur.choose(r))\n\t}\n\treturn sb.String()\n}", "func (h *Haikunator) randomString(s []string) string {\n\tsize := len(s)\n\n\tif size <= 0 {\n\t\treturn \"\"\n\t}\n\n\treturn s[h.Random.Intn(size)]\n}", "func getRandomString(length int) (string, error) {\n\tbuf := make([]byte, length)\n\tif _, err := rand.Read(buf); err != nil {\n\t\treturn \"\", err\n\t}\n\n\tfor i := 0; i < length; {\n\t\tidx := int(buf[i] & letterIdxMask)\n\t\tif idx < letterSize {\n\t\t\tbuf[i] = letters[idx]\n\t\t\ti++\n\t\t} else {\n\t\t\tif _, err := rand.Read(buf[i : i+1]); err != nil {\n\t\t\t\treturn \"\", err\n\t\t\t}\n\t\t}\n\t}\n\treturn string(buf), nil\n}", "func GetRandomStr(r *rand.Rand, arr []string) 
string {\n\treturn arr[r.Intn(len(arr))]\n}", "func GenerateRandomString(stringLen int) string {\n\tb := make([]byte, stringLen)\n\tfor i := range b {\n\t\tb[i] = charset[seededRand.Intn(len(charset))]\n\t}\n\treturn string(b)\n}", "func StringRand(length int) string {\n\treturn StringRandWithCharset(length, CharsetDefault)\n}", "func RandomString(n int) string {\n\treturn string(Random(n))\n}", "func RandomString(length int, strChars string) string {\n\trand.Seed(time.Now().UnixNano())\n\tchars := []rune(strChars)\n\tfmt.Println(chars)\n\tvar b strings.Builder\n\tfor i := 0; i < length; i++ {\n\t\tb.WriteRune(chars[rand.Intn(len(chars))])\n\t}\n\treturn b.String()\n}", "func RandomString(rand *rand.Rand, size int) string {\n\tsb := strings.Builder{}\n\tfor sb.Len() <= size {\n\t\tsb.WriteRune(RandomRune(rand, 2, 5))\n\t}\n\tret := sb.String()\n\t_, lastRuneSize := utf8.DecodeLastRuneInString(ret)\n\treturn ret[0 : len(ret)-lastRuneSize]\n}", "func RandomString(len int) string {\n\tbytes := make([]byte, len)\n\tfor i := 0; i < len; i++ {\n\t\tbytes[i] = byte(65 + rand.Intn(25))\n\t}\n\treturn string(bytes)\n}", "func randString(length int) string {\n\tb := make([]rune, length)\n\tfor i := range b {\n\t\tb[i] = runes[rand.Intn(len(runes))]\n\t}\n\treturn string(b)\n}", "func RandString(length int) string {\n\trand.Seed(time.Now().UnixNano())\n\trs := make([]string, length)\n\tfor start := 0; start < length; start++ {\n\t\tt := rand.Intn(3)\n\t\tif t == 0 {\n\t\t\trs = append(rs, strconv.Itoa(rand.Intn(10)))\n\t\t} else if t == 1 {\n\t\t\trs = append(rs, string(rand.Intn(26)+65))\n\t\t} else {\n\t\t\trs = append(rs, string(rand.Intn(26)+97))\n\t\t}\n\t}\n\treturn strings.Join(rs, \"\")\n}", "func RandomString(length uint) (string, error) {\n\tr := make([]byte, length)\n\tbs := int(float64(length) * 1.3)\n\tvar err error\n\tfor i, j, rb := 0, 0, []byte{}; uint(i) < length; j++ {\n\t\tif j%bs == 0 {\n\t\t\trb, err = RandomBytes(uint(bs))\n\t\t\tif err != nil 
{\n\t\t\t\treturn \"\", err\n\t\t\t}\n\t\t}\n\t\tif idx := uint(rb[j%int(length)] & bitmask); idx < uint(len(letters)) {\n\t\t\tr[i] = letters[idx]\n\t\t\ti++\n\t\t}\n\t}\n\n\treturn string(r), nil\n}", "func RandomString(length int) string {\n\trandomString := \"\"\n\n\tfor len(randomString) < length {\n\t\trandomString += strconv.Itoa(rand.Int())\n\t}\n\n\treturn randomString[:length]\n}", "func RandomString(maxlen int, charset string) string {\n\tvar s string\n\tfor i := 0; i < rand.Intn(maxlen)+1; i++ {\n\t\ts = s + string(charset[rand.Intn(len(charset))])\n\t}\n\treturn s\n}", "func RandomString(n int) string {\n\tresult := make([]byte, n)\n\tfor i := range result {\n\t\tresult[i] = CharSet[rnd.Intn(len(CharSet))]\n\t}\n\treturn string(result)\n}", "func RandomString(n int) string {\n\tresult := make([]byte, n)\n\tfor i := range result {\n\t\tresult[i] = CharSet[rnd.Intn(len(CharSet))]\n\t}\n\treturn string(result)\n}", "func randomString(n int) string {\n\tresult := make([]byte, n)\n\tfor i := range result {\n\t\tresult[i] = charSet[rnd.Intn(len(charSet))]\n\t}\n\treturn string(result)\n}", "func RandomString(n int) string {\n\treturn RandomStringFrom(n, randomBase)\n}", "func randomString(len int) string {\n\tbytes := make([]byte, len)\n\tfor i := 0; i < len; i++ {\n\t\tbytes[i] = byte(randomInt(97, 122))\n\t}\n\treturn string(bytes)\n}", "func RandomString(length int) string {\n\tsb := strings.Builder{}\n\n\trand.Seed(time.Now().UnixNano())\n\tfor i := 0; i < length; i++ {\n\t\tsb.WriteByte(_letters[rand.Intn(len(_letters))])\n\t}\n\n\treturn sb.String()\n}", "func RandString(n int, allowedChars ...[]rune) string {\n\tvar letters []rune\n\tif len(allowedChars) == 0 {\n\t\tletters = defaultLetters\n\t} else {\n\t\tletters = allowedChars[0]\n\t}\n\tb := make([]rune, n)\n\tfor i := range b {\n\t\tb[i] = letters[rand.Intn(len(letters))]\n\t}\n\treturn string(b)\n}", "func randomString(len int) string {\n\tbytes := make([]byte, len)\n\tfor i := 0; i < len; i++ 
{\n\t\tbytes[i] = byte(randomInt(65, 90))\n\t}\n\treturn string(bytes)\n}", "func randomString(len int) string {\n\tbytes := make([]byte, len)\n\tfor i := 0; i < len; i++ {\n\t\tbytes[i] = byte(randomInt(65, 90))\n\t}\n\treturn string(bytes)\n}", "func randomString(len int) string {\n\tbytes := make([]byte, len)\n\tfor i := 0; i < len; i++ {\n\t\tbytes[i] = byte(randomInt(65, 90))\n\t}\n\treturn string(bytes)\n}", "func randomString(len int) string {\n\tbytes := make([]byte, len)\n\tfor i := 0; i < len; i++ {\n\t\tbytes[i] = byte(randomInt(65, 90))\n\t}\n\treturn string(bytes)\n}", "func getRandString(n int) string {\n\tpool := []rune(\"ABCDEFGHIJKLMNOPQRSTUVWXYZÜÖÄabcdefghijklmnopqrstuvwxyzüöä\")\n\ts := make([]rune, n)\n\n\tfor pos := range s {\n\t\ts[pos] = pool[rand.Intn(len(pool))]\n\t}\n\n\treturn string(s)\n}", "func RandomString(length int) string {\n\tvar seed *rand.Rand = rand.New(rand.NewSource(time.Now().UTC().UnixNano()))\n\n\tb := make([]byte, length)\n\tfor idx := range b {\n\t\tb[idx] = charset[seed.Intn(len(charset))]\n\t}\n\treturn string(b)\n}", "func randomString(length int) string {\n\treturn stringWithCharset(length, charset)\n}", "func randString(r *rand.Rand) string {\n\treturn defaultUnicodeRanges.randString(r)\n}", "func RandStr(n int) string {\n\treturn RandStringRunes(n)\n}", "func GetRandomString(length int) string {\n\tb := make([]byte, length)\n\trnd := rand.New(&source{})\n\n\tfor i := range b {\n\t\tc := rnd.Intn(allowedCharsSize)\n\t\tb[i] = allowedChars[c]\n\t}\n\n\treturn string(b)\n}", "func RandString(n int) string {\n\tb := make([]byte, n)\n\tfor i := range b {\n\t\tb[i] = CHA[rand.Intn(len(CHA))]\n\t}\n\treturn string(b)\n}", "func StrSliceRandItem(strsli []string) string {\n\tif len(strsli) == 0 {\n\t\treturn \"\"\n\t}\n\tn := time.Now().UnixNano() % int64(len(strsli))\n\treturn strsli[n]\n}", "func RandString(chars string, idxBits uint, idxMask int64, idxMax int, n int) string {\n\tb := make([]byte, n)\n\t// A rand.Int63() 
generates 63 random bits, enough for idCharIdxMax chars!\n\tfor i, cache, remain := n-1, rand.Int63(), idxMax; i >= 0; {\n\t\tif remain == 0 {\n\t\t\tcache, remain = rand.Int63(), idxMax\n\t\t}\n\t\tif idx := int(cache & idxMask); idx < len(chars) {\n\t\t\tb[i] = chars[idx]\n\t\t\ti--\n\t\t}\n\t\tcache >>= idxBits\n\t\tremain--\n\t}\n\n\treturn string(b)\n}", "func RandomString(length int) string {\n\tb := make([]rune, length)\n\tfor i := range b {\n\t\tb[i] = letterRunes[rand.Intn(len(letterRunes))]\n\t}\n\treturn string(b)\n}", "func RandomString(length int) string {\n\n\tconst charset = \"abcdefghijklmnopqrstuvwxyz\" +\n\t\t\"ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789\" +\n\t\t\"1234567890\"\n\n\tvar seededRand *rand.Rand = rand.New(\n\t\trand.NewSource(time.Now().UnixNano()))\n\n\tb := make([]byte, length)\n\tfor i := range b {\n\t\tb[i] = charset[seededRand.Intn(len(charset))]\n\t}\n\treturn string(b)\n}", "func RandomString(len int) string {\n\tbytes := make([]byte, len)\n\tfor i := 0; i < len; i++ {\n\t\tbytes[i] = byte(65 + rand.Intn(25)) // A=65 and Z = 65+25\n\t}\n\treturn string(bytes)\n}", "func ShuffleStringSlice(a []string) {\n\tfor i := range a {\n\t\tj := rand.Intn(i + 1)\n\t\ta[i], a[j] = a[j], a[i]\n\t}\n}", "func RandString(n int) string {\n\trandom := rand.New(rand.NewSource(time.Now().UnixNano()))\n\tb := make([]byte, n)\n\tfor i := range b {\n\t\tb[i] = letterBytes[random.Intn(len(letterBytes))]\n\t}\n\treturn string(b)\n}", "func randString(n int, characterSet string) string {\n\tb := make([]byte, n)\n\tfor i := range b {\n\t\tb[i] = characterSet[rand.Intn(len(characterSet))]\n\t}\n\treturn string(b)\n}", "func GetRandomString(length int) string {\n\tstr := \"0123456789abcdefghijklmnopqrstuvwxyz\"\n\tbytes := []byte(str)\n\tresult := []byte{}\n\tr := rand.New(rand.NewSource(time.Now().UnixNano()))\n\tfor i := 0; i < length; i++ {\n\t\tresult = append(result, bytes[r.Intn(len(bytes))])\n\t}\n\treturn string(result)\n}", "func RandString(n int) 
string {\n\tvar randBytes = make([]byte, n)\n\trand.Read(randBytes)\n\n\tfor i, b := range randBytes {\n\t\trandBytes[i] = letters[b%byte(len(letters))]\n\t}\n\n\treturn string(randBytes)\n}", "func RandomString(length int) string {\n\trunes := make([]rune, length)\n\tfor i := range runes {\n\t\trunes[i] = allowedCharactersRunes[rand.Intn(len(allowedCharactersRunes))]\n\t}\n\treturn string(runes)\n}", "func RandString(n int) string {\n\tb := make([]byte, n)\n\tfor i := range b {\n\t\tb[i] = letterBytes[rand.Intn(len(letterBytes))]\n\t}\n\n\treturn string(b)\n}", "func randStr(l int) string {\n\tbytes := make([]byte, l)\n\tfor i := 0; i < l; i++ {\n\t\tbytes[i] = pool[rand.Intn(len(pool))]\n\t}\n\treturn string(bytes)\n}", "func RandomString(length int) string {\n\tseed := rand.New(rand.NewSource(time.Now().UnixNano()))\n\n\tb := make([]byte, length)\n\tfor i := range b {\n\t\tb[i] = charset[seed.Intn(len(charset))]\n\t}\n\n\treturn string(b)\n}", "func RandomString(randLength int, randType string) (result string) {\n\tvar (\n\t\tnum = \"0123456789\"\n\t\tlower = \"abcdefghijklmnopqrstuvwxyz\"\n\t\tupper = \"ABCDEFGHIJKLMNOPQRSTUVWXYZ\"\n\t)\n\n\tb := bytes.Buffer{}\n\n\tswitch {\n\tcase strings.Contains(randType, \"0\"):\n\t\tb.WriteString(num)\n\tcase strings.Contains(randType, \"A\"):\n\t\tb.WriteString(upper)\n\tdefault:\n\t\tb.WriteString(lower)\n\t}\n\n\tstr := b.String()\n\tstrLen := len(str)\n\n\tb = bytes.Buffer{}\n\n\tfor i := 0; i < randLength; i++ {\n\t\tn, err := rand.Int(rand.Reader, big.NewInt(int64(strLen)))\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\n\t\tb.WriteByte(str[int32(n.Int64())])\n\t}\n\n\treturn b.String()\n}", "func randChoice(list []string) string {\n randIndex := rand.Intn(len(list))\n return list[randIndex]\n}", "func randomString() string {\n\tr := make([]rune, 20)\n\tfor i := range r {\n\t\tr[i] = letters[rand.Intn(len(letters))]\n\t}\n\treturn string(r)\n}", "func randString(length int) string {\n\tcharset := 
\"abcdefghijklmnopqrstuvwxyz\" +\n\t\t\"ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789\"\n\tvar seededRand *rand.Rand = rand.New(\n\t\trand.NewSource(time.Now().UnixNano()))\n\tb := make([]byte, length)\n\tfor i := range b {\n\t\tb[i] = charset[seededRand.Intn(len(charset))]\n\t}\n\treturn string(b)\n}", "func RandomString() string {\n\tsuffix := make([]byte, randSuffixLen)\n\n\tfor i := range suffix {\n\t\tsuffix[i] = letterBytes[rand.Intn(len(letterBytes))]\n\t}\n\treturn string(suffix)\n}", "func genString(length int) string {\n\tb := make([]byte, length)\n\tfor i := range b {\n\t\tb[i] = charset[seededRand.Intn(len(charset))]\n\t}\n\treturn string(b)\n}", "func RandomString(length int) string {\n\trand.Seed(time.Now().UnixNano())\n\tchars := []rune(\"ABCDEFGHIJKLMNOPQRSTUVWXYZ\" + \"abcdefghijklmnopqrstuvwxyz\" + \"0123456789\")\n\tvar b strings.Builder\n\tfor i := 0; i < length; i++ {\n\t\tb.WriteRune(chars[rand.Intn(len(chars))])\n\t}\n\tstr := b.String()\n\treturn str\n}", "func RandString(n int) string {\n\tb := make([]rune, n)\n\tfor i := range b {\n\t\tb[i] = letters[rand.Intn(len(letters))]\n\t}\n\treturn string(b)\n}", "func RandString(n int) string {\n\tb := make([]rune, n)\n\tfor i := range b {\n\t\tb[i] = letters[rand.Intn(len(letters))]\n\t}\n\treturn string(b)\n}", "func RandString(n int) string {\n\tb := make([]rune, n)\n\tfor i := range b {\n\t\tb[i] = letterRunes[rand.Intn(len(letterRunes))]\n\t}\n\treturn string(b)\n}", "func RandString(n int) string {\n\tb := make([]rune, n)\n\tfor i := range b {\n\t\tb[i] = letterRunes[rand.Intn(len(letterRunes))]\n\t}\n\treturn string(b)\n}", "func RandomString(n int) string {\n\tb := make([]byte, n)\n\tfor i := range b {\n\t\tb[i] = letterBytes[rand.Intn(len(letterBytes))]\n\t}\n\treturn string(b)\n}", "func randString(n int) string {\n\tb := make([]byte, n)\n\tfor i := range b {\n\t\tb[i] = charset[rand.Intn(len(charset))]\n\t}\n\treturn string(b)\n}", "func GenerateRandomString(length int) string {\n\tif length > 0 
{\n\t\trand.Seed(time.Now().UnixNano())\n\t\tchars := make([]rune, length)\n\t\tfor i := range chars {\n\t\t\tchars[i] = letterRunes[rand.Intn(len(letterRunes))]\n\t\t}\n\n\t\treturn string(chars)\n\t}\n\n\treturn \"\"\n}", "func RandomString(length int) string {\n\tsrc := rand.NewSource(time.Now().UnixNano())\n\tb := make([]byte, length)\n\tfor i, cache, remain := length-1, src.Int63(), letterIndexMax; i >= 0; {\n\t\tif remain == 0 {\n\t\t\tcache, remain = src.Int63(), letterIndexMax\n\t\t}\n\t\tif idx := int(cache & letterIndexMask); idx < len(letterBytes) {\n\t\t\tb[i] = letterBytes[idx]\n\t\t\ti--\n\t\t}\n\t\tcache >>= letterIndexBits\n\t\tremain--\n\t}\n\n\treturn string(b)\n}", "func createRandomString(starterString string) string {\n\tresult := starterString + randomString(8)\n\treturn result\n}", "func generateRandString(length int) string {\n\tb := make([]rune, length)\n\tfor i := range b {\n\t\tb[i] = letterRunes[rand.Intn(len(letterRunes))]\n\t}\n\treturn string(b)\n}", "func RandomString(length int) string {\n\tbuf := make([]byte, length)\n\tif _, err := rand.Read(buf); err != nil {\n\t\tpanic(err)\n\t}\n\treturn base64.StdEncoding.EncodeToString(buf)\n}", "func makeRandomString(bytesLength int) []byte {\n\tbyteVar := make([]byte, bytesLength)\n\tchars := \"abcdefghijklmnopqrstuvwxyz123456789\" // our posibilities\n\tfor i := range byteVar {\n\t\tx := genPseudoRand()\n\t\tbyteVar[i] = chars[x.Intn(len(chars))]\n\t}\n\treturn byteVar\n}", "func RandomString(strlen int) string {\r\n\trand.Seed(time.Now().UTC().UnixNano())\r\n\tconst chars = \"abcdefghijklmnopqrstuvwxyz0123456789\"\r\n\tresult := make([]byte, strlen)\r\n\tfor i := 0; i < strlen; i++ {\r\n\t\tresult[i] = chars[rand.Intn(len(chars))]\r\n\t}\r\n\treturn string(result)\r\n}", "func RandomString(strlen int) string {\r\n\trand.Seed(time.Now().UTC().UnixNano())\r\n\tconst chars = \"abcdefghijklmnopqrstuvwxyz0123456789\"\r\n\tresult := make([]byte, strlen)\r\n\tfor i := 0; i < strlen; i++ 
{\r\n\t\tresult[i] = chars[rand.Intn(len(chars))]\r\n\t}\r\n\treturn string(result)\r\n}", "func StrRandom(length int) string {\n\tresult := make([]rune, length)\n\tfor i := range result {\n\t\tresult[i] = alphaNumeric[rand.Intn(len(alphaNumeric))]\n\t}\n\treturn string(result)\n}", "func (rs *RandString) String() string {\n\trs.mutex.RLock()\n\tdefer rs.mutex.RUnlock()\n\tif rs.len == 0 {\n\t\treturn \"\"\n\t}\n\trnd := rand.Intn(rs.len)\n\tretString := rs.strings[rnd]\n\tretString = strings.ReplaceAll(retString, \"{rnd}\", randomAlfanum(6))\n\tretString = strings.ReplaceAll(retString, \"{rndnum}\", randomNum(12))\n\treturn retString\n}", "func RandomString() string {\n\tvar letter = []rune(\"ABCDEFGHIJKLMNOPQRSTUVWXYZ\")\n\tvar number = []rune(\"0123456789\")\n\n\tb := make([]rune, 2)\n\tfor i := 0; i < 2; i++ {\n\t\tb[i] = letter[rand.Intn(len(letter))]\n\t}\n\n\tc := make([]rune, 3)\n\tfor i := 0; i < 3; i++ {\n\t\tc[i] = number[rand.Intn(len(number))]\n\t}\n\n\treturn string(append(b, c...))\n}", "func RandStr() string {\n\tchars := \"abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890-_\"\n\tnum := 100\n\ts := \"\"\n\tfor i := 0; i < num; i++ {\n\t\ts += string(chars[rand.Intn(len(chars))])\n\t}\n\treturn s\n}", "func RandString(n int) string {\n\tb := make([]byte, n)\n\tfor i := range b {\n\t\tb[i] = letterBytes[rand.Int63()%int64(len(letterBytes))]\n\t}\n\treturn string(b)\n}", "func randomString(prefix string, length int) string {\n\tb := make([]rune, length)\n\tfor i := range b {\n\t\tb[i] = letterRunes[rand.Intn(len(letterRunes))]\n\t}\n\treturn prefix + string(b)\n}", "func RandStr(length int) string {\n\tstr := \"abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789\"\n\treturn RandSomeStr(str, length)\n}", "func RandString(length int, letter letter) string {\n\tb := make([]byte, length)\n\tr := rand.New(rand.NewSource(time.Now().UnixNano()))\n\tfor i := range b {\n\t\tb[i] = letter[r.Int63()%int64(len(letter))]\n\t}\n\treturn 
string(b)\n}", "func randomString(n int) string {\n\trand.Seed(time.Now().UnixNano())\n\n\tvar letter = []rune(runeString)\n\n\tb := make([]rune, n)\n\tfor i := range b {\n\t\tb[i] = letter[rand.Intn(len(letter))]\n\t}\n\treturn string(b)\n}", "func RandString(length int) string {\n\tbytes := make([]byte, length)\n\tfor i := 0; i < length; i++ {\n\t\tb := common.BASE_SALT[rand.Intn(len(common.BASE_SALT))]\n\t\tbytes[i] = byte(b)\n\t}\n\treturn string(bytes)\n}", "func (this *MatchString) GetRandStr(n int) string{\n\tr := rand.New(rand.NewSource(time.Now().UnixNano()))\n\tre := r.Intn(n)\n\treturn strconv.Itoa(re)\n}", "func pickRandomWord(data []string) string {\n\trand.Seed(time.Now().UnixNano())\n\treturn strings.Trim(strings.Title(data[rand.Intn(len(data))]), \"\\n\")\n}", "func RandString(n int) string {\n\tconst letterBytes = \"abcdefghijklmnopqrstuvwxyz\"\n\tb := make([]byte, n)\n\tfor i := range b {\n\t\tb[i] = letterBytes[rand.Intn(len(letterBytes))]\n\t}\n\treturn string(b)\n}", "func RandStr(length int) string {\n\tchars := []byte{}\nMAIN_LOOP:\n\tfor {\n\t\tval := rand.Int63()\n\t\tfor i := 0; i < 10; i++ {\n\t\t\tv := int(val & 0x3f) // rightmost 6 bits\n\t\t\tif v >= 62 { // only 62 characters in strChars\n\t\t\t\tval >>= 6\n\t\t\t\tcontinue\n\t\t\t} else {\n\t\t\t\tchars = append(chars, strChars[v])\n\t\t\t\tif len(chars) == length {\n\t\t\t\t\tbreak MAIN_LOOP\n\t\t\t\t}\n\t\t\t\tval >>= 6\n\t\t\t}\n\t\t}\n\t}\n\n\treturn string(chars)\n}", "func RandStr(length int) string {\n\tchars := []byte{}\nMAIN_LOOP:\n\tfor {\n\t\tval := rand.Int63()\n\t\tfor i := 0; i < 10; i++ {\n\t\t\tv := int(val & 0x3f) // rightmost 6 bits\n\t\t\tif v >= 62 { // only 62 characters in strChars\n\t\t\t\tval >>= 6\n\t\t\t\tcontinue\n\t\t\t} else {\n\t\t\t\tchars = append(chars, strChars[v])\n\t\t\t\tif len(chars) == length {\n\t\t\t\t\tbreak MAIN_LOOP\n\t\t\t\t}\n\t\t\t\tval >>= 6\n\t\t\t}\n\t\t}\n\t}\n\n\treturn string(chars)\n}", "func RandomString(n int) *string {\n\tvar 
letter = []rune(\"abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789\")\n\n\tb := make([]rune, n)\n\tfor i := range b {\n\t\tb[i] = letter[rand.Intn(len(letter))]\n\t}\n\n\ts := string(b)\n\treturn &s\n}", "func RandomString(strlen int) string {\n rand.Seed(time.Now().UTC().UnixNano())\n const chars = \"abcdefghijklmnopqrstuvwxyz\"\n result := make([]byte, strlen)\n for i := 0; i < strlen; i++ {\n result[i] = chars[rand.Intn(len(chars))]\n }\n return string(result)\n}", "func RandomString(n int) string {\n\tbuffer := make([]byte, n)\n\trand.Read(buffer)\n\n\tfor k, v := range buffer {\n\t\tbuffer[k] = safeChars[v%byte(len(safeChars))]\n\t}\n\n\treturn string(buffer)\n}", "func RandomString(n int) string {\n\tb := make([]byte, n)\n\tfor i := range b {\n\t\tb[i] = letterBytes[rand.Int63()%int64(len(letterBytes))]\n\t}\n\treturn string(b)\n}", "func RandString(n int) string {\n\tvar letters = []rune(\"abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ\")\n\tb := make([]rune, n)\n\tfor i := range b {\n\t\tb[i] = letters[rand.Intn(len(letters))]\n\t}\n\treturn string(b)\n}", "func RandomString(n int) string {\n\tr := rand.New(rand.NewSource(time.Now().UnixNano()))\n\tvar str string\n\tlength := len(alphanum)\n\tfor i := 0; i < n; i++ {\n\t\ta := alphanum[r.Intn(len(alphanum))%length]\n\t\tstr += string(a)\n\t}\n\treturn str\n}", "func RandString(n int) string {\n\tgen := rand.New(rand.NewSource(time.Now().UnixNano()))\n\tletters := \"bdghjlmnpqrstvwxyz0123456789\"\n\tb := make([]byte, n)\n\tfor i := range b {\n\t\tb[i] = letters[gen.Int63()%int64(len(letters))]\n\t}\n\treturn string(b)\n}", "func RandomString(num int) string {\n\tbytes := make([]byte, num)\n\tfor i := 0; i < num; i++ {\n\t\tbytes[i] = byte(randomInt(97, 122)) // lowercase letters.\n\t}\n\treturn string(bytes)\n}", "func randomString(length int) (str string) {\n\tb := make([]byte, length)\n\trand.Read(b)\n\treturn base64.StdEncoding.EncodeToString(b)\n}", "func RandomString(length int) 
string {\n\tvar letters = []rune(\"abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ\")\n\tb := make([]rune, length)\n\trand.Seed(time.Now().UnixNano())\n\tfor i := range b {\n\t\tb[i] = letters[rand.Intn(len(letters))]\n\t}\n\treturn string(b)\n}", "func RandomString(str string) string {\n\tb := make([]byte, 3)\n\trand.Read(b)\n\tid := fmt.Sprintf(str+\"%x\", b)\n\treturn strings.ToUpper(id)\n}" ]
[ "0.7593272", "0.68603545", "0.6819849", "0.6768189", "0.6673112", "0.6672849", "0.665283", "0.6644218", "0.6626326", "0.66243505", "0.6619441", "0.6537378", "0.6531689", "0.64609605", "0.6444482", "0.6438242", "0.6422548", "0.64116293", "0.6404121", "0.6369966", "0.6364124", "0.635807", "0.635807", "0.6357411", "0.6331134", "0.632085", "0.6315538", "0.6295567", "0.62892103", "0.62892103", "0.62892103", "0.62892103", "0.62889", "0.6286589", "0.62745327", "0.6270315", "0.6261423", "0.6260423", "0.624791", "0.62371236", "0.62294835", "0.6227396", "0.62057626", "0.6204432", "0.6201275", "0.6199627", "0.6196624", "0.6184738", "0.61776054", "0.61621153", "0.61573905", "0.615639", "0.61561406", "0.61545295", "0.6148863", "0.61325914", "0.6107288", "0.610647", "0.6099784", "0.60966665", "0.6095362", "0.6095362", "0.6092944", "0.6092944", "0.60915726", "0.6062413", "0.6047161", "0.6042174", "0.6041752", "0.60406077", "0.60243315", "0.60218054", "0.601661", "0.601661", "0.60154957", "0.60145", "0.60135794", "0.5991769", "0.5988051", "0.59875226", "0.59874916", "0.5984187", "0.59735984", "0.59592825", "0.5955028", "0.5948518", "0.5945005", "0.594344", "0.594344", "0.5942385", "0.5942095", "0.59397906", "0.59396964", "0.59293747", "0.5924288", "0.5920238", "0.5917259", "0.5905053", "0.5897705", "0.5883929" ]
0.86567205
0
CheckCertificateFormatFlag checks if the certificate format is valid.
CheckCertificateFormatFlag проверяет, является ли формат сертификата допустимым.
func CheckCertificateFormatFlag(s string) (string, error) { switch s { case constants.CertificateFormatStandard, teleport.CertificateFormatOldSSH, teleport.CertificateFormatUnspecified: return s, nil default: return "", trace.BadParameter("invalid certificate format parameter: %q", s) } }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (da *DefaultAuth) CheckFormat() error {\n\treturn nil\n}", "func (dd *AccountDoc) IsValidFormat() bool {\n\tif dd.Created == 0 || dd.GetType() != int(AccountDIDType) {\n\t\treturn false\n\t}\n\treturn true\n}", "func CheckCertificate(crt string) {\n\t// Read and parse the PEM certificate file\n\tpemData, err := ioutil.ReadFile(crt)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tblock, rest := pem.Decode([]byte(pemData))\n\tif block == nil || len(rest) > 0 {\n\t\tlog.Fatal(\"Certificate decoding error\")\n\t}\n\tcert, err := x509.ParseCertificate(block.Bytes)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\t// Print the certificate\n\tresult, err := certinfo.CertificateText(cert)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tfmt.Print(result)\n}", "func ValidFormat(f string) bool {\n\tfor _, v := range supportedFormats() {\n\t\tif v[0] == f || v[1] == f {\n\t\t\treturn true\n\t\t}\n\t}\n\n\treturn false\n}", "func (m *X509Certificate) Validate(formats strfmt.Registry) error {\n\tvar res []error\n\n\tif err := m.validateNotAfter(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateNotBefore(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}", "func (h *DeviceHandler) CheckDeviceNameFormat(_ context.Context, arg keybase1.CheckDeviceNameFormatArg) (bool, error) {\n\tok := libkb.CheckDeviceName.F(arg.Name)\n\tif ok {\n\t\treturn ok, nil\n\t}\n\treturn false, errors.New(libkb.CheckDeviceName.Hint)\n}", "func ValidFormat(format string) bool {\n\tfor _, f := range fmtsByStandard {\n\t\tif f == format {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func isValidCertificate(c []byte) bool {\n\tp, _ := pem.Decode(c)\n\tif p == nil {\n\t\treturn false\n\t}\n\tif _, err := x509.ParseCertificates(p.Bytes); err != nil {\n\t\treturn false\n\t}\n\treturn true\n}", "func certificateCheckCallback(cert *git.Certificate, valid bool, hostname 
string) git.ErrorCode {\n\treturn 0\n}", "func ValidateFormatVersion(formatVersion uint32) (bool) {\n if formatVersion == 1 || formatVersion == 2 || formatVersion == 3 || formatVersion == 4 { //format version should still be 1 for now\n return true\n }\n return false\n}", "func PossibleCertificateFormatValues() []CertificateFormat {\n\treturn []CertificateFormat{CertificateFormatCer, CertificateFormatPfx}\n}", "func (s *CertificatesService) Validate(body *CertificateCreate) error {\n\tenc, err := json.Marshal(body)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t_, err = s.client.Post(\"/v1/certificates/validate\", enc)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn err\n}", "func TestCheck(t *testing.T) {\n\t// Valid OCSP Must Staple Extension.\n\tvalidExtension := pkix.Extension{\n\t\tId: extensionOid,\n\t\tValue: expectedExtensionValue,\n\t\tCritical: false,\n\t}\n\t// Invalid OCSP Must Staple Extension: Critical field set to `true`.\n\tcriticalExtension := pkix.Extension{\n\t\tId: extensionOid,\n\t\tValue: expectedExtensionValue,\n\t\tCritical: true,\n\t}\n\t// Invalid OCSP Must Staple Extension: Wrong value.\n\twrongValueExtension := pkix.Extension{\n\t\tId: extensionOid,\n\t\tValue: []uint8{0xC0, 0xFF, 0xEE},\n\t\tCritical: false,\n\t}\n\t// Invalid OCSP Must Staple Extension: Wrong value, Critical field set to\n\t// `true`\n\twrongValueExtensionCritical := pkix.Extension{\n\t\tId: extensionOid,\n\t\tValue: []uint8{0xC0, 0xFF, 0xEE},\n\t\tCritical: true,\n\t}\n\n\ttestCases := []struct {\n\t\tName string\n\t\tInputEx pkix.Extension\n\t\tCertType string\n\t\tExpectedErrors []string\n\t}{\n\t\t{\n\t\t\tName: \"Valid: DV cert type\",\n\t\t\tInputEx: validExtension,\n\t\t\tCertType: \"DV\",\n\t\t\tExpectedErrors: []string{},\n\t\t},\n\t\t{\n\t\t\tName: \"Valid: OV cert type\",\n\t\t\tInputEx: validExtension,\n\t\t\tCertType: \"DV\",\n\t\t\tExpectedErrors: []string{},\n\t\t},\n\t\t{\n\t\t\tName: \"Valid: EV cert type\",\n\t\t\tInputEx: 
validExtension,\n\t\t\tCertType: \"DV\",\n\t\t\tExpectedErrors: []string{},\n\t\t},\n\t\t{\n\t\t\tName: \"Valid: CA cert type\",\n\t\t\tInputEx: validExtension,\n\t\t\tCertType: \"CA\",\n\t\t\tExpectedErrors: []string{},\n\t\t},\n\t\t{\n\t\t\tName: \"Invalid: OCSP cert type\",\n\t\t\tInputEx: validExtension,\n\t\t\tCertType: \"OCSP\",\n\t\t\tExpectedErrors: []string{\n\t\t\t\tcertTypeErr,\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tName: \"Invalid: critical extension\",\n\t\t\tInputEx: criticalExtension,\n\t\t\tCertType: \"DV\",\n\t\t\tExpectedErrors: []string{critExtErr},\n\t\t},\n\t\t{\n\t\t\tName: \"Invalid: critical extension, OCSP cert type\",\n\t\t\tInputEx: criticalExtension,\n\t\t\tCertType: \"OCSP\",\n\t\t\tExpectedErrors: []string{\n\t\t\t\tcertTypeErr, critExtErr,\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tName: \"Invalid: wrong extension value\",\n\t\t\tInputEx: wrongValueExtension,\n\t\t\tCertType: \"DV\",\n\t\t\tExpectedErrors: []string{\n\t\t\t\textValueErr,\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tName: \"Invalid: wrong extension value, critical extension, OCSP cert type\",\n\t\t\tInputEx: wrongValueExtensionCritical,\n\t\t\tCertType: \"OCSP\",\n\t\t\tExpectedErrors: []string{\n\t\t\t\tcertTypeErr, critExtErr, extValueErr,\n\t\t\t},\n\t\t},\n\t}\n\n\tfor _, tc := range testCases {\n\t\tt.Run(tc.Name, func(t *testing.T) {\n\t\t\tcertData := &certdata.Data{\n\t\t\t\tType: tc.CertType,\n\t\t\t}\n\t\t\t// Run the OCSP Must Staple check on the test data\n\t\t\terrors := Check(tc.InputEx, certData)\n\t\t\t// Collect the returned errors into a list\n\t\t\terrList := errors.List()\n\t\t\t// Verify the expected number of errors are in the list\n\t\t\tif len(tc.ExpectedErrors) != len(errList) {\n\t\t\t\tt.Errorf(\"wrong number of Check errors: expected %d, got %d\\n\",\n\t\t\t\t\tlen(tc.ExpectedErrors), len(errList))\n\t\t\t} else {\n\t\t\t\t// Match the error list to the expected error list\n\t\t\t\tfor i, err := range errList {\n\t\t\t\t\tif errMsg := err.Error(); errMsg != 
tc.ExpectedErrors[i] {\n\t\t\t\t\t\tt.Errorf(\"expected error %q at index %d, got %q\",\n\t\t\t\t\t\t\ttc.ExpectedErrors[i], i, errMsg)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t})\n\t}\n}", "func isCertTypeOK(wh *Webhook) bool {\n\tif wh.CertType == linkedca.Webhook_ALL.String() || wh.CertType == \"\" {\n\t\treturn true\n\t}\n\treturn linkedca.Webhook_X509.String() == wh.CertType\n}", "func CertificateRequestInfoSupportsCertificate(cri *tls.CertificateRequestInfo, c *tls.Certificate,) error", "func (m *X509Certificate) Validate(formats strfmt.Registry) error {\n\tvar res []error\n\n\tif err := m.validateIssuerDN(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateNotAfter(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateNotBefore(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validatePublicKey(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateSubjectDN(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}", "func (ctl *Ctl) CheckSpecFlags() error {\n\tencryptPassLength := len(ctl.EncryptionPassword)\n\tif encryptPassLength > 0 && encryptPassLength < 16 {\n\t\treturn fmt.Errorf(\"flag EncryptionPassword is %d characters. Must be 16 or more characters\", encryptPassLength)\n\t}\n\tglobalSaltLength := len(ctl.EncryptionGlobalSalt)\n\tif globalSaltLength > 0 && globalSaltLength < 16 {\n\t\treturn fmt.Errorf(\"flag EncryptionGlobalSalt is %d characters. 
Must be 16 or more characters\", globalSaltLength)\n\t}\n\treturn nil\n}", "func isMustStapleCertificate(cert *x509.Certificate) (bool, error) {\n\tvar featureExtension pkix.Extension\n\tvar foundExtension bool\n\tfor _, ext := range cert.Extensions {\n\t\tif ext.Id.Equal(tlsFeatureExtensionOID) {\n\t\t\tfeatureExtension = ext\n\t\t\tfoundExtension = true\n\t\t\tbreak\n\t\t}\n\t}\n\tif !foundExtension {\n\t\treturn false, nil\n\t}\n\n\t// The value for the TLS feature extension is a sequence of integers. Per the asn1.Unmarshal documentation, an\n\t// integer can be unmarshalled into an int, int32, int64, or *big.Int and unmarshalling will error if the integer\n\t// cannot be encoded into the target type.\n\t//\n\t// Use []*big.Int to ensure that all values in the sequence can be successfully unmarshalled.\n\tvar featureValues []*big.Int\n\tif _, err := asn1.Unmarshal(featureExtension.Value, &featureValues); err != nil {\n\t\treturn false, fmt.Errorf(\"error unmarshalling TLS feature extension values: %v\", err)\n\t}\n\n\tfor _, value := range featureValues {\n\t\tif value.Cmp(mustStapleFeatureValue) == 0 {\n\t\t\treturn true, nil\n\t\t}\n\t}\n\treturn false, nil\n}", "func (f FormatHeader) Valid() bool {\n\treturn f.ID == 0x20746d66 && f.Size == 0x10 && f.AudioFormat == 1\n}", "func (o CertificateCreateOpts) Validate() error {\n\tif o.Name == \"\" {\n\t\treturn errors.New(\"missing name\")\n\t}\n\tif o.Certificate == \"\" {\n\t\treturn errors.New(\"missing certificate\")\n\t}\n\tif o.PrivateKey == \"\" {\n\t\treturn errors.New(\"missing private key\")\n\t}\n\treturn nil\n}", "func IsValidCertType(certType string) bool {\n\tfor _, c := range GetSupportedCerts() {\n\t\tif c == certType {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func printCertificate(cert *x509.Certificate) bool {\n\n\tfmt.Printf(\"Subject:%s\\t%s%s\\n\", Green, cert.Subject, Reset)\n\tfmt.Printf(\"Valid from:%s\\t%s%s\\n\", Yellow, cert.NotBefore, Reset)\n\tfmt.Printf(\"Valid 
until:%s\\t%s%s\\n\", Yellow, cert.NotAfter, Reset)\n\tfmt.Printf(\"Issuer:%s\\t\\t%s%s\\n\", Cyan, cert.Issuer.Organization[0], Reset)\n\tfmt.Printf(\"Is CA?:%s\\t\\t%t%s\\n\", Pink, cert.IsCA, Reset)\n\tfmt.Printf(\"Algorithm:%s\\t%s%s\\n\", Pink, cert.SignatureAlgorithm, Reset)\n\n\tif len(cert.DNSNames) > 0 {\n\t\tfmt.Printf(\"DNS Names:%s\\t%s%s\\n\", Purple, strings.Join(cert.DNSNames, \", \"), Reset)\n\t}\n\n\tif len(cert.OCSPServer) > 0 {\n\t\tfmt.Printf(\"OCSP Server:%s\\t%s%s\\n\", Comment, strings.Join(cert.OCSPServer, \", \"), Reset)\n\t}\n\n\treturn true\n}", "func (o BackendTlsOutput) ValidateCertificateName() pulumi.BoolPtrOutput {\n\treturn o.ApplyT(func(v BackendTls) *bool { return v.ValidateCertificateName }).(pulumi.BoolPtrOutput)\n}", "func (c ConvertTransformFormat) IsValid() bool {\n\tswitch c {\n\tcase ConvertTransformFormatNone, ConvertTransformFormatQuantity, ConvertTransformFormatJSON:\n\t\treturn true\n\t}\n\treturn false\n}", "func Check(d *certdata.Data) *errors.Errors {\n\tvar e = errors.New(nil)\n\n\tswitch d.Type {\n\tcase \"EV\":\n\t\tif strings.LastIndex(d.Cert.Subject.CommonName, \"*\") > -1 {\n\t\t\te.Err(\"Certificate should not contain a wildcard\")\n\t\t}\n\t\tfor _, n := range d.Cert.DNSNames {\n\t\t\tif strings.LastIndex(n, \"*\") > -1 {\n\t\t\t\te.Err(\"Certificate subjectAltName '%s' should not contain a wildcard\", n)\n\t\t\t}\n\t\t}\n\tcase \"DV\", \"OV\":\n\t\tif strings.LastIndex(d.Cert.Subject.CommonName, \"*\") > 0 {\n\t\t\te.Err(\"Certificate wildcard is only allowed as prefix\")\n\t\t}\n\t\tfor _, n := range d.Cert.DNSNames {\n\t\t\tif strings.LastIndex(n, \"*\") > 0 {\n\t\t\t\te.Err(\"Certificate subjectAltName '%s' wildcard is only allowed as prefix\", n)\n\t\t\t}\n\t\t}\n\t}\n\n\treturn e\n}", "func (o *CertificateOptions) Validate() error {\n\tif len(o.csrNames) < 1 && cmdutil.IsFilenameSliceEmpty(o.Filenames, o.Kustomize) {\n\t\treturn fmt.Errorf(\"one or more CSRs must be specified as <name> or -f 
<filename>\")\n\t}\n\treturn nil\n}", "func FormatCert(c *x509.Certificate) (id CertID) {\n\tid.AltNames.IPs = append([]net.IP{}, c.IPAddresses...)\n\tid.AltNames.DNSNames = append([]string{}, c.DNSNames...)\n\tid.AltNames.Emails = append([]string{}, getEmail(c)...)\n\tid.Issuer = c.Issuer.CommonName\n\tid.CommonName = c.Subject.CommonName\n\tid.Organization = c.Subject.Organization\n\tid.IsCA = c.IsCA\n\tid.NotBefore = c.NotBefore\n\tid.NotAfter = c.NotAfter\n\treturn\n}", "func validateFormatString(v models.ValueDescriptor) (bool, error) {\n\t// No formatting specified\n\tif v.Formatting == \"\" {\n\t\treturn true, nil\n\t} else {\n\t\treturn regexp.MatchString(formatSpecifier, v.Formatting)\n\t}\n}", "func (c Certificate) SigningFormat() ([]byte, error) {\n\tvar buf bytes.Buffer\n\tif err := gob.NewEncoder(&buf).Encode(&c); err != nil {\n\t\treturn nil, err\n\t}\n\treturn buf.Bytes(), nil\n}", "func (e Error) IsInvalidFormat() bool {\n\treturn e.kind == invalidFormat\n}", "func validateRequestedReportFormat(config *Config) bool {\n\tconfig.reportFormat = lowercaseOrNotice(config.reportFormat, \"requested report format\")\n\n\tif !sechubUtil.StringArrayContains(SupportedReportFormats, config.reportFormat) {\n\t\tsechubUtil.LogWarning(\"Unsupported report format '\" + config.reportFormat + \"'. 
Changing to '\" + ReportFormatJSON + \"'.\")\n\t\tconfig.reportFormat = ReportFormatJSON\n\t}\n\treturn true\n}", "func (o *V1VirusDatasetRequest) HasFormat() bool {\n\tif o != nil && o.Format != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func CheckCertsValidity(driver drivers.Driver) error {\n\tcertExpiryDateCmd := `date --date=\"$(sudo openssl x509 -in /var/lib/kubelet/pki/kubelet-client-current.pem -noout -enddate | cut -d= -f 2)\" --iso-8601=seconds`\n\toutput, err := drivers.RunSSHCommandFromDriver(driver, certExpiryDateCmd)\n\tif err != nil {\n\t\treturn err\n\t}\n\tcertExpiryDate, err := time.Parse(time.RFC3339, strings.TrimSpace(output))\n\tif err != nil {\n\t\treturn err\n\t}\n\tif time.Now().After(certExpiryDate) {\n\t\treturn fmt.Errorf(\"Certs have expired, they were valid till: %s\", certExpiryDate.Format(time.RFC822))\n\t}\n\treturn nil\n}", "func verifyProduceCodeFormat(produceCode string) (bool, error) {\n\treturn regexp.MatchString(PRODUCECODE_REGEX, produceCode)\n}", "func ClientHelloInfoSupportsCertificate(chi *tls.ClientHelloInfo, c *tls.Certificate,) error", "func CheckTLSCert(con *tls.ConnectionState, fp []byte) bool {\n\tfor _, cert := range con.PeerCertificates {\n\t\tcs := sha256.Sum256(cert.Raw)\n\t\tif bytes.Compare(cs[:], fp) == 0 {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func (f Format) Valid() bool {\n\tswitch f {\n\tcase None:\n\t\tbreak\n\tcase Custom:\n\t\tbreak\n\tcase Zstd:\n\t\tbreak\n\tdefault:\n\t\treturn false\n\t}\n\treturn true\n}", "func ValidatePasswordFormat(password string) error {\n\tif password == \"\" {\n\t\treturn errors.New(\"Password is empty\")\n\t} else if l := len(password); l < PasswordMinLength || l > PasswordMaxLength {\n\t\treturn fmt.Errorf(\"Password's length must be %d-%d characters\", PasswordMinLength, PasswordMaxLength)\n\t}\n\tcountUpper, countLower, countNumber, countSpecial := 0, 0, 0, 0\n\tfor _, c := range password {\n\t\tif c >= '0' && c <= '9' {\n\t\t\tcountNumber++\n\t\t} 
else if c >= 'a' && c <= 'z' {\n\t\t\tcountLower++\n\t\t} else if c >= 'A' && c <= 'Z' {\n\t\t\tcountUpper++\n\t\t} else {\n\t\t\tcountSpecial++\n\t\t}\n\t}\n\tif countUpper == 0 || countLower == 0 || countNumber == 0 || countSpecial == 0 {\n\t\treturn errors.New(\"Password must contain at least 1 lowercase, uppercase, and special characters and 1 number\")\n\t}\n\treturn nil\n}", "func (config *Config) evaluateCertificate() error {\n\n\tif FileExists(config.Certificate.Cert) {\n\n\t\tdata, err := ioutil.ReadFile(config.Certificate.Cert)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tblock, _ := pem.Decode(data)\n\n\t\tcert, err := x509.ParseCertificate(block.Bytes)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tif len(cert.Subject.OrganizationalUnit) > 0 {\n\t\t\tif cert.Subject.OrganizationalUnit[0] == \"Hosts\" {\n\t\t\t\tconfig.IsHost = true\n\t\t\t\treturn nil\n\t\t\t}\n\t\t} else if len(cert.Issuer.Organization) > 0 {\n\t\t\tif cert.Issuer.Organization[0] == \"Tapp\" {\n\t\t\t\tconfig.IsHost = true\n\t\t\t\treturn nil\n\t\t\t}\n\t\t}\n\t}\n\tconfig.IsHost = false\n\treturn nil\n}", "func (b FormatOption) Has(flag FormatOption) bool { return b&flag != 0 }", "func validateClientCertificate(certificate *x509.Certificate, trustedCertsFile string,\n\tsuppressCertificateTimeInvalid, suppressCertificateChainIncomplete bool) (bool, error) {\n\tif certificate == nil {\n\t\treturn false, ua.BadCertificateInvalid\n\t}\n\tvar intermediates, roots *x509.CertPool\n\tif buf, err := os.ReadFile(trustedCertsFile); err == nil {\n\t\tfor len(buf) > 0 {\n\t\t\tvar block *pem.Block\n\t\t\tblock, buf = pem.Decode(buf)\n\t\t\tif block == nil {\n\t\t\t\t// maybe its der\n\t\t\t\tcert, err := x509.ParseCertificate(buf)\n\t\t\t\tif err == nil {\n\t\t\t\t\t// is self-signed?\n\t\t\t\t\tif bytes.Equal(cert.RawIssuer, cert.RawSubject) {\n\t\t\t\t\t\tif roots == nil {\n\t\t\t\t\t\t\troots = x509.NewCertPool()\n\t\t\t\t\t\t}\n\t\t\t\t\t\troots.AddCert(cert)\n\t\t\t\t\t} else 
{\n\t\t\t\t\t\tif intermediates == nil {\n\t\t\t\t\t\t\tintermediates = x509.NewCertPool()\n\t\t\t\t\t\t}\n\t\t\t\t\t\tintermediates.AddCert(cert)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tif block.Type != \"CERTIFICATE\" || len(block.Headers) != 0 {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tcert, err := x509.ParseCertificate(block.Bytes)\n\t\t\tif err != nil {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\t// is self-signed?\n\t\t\tif bytes.Equal(cert.RawIssuer, cert.RawSubject) {\n\t\t\t\tif roots == nil {\n\t\t\t\t\troots = x509.NewCertPool()\n\t\t\t\t}\n\t\t\t\troots.AddCert(cert)\n\t\t\t} else {\n\t\t\t\tif intermediates == nil {\n\t\t\t\t\tintermediates = x509.NewCertPool()\n\t\t\t\t}\n\t\t\t\tintermediates.AddCert(cert)\n\t\t\t}\n\t\t}\n\t}\n\n\topts := x509.VerifyOptions{\n\t\tIntermediates: intermediates,\n\t\tRoots: roots,\n\t\tKeyUsages: []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth},\n\t}\n\n\tif suppressCertificateTimeInvalid {\n\t\topts.CurrentTime = certificate.NotAfter // causes test to pass\n\t}\n\n\tif suppressCertificateChainIncomplete {\n\t\tif opts.Roots == nil {\n\t\t\topts.Roots = x509.NewCertPool()\n\t\t}\n\t\topts.Roots.AddCert(certificate)\n\t}\n\n\t// build chain and verify\n\tif _, err := certificate.Verify(opts); err != nil {\n\t\tswitch se := err.(type) {\n\t\tcase x509.CertificateInvalidError:\n\t\t\tswitch se.Reason {\n\t\t\tcase x509.Expired:\n\t\t\t\treturn false, ua.BadCertificateTimeInvalid\n\t\t\tcase x509.IncompatibleUsage:\n\t\t\t\treturn false, ua.BadCertificateUseNotAllowed\n\t\t\tdefault:\n\t\t\t\treturn false, ua.BadSecurityChecksFailed\n\t\t\t}\n\t\tcase x509.UnknownAuthorityError:\n\t\t\treturn false, ua.BadSecurityChecksFailed\n\t\tdefault:\n\t\t\treturn false, ua.BadSecurityChecksFailed\n\t\t}\n\t}\n\treturn true, nil\n}", "func isCveFormat(fl FieldLevel) bool {\n\tcveString := fl.Field().String()\n\n\treturn cveRegex.MatchString(cveString)\n}", "func (o BackendTlsOutput) ValidateCertificateChain() pulumi.BoolPtrOutput 
{\n\treturn o.ApplyT(func(v BackendTls) *bool { return v.ValidateCertificateChain }).(pulumi.BoolPtrOutput)\n}", "func isValidVersionFormat(version string) bool {\n\tmatch, _ := regexp.MatchString(\"[0-9]+\\\\.[0-9]+\\\\.[0-9]+\", version)\n\treturn match\n}", "func CheckFlags(sampler config.Sampler) (int, bool) {\n\tif helpConfig {\n\t\tsampler.Sample(os.Stdout, nil, nil)\n\t\treturn 0, false\n\t}\n\tif version {\n\t\tfmt.Printf(VersionInfo())\n\t\treturn 0, false\n\t}\n\tif configFile == \"\" {\n\t\tfmt.Fprintln(os.Stderr, \"Err: Missing config file\")\n\t\tflag.Usage()\n\t\treturn 1, false\n\t}\n\treturn 0, true\n}", "func (c certificate) Check(d *certdata.Data) *errors.Errors {\n\tvar e = errors.New(nil)\n\n\tfor _, cc := range c {\n\t\tif cc.filter != nil && !cc.filter.Check(d) {\n\t\t\tcontinue\n\t\t}\n\t\te.Append(cc.f(d))\n\t}\n\n\treturn e\n}", "func Valid(format, version string) error {\n\tversionParser, exists := GetParser(format)\n\tif !exists {\n\t\treturn ErrUnknownVersionFormat\n\t}\n\n\tif !versionParser.Valid(version) {\n\t\treturn ErrInvalidVersion\n\t}\n\n\treturn nil\n}", "func (sfs *SoundFileInfoService) IsValidSoundFileFormat(fileName string) bool {\n\tisValidFileFormat := false\n\tfname := strings.ToLower(fileName)\n\n\tif strings.HasSuffix(fname, \".mp3\") || strings.HasSuffix(fname, \".wav\") {\n\t\tisValidFileFormat = true\n\t}\n\n\treturn isValidFileFormat\n}", "func (c Certificate) GetBooleanValidity() (trusted_ubuntu, trusted_mozilla, trusted_microsoft, trusted_apple, trusted_android bool) {\n\n\t//check Ubuntu validation info\n\tvalInfo, ok := c.ValidationInfo[Ubuntu_TS_name]\n\n\tif !ok {\n\t\ttrusted_ubuntu = false\n\t} else {\n\t\ttrusted_ubuntu = valInfo.IsValid\n\t}\n\n\t//check Mozilla validation info\n\tvalInfo, ok = c.ValidationInfo[Mozilla_TS_name]\n\n\tif !ok {\n\t\ttrusted_mozilla = false\n\t} else {\n\t\ttrusted_mozilla = valInfo.IsValid\n\t}\n\n\t//check Microsoft validation info\n\tvalInfo, ok = 
c.ValidationInfo[Microsoft_TS_name]\n\n\tif !ok {\n\t\ttrusted_microsoft = false\n\t} else {\n\t\ttrusted_microsoft = valInfo.IsValid\n\t}\n\n\t//check Apple validation info\n\tvalInfo, ok = c.ValidationInfo[Apple_TS_name]\n\n\tif !ok {\n\t\ttrusted_apple = false\n\t} else {\n\t\ttrusted_apple = valInfo.IsValid\n\t}\n\n\t//check Android validation info\n\tvalInfo, ok = c.ValidationInfo[Android_TS_name]\n\n\tif !ok {\n\t\ttrusted_android = false\n\t} else {\n\t\ttrusted_android = valInfo.IsValid\n\t}\n\treturn\n}", "func (o *StorageNetAppCloudTargetAllOf) GetCertificateValidationEnabledOk() (*bool, bool) {\n\tif o == nil || o.CertificateValidationEnabled == nil {\n\t\treturn nil, false\n\t}\n\treturn o.CertificateValidationEnabled, true\n}", "func (c CertAuthType) Check() error {\n\tfor _, caType := range CertAuthTypes {\n\t\tif c == caType {\n\t\t\treturn nil\n\t\t}\n\t}\n\n\treturn trace.BadParameter(\"%q authority type is not supported\", c)\n}", "func (o *Manager) CanFormat(ctx context.Context, inType string) (available struct {\n\tV0 bool\n\tV1 string\n}, err error) {\n\terr = o.object.CallWithContext(ctx, InterfaceManager+\".CanFormat\", 0, inType).Store(&available)\n\treturn\n}", "func CheckCertSignature(caCert *x509.Certificate) VerifyPeerCertificateFunc {\n\treturn func(rawCerts [][]byte, verifiedChains [][]*x509.Certificate) error {\n\t\tfor _, c := range rawCerts {\n\t\t\tparsedCert, err := x509.ParseCertificate(c)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tcertErr := parsedCert.CheckSignatureFrom(caCert)\n\t\t\tif parsedCert.NotAfter.Before(time.Now()) || parsedCert.NotBefore.After(time.Now()) {\n\t\t\t\tcertErr = errors.New(\"Certificate expired or used too soon\")\n\t\t\t}\n\t\t\tlog.Printf(\"Remote presented certificate %d with time bounds (%v-%v). 
Verification error for certificate: %+v\",\n\t\t\t\tparsedCert.SerialNumber, parsedCert.NotBefore, parsedCert.NotAfter, certErr)\n\t\t\treturn certErr\n\t\t}\n\t\treturn errors.New(\"Expected certificate which would pass, none presented\")\n\t}\n}", "func (c *CertChecker) CheckCert(principal string, cert *Certificate) error {\n\tif c.IsRevoked != nil && c.IsRevoked(cert) {\n\t\treturn fmt.Errorf(\"ssh: certificate serial %d revoked\", cert.Serial)\n\t}\n\n\tfor opt := range cert.CriticalOptions {\n\t\t// sourceAddressCriticalOption will be enforced by\n\t\t// serverAuthenticate\n\t\tif opt == sourceAddressCriticalOption {\n\t\t\tcontinue\n\t\t}\n\n\t\tfound := false\n\t\tfor _, supp := range c.SupportedCriticalOptions {\n\t\t\tif supp == opt {\n\t\t\t\tfound = true\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\tif !found {\n\t\t\treturn fmt.Errorf(\"ssh: unsupported critical option %q in certificate\", opt)\n\t\t}\n\t}\n\n\tif len(cert.ValidPrincipals) > 0 {\n\t\t// By default, certs are valid for all users/hosts.\n\t\tfound := false\n\t\tfor _, p := range cert.ValidPrincipals {\n\t\t\tif p == principal {\n\t\t\t\tfound = true\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\tif !found {\n\t\t\treturn fmt.Errorf(\"ssh: principal %q not in the set of valid principals for given certificate: %q\", principal, cert.ValidPrincipals)\n\t\t}\n\t}\n\n\tclock := c.Clock\n\tif clock == nil {\n\t\tclock = time.Now\n\t}\n\n\tunixNow := clock().Unix()\n\tif after := int64(cert.ValidAfter); after < 0 || unixNow < int64(cert.ValidAfter) {\n\t\treturn fmt.Errorf(\"ssh: cert is not yet valid\")\n\t}\n\tif before := int64(cert.ValidBefore); cert.ValidBefore != uint64(CertTimeInfinity) && (unixNow >= before || before < 0) {\n\t\treturn fmt.Errorf(\"ssh: cert has expired\")\n\t}\n\tif err := cert.SignatureKey.Verify(cert.bytesForSigning(), cert.Signature); err != nil {\n\t\treturn fmt.Errorf(\"ssh: certificate signature does not verify\")\n\t}\n\n\treturn nil\n}", "func (o *CaCertificateCreateReqWeb) 
GetCertificateOk() (*string, bool) {\n\tif o == nil {\n\t\treturn nil, false\n\t}\n\treturn &o.Certificate, true\n}", "func checkProtocal(protocol string) bool {\n\tpass := false\n\tfor _, v := range protocals {\n\t\tif strings.EqualFold(protocol, v) {\n\t\t\tpass = true\n\t\t\tbreak\n\t\t}\n\t}\n\n\treturn pass\n}", "func (asf AttestationStatementFormat) Valid() error {\n\tswitch asf {\n\tcase AttestationFormatPacked:\n\tcase AttestationFormatTPM:\n\tcase AttestationFormatAndroidKey:\n\tcase AttestationFormatAndroidSafetyNet:\n\tcase AttestationFormatFidoU2F:\n\tcase AttestationFormatNone:\n\tdefault:\n\t\treturn NewError(\"Invalid attestation statement %s\", asf)\n\t}\n\treturn nil\n}", "func IsFormatSupported(p StreamParameters, args ...interface{}) error {\n\ts := &Stream{}\n\terr := s.init(p, args...)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn newError(C.Pa_IsFormatSupported(s.inParams, s.outParams, C.double(p.SampleRate)))\n}", "func IsExtFormatValid(ext string) bool {\n\tif string(ext[0]) != \".\" {\n\t\treturn false\n\t}\n\n\tfor _, letter := range ext[1:] {\n\t\tif !unicode.IsLetter(rune(letter)) && !unicode.IsDigit(rune(letter)) {\n\t\t\treturn false\n\t\t}\n\t}\n\n\treturn true\n}", "func VerifyFileFormat(fh *multipart.FileHeader, format string, size int64) (string, error) {\n\tif len(format) == 0 || size <= 0 {\n\t\treturn \"\", invar.ErrInvalidParams\n\t}\n\n\tsuffix := path.Ext(fh.Filename)\n\tswitch suffix {\n\tcase format:\n\t\tif fh.Size > int64(size<<20) {\n\t\t\treturn \"\", invar.ErrImgOverSize\n\t\t}\n\tdefault:\n\t\treturn \"\", invar.ErrUnsupportedFile\n\t}\n\treturn suffix, nil\n}", "func (o BackendTlsPtrOutput) ValidateCertificateName() pulumi.BoolPtrOutput {\n\treturn o.ApplyT(func(v *BackendTls) *bool {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn v.ValidateCertificateName\n\t}).(pulumi.BoolPtrOutput)\n}", "func TestCertificate(t *testing.T) {\n\tvar result Certificate\n\n\tif err := 
json.NewDecoder(certificateBody).Decode(&result); err != nil {\n\t\tt.Errorf(\"Error decoding JSON: %s\", err)\n\t}\n\n\tassertEquals(t, \"1\", result.ID)\n\tassertEquals(t, \"HTTPS Certificate\", result.Name)\n\tassertEquals(t, \"PEM\", string(result.CertificateType))\n\tassertEquals(t, \"Contoso\", result.Issuer.Organization)\n\tassertEquals(t, \"2019-09-07T13:22:05Z\", result.ValidNotAfter)\n\tassertEquals(t, \"TPM_ALG_SHA1\", result.FingerprintHashAlgorithm)\n\tassertEquals(t, \"sha256WithRSAEncryption\", result.SignatureAlgorithm)\n}", "func ValidatePhoneFormat(countryCallingCode, phone string) error {\n\tif phone == \"\" {\n\t\treturn errors.New(\"Phone number is empty\")\n\t} else if !IsNumericString(phone) {\n\t\treturn errors.New(\"Phone number must be numeric\")\n\t} else if strings.HasPrefix(phone, \"0\") {\n\t\treturn errors.New(\"Phone number can't start with '0'\")\n\t}\n\tlenCC, lenPhone := len(countryCallingCode), len(phone)\n\tmin, max := PhoneMinLength-lenCC, PhoneMaxLength-lenCC\n\tif lenPhone < min || lenPhone > max {\n\t\treturn fmt.Errorf(\"Phone number's length must be %d-%d digits\", min, max)\n\t}\n\treturn nil\n}", "func isValidForDelegation(cert *x509.Certificate) bool {\n\t// Check that the digitalSignature key usage is set.\n\t// The certificate must contains the digitalSignature KeyUsage.\n\tif (cert.KeyUsage & x509.KeyUsageDigitalSignature) == 0 {\n\t\treturn false\n\t}\n\n\t// Check that the certificate has the DelegationUsage extension and that\n\t// it's marked as non-critical (See Section 4.2 of RFC5280).\n\tfor _, extension := range cert.Extensions {\n\t\tif extension.Id.Equal(extensionDelegatedCredential) {\n\t\t\tif extension.Critical {\n\t\t\t\treturn false\n\t\t\t}\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func InvalidCollectionFormat(name, in, format string) *Validation {\n\treturn &Validation{\n\t\tcode: InvalidTypeCode,\n\t\tName: name,\n\t\tIn: in,\n\t\tValue: format,\n\t\tmessage: fmt.Sprintf(\"the collection 
format %q is not supported for the %s param %q\", format, in, name),\n\t}\n}", "func (m *X509Certificate) ContextValidate(ctx context.Context, formats strfmt.Registry) error {\n\tvar res []error\n\n\tif err := m.contextValidateIssuerDN(ctx, formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.contextValidatePublicKey(ctx, formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.contextValidateSubjectDN(ctx, formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}", "func (p *CertProfile) Validate() error {\n\tif p.Expiry == 0 {\n\t\treturn errors.New(\"no expiry set\")\n\t}\n\n\tif len(p.Usage) == 0 {\n\t\treturn errors.New(\"no usages specified\")\n\t} else if _, _, unk := p.Usages(); len(unk) > 0 {\n\t\treturn errors.Errorf(\"unknown usage: %s\", strings.Join(unk, \",\"))\n\t}\n\n\tfor _, policy := range p.Policies {\n\t\tfor _, qualifier := range policy.Qualifiers {\n\t\t\tif qualifier.Type != \"\" &&\n\t\t\t\tqualifier.Type != csr.UserNoticeQualifierType &&\n\t\t\t\tqualifier.Type != csr.CpsQualifierType {\n\t\t\t\treturn errors.New(\"invalid policy qualifier type: \" + qualifier.Type)\n\t\t\t}\n\t\t}\n\t}\n\n\tif p.AllowedNames != \"\" && p.AllowedNamesRegex == nil {\n\t\trule, err := regexp.Compile(p.AllowedNames)\n\t\tif err != nil {\n\t\t\treturn errors.WithMessage(err, \"failed to compile AllowedNames\")\n\t\t}\n\t\tp.AllowedNamesRegex = rule\n\t}\n\tif p.AllowedDNS != \"\" && p.AllowedDNSRegex == nil {\n\t\trule, err := regexp.Compile(p.AllowedDNS)\n\t\tif err != nil {\n\t\t\treturn errors.WithMessage(err, \"failed to compile AllowedDNS\")\n\t\t}\n\t\tp.AllowedDNSRegex = rule\n\t}\n\tif p.AllowedEmail != \"\" && p.AllowedEmailRegex == nil {\n\t\trule, err := regexp.Compile(p.AllowedEmail)\n\t\tif err != nil {\n\t\t\treturn errors.WithMessage(err, \"failed to compile AllowedEmail\")\n\t\t}\n\t\tp.AllowedEmailRegex = 
rule\n\t}\n\tif p.AllowedURI != \"\" && p.AllowedURIRegex == nil {\n\t\trule, err := regexp.Compile(p.AllowedURI)\n\t\tif err != nil {\n\t\t\treturn errors.WithMessage(err, \"failed to compile AllowedURI\")\n\t\t}\n\t\tp.AllowedURIRegex = rule\n\t}\n\n\treturn nil\n}", "func CustomizeCertificateValidation(fedCluster *fedv1b1.KubeFedCluster, tlsConfig *tls.Config) error {\n\t// InsecureSkipVerify must be enabled to prevent early validation errors from\n\t// returning before VerifyPeerCertificate is run\n\ttlsConfig.InsecureSkipVerify = true\n\n\tvar ignoreSubjectName, ignoreValidityPeriod bool\n\tfor _, validation := range fedCluster.Spec.DisabledTLSValidations {\n\t\tswitch fedv1b1.TLSValidation(validation) {\n\t\tcase fedv1b1.TLSAll:\n\t\t\tklog.V(1).Infof(\"Cluster %s will not perform TLS certificate validation\", fedCluster.Name)\n\t\t\treturn nil\n\t\tcase fedv1b1.TLSSubjectName:\n\t\t\tignoreSubjectName = true\n\t\tcase fedv1b1.TLSValidityPeriod:\n\t\t\tignoreValidityPeriod = true\n\t\t}\n\t}\n\n\t// Normal TLS SubjectName validation uses the conn dnsname for validation,\n\t// but this is not available when using a VerifyPeerCertificate functions.\n\t// As a workaround, we will fill the tls.Config.ServerName with the URL host\n\t// specified as the KubeFedCluster API target\n\tif !ignoreSubjectName && tlsConfig.ServerName == \"\" {\n\t\tapiURL, err := url.Parse(fedCluster.Spec.APIEndpoint)\n\t\tif err != nil {\n\t\t\treturn errors.Errorf(\"failed to identify a valid host from APIEndpoint for use in SubjectName validation\")\n\t\t}\n\t\ttlsConfig.ServerName = apiURL.Hostname()\n\t}\n\n\t// VerifyPeerCertificate uses the same logic as crypto/tls Conn.verifyServerCertificate\n\t// but uses a modified set of options to ignore specific validations\n\ttlsConfig.VerifyPeerCertificate = func(certificates [][]byte, verifiedChains [][]*x509.Certificate) error {\n\t\topts := x509.VerifyOptions{\n\t\t\tRoots: tlsConfig.RootCAs,\n\t\t\tCurrentTime: 
time.Now(),\n\t\t\tIntermediates: x509.NewCertPool(),\n\t\t\tDNSName: tlsConfig.ServerName,\n\t\t}\n\t\tif tlsConfig.Time != nil {\n\t\t\topts.CurrentTime = tlsConfig.Time()\n\t\t}\n\n\t\tcerts := make([]*x509.Certificate, len(certificates))\n\t\tfor i, asn1Data := range certificates {\n\t\t\tcert, err := x509.ParseCertificate(asn1Data)\n\t\t\tif err != nil {\n\t\t\t\treturn errors.New(\"tls: failed to parse certificate from server: \" + err.Error())\n\t\t\t}\n\t\t\tcerts[i] = cert\n\t\t}\n\n\t\tfor i, cert := range certs {\n\t\t\tif i == 0 {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\topts.Intermediates.AddCert(cert)\n\t\t}\n\n\t\tif ignoreSubjectName {\n\t\t\t// set the DNSName to nil to ignore the name validation\n\t\t\topts.DNSName = \"\"\n\t\t\tklog.V(1).Infof(\"Cluster %s will not perform tls certificate SubjectName validation\", fedCluster.Name)\n\t\t}\n\t\tif ignoreValidityPeriod {\n\t\t\t// set the CurrentTime to immediately after the certificate start time\n\t\t\t// this will ensure that certificate passes the validity period check\n\t\t\topts.CurrentTime = certs[0].NotBefore.Add(time.Second)\n\t\t\tklog.V(1).Infof(\"Cluster %s will not perform tls certificate ValidityPeriod validation\", fedCluster.Name)\n\t\t}\n\n\t\t_, err := certs[0].Verify(opts)\n\n\t\treturn err\n\t}\n\n\treturn nil\n}", "func (s TLSSpec) Validate() error {\n\tif s.IsSecure() {\n\t\tif err := shared.ValidateResourceName(s.GetCASecretName()); err != nil {\n\t\t\treturn errors.WithStack(err)\n\t\t}\n\t\tif _, _, _, err := s.GetParsedAltNames(); err != nil {\n\t\t\treturn errors.WithStack(err)\n\t\t}\n\t\tif err := s.GetTTL().Validate(); err != nil {\n\t\t\treturn errors.WithStack(err)\n\t\t}\n\t}\n\treturn nil\n}", "func pathFetchValid(b *backend) *framework.Path {\n\treturn &framework.Path{\n\t\tPattern: `cert/(?P<serial>[0-9A-Fa-f-:]+)`,\n\t\tFields: map[string]*framework.FieldSchema{\n\t\t\t\"serial\": &framework.FieldSchema{\n\t\t\t\tType: framework.TypeString,\n\t\t\t\tDescription: 
`Certificate serial number, in colon- or\nhyphen-separated octal`,\n\t\t\t},\n\t\t},\n\n\t\tCallbacks: map[logical.Operation]framework.OperationFunc{\n\t\t\tlogical.ReadOperation: b.pathFetchRead,\n\t\t},\n\n\t\tHelpSynopsis: pathFetchHelpSyn,\n\t\tHelpDescription: pathFetchHelpDesc,\n\t}\n}", "func CheckFeatureFlag(v *viper.Viper) error {\n\treturn nil\n}", "func (o KeystoresAliasesKeyCertFileCertsInfoCertInfoOutput) IsValid() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v KeystoresAliasesKeyCertFileCertsInfoCertInfo) *string { return v.IsValid }).(pulumi.StringPtrOutput)\n}", "func (o *Options) CheckOptions() error {\n\tif o.ViceCrtFile == \"\" {\n\t\treturn fmt.Errorf(\"path to vice certificate not provided. Aborting\")\n\t}\n\tif o.ViceKeyFile == \"\" {\n\t\treturn fmt.Errorf(\"path to vice key not provided. Aborting\")\n\t}\n\tif o.VicePresidentConfig == \"\" {\n\t\treturn fmt.Errorf(\"path to vice config not provided. Aborting\")\n\t}\n\tif o.IntermediateCertificate == \"\" {\n\t\tLogDebug(\"Intermediate certificate not provided\")\n\t}\n\tif o.KubeConfig == \"\" {\n\t\tLogDebug(\"Path to kubeconfig not provided. Using Default\")\n\t}\n\n\tif o.MinCertValidityDays <= 0 {\n\t\tLogDebug(\"Minimum certificate validity invalid. Using default: 30 days\")\n\t\to.MinCertValidityDays = 30\n\t}\n\n\tif o.MetricPort == 0 {\n\t\to.MetricPort = 9091\n\t\tLogDebug(\"Metric port not provided. 
Using default port: 9091\")\n\t}\n\tif !o.IsEnableAdditionalSymantecMetrics {\n\t\tLogDebug(\"Not exposing additional Symantec metrics\")\n\t} else {\n\t\tLogDebug(\"Exposing additional Symantec metrics\")\n\t}\n\n\treturn nil\n}", "func itValidatesTLSFlags(args ...string) {\n\tContext(\"TLS Flag Validation\", func() {\n\t\tIt(\"exits with status 3 when no TLS flags are specified\", func() {\n\t\t\tcmd := exec.Command(cfdotPath, args...)\n\n\t\t\tsess, err := gexec.Start(cmd, GinkgoWriter, GinkgoWriter)\n\t\t\tExpect(err).NotTo(HaveOccurred())\n\t\t\tEventually(sess.Exited).Should(BeClosed())\n\n\t\t\tExpect(sess.ExitCode()).To(Equal(3))\n\t\t})\n\t})\n}", "func (samlCert DownloadSamlCert) Validate() error {\n\n\tif samlCert.Config.AttestationService.AttestationType == \"SGX\" {\n\t\tfmt.Println(\"tasks/download_saml_cert:Validate() Skipping download of SAML Cert task for SGX attestation\")\n\t\treturn nil\n\t}\n\n\tif _, err := os.Stat(samlCert.SamlCertPath); os.IsNotExist(err) {\n\t\treturn errors.Wrap(err, \"tasks/download_saml_cert:Validate() saml certificate does not exist\")\n\t}\n\n\t_, err := ioutil.ReadFile(samlCert.SamlCertPath)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"tasks/download_saml_cert:Validate() Error while reading Saml CA Certificate file\")\n\t}\n\n\treturn nil\n}", "func checkSecurityGroupFlags() string {\n\tcheckResult := \"\"\n\tcheckResult += checkGroupId()\n\tcheckResult += checkProtocol()\n\tcheckResult += checkWay()\n\treturn checkResult\n}", "func (m *CaCert) Validate(formats strfmt.Registry) error {\n\tvar res []error\n\n\tif err := m.validateCaName(formats); err != nil {\n\t\t// prop\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateCertificate(formats); err != nil {\n\t\t// prop\n\t\tres = append(res, err)\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}", "func validateCertificatePEM(certPEM string, options *x509.VerifyOptions) ([]*x509.Certificate, error) 
{\n\tcerts, err := cert.ParseCertsPEM([]byte(certPEM))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif len(certs) < 1 {\n\t\treturn nil, fmt.Errorf(\"invalid/empty certificate data\")\n\t}\n\n\tif options != nil {\n\t\t// Ensure we don't report errors for expired certs or if\n\t\t// the validity is in the future.\n\t\t// Not that this can be for the actual certificate or any\n\t\t// intermediates in the CA chain. This allows the router to\n\t\t// still serve an expired/valid-in-the-future certificate\n\t\t// and lets the client to control if it can tolerate that\n\t\t// (just like for self-signed certs).\n\t\t_, err = certs[0].Verify(*options)\n\t\tif err != nil {\n\t\t\tif invalidErr, ok := err.(x509.CertificateInvalidError); !ok || invalidErr.Reason != x509.Expired {\n\t\t\t\treturn certs, fmt.Errorf(\"error verifying certificate: %s\", err.Error())\n\t\t\t}\n\t\t}\n\t}\n\n\treturn certs, nil\n}", "func prettyPrintCertificate(w io.Writer, certs []*x509.Certificate, short bool) error {\n\tformat := certificateText\n\tif short {\n\t\tformat = certificateShortText\n\t}\n\tfor i, cert := range certs {\n\t\tinfo, err := format(cert)\n\t\tif err != nil {\n\t\t\treturn serrors.WrapStr(\"formatting certificate info\", err, \"index\", i)\n\t\t}\n\t\tif _, err = fmt.Fprint(w, info); err != nil {\n\t\t\treturn serrors.WrapStr(\"writing certificate info\", err, \"index\", i)\n\t\t}\n\t}\n\treturn nil\n}", "func (o *V1VirusDatasetRequest) GetFormatOk() (*V1TableFormat, bool) {\n\tif o == nil || o.Format == nil {\n\t\treturn nil, false\n\t}\n\treturn o.Format, true\n}", "func (o *PodnetOptions) Validate() error {\n\t/*\n\t\tif len(o.rawConfig.CurrentContext) == 0 {\n\t\t\treturn errNoContext\n\t\t}\n\t\tif len(o.args) > 1 {\n\t\t\treturn fmt.Errorf(\"either one or no arguments are allowed\")\n\t\t}\n\t*/\n\n\tif o.outputFormat != \"\" {\n\t\to.outputFormat = strings.ToLower(o.outputFormat)\n\n\t\tswitch o.outputFormat {\n\t\tcase \"json\", \"text\": // valid 
format\n\t\tdefault: // illegal format\n\t\t\treturn fmt.Errorf(\"unknown output format %s\", o.outputFormat)\n\t\t}\n\t}\n\n\treturn nil\n}", "func (f Format) Valid() bool {\n\tfor _, valid := range Formats {\n\t\tif valid == f {\n\t\t\treturn true\n\t\t}\n\t}\n\n\treturn false\n}", "func (o LookupCertificateResultOutput) Format() pulumi.StringOutput {\n\treturn o.ApplyT(func(v LookupCertificateResult) string { return v.Format }).(pulumi.StringOutput)\n}", "func (cer *CER) sanityCheck() error {\n\tif len(cer.OriginHost) == 0 {\n\t\treturn ErrMissingOriginHost\n\t}\n\tif len(cer.OriginRealm) == 0 {\n\t\treturn ErrMissingOriginRealm\n\t}\n\treturn nil\n}", "func ExpectValidCertificate(csr *certificatesv1.CertificateSigningRequest, _ crypto.Signer) error {\n\t_, err := pki.DecodeX509CertificateBytes(csr.Status.Certificate)\n\treturn err\n}", "func CheckCerts(certDir string) error {\n\tcertFile := filepath.Join(certDir, \"cert.pem\")\n\tkeyFile := filepath.Join(certDir, \"key.pem\")\n\n\tif !file.Exists(certFile) || !file.Exists(keyFile) {\n\t\tlog.Warnln(log.Global, \"gRPC certificate/key file missing, recreating...\")\n\t\treturn genCert(certDir)\n\t}\n\n\tpemData, err := os.ReadFile(certFile)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"unable to open TLS cert file: %s\", err)\n\t}\n\n\tif err = verifyCert(pemData); err != nil {\n\t\tif err != errCertExpired {\n\t\t\treturn err\n\t\t}\n\t\tlog.Warnln(log.Global, \"gRPC certificate has expired, regenerating...\")\n\t\treturn genCert(certDir)\n\t}\n\n\tlog.Infoln(log.Global, \"gRPC TLS certificate and key files exist, will use them.\")\n\treturn nil\n}", "func isValidSecret(secret *v1.Secret) (bool, error) {\n\tswitch secret.Type {\n\t// We will accept TLS secrets that also have the 'ca.crt' payload.\n\tcase v1.SecretTypeTLS:\n\t\tdata, ok := secret.Data[v1.TLSCertKey]\n\t\tif !ok {\n\t\t\treturn false, errors.New(\"missing TLS certificate\")\n\t\t}\n\n\t\tif err := validateCertificate(data); err != nil 
{\n\t\t\treturn false, fmt.Errorf(\"invalid TLS certificate: %v\", err)\n\t\t}\n\n\t\tdata, ok = secret.Data[v1.TLSPrivateKeyKey]\n\t\tif !ok {\n\t\t\treturn false, errors.New(\"missing TLS private key\")\n\t\t}\n\n\t\tif err := validatePrivateKey(data); err != nil {\n\t\t\treturn false, fmt.Errorf(\"invalid TLS private key: %v\", err)\n\t\t}\n\n\t// Generic secrets may have a 'ca.crt' only.\n\tcase v1.SecretTypeOpaque, \"\":\n\t\tif _, ok := secret.Data[v1.TLSCertKey]; ok {\n\t\t\treturn false, nil\n\t\t}\n\n\t\tif _, ok := secret.Data[v1.TLSPrivateKeyKey]; ok {\n\t\t\treturn false, nil\n\t\t}\n\n\t\tif data := secret.Data[\"ca.crt\"]; len(data) == 0 {\n\t\t\treturn false, nil\n\t\t}\n\n\tdefault:\n\t\treturn false, nil\n\n\t}\n\n\t// If the secret we propose to accept has a CA bundle key,\n\t// validate that it is PEM certificate(s). Note that the\n\t// CA bundle on TLS secrets is allowed to be an empty string\n\t// (see https://github.com/projectcontour/contour/issues/1644).\n\tif data := secret.Data[\"ca.crt\"]; len(data) > 0 {\n\t\tif err := validateCertificate(data); err != nil {\n\t\t\treturn false, fmt.Errorf(\"invalid CA certificate bundle: %v\", err)\n\t\t}\n\t}\n\n\treturn true, nil\n}", "func RegisterCertificateCheck(name string, filter *Filter, f func(*certdata.Data) *errors.Errors) {\n\tcertMutex.Lock()\n\tCertificate = append(Certificate, certificateCheck{name, filter, f})\n\tcertMutex.Unlock()\n}", "func validateFnFormat(fnType reflect.Type) error {\n\tif fnType.Kind() != reflect.Func {\n\t\treturn fmt.Errorf(\"expected a func as input but was %s\", fnType.Kind())\n\t}\n\tif fnType.NumIn() < 1 {\n\t\treturn fmt.Errorf(\n\t\t\t\"expected at least one argument of type context.Context in function, found %d input arguments\",\n\t\t\tfnType.NumIn(),\n\t\t)\n\t}\n\tif !isContext(fnType.In(0)) {\n\t\treturn fmt.Errorf(\"expected first argument to be context.Context but found %s\", fnType.In(0))\n\t}\n\tif fnType.NumOut() != 1 {\n\t\treturn 
fmt.Errorf(\n\t\t\t\"expected function to return only error but found %d return values\", fnType.NumOut(),\n\t\t)\n\t}\n\tif !isError(fnType.Out(0)) {\n\t\treturn fmt.Errorf(\n\t\t\t\"expected function to return error but found %d\", fnType.Out(0).Kind(),\n\t\t)\n\t}\n\treturn nil\n}", "func AssertCertificateHasClientAuthUsage(t *testing.T, cert *x509.Certificate) {\n\tfor i := range cert.ExtKeyUsage {\n\t\tif cert.ExtKeyUsage[i] == x509.ExtKeyUsageClientAuth {\n\t\t\treturn\n\t\t}\n\t}\n\tt.Error(\"cert has not ClientAuth usage as expected\")\n}", "func canReadCertAndKey(certPath, keyPath string) error {\n\tcertReadable := canReadFile(certPath)\n\tkeyReadable := canReadFile(keyPath)\n\n\tif certReadable == false && keyReadable == false {\n\t\treturn fmt.Errorf(\"error reading key and certificate\")\n\t}\n\n\tif certReadable == false {\n\t\treturn fmt.Errorf(\"error reading %s, certificate and key must be supplied as a pair\", certPath)\n\t}\n\n\tif keyReadable == false {\n\t\treturn fmt.Errorf(\"error reading %s, certificate and key must be supplied as a pair\", keyPath)\n\t}\n\n\treturn nil\n}", "func canReadCertAndKey(certPath, keyPath string) error {\n\tcertReadable := canReadFile(certPath)\n\tkeyReadable := canReadFile(keyPath)\n\n\tif certReadable == false && keyReadable == false {\n\t\treturn fmt.Errorf(\"error reading key and certificate\")\n\t}\n\n\tif certReadable == false {\n\t\treturn fmt.Errorf(\"error reading %s, certificate and key must be supplied as a pair\", certPath)\n\t}\n\n\tif keyReadable == false {\n\t\treturn fmt.Errorf(\"error reading %s, certificate and key must be supplied as a pair\", keyPath)\n\t}\n\n\treturn nil\n}", "func Check(d *certdata.Data) *errors.Errors {\n\tvar e = errors.New(nil)\n\n\tif d.Issuer != nil && !bytes.Equal(d.Cert.RawIssuer, d.Issuer.RawSubject) {\n\t\te.Err(\"Certificate Issuer Distinguished Name field MUST match the Subject DN of the Issuing CA\")\n\t\treturn e\n\t}\n\n\treturn e\n}", "func (o 
BackendTlsPtrOutput) ValidateCertificateChain() pulumi.BoolPtrOutput {\n\treturn o.ApplyT(func(v *BackendTls) *bool {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn v.ValidateCertificateChain\n\t}).(pulumi.BoolPtrOutput)\n}", "func (o *TppCertificateParams) GetValidUntilDateOk() (*string, bool) {\n\tif o == nil || o.ValidUntilDate == nil {\n\t\treturn nil, false\n\t}\n\treturn o.ValidUntilDate, true\n}", "func (t Authority) Check(ctx context.Context, req *CheckRequest) (resp *CheckResponse, err error) {\n\tvar (\n\t\tm1 agent.TLSCertificates\n\t\tencoded []byte\n\t)\n\n\tif encoded, err = os.ReadFile(t.protoPath); err != nil {\n\t\treturn &CheckResponse{}, status.Error(codes.Unavailable, \"missing info\")\n\t}\n\n\tif err = proto.Unmarshal(encoded, &m1); err != nil {\n\t\treturn &CheckResponse{}, status.Error(codes.Unavailable, \"invalid authority\")\n\t}\n\n\tif m1.Fingerprint != req.Fingerprint {\n\t\treturn &CheckResponse{}, status.Error(codes.NotFound, \"fingerprint mismatch\")\n\t}\n\n\treturn &CheckResponse{}, nil\n}", "func ReadCertificate(data []byte) (certificate *Certificate, remainder []byte, err error) {\n\tcertificate, err = NewCertificate(data)\n\tif err != nil && err.Error() == \"certificate parsing warning: certificate data is longer than specified by length\" {\n\t\tremainder = certificate.ExcessBytes()\n\t\terr = nil\n\t}\n\treturn\n}", "func (c *CertAuthID) Check() error {\n\tif err := c.Type.Check(); err != nil {\n\t\treturn trace.Wrap(err)\n\t}\n\tif strings.TrimSpace(c.DomainName) == \"\" {\n\t\treturn trace.BadParameter(\"identity validation error: empty domain name\")\n\t}\n\treturn nil\n}", "func (s *udtSocket) checkValidHandshake(m *multiplexer, p *packet.HandshakePacket, from *net.UDPAddr) bool {\n\tif s.udtVer != 4 {\n\t\treturn false\n\t}\n\treturn true\n}", "func CheckBadLinkerFlags(ctx BaseModuleContext, prop string, flags []string) {\n\tfor _, flag := range flags {\n\t\tflag = strings.TrimSpace(flag)\n\n\t\tif 
!strings.HasPrefix(flag, \"-\") {\n\t\t\tctx.PropertyErrorf(prop, \"Flag `%s` must start with `-`\", flag)\n\t\t} else if strings.HasPrefix(flag, \"-l\") {\n\t\t\tif ctx.Host() {\n\t\t\t\tctx.PropertyErrorf(prop, \"Bad flag: `%s`, use shared_libs or host_ldlibs instead\", flag)\n\t\t\t} else {\n\t\t\t\tctx.PropertyErrorf(prop, \"Bad flag: `%s`, use shared_libs instead\", flag)\n\t\t\t}\n\t\t} else if strings.HasPrefix(flag, \"-L\") {\n\t\t\tctx.PropertyErrorf(prop, \"Bad flag: `%s` is not allowed\", flag)\n\t\t} else if strings.HasPrefix(flag, \"-Wl,--version-script\") {\n\t\t\tctx.PropertyErrorf(prop, \"Bad flag: `%s`, use version_script instead\", flag)\n\t\t} else if flag == \"--coverage\" {\n\t\t\tctx.PropertyErrorf(prop, \"Bad flag: `%s`, use native_coverage instead\", flag)\n\t\t} else if strings.Contains(flag, \" \") {\n\t\t\targs := strings.Split(flag, \" \")\n\t\t\tif args[0] == \"-z\" {\n\t\t\t\tif len(args) > 2 {\n\t\t\t\t\tctx.PropertyErrorf(prop, \"`-z` only takes one argument: `%s`\", flag)\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tctx.PropertyErrorf(prop, \"Bad flag: `%s` is not an allowed multi-word flag. Should it be split into multiple flags?\", flag)\n\t\t\t}\n\t\t}\n\t}\n}" ]
[ "0.6173354", "0.6016101", "0.5732977", "0.55939424", "0.5554805", "0.5551032", "0.55084914", "0.54934126", "0.53708816", "0.5328171", "0.52916765", "0.5222796", "0.5215715", "0.52015436", "0.5192821", "0.5157193", "0.50885516", "0.50314575", "0.502443", "0.50205916", "0.5017505", "0.49988452", "0.49825194", "0.4981845", "0.49788326", "0.4968657", "0.49582395", "0.49154413", "0.49117067", "0.49054348", "0.49037984", "0.49034977", "0.48994467", "0.48846835", "0.48786858", "0.48573306", "0.48034465", "0.47817802", "0.4777313", "0.4775215", "0.47677293", "0.47663137", "0.47629154", "0.47511145", "0.4735472", "0.47133613", "0.4709934", "0.47022897", "0.46969554", "0.46879178", "0.46867564", "0.46785852", "0.46473852", "0.46428883", "0.46294853", "0.4610677", "0.46097043", "0.46091715", "0.46087793", "0.45987424", "0.45899686", "0.45746183", "0.45697144", "0.45695114", "0.45463893", "0.454016", "0.4529676", "0.45259792", "0.45256615", "0.4513146", "0.4510958", "0.45098832", "0.45080668", "0.45059767", "0.45045975", "0.4503708", "0.44941726", "0.44899735", "0.4482769", "0.44733602", "0.44722885", "0.44720092", "0.44659194", "0.4457492", "0.44497234", "0.4431028", "0.44296518", "0.4427992", "0.44263336", "0.4413067", "0.44074467", "0.44074467", "0.4406721", "0.44050825", "0.4404762", "0.43970692", "0.43924367", "0.4390169", "0.43826684", "0.43691853" ]
0.8533605
0
ReadAtMost reads up to limit bytes from r, and reports an error when limit bytes are read.
ReadAtMost читает до limit байтов из r и сообщает об ошибке, когда прочитано limit байтов.
func ReadAtMost(r io.Reader, limit int64) ([]byte, error) { limitedReader := &io.LimitedReader{R: r, N: limit} data, err := io.ReadAll(limitedReader) if err != nil { return data, err } if limitedReader.N <= 0 { return data, ErrLimitReached } return data, nil }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (c Conn) LimitedRead(b []byte) (int, error) {\n\tr := io.LimitReader(c.Conn, c.maxReadBuffer)\n\treturn r.Read(b)\n}", "func (rr *Reader) ReadSizeWithLimit(limit uint32) int {\n\tif rr.Err != nil {\n\t\treturn 0\n\t}\n\tvar size32 uint32\n\tsize32, rr.Err = size32Decode(func() (byte, error) {\n\t\treturn rr.ReadByte(), rr.Err\n\t})\n\tif size32 > limit && rr.Err == nil {\n\t\trr.Err = errors.New(\"read size limit overflow\")\n\t\treturn 0\n\t}\n\treturn int(size32)\n}", "func isReadLimitReached(bytesLoaded int64, linesLoaded int64, logFilePosition string) bool {\n\treturn (logFilePosition == logs.Beginning && bytesLoaded >= byteReadLimit) ||\n\t\t(logFilePosition == logs.End && linesLoaded >= lineReadLimit)\n}", "func readFull(r io.Reader, buf []byte) (n int, err error) {\n\tfor n < len(buf) && err == nil {\n\t\tvar nn int\n\t\tnn, err = r.Read(buf[n:])\n\t\tn += nn\n\t}\n\tif n == len(buf) {\n\t\terr = nil\n\t}\n\treturn\n}", "func (s *Reader) Read(p []byte) (int, error) {\n\tlimiter := s.getRateLimit()\n\tif limiter == nil {\n\t\treturn s.r.Read(p)\n\t}\n\tn, err := s.r.Read(p)\n\tif err != nil {\n\t\treturn n, err\n\t}\n\t// log.Printf(\"read: %d\", n)\n\tif err := limiter.WaitN(s.ctx, n); err != nil {\n\t\treturn n, err\n\t}\n\treturn n, nil\n}", "func (self File) TailBytes(limitSize int) ([]byte, error) {\n\tvar limitBytes []byte\n\tfile := self.Open()\n\n\treadBytes, err := io.ReadAtLeast(file, limitBytes, limitSize)\n\tif readBytes != limitSize {\n\t\treturn limitBytes, fmt.Errorf(\"error: failed to complete read: read \", readBytes, \" out of \", limitSize, \"bytes\")\n\t} else {\n\t\treturn limitBytes, err\n\t}\n}", "func (s *Stream) willRead(n uint64) error {\n\ts.kind = -1 // rearm / re-initialize Kind\n\tif len(s.stack) > 0 {\n\t\ttos := s.stack[len(s.stack)-1]\n\t\t// read size cannot greater than the size of the list\n\t\tif n > tos.size-tos.pos {\n\t\t\treturn ErrElemTooLarge\n\t\t}\n\t\t// change the list 
position\n\t\ts.stack[len(s.stack)-1].pos += n\n\t}\n\tif s.limited {\n\n\t\tif n > s.remaining {\n\t\t\treturn ErrValueTooLarge\n\t\t}\n\t\ts.remaining -= n\n\t}\n\treturn nil\n}", "func limit(n int64) int {\n\tif n < 0 || maxio < n {\n\t\tFatal(\"bad io size:\", n)\n\t}\n\treturn int(n)\n}", "func (r *LimiterReader) Read(p []byte) (int, error) {\n\ttc := time.Now()\n\twd, abc := r.lim.request(tc, len(p))\n\tif 0 < wd {\n\t\ttimer := time.NewTimer(wd)\n\t\tselect {\n\t\tcase <-timer.C:\n\t\tcase <-r.closedChan:\n\t\t\tif !timer.Stop() {\n\t\t\t\t<-timer.C\n\t\t\t}\n\t\t\treturn 0, ErrClosed\n\t\t}\n\t}\n\tn, err := r.rd.Read(p[:abc])\n\tif n < abc {\n\t\tr.lim.refund(abc - n)\n\t}\n\treturn n, err\n}", "func (r *Reader) Read(p []byte) (written int, err error) {\n\tif r.eof {\n\t\terr = io.EOF\n\t\treturn\n\t}\n\n\tvar n int\n\tvar lim int\n\tfor written < len(p) && err == nil {\n\n\t\tr.limitedM.RLock()\n\t\tisLimited := r.limited\n\t\tr.limitedM.RUnlock()\n\n\t\tif isLimited {\n\n\t\t\tr.timeoutM.Lock()\n\t\t\ttimeLimit := r.timeout\n\t\t\tr.timeoutM.Unlock()\n\n\t\t\t//TODO consolidate two cases if possible. 
Dynamic select via reflection?\n\t\t\tif timeLimit > 0 {\n\t\t\t\tselect {\n\t\t\t\tcase <-time.After(timeLimit):\n\t\t\t\t\terr = ErrTimeoutExceeded\n\t\t\t\t\treturn\n\t\t\t\tcase lim = <-r.rate:\n\t\t\t\tdefault:\n\t\t\t\t\tif written > 0 {\n\t\t\t\t\t\treturn\n\t\t\t\t\t}\n\t\t\t\t\tlim = <-r.rate\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tselect {\n\t\t\t\tcase lim = <-r.rate:\n\t\t\t\tdefault:\n\t\t\t\t\tif written > 0 {\n\t\t\t\t\t\treturn\n\t\t\t\t\t}\n\t\t\t\t\tlim = <-r.rate\n\t\t\t\t}\n\t\t\t}\n\t\t} else {\n\t\t\tlim = len(p[written:])\n\t\t}\n\n\t\tif lim > len(p[written:]) {\n\t\t\tlim = len(p[written:])\n\t\t}\n\n\t\tn, err = r.r.Read(p[written:][:lim])\n\t\twritten += n\n\n\t\tif err != nil {\n\t\t\tif err == io.EOF {\n\t\t\t\tr.eof = true\n\t\t\t}\n\t\t\treturn\n\t\t}\n\t}\n\treturn\n}", "func readFull(r io.Reader, buf []byte) (int, error) {\n\tvar n int\n\tvar err error\n\tfor n < len(buf) && err == nil {\n\t\tvar nn int\n\t\tnn, err = r.Read(buf[n:])\n\t\tn += nn\n\t}\n\tif n == len(buf) {\n\t\treturn n, nil\n\t}\n\tif err == io.EOF {\n\t\treturn n, io.ErrUnexpectedEOF\n\t}\n\treturn n, err\n}", "func (alr *adjustableLimitedReader) Read(p []byte) (n int, err error) {\n\tn, err = alr.R.Read(p)\n\tif err == io.EOF && alr.R.N <= 0 {\n\t\t// return our custom error since io.Reader returns EOF\n\t\terr = LineLimitExceeded\n\t}\n\treturn\n}", "func (r *Reader) ReadFull(n int) ([]byte, error) {\n\tunreadBytes := r.unreadBytes()\n\tif unreadBytes >= n {\n\t\tresult := r.buf[r.r : r.r+n]\n\t\tr.r += n\n\t\treturn result, nil\n\t}\n\n\tneedToRead := n - unreadBytes\n\tif r.capLeft() >= needToRead {\n\t\t// enough room to Read\n\t\tif err := r.readAtLeast(needToRead); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tresult := r.buf[r.r : r.r+n]\n\t\tr.r += n\n\t\treturn result, nil\n\t}\n\n\t// not enough room\n\t// check if buf is large enough\n\tif n > len(r.buf) {\n\t\tif cap(r.buf) == 0 {\n\t\t\treturn nil, ErrBufReaderAlreadyClosed\n\t\t}\n\n\t\t// make a 
larger buf\n\t\tnewBuf := slabPool.Alloc(n + 128)\n\t\tr.w = copy(newBuf, r.buf[r.r:r.w])\n\t\tr.r = 0\n\t\tslabPool.Free(r.buf)\n\t\tr.buf = newBuf\n\t} else {\n\t\t// enough room, shift existing data to left\n\t\tr.w = copy(r.buf, r.buf[r.r:r.w])\n\t\tr.r = 0\n\t}\n\n\tif err := r.readAtLeast(needToRead); err != nil {\n\t\treturn nil, err\n\t}\n\n\tresult := r.buf[r.r : r.r+n]\n\tr.r += n\n\treturn result, nil\n}", "func (c *LimitedConnection) Read(b []byte) (read int, err error) {\n\treturn c.rateLimitLoop(&c.readNotBefore, &c.readDeadline, c.inner.Read, b)\n}", "func read(r io.Reader) ([]byte, error) {\n\tvar data []byte\n\t// defer r.Close()\n\tbufSize := 1024 * 10\n\tbuf := make([]byte, bufSize) //一次读取多少个字节\n\tbfRd := bufio.NewReader(r)\n\tfor {\n\t\tn, err := bfRd.Read(buf)\n\t\tdata = append(data, buf[:n]...)\n\t\tif err != nil { //遇到任何错误立即返回,并忽略EOF错误信息\n\t\t\tif err == io.EOF {\n\t\t\t\treturn data, nil\n\t\t\t}\n\t\t\treturn data, err\n\t\t}\n\t}\n\treturn data, nil\n}", "func (c *conn) Read(b []byte) (int, error) {\n\tc.ronce.Do(c.sleepLatency)\n\n\tn, err := c.rb.FillThrottle(func(remaining int64) (int64, error) {\n\t\tmax := remaining\n\t\tif l := int64(len(b)); max > l {\n\t\t\tmax = l\n\t\t}\n\n\t\tn, err := c.Conn.Read(b[:max])\n\t\treturn int64(n), err\n\t})\n\tif err != nil && err != io.EOF {\n\t\tlog.Errorf(\"trafficshape: error on throttled read: %v\", err)\n\t}\n\n\treturn int(n), err\n}", "func (r *objReader) readFull(b []byte) error {\n\tif r.err != nil {\n\t\treturn r.err\n\t}\n\tif r.offset+int64(len(b)) > r.limit {\n\t\treturn r.error(io.ErrUnexpectedEOF)\n\t}\n\tn, err := io.ReadFull(r.b, b)\n\tr.offset += int64(n)\n\tif err != nil {\n\t\treturn r.error(err)\n\t}\n\treturn nil\n}", "func (r *objReader) readFull(b []byte) error {\n\tif r.err != nil {\n\t\treturn r.err\n\t}\n\tif r.offset+int64(len(b)) > r.limit {\n\t\treturn r.error(io.ErrUnexpectedEOF)\n\t}\n\tn, err := io.ReadFull(r.b, b)\n\tr.offset += int64(n)\n\tif err != nil 
{\n\t\treturn r.error(err)\n\t}\n\treturn nil\n}", "func (d *Decoder) readMore() {\n\tif d.complete {\n\t\treturn\n\t}\n\tn := cap(d.buf) - len(d.buf)\n\tif n < minRead {\n\t\t// We need to grow the buffer. Note that we don't have to copy\n\t\t// the unused part of the buffer (d.buf[:d.r0]).\n\t\t// TODO provide a way to limit the maximum size that\n\t\t// the buffer can grow to.\n\t\tused := len(d.buf) - d.r0\n\t\tn1 := cap(d.buf) * 2\n\t\tif n1-used < minGrow {\n\t\t\tn1 = used + minGrow\n\t\t}\n\t\tbuf1 := make([]byte, used, n1)\n\t\tcopy(buf1, d.buf[d.r0:])\n\t\td.buf = buf1\n\t\td.r1 -= d.r0\n\t\td.r0 = 0\n\t}\n\tn, err := d.rd.Read(d.buf[len(d.buf):cap(d.buf)])\n\td.buf = d.buf[:len(d.buf)+n]\n\tif err == nil {\n\t\treturn\n\t}\n\td.complete = true\n\tif err != io.EOF {\n\t\td.err = err\n\t}\n}", "func (c *TestConnection) Read(b []byte) (n int, err error) {\n toRet := 0\n if b == nil {\n return 0, errors.New(\"b cannot be nil\")\n }\n\n if c.ReadError != nil && c.TimesReadCalled == c.ThrowReadErrorAfter {\n return 0, c.ReadError\n }\n\n if len(c.ToRead) == 0 {\n return 0, nil\n } \n \n dataToRet := c.ToRead[0]\n buffLength := len(b)\n \n // b is big enough to hold dataToRet\n if buffLength >= len(dataToRet) {\n copy(b, []byte(dataToRet))\n c.ToRead = append(c.ToRead[:0], c.ToRead[1:]...) 
// remove the first element \n toRet = len(dataToRet)\n } else {\n // need to only return the maximum we can\n remains := dataToRet[buffLength:len(dataToRet)]\n c.ToRead[0] = remains // keep the remainder of the data\n copy(b, dataToRet[0:buffLength])\n toRet = buffLength\n }\n \n c.TimesReadCalled++\n return toRet, nil\n}", "func (r *Reader) Unlimit() {\n\tr.newLimit <- nil\n}", "func (o *ODirectReader) Read(buf []byte) (n int, err error) {\n\tif o.err != nil && (len(o.buf) == 0 || !o.seenRead) {\n\t\treturn 0, o.err\n\t}\n\tif o.buf == nil {\n\t\tif o.SmallFile {\n\t\t\to.bufp = ODirectPoolSmall.Get().(*[]byte)\n\t\t} else {\n\t\t\to.bufp = ODirectPoolLarge.Get().(*[]byte)\n\t\t}\n\t}\n\tif !o.seenRead {\n\t\to.buf = *o.bufp\n\t\tn, err = o.File.Read(o.buf)\n\t\tif err != nil && err != io.EOF {\n\t\t\tif isSysErrInvalidArg(err) {\n\t\t\t\tif err = disk.DisableDirectIO(o.File); err != nil {\n\t\t\t\t\to.err = err\n\t\t\t\t\treturn n, err\n\t\t\t\t}\n\t\t\t\tn, err = o.File.Read(o.buf)\n\t\t\t}\n\t\t\tif err != nil && err != io.EOF {\n\t\t\t\to.err = err\n\t\t\t\treturn n, err\n\t\t\t}\n\t\t}\n\t\tif n == 0 {\n\t\t\t// err is likely io.EOF\n\t\t\to.err = err\n\t\t\treturn n, err\n\t\t}\n\t\to.err = err\n\t\to.buf = o.buf[:n]\n\t\to.seenRead = true\n\t}\n\tif len(buf) >= len(o.buf) {\n\t\tn = copy(buf, o.buf)\n\t\to.seenRead = false\n\t\treturn n, o.err\n\t}\n\tn = copy(buf, o.buf)\n\to.buf = o.buf[n:]\n\t// There is more left in buffer, do not return any EOF yet.\n\treturn n, nil\n}", "func (c *Conn) setReadRemaining(n int64) error {\n\tif n < 0 {\n\t\treturn ErrReadLimit\n\t}\n\n\tc.readRemaining = n\n\treturn nil\n}", "func (r *ThrottledReadCloser) Read(buf []byte) (int, error) {\n\tsubBuff, delay, err := getBufferAndDelay(r.pool, r.id, len(buf))\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\ttime.Sleep(delay)\n\tif subBuff > len(buf) {\n\t\tsubBuff = len(buf)\n\t}\n\tn, err := r.origReadCloser.Read(buf[:subBuff])\n\treturn n, err\n}", "func LimitReader(r 
io.Reader, n int64) io.Reader {\n\treturn &LimitedReader{r, n}\n}", "func (this *reader) ioRead(buffer []byte) (n int, err error) {\n\tn, err = this.ioReader.Read(buffer)\n\tif err != nil {\n\t\treturn\n\t}\n\tif n != len(buffer) {\n\t\terr = fmt.Errorf(\"Reading failed. Expected %v bytes but %v was read\",\n\t\t\tlen(buffer), n)\n\t}\n\treturn\n}", "func ReadAll(r io.Reader, threshold int) ([]byte, io.Closer, error) {\n\tlr := io.LimitedReader{R: r, N: int64(threshold) + 1}\n\tb, err := ioutil.ReadAll(&lr)\n\tif err != nil {\n\t\treturn b, nilClose, err\n\t}\n\tif lr.N > 0 {\n\t\treturn b, nilClose, nil\n\t}\n\tfh, err := ioutil.TempFile(\"\", \"iohlp-readall-\")\n\tif err != nil {\n\t\treturn b, nilClose, err\n\t}\n\tos.Remove(fh.Name())\n\tif _, err = fh.Write(b); err != nil {\n\t\treturn b, nilClose, err\n\t}\n\tif _, err = io.Copy(fh, r); err != nil {\n\t\tfh.Close()\n\t\treturn nil, nilClose, err\n\t}\n\tb, closer, err := Mmap(fh)\n\tfh.Close()\n\tif err != nil {\n\t\tif closer != nil {\n\t\t\tcloser.Close()\n\t\t}\n\t\treturn b, nil, err\n\t}\n\treturn b, closer, nil\n}", "func (r *ChannelReader) Read(b []byte) (sz int, err error) {\n\tif len(b) == 0 {\n\t\treturn 0, io.ErrShortBuffer\n\t}\n\n\tfor {\n\t\tif len(r.buf) > 0 {\n\t\t\tif len(r.buf) <= len(b) {\n\t\t\t\tsz = len(r.buf)\n\t\t\t\tcopy(b, r.buf)\n\t\t\t\tr.buf = nil\n\t\t\t} else {\n\t\t\t\tcopy(b, r.buf)\n\t\t\t\tr.buf = r.buf[len(b):]\n\t\t\t\tsz = len(b)\n\t\t\t}\n\t\t\treturn sz, nil\n\t\t}\n\n\t\tvar ok bool\n\t\tif r.deadline.IsZero() {\n\t\t\tr.buf, ok = <-r.c\n\t\t} else {\n\t\t\ttimer := time.NewTimer(r.deadline.Sub(time.Now()))\n\t\t\tdefer timer.Stop()\n\n\t\t\tselect {\n\t\t\tcase r.buf, ok = <-r.c:\n\t\t\tcase <-timer.C:\n\t\t\t\treturn 0, context.DeadlineExceeded\n\t\t\t}\n\t\t}\n\t\tif len(r.buf) == 0 && !ok {\n\t\t\treturn 0, io.EOF\n\t\t}\n\t}\n}", "func readSize(conn net.Conn, size int64, buf *[]byte) error {\n\t*buf = make([]byte, 0)\n\tvar err error\n\tleftSize := size\n\tfor 
{\n\n\t\tbufinner := make([]byte, leftSize)\n\t\tvar n int\n\t\tn, err = conn.Read(bufinner)\n\t\tleftSize -= int64(n)\n\t\tif err == nil {\n\t\t\t*buf = slice_merge(*buf, bufinner)\n\t\t\tif leftSize <= 0 {\n\t\t\t\t//read end\n\t\t\t\tbreak\n\t\t\t}\n\t\t} else {\n\t\t\tif err != io.EOF {\n\t\t\t\tfmt.Println(err)\n\t\t\t}\n\t\t\tbreak\n\t\t}\n\t}\n\treturn err\n}", "func MaxBytesReader(w ResponseWriter, r io.ReadCloser, n int64) io.ReadCloser {\n\treturn &maxBytesReader{respWriter: w, readCloser: r, bytesRemaining: n}\n}", "func FutureRead(r io.Reader, b []byte) func() (int, error) {\n\tdone := make(chan ioResult)\n\n\tgo func() {\n\t\tn, err := r.Read(b)\n\n\t\tdone <- ioResult{n, err}\n\t}()\n\n\treturn func() (int, error) {\n\t\tres := <-done\n\n\t\treturn res.n, res.err\n\t}\n}", "func (b *buffer) read(rd io.Reader) (err error) {\n\tdefer func() {\n\t\tif r := recover(); r != nil {\n\t\t\terr = fmt.Errorf(\"panic reading: %v\", r)\n\t\t\tb.err = err\n\t\t}\n\t}()\n\n\tvar n int\n\tbuf := b.buf[0:b.size]\n\tfor n < b.size {\n\t\tn2, err := rd.Read(buf)\n\t\tn += n2\n\t\tif err != nil {\n\t\t\tb.err = err\n\t\t\tbreak\n\t\t}\n\t\tbuf = buf[n2:]\n\t}\n\tb.buf = b.buf[0:n]\n\tb.offset = 0\n\treturn b.err\n}", "func (s *settings) GetMaxReadSize() uint {\n\treturn s.rMaxSize\n}", "func (r *objReader) peek(n int) ([]byte, error) {\n\tif r.err != nil {\n\t\treturn nil, r.err\n\t}\n\tif r.offset >= r.limit {\n\t\tr.error(io.ErrUnexpectedEOF)\n\t\treturn nil, r.err\n\t}\n\tb, err := r.b.Peek(n)\n\tif err != nil {\n\t\tif err != bufio.ErrBufferFull {\n\t\t\tr.error(err)\n\t\t}\n\t}\n\treturn b, err\n}", "func (s *Server) backReader(origAddr *net.UDPAddr, c *net.UDPConn) {\n\tdefer s.wg.Done()\n\n\tvar err error\n\tbuf := make([]byte, bufSize)\n\n\tfor {\n\t\tselect {\n\t\tcase <-s.done:\n\t\t\treturn\n\t\tdefault:\n\t\t}\n\n\t\terr = c.SetReadDeadline(time.Now().Add(time.Second))\n\t\tif err != nil {\n\t\t\tlog.Fatalf(\"Read error: failed to set deadline: %v\", 
err)\n\t\t}\n\n\t\tn, _, err := c.ReadFromUDP(buf)\n\t\tif err != nil {\n\t\t\tif _, ok := err.(net.Error); ok {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tlog.Printf(\"Read error: %v\", err)\n\t\t\tcontinue\n\t\t}\n\t\tif n == 0 {\n\t\t\tlog.Printf(\"Read error: no bytes read\")\n\t\t\tcontinue\n\t\t}\n\n\t\tdata := make([]byte, n)\n\t\tcopy(data, buf[0:n])\n\t\ts.backRecv <- packet{origAddr, data}\n\t}\n}", "func ImplementsReaderAtOpts(t *testing.T, reader io.ReaderAt, length int64, opts ReaderAtOpts) bool {\n\tvar buf = make([]byte, opts.BufferSize)\n\tvar err error\n\tvar n int64\n\n\tif !noopRead(t, toReader(reader, 0)) {\n\t\treturn false\n\t}\n\n\tfor err == nil {\n\t\tvar a int\n\t\ta, err = reader.ReadAt(buf, n)\n\t\tn += int64(a)\n\t\tif !(assert.GreaterOrEqual(t, a, 0) && assert.LessOrEqual(t, int64(opts.BufferSize), n)) {\n\t\t\treturn false\n\t\t}\n\n\t\tif 0 < n && n < int64(opts.BufferSize) {\n\t\t\treturn assert.Error(t, err)\n\t\t}\n\t}\n\n\tgrp, _ := errgroup.WithContext(context.Background())\n\tfor i := int64(0); i < length && i < 50; i++ {\n\t\ti := i\n\t\tgrp.Go(func() error {\n\t\t\tvar buf = make([]byte, opts.BufferSize)\n\t\t\t_, err := reader.ReadAt(buf, i)\n\t\t\tassert.NoError(t, err)\n\t\t\treturn err\n\t\t})\n\t}\n\terr2 := grp.Wait()\n\n\treturn assert.EqualError(t, err, io.EOF.Error()) && assert.NoError(t, err2)\n}", "func ReadFull(f io.Reader, buf []byte) int {\n\tn, err := io.ReadFull(f, buf)\n\tAbortIf(err)\n\treturn n\n}", "func readFull(r io.Reader, p []byte) (int, error) {\n\tcur := 0\n\tfor cur < len(p) {\n\t\tamt, err := r.Read(p[cur:])\n\t\tcur += amt\n\t\tif err != nil {\n\t\t\treturn cur, err\n\t\t}\n\t}\n\treturn cur, nil\n}", "func (c *ByteBuffer) ReadN(n int) (r []byte, err error) {\n\tif n > 0 {\n\t\tif c.Len() >= n { // optimistic branching\n\t\t\tr = make([]byte, n)\n\t\t\t_, _ = c.Read(r)\n\t\t} else {\n\t\t\terr = ErrBufferNotEnoughByteToRead\n\t\t}\n\t}\n\treturn\n}", "func (download *Download) Read(data []byte) (n int, err 
error) {\n\tif download.closed {\n\t\treturn 0, Error.New(\"already closed\")\n\t}\n\n\tif download.reader == nil {\n\t\terr = download.resetReader(download.offset)\n\t\tif err != nil {\n\t\t\treturn 0, err\n\t\t}\n\t}\n\n\tif download.limit == 0 {\n\t\treturn 0, io.EOF\n\t}\n\tif download.limit > 0 && download.limit < int64(len(data)) {\n\t\tdata = data[:download.limit]\n\t}\n\tn, err = download.reader.Read(data)\n\tif download.limit >= 0 {\n\t\tdownload.limit -= int64(n)\n\t}\n\tdownload.offset += int64(n)\n\n\treturn n, err\n}", "func (s *settings) SetMaxReadSize(size uint) {\n\ts.rMaxSize = size\n}", "func (s Stream) Limit(n int) Stream {\n\treturn s.Pipe(func() func(r Record) (Record, error) {\n\t\tvar count int\n\n\t\treturn func(r Record) (Record, error) {\n\t\t\tif count < n {\n\t\t\t\tcount++\n\t\t\t\treturn r, nil\n\t\t\t}\n\n\t\t\treturn nil, ErrStreamClosed\n\t\t}\n\t})\n}", "func (r *Reader) Remaining() int {\n\treturn len(r.buf)\n}", "func (tr *Reader) Read(b []byte) (n int, err error) {\n\tif tr.nb == 0 {\n\t\t// file consumed\n\t\treturn 0, io.EOF\n\t}\n\n\tif int64(len(b)) > tr.nb {\n\t\tb = b[0:tr.nb]\n\t}\n\tn, err = tr.r.Read(b)\n\ttr.nb -= int64(n)\n\n\tif err == io.EOF && tr.nb > 0 {\n\t\terr = io.ErrUnexpectedEOF\n\t}\n\ttr.err = err\n\treturn\n}", "func (c *BaseConn) SetReadLimit(limit int64) {\n\tc.stream.SetReadLimit(limit)\n}", "func (l *Clog) Read(offset uint64, maxToRead uint64) (dataRead []byte, lastReadOffset uint64, err error) {\n\tl.mu.RLock()\n\tdefer l.mu.RUnlock()\n\n\tvar max int = int(maxToRead)\n\tif max <= 0 {\n\t\tmax = internalMaxToRead\n\t} else if max > (internalMaxToRead * 10) {\n\t\t// prevent a case where a malicious actor sends\n\t\t// a maxToRead that is >>> computer RAM leading to OOM.\n\t\tmax = internalMaxToRead * 10\n\t}\n\n\tvar sizeReadSofar int\n\tfor _, seg := range l.segments {\n\t\tif seg.baseOffset > offset {\n\t\t\t// We exclude the offset from reads.\n\t\t\t// This allows people to use lastReadOffset in 
subsequent calls to l.Read\n\t\t\tb, errR := seg.Read()\n\t\t\tif errR != nil {\n\t\t\t\treturn dataRead, lastReadOffset, errR\n\t\t\t\t// TODO: test that if error occurs, we still return whatever has been read so far.\n\t\t\t}\n\t\t\tdataRead = append(dataRead, b...)\n\t\t\tlastReadOffset = seg.baseOffset\n\t\t\tsizeReadSofar = sizeReadSofar + len(b)\n\n\t\t\tif sizeReadSofar >= max {\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t}\n\n\t// clog reads the whole data from a segment, even if the individual segment\n\t// has data greater than maxToRead.\n\t// Thus, the returned lastReadOffset is safe to be used in subsequent l.Read calls\n\t// since the segment it belongs to wont be read again.\n\treturn dataRead, lastReadOffset, nil\n}", "func (s *stream) read(b []byte) (int, error) {\n\ts.log(logTypeStream, \"Reading from stream %v requested len = %v current chunks=%v\", s.id, len(b), len(s.recv.chunks))\n\n\tread := 0\n\n\tfor len(b) > 0 {\n\t\tif len(s.recv.chunks) == 0 {\n\t\t\tbreak\n\t\t}\n\n\t\tchunk := s.recv.chunks[0]\n\n\t\t// We have a gap.\n\t\tif chunk.offset > s.recv.offset {\n\t\t\tbreak\n\t\t}\n\n\t\t// Remove leading bytes\n\t\tremove := s.recv.offset - chunk.offset\n\t\tif remove > uint64(len(chunk.data)) {\n\t\t\t// Nothing left.\n\t\t\ts.recv.chunks = s.recv.chunks[1:]\n\t\t\tcontinue\n\t\t}\n\n\t\tchunk.offset += remove\n\t\tchunk.data = chunk.data[remove:]\n\n\t\t// Now figure out how much we can read\n\t\tn := copy(b, chunk.data)\n\t\tchunk.data = chunk.data[n:]\n\t\tchunk.offset += uint64(n)\n\t\ts.recv.offset += uint64(n)\n\t\tb = b[n:]\n\t\tread += n\n\n\t\t// This chunk is empty.\n\t\tif len(chunk.data) == 0 {\n\t\t\ts.recv.chunks = s.recv.chunks[1:]\n\t\t\tif chunk.last {\n\t\t\t\ts.closeRecv()\n\t\t\t}\n\t\t}\n\t}\n\n\t// If we have read no data, say we would have blocked.\n\tif read == 0 {\n\t\tif s.recv.closed {\n\t\t\treturn 0, ErrorStreamIsClosed\n\t\t}\n\t\treturn 0, ErrorWouldBlock\n\t}\n\treturn read, nil\n}", "func Read(r io.Reader, n uint64) 
([]byte, error) {\n\tread := make([]byte, n)\n\t_, err := r.Read(read)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"could not read from reader: %s\", err)\n\t}\n\n\treturn read, nil\n}", "func BufferedRead(reader *bufio.Reader, msgBuf []byte) (uint64, error) {\n\tlen := uint64(0)\n\tvar lengthBuf [8]byte\n\t_, err := io.ReadFull(reader, lengthBuf[:])\n\tlength := binary.BigEndian.Uint64(lengthBuf[:])\n\tif err != nil {\n\t\treturn len, err\n\t}\n\tfor bytesRead := uint64(0); bytesRead < length; {\n\t\treadLen, err := reader.Read(msgBuf[bytesRead:])\n\t\tif err != nil {\n\t\t\treturn len, err\n\t\t}\n\t\tbytesRead += uint64(readLen)\n\t\tlen += uint64(readLen)\n\t}\n\treturn len, nil\n}", "func TestReadEmptyAtEOF(t *testing.T) {\n\tb := new(Builder)\n\tslice := make([]byte, 0)\n\tn, err := b.Read(slice)\n\tif err != nil {\n\t\tt.Errorf(\"read error: %v\", err)\n\t}\n\tif n != 0 {\n\t\tt.Errorf(\"wrong count; got %d want 0\", n)\n\t}\n}", "func (alr *adjustableLimitedReader) setLimit(n int64) {\n\talr.R.N = n\n}", "func (self *IoRate) TakeMax(other *IoRate) {\n\tself.Read.TakeMax(other.Read)\n\tself.Write.TakeMax(other.Write)\n}", "func SumLimitedReader(algo string, r io.Reader, n int64) ([]byte, error) {\n\tlimit := &io.LimitedReader{\n\t\tR: r,\n\t\tN: n,\n\t}\n\n\treturn SumReader(algo, limit)\n}", "func (c *Conn) SetReadLimit(limit int64) {\n\tc.readLimit = limit\n}", "func Read_n_bytes(conn net.Conn, n int, buf []byte) int {\n\tif cap(buf) < n {\n\t\tlog.Fatalf(\"[error] read_n_bytes capacity < n\\n\")\n\t}\n\tfor read := 0; read < n; {\n\t\tgot, err := conn.Read(buf[read:n])\n\t\tif err != nil {\n\t\t\tlog.Printf(\"[error] reading %v\\n\", err.Error())\n\t\t\treturn 1\n\t\t}\n\t\tread += got\n\t}\n\treturn 0\n}", "func (d *Decoder) NRead() int64 {\n\tr := d.dec.Buffered().(*bytes.Reader)\n\treturn d.r.n - int64(r.Len())\n}", "func (b brokenReader) Read(p []byte) (n int, err error) {\n\treturn 0, errors.New(\"brokenReader is always broken.\")\n}", "func 
(pipe *slimPipe) Read(buffer []byte) (int, error) {\n\terrChannel := make(chan error)\n\tcountChannel := make(chan int)\n\tgo func() {\n\t\treadBytes, err := io.ReadAtLeast(pipe.reader, buffer, 1)\n\t\tif err != nil {\n\t\t\terrChannel <- err\n\t\t} else {\n\t\t\tcountChannel <- readBytes\n\t\t}\n\t\tclose(errChannel)\n\t\tclose(countChannel)\n\t}()\n\tselect {\n\tcase count := <-countChannel:\n\t\treturn count, nil\n\tcase err := <-errChannel:\n\t\treturn 0, err\n\tcase <-time.After(pipe.timeout):\n\t\treturn 0, fmt.Errorf(\"Timeout (%v)\", pipe.timeout)\n\t}\n}", "func (t *File) Read(b []byte) (int, error) {\n\t// Don't return 0, nil\n\tfor t.ring.Readable == 0 && !t.closed {\n\t\ttime.Sleep(PollIntervalFast) // Maybe swap this out for a notification at some point, but tbh, this works\n\t}\n\n\tif t.closed == true {\n\t\treturn 0, io.EOF\n\t}\n\n\t// Check for any waiting errors\n\tselect {\n\tcase err := <-t.errc:\n\t\tif err != nil { // Just in case XD\n\t\t\treturn 0, err\n\t\t}\n\tdefault:\n\t}\n\n\treturn t.ring.Read(b)\n}", "func (r *Reader) Read(buf []byte) (int, error) {\n\tdefer func() {\n\t\tr.offset = r.h.Offset()\n\t\tr.frameInfo = r.h.FrameInfo()\n\n\t\tf := r.h.MetaCheck()\n\t\tswitch {\n\t\tcase f&MetaNewID3 != 0:\n\t\t\tid3v2, err := r.h.MetaID3()\n\t\t\tif id3v2 != nil && err == nil {\n\t\t\t\tr.meta.ID3v2 = id3v2\n\t\t\t}\n\t\t}\n\n\t}()\n\tif r.nextOffset > r.totalRead {\n\t\tn, err := io.CopyN(ioutil.Discard, r.input, r.nextOffset-r.totalRead)\n\t\tr.totalRead += n\n\t\tif err != nil {\n\t\t\treturn 0, err\n\t\t}\n\t}\n\tfor r.bytesSinceOk < r.maxBadBytes {\n\t\tvar feed []byte\n\t\tif r.needMore {\n\t\t\tr.needMore = false\n\t\t\tfeedLen, err := r.input.Read(r.feedBuf)\n\t\t\tr.totalRead += int64(feedLen)\n\t\t\tr.nextOffset = r.totalRead\n\t\t\tif feedLen == 0 && err != nil {\n\t\t\t\treturn 0, err\n\t\t\t}\n\t\t\tfeed = r.feedBuf[:feedLen]\n\t\t\tr.bytesSinceOk += feedLen\n\t\t}\n\n\t\tswitch n, err := r.h.Decode(feed, buf); err {\n\t\tcase 
ErrNewFormat:\n\t\t\tr.outputFormat = r.h.OutputFormat()\n\t\t\tr.bytesSinceOk = 0\n\t\t\tif len(buf) == 0 {\n\t\t\t\treturn n, nil\n\t\t\t}\n\t\tcase ErrNeedMore:\n\t\t\tr.needMore = true\n\t\t\tif n > 0 {\n\t\t\t\tr.bytesSinceOk = 0\n\t\t\t\treturn n, nil\n\t\t\t}\n\t\tcase ErrDone:\n\t\t\treturn n, io.EOF\n\t\tdefault:\n\t\t\tr.bytesSinceOk = 0\n\t\t\treturn n, nil\n\n\t\t}\n\n\t}\n\tr.bytesSinceOk = 0\n\treturn 0, errors.New(\"No valid data found\")\n}", "func (conn *Conn) read(n int) ([]byte, error) {\n\tresult, err := conn.brw.Peek(n)\n\n\tif err != nil {\n\t\tfmt.Println(\"Error while peeking read buffer\", err)\n\t\treturn result, err\n\t}\n\n\t_, err = conn.brw.Discard(n)\n\n\tif err != nil {\n\t\tfmt.Println(\"Error while discarding read buffer\", err)\n\t}\n\n\treturn result, err\n}", "func (p *pipe) readFrom(r io.Reader) (read int64, failure error) {\n\tfor {\n\t\t// Wait until some space frees up\n\t\tsafeFree, err := p.inputWait()\n\t\tif err != nil {\n\t\t\treturn read, err\n\t\t}\n\t\t// Try to fill the buffer either till the reader position, or the end\n\t\tlimit := p.inPos + safeFree\n\t\tif limit > p.size {\n\t\t\tlimit = p.size\n\t\t}\n\t\tnr, err := r.Read(p.buffer[p.inPos:limit])\n\t\tread += int64(nr)\n\n\t\t// Update the pipe input state and handle any occurred errors\n\t\tp.inputAdvance(nr)\n\t\tif err == io.EOF {\n\t\t\treturn read, nil\n\t\t}\n\t\tif err != nil {\n\t\t\treturn read, err\n\t\t}\n\t}\n}", "func readerSize(in io.Reader, buffSize int64) (io.Reader, int64, error) {\n\tvar n int64\n\tvar err error\n\tvar r io.Reader\n\n\t// Read first buffSize bytes into buffer\n\tbuf := make([]byte, buffSize)\n\tm, err := in.Read(buf)\n\tif err != nil && err != io.EOF {\n\t\treturn nil, 0, err\n\t}\n\n\t// If first buffSize bytes are read successfully, that means the data size >= buffSize\n\tif int64(m) == buffSize {\n\t\tr = io.MultiReader(bytes.NewReader(buf), in)\n\t\tn = buffSizeLimit\n\t} else {\n\t\tbuf = buf[:m]\n\t\tr = 
bytes.NewReader(buf)\n\t}\n\n\treturn r, n, nil\n}", "func discardInput(r io.Reader, n uint32) {\n\tmaxSize := uint32(10 * 1024) // 10k at a time\n\tnumReads := n / maxSize\n\tbytesRemaining := n % maxSize\n\tif n > 0 {\n\t\tbuf := make([]byte, maxSize)\n\t\tfor i := uint32(0); i < numReads; i++ {\n\t\t\tio.ReadFull(r, buf)\n\t\t}\n\t}\n\tif bytesRemaining > 0 {\n\t\tbuf := make([]byte, bytesRemaining)\n\t\tio.ReadFull(r, buf)\n\t}\n}", "func discardInput(r io.Reader, n uint32) {\n\tmaxSize := uint32(10 * 1024) // 10k at a time\n\tnumReads := n / maxSize\n\tbytesRemaining := n % maxSize\n\tif n > 0 {\n\t\tbuf := make([]byte, maxSize)\n\t\tfor i := uint32(0); i < numReads; i++ {\n\t\t\tio.ReadFull(r, buf)\n\t\t}\n\t}\n\tif bytesRemaining > 0 {\n\t\tbuf := make([]byte, bytesRemaining)\n\t\tio.ReadFull(r, buf)\n\t}\n}", "func (c *RawConnectionMock) SetReadLimit(limit int64) {\n\tc.Called(limit)\n}", "func (ctn *Connection) Read(buf []byte, length int) (total int, aerr Error) {\n\tvar err error\n\n\t// if all bytes are not read, retry until successful\n\t// Don't worry about the loop; we've already set the timeout elsewhere\n\tfor total < length {\n\t\tvar r int\n\t\tif err = ctn.updateDeadline(); err != nil {\n\t\t\tbreak\n\t\t}\n\n\t\tif !ctn.compressed {\n\t\t\tr, err = ctn.conn.Read(buf[total:length])\n\t\t} else {\n\t\t\tr, err = ctn.inflater.Read(buf[total:length])\n\t\t\tif err == io.EOF && total+r == length {\n\t\t\t\tctn.compressed = false\n\t\t\t\terr = ctn.inflater.Close()\n\t\t\t}\n\t\t}\n\t\ttotal += r\n\t\tif err != nil {\n\t\t\tbreak\n\t\t}\n\t}\n\n\tif total == length {\n\t\t// If all required bytes are read, ignore any potential error.\n\t\t// The error will bubble up on the next network io if it matters.\n\t\treturn total, nil\n\t}\n\n\taerr = chainErrors(errToAerospikeErr(ctn, err), aerr)\n\n\tif ctn.node != nil {\n\t\tctn.node.incrErrorCount()\n\t\tatomic.AddInt64(&ctn.node.stats.ConnectionsFailed, 1)\n\t}\n\n\tctn.Close()\n\n\treturn total, 
aerr\n}", "func NewLineLimitReader(r io.Reader, n int) *LineLimitedReader { return &LineLimitedReader{r, n, 0} }", "func (br *BufferedReader) Read(v Decoder) (n int, err error) {\n\nRetry:\n\tif br.mustFill {\n\t\t// The buffer needs to be filled before trying to decode\n\t\t// another record.\n\t\tif br.mode == ModeManual {\n\t\t\treturn 0, ErrMustFill\n\t\t}\n\n\t\terr = br.Fill()\n\t\tif err != nil {\n\t\t\treturn 0, err\n\t\t}\n\t}\n\n\tif br.eof && br.offset == br.buffered {\n\t\t// We've reached EOF on a previous Fill attempt and the\n\t\t// buffered data has been fully consumed.\n\t\treturn 0, io.EOF\n\t}\n\n\tn, err = v.Decode(br.buffer[br.offset:br.buffered])\n\n\tif err == ErrShortBuffer {\n\t\t// Unable to decode a full record.\n\n\t\tif br.offset == 0 && br.buffered == len(br.buffer) {\n\t\t\t// We've tried to decode from the start of a full\n\t\t\t// buffer, so it seems we won't be able to fit this\n\t\t\t// record in our buffer.\n\t\t\treturn 0, ErrTooLarge\n\t\t}\n\n\t\tif br.eof {\n\t\t\t// We won't be able to read more bytes yet there's\n\t\t\t// a partial record left to decode.\n\t\t\treturn 0, io.ErrUnexpectedEOF\n\t\t}\n\n\t\tbr.mustFill = true\n\n\t\tgoto Retry\n\t}\n\n\tbr.offset += n\n\n\tif err != nil {\n\t\treturn n, err\n\t}\n\n\treturn n, nil\n}", "func (s *Server) readFull(reader *bufio.Reader, size int) ([]byte, error) {\n\tfullMsg := make([]byte, 0)\n\n\t// Without size in message, there is possible situation,\n\t// when message will be exactly 4096 bytes,\n\t// and Peek() will hang after read\n\t// + size allows to separate data is socket when there is multiple different messages\n\tfor {\n\t\tbuffSize := reader.Buffered() // max reader size == 4096\n\n\t\t// Get required chunk size\n\t\tchunkSize := 0\n\t\tif size < buffSize {\n\t\t\tchunkSize = size\n\t\t} else {\n\t\t\tchunkSize = buffSize\n\t\t}\n\t\t// Create tmp storage, read bytes into it, and append them to the full message.\n\t\tbuff := make([]byte, chunkSize)\n\n\t\t_, err 
:= reader.Read(buff)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tfullMsg = append(fullMsg, buff...)\n\n\t\t// Break if message is fully read.\n\t\tsize -= chunkSize\n\t\tif size == 0 {\n\t\t\tbreak\n\t\t}\n\n\t\t// Reader would be empty until peek.\n\t\tif _, err := reader.Peek(1); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\treturn fullMsg, nil\n}", "func (s *source) takeNBuffered(n int) (Fetch, int, bool) {\n\tvar r Fetch\n\tvar taken int\n\n\tb := &s.buffered\n\tbf := &b.fetch\n\tfor len(bf.Topics) > 0 && n > 0 {\n\t\tt := &bf.Topics[0]\n\n\t\tr.Topics = append(r.Topics, *t)\n\t\trt := &r.Topics[len(r.Topics)-1]\n\t\trt.Partitions = nil\n\n\t\ttCursors := b.usedOffsets[t.Topic]\n\n\t\tfor len(t.Partitions) > 0 && n > 0 {\n\t\t\tp := &t.Partitions[0]\n\n\t\t\trt.Partitions = append(rt.Partitions, *p)\n\t\t\trp := &rt.Partitions[len(rt.Partitions)-1]\n\t\t\trp.Records = nil\n\n\t\t\ttake := n\n\t\t\tif take > len(p.Records) {\n\t\t\t\ttake = len(p.Records)\n\t\t\t}\n\n\t\t\trp.Records = p.Records[:take]\n\t\t\tp.Records = p.Records[take:]\n\n\t\t\tn -= take\n\t\t\ttaken += take\n\n\t\t\tpCursor := tCursors[p.Partition]\n\n\t\t\tif len(p.Records) == 0 {\n\t\t\t\tt.Partitions = t.Partitions[1:]\n\n\t\t\t\tpCursor.from.setOffset(pCursor.cursorOffset)\n\t\t\t\tpCursor.from.allowUsable()\n\t\t\t\tdelete(tCursors, p.Partition)\n\t\t\t\tif len(tCursors) == 0 {\n\t\t\t\t\tdelete(b.usedOffsets, t.Topic)\n\t\t\t\t}\n\t\t\t\tbreak\n\t\t\t}\n\n\t\t\tlastReturnedRecord := rp.Records[len(rp.Records)-1]\n\t\t\tpCursor.from.setOffset(cursorOffset{\n\t\t\t\toffset: lastReturnedRecord.Offset + 1,\n\t\t\t\tlastConsumedEpoch: lastReturnedRecord.LeaderEpoch,\n\t\t\t})\n\t\t}\n\n\t\tif len(t.Partitions) == 0 {\n\t\t\tbf.Topics = bf.Topics[1:]\n\t\t}\n\t}\n\n\tdrained := len(bf.Topics) == 0\n\tif drained {\n\t\ts.takeBuffered()\n\t}\n\treturn r, taken, drained\n}", "func (r *timeoutReadCloser) Read(b []byte) (int, error) {\n\ttimer := time.NewTimer(r.duration)\n\tc 
:= make(chan readResult, 1)\n\n\tgo func() {\n\t\tn, err := r.reader.Read(b)\n\t\ttimer.Stop()\n\t\tc <- readResult{n: n, err: err}\n\t}()\n\n\tselect {\n\tcase data := <-c:\n\t\treturn data.n, data.err\n\tcase <-timer.C:\n\t\treturn 0, &ResponseTimeoutError{TimeoutDur: r.duration}\n\t}\n}", "func (serv *Server) delayReader(conn int) {\n\tvar (\n\t\tdelay = 300 * time.Millisecond\n\t\ttotal time.Duration\n\t)\n\tfor total < serv.Options.ReadWriteTimeout {\n\t\ttime.Sleep(delay)\n\t\tselect {\n\t\tcase serv.qreader <- conn:\n\t\t\treturn\n\t\tdefault:\n\t\t\ttotal += delay\n\t\t}\n\t}\n\tvar req = Frame{\n\t\tcloseCode: StatusInternalError,\n\t}\n\tserv.handleClose(conn, &req)\n}", "func BlockingRead(r *bufio.Reader) *[]byte {\n\tbyteChan := make(chan []byte)\n\tb := make([]byte, 4096) // buffer is 4k- we should never be exceeding this!\n\tgo func() {\n\t\tfor {\n\t\t\tn, _ := r.ReadBytes('\\n')\n\t\t\tif len(n) > 0 {\n\t\t\t\tbyteChan <- n\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t}()\n\tfor {\n\t\tselect {\n\t\tcase b = <-byteChan:\n\t\t\treturn &b\n\t\tdefault:\n\t\t\tcontinue\n\t\t}\n\t}\n}", "func Read(r io.Reader, data []byte) ([]byte, error) {\n\tj := 0\n\tfor {\n\t\tn, err := r.Read(data[j:])\n\t\tj = j + n\n\t\tif err != nil {\n\t\t\tif err == io.EOF {\n\t\t\t\tbreak\n\t\t\t}\n\t\t\treturn nil, errors.Wrap(err, \"Read failure\")\n\t\t}\n\n\t\tif (n == 0 && j == len(data)) || j > len(data) {\n\t\t\treturn nil, errors.New(\"Size of requested data is too large\")\n\t\t}\n\t}\n\n\treturn data[:j], nil\n}", "func (r *FileSizeRotator) reachLimit(n int) bool {\n\tatomic.AddUint64(&r.currSize, uint64(n))\n\tif r.currSize > r.limitSize {\n\t\treturn true\n\t}\n\treturn false\n}", "func (q *queue) read(ctx context.Context, dst usermem.IOSequence, l *lineDiscipline) (int64, bool, error) {\n\tq.mu.Lock()\n\tdefer q.mu.Unlock()\n\tif !q.readable {\n\t\treturn 0, false, syserror.ErrWouldBlock\n\t}\n\n\t// Read out from the read buffer.\n\tn := canonMaxBytes\n\tif n > 
int(dst.NumBytes()) {\n\t\tn = int(dst.NumBytes())\n\t}\n\tif n > q.readBuf.Len() {\n\t\tn = q.readBuf.Len()\n\t}\n\tn, err := dst.Writer(ctx).Write(q.readBuf.Bytes()[:n])\n\tif err != nil {\n\t\treturn 0, false, err\n\t}\n\t// Discard bytes read out.\n\tq.readBuf.Next(n)\n\n\t// If we read everything, this queue is no longer readable.\n\tif q.readBuf.Len() == 0 {\n\t\tq.readable = false\n\t}\n\n\t// Move data from the queue's wait buffer to its read buffer.\n\tnPushed := q.pushWaitBufLocked(l)\n\n\treturn int64(n), nPushed > 0, nil\n}", "func (r *bytesReader) ReadAt(b []byte, offset int64) (n int, err error) {\n\tif offset < 0 {\n\t\treturn 0, errors.New(\"buffer.bytesReader.ReadAt: negative offset\")\n\t}\n\tif offset >= int64(len(r.bs)) {\n\t\treturn 0, io.EOF\n\t}\n\tn = copy(b, r.bs[offset:])\n\tif n < len(b) {\n\t\terr = io.EOF\n\t}\n\treturn\n}", "func (dev *Device) read(contxt context.Context, waitResponse bool) ([]byte, error) {\n\n\tcountError := 0\n\tlastEvent := time.Now()\n\t//TODO timeoutRead?\n\tfuncerr := func(err error) error {\n\t\tif err == nil {\n\t\t\treturn nil\n\t\t}\n\t\tlog.Printf(\"funcread err: %s\", err)\n\t\tswitch {\n\t\tcase errors.Is(err, os.ErrClosed):\n\t\t\treturn err\n\t\tcase errors.Is(err, io.ErrClosedPipe):\n\t\t\treturn err\n\t\tcase errors.Is(err, io.EOF):\n\t\t\tif time.Since(lastEvent) < 10*time.Microsecond {\n\t\t\t\tif countError > 3 {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tcountError++\n\t\t\t}\n\n\t\t\tlastEvent = time.Now()\n\t\t}\n\n\t\treturn nil\n\n\t}\n\n\t//TODO: limit to read\n\tbb := make([]byte, 0)\n\tindxb := 0\n\tlendata := uint32(0)\n\tfor {\n\n\t\tselect {\n\t\tcase <-contxt.Done():\n\t\t\treturn nil, fmt.Errorf(\"timeout error, %w\", smartcard.ErrComm)\n\t\tdefault:\n\t\t}\n\t\ttempb := make([]byte, 2048)\n\n\t\t// fmt.Println(\"execute read\")\n\n\t\tn, err := dev.port.Read(tempb)\n\t\tif err != nil && n <= 0 {\n\t\t\tif err := funcerr(err); err != nil {\n\t\t\t\t// log.Printf(\"0, err: %s\", 
err)\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tcontinue\n\t\t}\n\t\t// fmt.Printf(\"len: %v, [% X]\\n\", len(tempb[:n]), tempb[:n])\n\n\t\t// prepareBuffer := make([]byte, len(tempb[:n]))\n\n\t\t// copy(prepareBuffer, tempb[:n])\n\n\t\tbf := bytes.NewBuffer(tempb[:n])\n\t\t// fmt.Printf(\"len: %v, %v, %v, %v\\n\", len(prepareBuffer), cap(prepareBuffer), bf.Cap(), bf.Len())\n\n\t\tb := func() []byte {\n\t\t\tvar result []byte\n\n\t\t\tfor {\n\t\t\t\tselect {\n\t\t\t\tcase <-contxt.Done():\n\t\t\t\t\treturn nil\n\t\t\t\tdefault:\n\t\t\t\t}\n\n\t\t\t\tlast, err := bf.ReadByte()\n\t\t\t\tif err == nil {\n\t\t\t\t\tif indxb <= 0 && last != '\\x02' {\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t}\n\t\t\t\t\tindxb++\n\t\t\t\t\tbb = append(bb, last)\n\t\t\t\t} else {\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t\t// fmt.Printf(\"len: %v, last: %X, [% X]\\n\", len(bb), last, bb[:])\n\t\t\t\t// log.Println(\"2\")\n\t\t\t\tif len(bb) == 6 {\n\n\t\t\t\t\tlendata = binary.LittleEndian.Uint32(bb[2:6])\n\t\t\t\t\t// fmt.Printf(\"len data: %d\\n\", lendata)\n\t\t\t\t}\n\t\t\t\tif last == '\\x03' && len(bb) == 4 && bb[1] == bb[2] {\n\t\t\t\t\tresult = make([]byte, len(bb))\n\t\t\t\t\tcopy(result, bb[:])\n\t\t\t\t\tbb = make([]byte, 0)\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tif last == '\\x03' && len(bb) >= int(lendata)+1+10+1+1 {\n\t\t\t\t\t// fmt.Printf(\"tempb final: [% X]\\n\", bb[:])\n\n\t\t\t\t\tresult = make([]byte, len(bb))\n\t\t\t\t\tcopy(result, bb[:])\n\t\t\t\t\tbb = make([]byte, 0)\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t}\n\t\t\treturn result\n\t\t}()\n\n\t\tif waitResponse {\n\t\t\tif len(b) <= 0 {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif len(b) == 4 && b[1] == b[2] && b[1] == 0x00 {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif len(b) == 13 && bytes.Equal(b, FRAME_NACK) {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif b[len(b)-1] != 0x03 {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t}\n\n\t\t// fmt.Printf(\"resul final: [% X]\\n\", b[:])\n\n\t\t// if indxb <= 0 {\n\t\t// \tif b == '\\x02' {\n\t\t// \t\ttempb[0] = b\n\t\t// 
\t\tindxb = 1\n\t\t// \t}\n\t\t// \tcontinue\n\t\t// }\n\n\t\t// tempb[indxb] = b\n\t\t// indxb++\n\t\t// fmt.Printf(\"len: %v, [% X]\\n\", indxb, tempb[:indxb])\n\t\t// // log.Println(\"2\")\n\t\t// if indxb == 6 {\n\t\t// \tlendata = binary.LittleEndian.Uint32(tempb[2:6])\n\t\t// }\n\t\t// if b == '\\x03' && indxb == 4 && tempb[1] == tempb[2] {\n\t\t// \tdest := make([]byte, indxb)\n\t\t// \tcopy(dest, tempb[:indxb])\n\t\t// \treturn dest, nil\n\t\t// }\n\t\t// if b == '\\x03' && indxb >= int(lendata)+1+10+1+1 {\n\t\t// \t// fmt.Printf(\"tempb final: [% X]\\n\", tempb[:indxb])\n\n\t\t// \tdest := make([]byte, indxb)\n\t\t// \tcopy(dest, tempb[:indxb])\n\t\t// \treturn dest, nil\n\t\t// }\n\t\tdest := make([]byte, len(b))\n\t\tcopy(dest, b[:])\n\t\tfmt.Printf(\"recv data: %v, [% X]\\n\", len(b), b[:])\n\t\treturn dest, nil\n\n\t}\n}", "func TimeoutReader(r io.Reader) io.Reader { return &timeoutReader{r, 0} }", "func read(conn *net.TCPConn, size int, data *bytes.Buffer) error {\n\tconn.SetReadDeadline(time.Now().Add(time.Second * 15))\n\n\tn, err := io.CopyN(data, conn, int64(size))\n\tif err != nil || n != int64(size) {\n\t\treturn errors.New(\"read error\")\n\t}\n\treturn nil\n}", "func (d *Decoder) readN(n int) []byte {\n\tif buf, ok := d.r.(*bytes.Buffer); ok {\n\t\tb := buf.Next(n)\n\t\tif len(b) != n {\n\t\t\tpanic(io.ErrUnexpectedEOF)\n\t\t}\n\t\tif d.n += n; d.n > MaxObjectSize {\n\t\t\tbuild.Critical(ErrObjectTooLarge)\n\t\t}\n\t\treturn b\n\t}\n\tb := make([]byte, n)\n\t_, err := io.ReadFull(d, b)\n\tif err != nil {\n\t\tbuild.Critical(err)\n\t}\n\treturn b\n}", "func (rstra *ReadSeekerToReaderAt) ReadAt(p []byte, offset int64) (n int, err error) {\n\tdefer func() {\n\t\tif state := recover(); state != nil {\n\t\t\terr = log.Wrap(state.(error))\n\t\t}\n\t}()\n\n\toriginalOffset, err := rstra.rs.Seek(0, io.SeekCurrent)\n\tlog.PanicIf(err)\n\n\tdefer func() {\n\t\t_, err := rstra.rs.Seek(originalOffset, io.SeekStart)\n\t\tlog.PanicIf(err)\n\t}()\n\n\t_, err 
= rstra.rs.Seek(offset, io.SeekStart)\n\tlog.PanicIf(err)\n\n\t// Note that all errors will be wrapped, here. The usage of this method is\n\t// such that typically no specific errors would be expected as part of\n\t// normal operation (in which case we'd check for those first and return\n\t// them directly).\n\tn, err = io.ReadFull(rstra.rs, p)\n\tlog.PanicIf(err)\n\n\treturn n, nil\n}", "func TestNonFatalRead(t *testing.T) {\n\t// Limit runtime in case of deadlocks\n\tlim := test.TimeOut(time.Second * 20)\n\tdefer lim.Stop()\n\n\texpectedData := []byte(\"expectedData\")\n\n\t// In memory pipe\n\tca, cb := net.Pipe()\n\trequire.NoError(t, cb.Close())\n\n\tconn := &muxErrorConn{ca, []muxErrorConnReadResult{\n\t\t// Non-fatal timeout error\n\t\t{packetio.ErrTimeout, nil},\n\t\t{nil, expectedData},\n\t\t{io.ErrShortBuffer, nil},\n\t\t{nil, expectedData},\n\t\t{io.EOF, nil},\n\t}}\n\n\tm := NewMux(Config{\n\t\tConn: conn,\n\t\tBufferSize: testPipeBufferSize,\n\t\tLoggerFactory: logging.NewDefaultLoggerFactory(),\n\t})\n\n\te := m.NewEndpoint(MatchAll)\n\n\tbuff := make([]byte, testPipeBufferSize)\n\tn, err := e.Read(buff)\n\trequire.NoError(t, err)\n\trequire.Equal(t, buff[:n], expectedData)\n\n\tn, err = e.Read(buff)\n\trequire.NoError(t, err)\n\trequire.Equal(t, buff[:n], expectedData)\n\n\t<-m.closedCh\n\trequire.NoError(t, m.Close())\n\trequire.NoError(t, ca.Close())\n}", "func (e *T) readAvailable(dst []byte) (n int, err error) {\n\tn = copy(dst, e.buf[e.ridx:e.widx])\n\ti := e.ridx + n\n\tif e.secure {\n\t\tclearbytes(e.buf[e.ridx:i])\n\t}\n\te.ridx = i\n\tif e.ridx >= e.widx {\n\t\te.ridx = 0\n\t\te.widx = 0\n\t\te.available = false\n\t}\n\treturn n, nil\n}", "func (reader *ExtentReader) Read(req *ExtentRequest) (readBytes int, err error) {\n\toffset := req.FileOffset - int(reader.key.FileOffset) + int(reader.key.ExtentOffset)\n\tsize := req.Size\n\n\treqPacket := NewReadPacket(reader.key, offset, size, reader.inode, req.FileOffset, reader.followerRead)\n\tsc := 
NewStreamConn(reader.dp, reader.followerRead)\n\n\tlog.LogDebugf(\"ExtentReader Read enter: size(%v) req(%v) reqPacket(%v)\", size, req, reqPacket)\n\n\terr = sc.Send(reqPacket, func(conn *net.TCPConn) (error, bool) {\n\t\treadBytes = 0\n\t\tfor readBytes < size {\n\t\t\treplyPacket := NewReply(reqPacket.ReqID, reader.dp.PartitionID, reqPacket.ExtentID)\n\t\t\tbufSize := util.Min(util.ReadBlockSize, size-readBytes)\n\t\t\treplyPacket.Data = req.Data[readBytes : readBytes+bufSize]\n\t\t\te := replyPacket.readFromConn(conn, proto.ReadDeadlineTime)\n\t\t\tif e != nil {\n\t\t\t\tlog.LogWarnf(\"Extent Reader Read: failed to read from connect, ino(%v) req(%v) readBytes(%v) err(%v)\", reader.inode, reqPacket, readBytes, e)\n\t\t\t\t// Upon receiving TryOtherAddrError, other hosts will be retried.\n\t\t\t\treturn TryOtherAddrError, false\n\t\t\t}\n\n\t\t\t//log.LogDebugf(\"ExtentReader Read: ResultCode(%v) req(%v) reply(%v) readBytes(%v)\", replyPacket.GetResultMsg(), reqPacket, replyPacket, readBytes)\n\n\t\t\tif replyPacket.ResultCode == proto.OpAgain {\n\t\t\t\treturn nil, true\n\t\t\t}\n\n\t\t\te = reader.checkStreamReply(reqPacket, replyPacket)\n\t\t\tif e != nil {\n\t\t\t\t// Dont change the error message, since the caller will\n\t\t\t\t// check if it is NotLeaderErr.\n\t\t\t\treturn e, false\n\t\t\t}\n\n\t\t\treadBytes += int(replyPacket.Size)\n\t\t}\n\t\treturn nil, false\n\t})\n\n\tif err != nil {\n\t\tlog.LogErrorf(\"Extent Reader Read: err(%v) req(%v) reqPacket(%v)\", err, req, reqPacket)\n\t}\n\n\tlog.LogDebugf(\"ExtentReader Read exit: req(%v) reqPacket(%v) readBytes(%v) err(%v)\", req, reqPacket, readBytes, err)\n\treturn\n}", "func (ctn *Connection) Read(buf []byte, length int) (total int, err error) {\n\t// if all bytes are not read, retry until successful\n\t// Don't worry about the loop; we've already set the timeout elsewhere\n\tfor total < length {\n\t\tvar r int\n\t\tif err = ctn.updateDeadline(); err != nil {\n\t\t\tbreak\n\t\t}\n\n\t\tif 
!ctn.compressed {\n\t\t\tr, err = ctn.conn.Read(buf[total:length])\n\t\t} else {\n\t\t\tr, err = ctn.inflater.Read(buf[total:length])\n\t\t\tif err == io.EOF && total+r == length {\n\t\t\t\tctn.compressed = false\n\t\t\t\terr = ctn.inflater.Close()\n\t\t\t}\n\t\t}\n\t\ttotal += r\n\t\tif err != nil {\n\t\t\tbreak\n\t\t}\n\t}\n\n\tif total == length {\n\t\t// If all required bytes are read, ignore any potential error.\n\t\t// The error will bubble up on the next network io if it matters.\n\t\treturn total, nil\n\t}\n\n\tif ctn.node != nil {\n\t\tctn.node.incrErrorCount()\n\t\tatomic.AddInt64(&ctn.node.stats.ConnectionsFailed, 1)\n\t}\n\n\t// the line should happen before .Close()\n\terr = errToTimeoutErr(ctn, err)\n\tctn.Close()\n\n\treturn total, err\n}", "func ReadNMessages(gribFile io.Reader, n int) ([]*Message, error) {\n\tmessages := make([]*Message, 0)\n\n\tfor {\n\t\tmessage, messageErr := ReadMessage(gribFile)\n\n\t\tif messageErr != nil {\n\t\t\tif strings.Contains(messageErr.Error(), \"EOF\") {\n\t\t\t\treturn messages, nil\n\t\t\t}\n\t\t\tlog.Println(\"Error when parsing a message, \", messageErr.Error())\n\t\t\treturn messages, messageErr\n\t\t}\n\t\tmessages = append(messages, message)\n\t\tif len(messages) >= n {\n\t\t\treturn messages, nil\n\t\t}\n\t}\n}", "func TestReadDataWithMaxSize(t *testing.T) {\n\ttests := []struct {\n\t\tlines string\n\t\tmaxSize int\n\t\terr error\n\t}{\n\t\t// Maximum size of zero (the default) should not return an error.\n\t\t{\"Test message.\\r\\n.\\r\\n\", 0, nil},\n\n\t\t// Messages below the maximum size should not return an error.\n\t\t{\"Test message.\\r\\n.\\r\\n\", 16, nil},\n\n\t\t// Messages matching the maximum size should not return an error.\n\t\t{\"Test message.\\r\\n.\\r\\n\", 15, nil},\n\n\t\t// Messages above the maximum size should return a maximum size exceeded error.\n\t\t{\"Test message.\\r\\n.\\r\\n\", 14, maxSizeExceeded(14)},\n\t}\n\tvar buf bytes.Buffer\n\ts := &session{}\n\ts.br = 
bufio.NewReader(&buf)\n\n\tfor _, tt := range tests {\n\t\ts.srv = &Server{MaxSize: tt.maxSize}\n\t\tbuf.Write([]byte(tt.lines))\n\t\t_, err := s.readData()\n\t\tif err != tt.err {\n\t\t\tt.Errorf(\"readData(%v) returned err: %v\", tt.lines, tt.err)\n\t\t}\n\t}\n}", "func MaxReadahead(n uint32) MountOption {\n\treturn func(conf *mountConfig) error {\n\t\tconf.maxReadahead = n\n\t\treturn nil\n\t}\n}", "func (l *LeechedReadCloser) Read(b []byte) (n int, err error) {\n\tspaceLeft := l.maxBodyLogSize - l.loggedBytesCount\n\tif spaceLeft > 0 {\n\t\t// Let's read the request into our Logger (not all of it maybe), but also let's make sure that\n\t\t// we'll be able to to copy all the content we read in l.data into b\n\t\tn, err := l.originalReadCloser.Read(l.data[l.loggedBytesCount : l.loggedBytesCount+min(int64(len(b)), spaceLeft)])\n\n\t\t// And copy what was read into the original slice\n\t\tcopy(b, l.data[l.loggedBytesCount:l.loggedBytesCount+int64(n)])\n\n\t\t// Let's not forget to increment the pointer on the currently logged amount of bytes\n\t\tl.loggedBytesCount += int64(n)\n\n\t\t// And return what the Read() call we did on the original ReadCloser just returned, shhhhh\n\t\treturn n, err\n\t}\n\n\t// Our leecher is full ? Nevermind, let's just call read on the original Reader. 
Apart from an\n\t// additional level in the call stack and an if statement, we have no overhead for large bodies :)\n\treturn l.originalReadCloser.Read(b)\n}", "func checkReader(t *testing.T, r zbuf.Reader, checkReads bool) {\n\tfor expect := 3; expect <= 6; expect++ {\n\t\trec, err := r.Read()\n\t\trequire.NoError(t, err)\n\n\t\tv, err := rec.AccessInt(\"value\")\n\t\trequire.NoError(t, err)\n\n\t\trequire.Equal(t, int64(expect), v, \"Got expected record value\")\n\t}\n\n\trec, err := r.Read()\n\trequire.NoError(t, err)\n\trequire.Nil(t, rec, \"Reached eof after last record in time span\")\n\n\tif checkReads {\n\t\trr, ok := r.(*rangeReader)\n\t\trequire.True(t, ok, \"Can get read stats from index reader\")\n\t\trequire.LessOrEqual(t, rr.reads(), uint64(6), \"Indexed reader did not read the entire file\")\n\t}\n}", "func (b *Buffer) ReadNFrom(reader io.Reader, n int) (int, error) {\n\t// Loop until we've filled completed the read, run out of storage, or\n\t// encountered a read error.\n\tvar read, result int\n\tvar err error\n\tfor n > 0 && b.used != b.size && err == nil {\n\t\t// Compute the first available contiguous free storage segment.\n\t\tfreeStart := (b.start + b.used) % b.size\n\t\tfree := b.storage[freeStart:min(freeStart+(b.size-b.used), b.size)]\n\n\t\t// If the storage segment is larger than we need, then truncate it.\n\t\tif len(free) > n {\n\t\t\tfree = free[:n]\n\t\t}\n\n\t\t// Perform the read.\n\t\tread, err = reader.Read(free)\n\n\t\t// Update indices and tracking.\n\t\tresult += read\n\t\tb.used += read\n\t\tn -= read\n\t}\n\n\t// If we couldn't complete the read due to a lack of storage, then we need\n\t// to return an error. 
However, if a read error occurred simultaneously with\n\t// running out of storage, then we don't overwrite it.\n\tif n > 0 && b.used == b.size && err == nil {\n\t\terr = ErrBufferFull\n\t}\n\n\t// If we encountered io.EOF simultaneously with completing the read, then we\n\t// can clear the error.\n\tif err == io.EOF && n == 0 {\n\t\terr = nil\n\t}\n\n\t// Done.\n\treturn result, err\n}", "func VerifyRLimit(estimateMaxFiles RlimT) error {\n\tif estimateMaxFiles > maxRLimit {\n\t\testimateMaxFiles = maxRLimit\n\t}\n\tvar rLimit syscall.Rlimit\n\terr := syscall.Getrlimit(syscall.RLIMIT_NOFILE, &rLimit)\n\tfailpoint.Inject(\"GetRlimitValue\", func(v failpoint.Value) {\n\t\tlimit := RlimT(v.(int))\n\t\trLimit.Cur = limit\n\t\trLimit.Max = limit\n\t\terr = nil\n\t})\n\tif err != nil {\n\t\treturn errors.Trace(err)\n\t}\n\tif rLimit.Cur >= estimateMaxFiles {\n\t\treturn nil\n\t}\n\tif rLimit.Max < estimateMaxFiles {\n\t\t// If the process is not started by privileged user, this will fail.\n\t\trLimit.Max = estimateMaxFiles\n\t}\n\tprevLimit := rLimit.Cur\n\trLimit.Cur = estimateMaxFiles\n\tfailpoint.Inject(\"SetRlimitError\", func(v failpoint.Value) {\n\t\tif v.(bool) {\n\t\t\terr = errors.New(\"Setrlimit Injected Error\")\n\t\t}\n\t})\n\tif err == nil {\n\t\terr = syscall.Setrlimit(syscall.RLIMIT_NOFILE, &rLimit)\n\t}\n\tif err != nil {\n\t\treturn errors.Annotatef(err, \"the maximum number of open file descriptors is too small, got %d, expect greater or equal to %d\", prevLimit, estimateMaxFiles)\n\t}\n\n\t// fetch the rlimit again to make sure our setting has taken effect\n\terr = syscall.Getrlimit(syscall.RLIMIT_NOFILE, &rLimit)\n\tif err != nil {\n\t\treturn errors.Trace(err)\n\t}\n\tif rLimit.Cur < estimateMaxFiles {\n\t\thelper := \"Please manually execute `ulimit -n %d` to increase the open files limit.\"\n\t\treturn errors.Errorf(\"cannot update the maximum number of open file descriptors, expected: %d, got: %d. 
%s\",\n\t\t\testimateMaxFiles, rLimit.Cur, helper)\n\t}\n\n\tlog.L().Info(\"Set the maximum number of open file descriptors(rlimit)\",\n\t\tzapRlimT(\"old\", prevLimit), zapRlimT(\"new\", estimateMaxFiles))\n\treturn nil\n}", "func (h *ReOpen) Read(p []byte) (n int, err error) {\n\th.mu.Lock()\n\tdefer h.mu.Unlock()\n\tif h.err != nil {\n\t\t// return a previous error if there is one\n\t\treturn n, h.err\n\t}\n\tn, err = h.rc.Read(p)\n\tif err != nil {\n\t\th.err = err\n\t}\n\th.read += int64(n)\n\tif err != nil && err != io.EOF && !fserrors.IsNoLowLevelRetryError(err) {\n\t\t// close underlying stream\n\t\th.opened = false\n\t\t_ = h.rc.Close()\n\t\t// reopen stream, clearing error if successful\n\t\tfs.Debugf(h.src, \"Reopening on read failure after %d bytes: retry %d/%d: %v\", h.read, h.tries, h.maxTries, err)\n\t\tif h.open() == nil {\n\t\t\terr = nil\n\t\t}\n\t}\n\treturn n, err\n}", "func (mCn mockConn) Read(b []byte) (n int, err error) {\n\tfmt.Printf(\"reading: %d of %d.\\n\", *mCn.readCount, len(mockConnOutpBytes))\n\tif *mCn.readCount < len(mockConnOutpBytes) {\n\t\tcopy(b, mockConnOutpBytes[*mCn.readCount])\n\t\t*mCn.readCount = *mCn.readCount + 1\n\t}\n\treturn len(b), nil\n}", "func (serv *Server) pollReader() {\n\tvar (\n\t\tlogp = `pollReader`\n\n\t\tlistConn []int\n\t\terr error\n\t\tnumReader int32\n\t\tconn int\n\t)\n\n\tfor {\n\t\tlistConn, err = serv.poll.WaitRead()\n\t\tif err != nil {\n\t\t\tlog.Printf(`%s: %s`, logp, err)\n\t\t\tbreak\n\t\t}\n\n\t\tfor _, conn = range listConn {\n\t\t\tselect {\n\t\t\tcase serv.qreader <- conn:\n\t\t\tdefault:\n\t\t\t\tnumReader = serv.numGoReader.Load()\n\t\t\t\tif numReader < serv.Options.maxGoroutineReader {\n\t\t\t\t\tgo serv.reader()\n\t\t\t\t\tserv.numGoReader.Add(1)\n\t\t\t\t\tserv.qreader <- conn\n\t\t\t\t} else {\n\t\t\t\t\tgo serv.delayReader(conn)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n}", "func TcpReadFixedSize(ctx context.Context, conn net.Conn, m int, timeout time.Duration) ([]byte, error) {\n\tctx, _ 
= context.WithTimeout(ctx, timeout)\n\tc := make(chan error, 1)\n\tvar messageBuffer bytes.Buffer\n\tgo func() {\n\t\tvar err error\n\t\tdefer func() { c <- err }()\n\t\tfor m > 0 {\n\t\t\ttmpBuffer := make([]byte, m)\n\t\t\tn, err := conn.Read(tmpBuffer)\n\t\t\tif err != nil {\n\t\t\t\treturn\n\t\t\t}\n\t\t\tm = m - n\n\t\t\tif _, err = messageBuffer.Write(tmpBuffer); err != nil {\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t\treturn\n\t}()\n\tselect {\n\tcase <-ctx.Done():\n\t\tclose(c)\n\t\treturn messageBuffer.Bytes(), oops.Wrapf(ctx.Err(), \"ctx\")\n\tcase err := <-c:\n\t\treturn messageBuffer.Bytes(), oops.Wrapf(err, \"read\")\n\t}\n}", "func (c *Conn) Read(b []byte) (n int, err error) {\n\tif len(b) == 0 {\n\t\treturn 0, nil\n\t}\n\terr = tryAgain\n\tfor err == tryAgain {\n\t\tn, errcb := c.read(b)\n\t\terr = c.handleError(errcb)\n\t\tif err == nil {\n\t\t\tgo c.flushOutputBuffer()\n\t\t\treturn n, nil\n\t\t}\n\t\tif err == io.ErrUnexpectedEOF {\n\t\t\terr = io.EOF\n\t\t}\n\t}\n\treturn 0, err\n}", "func (s *Sniffer) Recv(t *testing.T, timeout time.Duration) []byte {\n\tt.Helper()\n\n\tdeadline := time.Now().Add(timeout)\n\tfor {\n\t\ttimeout = time.Until(deadline)\n\t\tif timeout <= 0 {\n\t\t\treturn nil\n\t\t}\n\t\tusec := timeout.Microseconds()\n\t\tif usec == 0 {\n\t\t\t// Timeout is less than a microsecond; set usec to 1 to avoid\n\t\t\t// blocking indefinitely.\n\t\t\tusec = 1\n\t\t}\n\t\tconst microsInOne = 1e6\n\t\ttv := unix.Timeval{\n\t\t\tSec: usec / microsInOne,\n\t\t\tUsec: usec % microsInOne,\n\t\t}\n\t\tif err := unix.SetsockoptTimeval(s.fd, unix.SOL_SOCKET, unix.SO_RCVTIMEO, &tv); err != nil {\n\t\t\tt.Fatalf(\"can't setsockopt SO_RCVTIMEO: %s\", err)\n\t\t}\n\n\t\tbuf := make([]byte, maxReadSize)\n\t\tnread, _, err := unix.Recvfrom(s.fd, buf, unix.MSG_TRUNC)\n\t\tif err == unix.EINTR || err == unix.EAGAIN {\n\t\t\t// There was a timeout.\n\t\t\tcontinue\n\t\t}\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"can't read: %s\", err)\n\t\t}\n\t\tif nread > 
maxReadSize {\n\t\t\tt.Fatalf(\"received a truncated frame of %d bytes, want at most %d bytes\", nread, maxReadSize)\n\t\t}\n\t\treturn buf[:nread]\n\t}\n}" ]
[ "0.6698476", "0.592675", "0.5846762", "0.58344626", "0.57960194", "0.5787837", "0.5778206", "0.5766628", "0.57591337", "0.5743206", "0.5741069", "0.57359076", "0.5708238", "0.56657344", "0.56469196", "0.56459975", "0.56374776", "0.56374776", "0.5634473", "0.56332356", "0.5598442", "0.55778104", "0.5576698", "0.55350846", "0.5504047", "0.5490909", "0.5468817", "0.5459477", "0.543513", "0.5430435", "0.54167455", "0.5409444", "0.53441507", "0.5339437", "0.5292262", "0.5286134", "0.52626044", "0.52607167", "0.5258025", "0.5201717", "0.5201214", "0.51949525", "0.51899344", "0.51798725", "0.5152308", "0.51435053", "0.51345176", "0.5132058", "0.513093", "0.51174295", "0.51123995", "0.5110346", "0.5109076", "0.5105851", "0.5103035", "0.5100584", "0.5090922", "0.50816554", "0.50786185", "0.5074057", "0.50723016", "0.50651574", "0.50587", "0.505755", "0.505755", "0.505679", "0.5049901", "0.5048456", "0.50402915", "0.50388795", "0.5038756", "0.5033854", "0.5029499", "0.50175303", "0.50116235", "0.5009413", "0.49965423", "0.49919522", "0.49886286", "0.4988432", "0.49859938", "0.49845046", "0.4981513", "0.49756", "0.49729258", "0.49727002", "0.49706835", "0.49681902", "0.4964", "0.4961329", "0.49457538", "0.49410132", "0.49395964", "0.49279583", "0.49253908", "0.49213967", "0.49156803", "0.4906001", "0.490579", "0.49048194" ]
0.81593716
0
HasPrefixAny determines if any of the string values have the given prefix.
HasPrefixAny определяет, имеет ли любое из строковых значений заданный префикс.
func HasPrefixAny(prefix string, values []string) bool { for _, val := range values { if strings.HasPrefix(val, prefix) { return true } } return false }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func StartsWithAny(str string, prefixes ...string) bool {\n\tfor _, prefix := range prefixes {\n\t\tif internalStartsWith(str, (string)(prefix), false) {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func HasAnyPrefix(s string, prefixList []string) bool {\n\tfor _, prefix := range prefixList {\n\t\tif strings.HasPrefix(s, prefix) {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func HasAnyPrefix(text string, slice []string) bool {\n\tfor _, s := range slice {\n\t\tif strings.HasPrefix(text, s) {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func StartsWithAnyIgnoreCase(str string, prefixes ...string) bool {\n\tfor _, prefix := range prefixes {\n\t\tif internalStartsWith(str, (string)(prefix), true) {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func HasPrefixAnyI(s string, prefixes ...string) bool {\n\tfor _, prefix := range prefixes {\n\t\tif stringsutil.HasPrefixI(s, prefix) {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func StringsHasPrefix(s []string, p string) bool {\n\tfor _, x := range s {\n\t\tif !strings.HasPrefix(x, p) {\n\t\t\treturn false\n\t\t}\n\t}\n\n\treturn true\n}", "func HasPrefix(s string, p ...string) bool {\n\tfor _, i := range p {\n\t\tif strings.HasPrefix(s, i) {\n\t\t\treturn true\n\t\t}\n\t}\n\n\treturn false\n}", "func HasPrefix(s string, prefixes ...string) bool {\n\tfor _, p := range prefixes {\n\t\tif strings.HasPrefix(s, p) {\n\t\t\treturn true\n\t\t}\n\t}\n\n\treturn false\n}", "func (s StringSet) IncludesAny(values []string) bool {\n\tfor _, v := range values {\n\t\tif _, ok := s[v]; ok {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func HasOneOfPrefixesFold(str string, prefixes ...string) bool {\n\tfor _, pre := range prefixes {\n\t\tif HasPrefixFold(str, pre) {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func HasPrefix(prefix, operand string) bool { return strings.HasPrefix(operand, prefix) }", "func PrefixInList(list []string, prefix string) bool {\n\tfor _, s := range list 
{\n\t\tif strings.HasPrefix(s, prefix) {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func AnyPrefixMatcher(strs ...string) MatcherFunc {\n\ttree := ternary_search_tree.New(strs...)\n\treturn func(_ io.Writer, r io.Reader) bool {\n\t\tbuf := make([]byte, tree.Depth())\n\t\tn, _ := io.ReadFull(r, buf)\n\t\t_, _, ok := tree.Follow(string(buf[:n]))\n\t\treturn ok\n\t}\n}", "func IncludesAnyStr(needles []string, haystack []string) bool {\n\tfor _, needle := range needles {\n\t\tif ok, _ := InArray(needle, haystack); ok {\n\t\t\treturn true\n\t\t}\n\t}\n\n\treturn false\n}", "func (this *Trie) StartsWith(prefix string) bool {\n\tfor k, ws := range this.words {\n\t\tif k < len(prefix) {\n\t\t\tcontinue\n\t\t}\n\t\tfor _, w := range ws {\n\t\t\tif strings.HasPrefix(w, prefix) {\n\t\t\t\treturn true\n\t\t\t}\n\t\t}\n\t}\n\treturn false\n}", "func ContainsAny(str string, search ...string) bool {\n\tfor _, s := range search {\n\t\tif Contains(str, (string)(s)) {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func StringValHasPrefix(v string) predicate.Property {\n\treturn predicate.Property(func(s *sql.Selector) {\n\t\ts.Where(sql.HasPrefix(s.C(FieldStringVal), v))\n\t})\n}", "func Contains(s, substr string) bool {\n\tfor i := range s {\n\t\tif HasPrefix(s[i:], substr) {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func StringsSharePrefix(s []string) bool {\n\tsort.Strings(s)\n\n\tfor i := 0; i < len(s)-1; i++ {\n\t\tif strings.HasPrefix(s[i+1], s[i]) {\n\t\t\treturn true\n\t\t}\n\t}\n\n\treturn false\n}", "func (this *Trie) StartsWith(prefix string) bool {\n\tfor i := 0; i < len(prefix); i++ {\n\t\tif this.son[prefix[i]-'a'] == nil {\n\t\t\treturn false\n\t\t}\n\t\tthis = this.son[prefix[i]-'a']\n\t}\n\treturn true\n}", "func PrefixMatch(key string) (res []interface{}) {\n\tglobalStore.RLock()\n\tdefer globalStore.RUnlock()\n\n\tfor k, v := range globalStore.store {\n\t\tif strings.HasPrefix(k, key) {\n\t\t\tres = append(res, 
v)\n\t\t}\n\t}\n\n\treturn\n}", "func StringHasPrefix(column string, prefix string, opts ...Option) *sql.Predicate {\n\treturn sql.P(func(b *sql.Builder) {\n\t\topts = append([]Option{Unquote(true)}, opts...)\n\t\tvaluePath(b, column, opts...)\n\t\tb.Join(sql.HasPrefix(\"\", prefix))\n\t})\n}", "func (this *Trie) StartsWith(prefix string) bool {\n\treturn this.dict[prefix] || this.dictPrefix[prefix]\n}", "func HasPrefix(s, prefix string) bool {\n\tif len(s) < len(prefix) {\n\t\treturn false\n\t}\n\tfor i := 0; i < len(prefix); i++ {\n\t\tif s[i] != prefix[i] {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}", "func Contains(s, substring string) bool {\n\tfor i := 0; i < len(s); i++ {\n\t\tif HasPrefix(s[i:], substring) {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func hasPrefixDemo(a string, b string) bool {\n\treturn strings.HasPrefix(a, b)\n}", "func (this *Trie) StartsWith(prefix string) bool {\n\tt := this\n\tfor i := range prefix {\n\t\tif t.trie == nil {\n\t\t\treturn false\n\t\t}\n\t\tif !t.trie[prefix[i]-'a'].exist {\n\t\t\treturn false\n\t\t}\n\t\tt = &t.trie[prefix[i]-'a'].trie\n\t}\n\treturn true\n}", "func MixedStringHasPrefix(v string) predicate.User {\n\treturn predicate.User(func(s *sql.Selector) {\n\t\ts.Where(sql.HasPrefix(s.C(FieldMixedString), v))\n\t})\n}", "func (this *Trie) StartsWith(prefix string) bool {\n for _,v :=range prefix{\n if this.name[v-'a'] == nil{\n return false\n }\n \n this = this.name[v-'a']\n }\n return true\n}", "func containsPathPrefix(pats []string, s string) bool {\n\tfor _, pat := range pats {\n\t\tif pat == s || strings.HasPrefix(s, pat+\"/\") {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func (this *Trie) StartsWith(prefix string) bool {\n\tif prefix == \"\" {\n\t\treturn false\n\t}\n\thead := this\n\tfor e := range prefix {\n\t\tif head.data[prefix[e]-'a'] == nil {\n\t\t\treturn false\n\t\t}\n\t\thead = head.data[prefix[e]-'a']\n\t}\n\treturn true\n}", "func MixedStringHasPrefix(v string) 
predicate.User {\n\treturn predicate.User(sql.FieldHasPrefix(FieldMixedString, v))\n}", "func HasPrefixFold(str, prefix string) bool {\n\treturn len(str) >= len(prefix) && strings.EqualFold(str[0:len(prefix)], prefix)\n}", "func StartsWith(str, prefix string) bool {\n\treturn strings.HasPrefix(str, prefix)\n}", "func hasPrefix(s, prefix string) bool {\n\treturn len(prefix) <= len(s) && s[:len(prefix)] == prefix\n}", "func MatchPrefix(prefixes ...string) MatcherFunc { return MatchPrefixes(prefixes) }", "func (cs *CStore) AnyContains(needle string) bool {\n\tfor key := range cs.store {\n\t\tif strings.Contains(key, needle) {\n\t\t\treturn true\n\t\t}\n\t}\n\n\treturn false\n}", "func (this *Trie) StartsWith(prefix string) bool {\n\tcur := this.root\n\n\t// go through prefix\n\tfor _, c := range prefix {\n\t\t// check if in children\n\t\tif child, ok := cur.children[c]; ok {\n\t\t\t// set cur\n\t\t\tcur = child\n\t\t} else {\n\t\t\treturn false\n\t\t}\n\t}\n\n\t// no probs\n\treturn true\n}", "func (s *Stringish) HasPrefix(prefix string) bool {\n\treturn strings.HasPrefix(s.str, prefix)\n}", "func HasPrefix(prefix string) MatchFunc {\n\treturn func(s string) bool { return strings.HasPrefix(s, prefix) }\n}", "func (this *Trie) StartsWith(prefix string) bool {\n\tcurr := this\n\tfor _, c := range prefix {\n\t\tif curr.next[c-'a'] == nil {\n\t\t\treturn false\n\t\t}\n\t\tcurr = curr.next[c-'a']\n\t}\n\n\treturn true\n}", "func AnyValueInStringSlice(Slice1, Slice2 []string) bool {\n\tif len(Slice1) == 0 || len(Slice2) == 0 {\n\t\treturn false\n\t}\n\tfor _, x := range Slice1 {\n\t\tif IsValueInStringSlice(x, Slice2) {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func StringContainsAny(str string, subStrings []string) bool {\n\tfor _, subString := range subStrings {\n\t\tif strings.Contains(str, subString) {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func (t *Trie) StartsWith(prefix string) bool {\n\tp := t.root\n\twordArr := []rune(prefix)\n\n\tfor i := 
0; i < len(wordArr); i++ {\n\t\tif p.edges[wordArr[i]-'a'] == nil {\n\t\t\treturn false\n\t\t} else {\n\t\t\tp = p.edges[wordArr[i]-'a']\n\t\t}\n\t}\n\treturn true\n}", "func (this *Trie) StartsWith(prefix string) bool {\n\troot := this\n\tfor _, chartV := range prefix {\n\t\tnext, ok := root.next[string(chartV)]\n\t\tif !ok {\n\t\t\treturn false\n\t\t}\n\t\troot = next\n\t}\n\treturn true\n}", "func (t *Trie) StartsWith(prefix string) bool {\n\tcur := t.Root\n\tfor _, c := range prefix {\n\t\t_, ok := cur.Next[c]\n\t\tif !ok {\n\t\t\treturn false\n\t\t}\n\n\t\tcur = cur.Next[c]\n\t}\n\n\treturn true\n}", "func (this *Trie) StartsWith(prefix string) bool {\n\tnode := this\n\tn := len(prefix)\n\tfor i := 0; i < n; i++ {\n\t\tidx := prefix[i] - 'a'\n\t\tif node.sons[idx] == nil {\n\t\t\treturn false\n\t\t}\n\t\tnode = node.sons[idx]\n\t}\n\treturn true\n}", "func hasPrefix(s, prefix string) bool {\n\treturn len(s) >= len(prefix) && s[:len(prefix)] == prefix\n}", "func (this *Trie) StartsWith(prefix string) bool {\n\tcur := this.Root\n\tfor _, c := range prefix {\n\t\tif _, ok := cur.Child[c]; !ok {\n\t\t\treturn false\n\t\t}\n\t\tcur = cur.Child[c]\n\t}\n\treturn true\n}", "func prependIfMissing(str string, prefix string, ignoreCase bool, prefixes ...string) string {\n if IsEmpty(prefix) || internalStartsWith(str, prefix, ignoreCase) {\n\t\treturn str\n\t}\n\tfor _, pref := range prefixes {\n\t\tif pref == \"\" || internalStartsWith(str, pref, ignoreCase) {\n\t\t\treturn str\n\t\t}\n\t}\n\treturn prefix + str\n}", "func SocialPayloadHasPrefix(v string) predicate.User {\n\treturn predicate.User(func(s *sql.Selector) {\n\t\ts.Where(sql.HasPrefix(s.C(FieldSocialPayload), v))\n\t})\n}", "func HasPrefix(s, prefix string) bool {\n\treturn len(s) >= len(prefix) && s[:len(prefix)] == prefix\n}", "func HasPrefix(s, prefix string) bool {\n\treturn len(s) >= len(prefix) && s[:len(prefix)] == prefix\n}", "func HasPrefix(s, prefix string) bool {\n\treturn len(s) >= len(prefix) && 
s[:len(prefix)] == prefix\n}", "func (this *Trie) StartsWith(prefix string) bool {\n\tn := this.root\n\n\tfor i := 0; i < len(prefix); i++ {\n\t\twid := prefix[i] - 'a'\n\t\tif n.children[wid] == nil {\n\t\t\treturn false\n\t\t}\n\t\tn = n.children[wid]\n\t}\n\n\treturn true\n}", "func AnySatisfies(pred StringPredicate, slice []string) bool {\n\tfor _, sliceString := range slice {\n\t\tif pred(sliceString) {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func internalStartsWith(str string, prefix string, ignoreCase bool) bool {\n\tif str == \"\" || prefix == \"\" {\n\t\treturn (str == \"\" && prefix == \"\")\n\t}\n\tif utf8.RuneCountInString(prefix) > utf8.RuneCountInString(str) {\n\t\treturn false\n\t}\n\tif ignoreCase {\n\t\treturn strings.HasPrefix(strings.ToLower(str), strings.ToLower(prefix))\n\t}\n\treturn strings.HasPrefix(str, prefix)\n}", "func (this *Trie) StartsWith(prefix string) bool {\n\tnode := this\n\tfor _, v := range prefix {\n\t\tif node = node.next[v-'a']; node == nil {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}", "func HasAllowedImageAsPrefix(str string, imageList []string) bool {\n\tfor _, imagePrefix := range imageList {\n\t\tif strings.HasPrefix(str, imagePrefix) {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func ListHasPrefix(list, prefix []string) bool {\n\tif len(prefix) == 0 {\n\t\treturn false\n\t}\n\tif len(prefix) > len(list) {\n\t\treturn false\n\t}\n\treturn ListEquals(list[:len(prefix)], prefix)\n}", "func (this *Trie) StartsWith(prefix string) bool {\n\ttemp := this\n\tfor _, v := range prefix {\n\t\tnxt := v - 'a'\n\t\tif temp.next[nxt] == nil {\n\t\t\treturn false\n\t\t} else {\n\t\t\ttemp = temp.next[nxt]\n\t\t}\n\t}\n\treturn true\n}", "func StartsWith(str string, prefix string) bool {\n\treturn internalStartsWith(str, prefix, false)\n}", "func ContainsAny(text string, slice []string) bool {\n\tfor _, s := range slice {\n\t\tif strings.Contains(text, s) {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn 
false\n}", "func (t *Trie) StartsWith(prefix string) bool {\n\ttmp := t\n\tfor _, c := range prefix {\n\t\tif l, valid := tmp.links[string(c)]; valid {\n\t\t\ttmp = l\n\t\t} else {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}", "func MatchPrefixes(prefixes []string) MatcherFunc {\n\treturn func(el Elem) bool {\n\t\tfor _, pfx := range prefixes {\n\t\t\tif strings.HasPrefix(el.Name(), pfx) {\n\t\t\t\treturn true\n\t\t\t}\n\t\t}\n\t\treturn false\n\t}\n}", "func startsWithFunc(a, b string) bool {\n\treturn strings.HasPrefix(a, b)\n}", "func StreetHasPrefix(v string) predicate.Delivery {\n\treturn predicate.Delivery(func(s *sql.Selector) {\n\t\ts.Where(sql.HasPrefix(s.C(FieldStreet), v))\n\t})\n}", "func startsWith(arr []string, parts ...string) bool {\n\tif len(arr) < len(parts) {\n\t\treturn false\n\t}\n\tfor i, p := range parts {\n\t\tif arr[i] != p {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}", "func (this *Trie) StartsWith(prefix string) bool {\n\tcur := this\n\tfor i := 0; i < len(prefix); i++ {\n\t\tb := prefix[i]\n\t\tif cur.next[b-97] == nil {\n\t\t\treturn false\n\t\t}\n\t\tcur = cur.next[b-97]\n\t}\n\treturn cur != nil\n}", "func SliceContainsAny(haystack []string, needles ...string) bool {\n\tfor _, a := range haystack {\n\t\tfor _, needle := range needles {\n\t\t\tif a == needle {\n\t\t\t\treturn true\n\t\t\t}\n\t\t}\n\t}\n\treturn false\n}", "func (req *ServerHTTPRequest) HasQueryPrefix(prefix string) bool {\n\tsuccess := req.parseQueryValues()\n\tif !success {\n\t\treturn false\n\t}\n\n\tfor key := range req.queryValues {\n\t\tif strings.HasPrefix(key, prefix) {\n\t\t\treturn true\n\t\t}\n\t}\n\n\treturn false\n}", "func (this *Trie) StartsWith(prefix string) bool {\n node := this.root\n for _, r := range prefix {\n child, existed := node.children[r]\n if !existed {\n return false\n }\n node = child\n }\n return true\n}", "func (this *Trie) StartsWith(prefix string) bool {\n\ttrie := this\n\tfor _, char := range prefix {\n\t\tif 
trie.childs[char-97] == nil {\n\t\t\treturn false\n\t\t}\n\t\ttrie = trie.childs[char-97]\n\t}\n\treturn true\n}", "func stringMatchAny(x string, y []string) bool {\n\tfor _, v := range y {\n\t\tif x == v {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func (this *Trie) StartsWith(prefix string) bool {\n\tif len(prefix) == 0 {\n\t\treturn true\n\t}\n\tfor _, e := range this.edges {\n\t\tif e.char == prefix[0] {\n\t\t\treturn e.next.StartsWith(prefix[1:])\n\t\t}\n\t}\n\treturn false\n}", "func (o BucketLifecycleRuleItemConditionResponseOutput) MatchesPrefix() pulumi.StringArrayOutput {\n\treturn o.ApplyT(func(v BucketLifecycleRuleItemConditionResponse) []string { return v.MatchesPrefix }).(pulumi.StringArrayOutput)\n}", "func ZipcodeHasPrefix(v string) predicate.Delivery {\n\treturn predicate.Delivery(func(s *sql.Selector) {\n\t\ts.Where(sql.HasPrefix(s.C(FieldZipcode), v))\n\t})\n}", "func ZipcodeHasPrefix(v string) predicate.User {\n\treturn predicate.User(func(s *sql.Selector) {\n\t\ts.Where(sql.HasPrefix(s.C(FieldZipcode), v))\n\t})\n}", "func anyAreEmpty(values ...string) bool {\n\tfor _, v := range values {\n\t\tif v == \"\" {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func (this *Trie) StartsWith(prefix string) bool {\n node := this.searchPrefix(prefix)\n \n return node != nil\n}", "func CountryHasPrefix(v string) predicate.Location {\n\treturn predicate.Location(func(s *sql.Selector) {\n\t\ts.Where(sql.HasPrefix(s.C(FieldCountry), v))\n\t})\n}", "func TestAnyString(t *testing.T) {\n\tt.Parallel()\n\tvar tests = []struct {\n\t\ts []string\n\t\texpected bool\n\t}{\n\t\t{[]string{\"foo\", \"\\u0062\\u0061\\u0072\", \"baz\"}, true},\n\t\t{[]string{\"boo\", \"bar\", \"baz\"}, false},\n\t\t{[]string{\"foo\", \"far\", \"baz\"}, true},\n\t}\n\tfor _, test := range tests {\n\t\tactual := primitives.AnyString(test.s, func(s string) bool {\n\t\t\treturn strings.HasPrefix(s, \"f\")\n\t\t})\n\t\tassert.Equal(t, test.expected, actual, \"expected value 
'%v' | actual : '%v'\", test.expected, actual)\n\t}\n}", "func (m URLPrefixMap) Contains(uri *url.URL) bool {\n\ts := strings.TrimPrefix(uri.Host, \"www.\")\n\tif _, ok := m[s]; ok {\n\t\treturn true\n\t}\n\tfor _, p := range strings.Split(uri.Path, \"/\") {\n\t\tif p == \"\" {\n\t\t\tcontinue\n\t\t}\n\t\ts = fmt.Sprintf(\"%s/%s\", s, p)\n\t\tif _, ok := m[s]; ok {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func (t *Trie) StartsWith(prefix string) bool {\n\treturn t.searchPrefix(prefix) != nil\n}", "func (a *Assertions) HasPrefix(corpus, prefix string, userMessageComponents ...interface{}) bool {\n\ta.assertion()\n\tif didFail, message := shouldHasPrefix(corpus, prefix); didFail {\n\t\treturn a.fail(message, userMessageComponents...)\n\t}\n\treturn true\n}", "func StreetHasPrefix(v string) predicate.Location {\n\treturn predicate.Location(func(s *sql.Selector) {\n\t\ts.Where(sql.HasPrefix(s.C(FieldStreet), v))\n\t})\n}", "func PostalcodeHasPrefix(v string) predicate.Location {\n\treturn predicate.Location(func(s *sql.Selector) {\n\t\ts.Where(sql.HasPrefix(s.C(FieldPostalcode), v))\n\t})\n}", "func (dc *IPMap) checkPrefixAllMatches(sIP string) ([]ServiceID, bool) {\n\tservices := []ServiceID{}\n\tip := net.ParseIP(sIP)\n\tfound := false\n\tfor _, entry := range dc.prefixes {\n\t\tif entry.prefix.Contains(ip) {\n\t\t\tservices = append(services, entry.services...)\n\t\t\tfound = true\n\t\t}\n\t}\n\treturn services, found\n}", "func IsAnyEmpty(strings ...string) bool {\n\tfor _, s := range strings {\n\t\tif IsEmpty(s) {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func (this *Trie) StartsWith(prefix string) bool {\n\tif prefix == \"\" {\n\t\treturn true\n\t}\n\tif this == nil {\n\t\treturn false\n\t}\n\tindex := ([]byte(prefix[0:1]))[0] - byte('a')\n\tif this.child[index] == nil {\n\t\treturn false\n\t}\n\tif prefix[1:] == \"\" {\n\t\treturn true\n\t}\n\treturn this.child[index].StartsWith(prefix[1:])\n\n}", "func (t *Trie) StartWith(prefix string) 
bool {\n\tcurr := t.Root\n\tfor _, char := range prefix {\n\t\tif _, ok := curr.Children[char]; !ok {\n\t\t\treturn false\n\t\t}\n\t\tcurr = curr.Children[char]\n\t}\n\treturn true\n}", "func ContainsAtLeastOneString(haystack []string, needles ...string) bool {\n\t// Avoid allocations for a single check.\n\tif len(needles) == 1 {\n\t\tfor _, h := range haystack {\n\t\t\tif h == needles[0] {\n\t\t\t\treturn true\n\t\t\t}\n\t\t}\n\t\treturn false\n\t}\n\n\tchecks := make(map[string]struct{}, len(needles))\n\tfor _, n := range needles {\n\t\tchecks[n] = struct{}{}\n\t}\n\n\tfor _, h := range haystack {\n\t\t_, ok := checks[h]\n\t\tif ok {\n\t\t\treturn true\n\t\t}\n\t}\n\n\treturn false\n}", "func (this *Trie) StartsWith(prefix string) bool {\n\treturn this.SearchPrefix(prefix) != nil\n}", "func FilterPrefix(stringSet sets.String, prefix string, ignoreCase bool) sets.String {\n\tif prefix == \"\" {\n\t\treturn stringSet\n\t}\n\treturn filterSet(stringSet, prefix, ignoreCase, strings.HasPrefix)\n}", "func TaxIDHasPrefix(v string) predicate.Watchlist {\n\treturn predicate.Watchlist(func(s *sql.Selector) {\n\t\ts.Where(sql.HasPrefix(s.C(FieldTaxID), v))\n\t})\n}", "func (o BucketLifecycleRuleItemConditionOutput) MatchesPrefix() pulumi.StringArrayOutput {\n\treturn o.ApplyT(func(v BucketLifecycleRuleItemCondition) []string { return v.MatchesPrefix }).(pulumi.StringArrayOutput)\n}", "func (this *Trie) StartsWith(prefix string) bool {\n\tbytes := []byte(prefix)\n\tif len(bytes) <= 0 {\n\t\treturn true\n\t}\n\tfor _, value := range bytes {\n\t\t//如果数据存在\n\t\tif _, ok := this.nexts[value]; !ok {\n\t\t\treturn false\n\t\t}\n\t\tthis = this.nexts[value]\n\t}\n\treturn true\n}", "func SelectPrefixInStringSlice(prefix string, items []string) []string {\n\n\tl := len(prefix)\n\n\tvar results []string\n\n\t// iterate through the slice of items\n\tfor _, item := range items {\n\n\t\t// check the item length is geater than or equal to the prefix length\n\t\t// this ensures no out of 
bounds memory errors will occur\n\t\tif len(item) >= l {\n\t\t\tif prefix == item[:l] {\n\t\t\t\tresults = append(results, item)\n\t\t\t}\n\t\t}\n\t}\n\n\treturn results\n}", "func containsWildcardValue(flag []string) bool {\n\tfor _, value := range flag {\n\t\tif value == all {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func FirstNameHasPrefix(v string) predicate.User {\n\treturn predicate.User(func(s *sql.Selector) {\n\t\ts.Where(sql.HasPrefix(s.C(FieldFirstName), v))\n\t})\n}" ]
[ "0.8174585", "0.79133207", "0.7500345", "0.738086", "0.71756464", "0.6655777", "0.659458", "0.6593437", "0.6548416", "0.6371666", "0.63586235", "0.6307001", "0.6305777", "0.6094398", "0.60713154", "0.59563047", "0.5929936", "0.5913401", "0.58664", "0.58584625", "0.58547485", "0.583861", "0.5823773", "0.58182263", "0.5808574", "0.58025265", "0.5798194", "0.57887095", "0.57716453", "0.5763238", "0.5736851", "0.5735318", "0.57224435", "0.5720496", "0.57106954", "0.5709657", "0.5702433", "0.5692912", "0.56908226", "0.56868607", "0.56796604", "0.5674447", "0.56711376", "0.5663395", "0.5662449", "0.5651354", "0.56510353", "0.5650179", "0.56247807", "0.5621473", "0.56005555", "0.5572845", "0.5572845", "0.5572845", "0.5572789", "0.55708736", "0.5566191", "0.55597174", "0.5552582", "0.55392563", "0.5534622", "0.5522141", "0.550648", "0.5496988", "0.548244", "0.54611415", "0.5459387", "0.5456026", "0.5452717", "0.5436017", "0.54194313", "0.54187816", "0.54114026", "0.54112816", "0.54085696", "0.54052943", "0.5402295", "0.53974885", "0.5390822", "0.5383019", "0.53817374", "0.53621274", "0.5361205", "0.53465873", "0.53336126", "0.5330215", "0.5328933", "0.53286994", "0.5315268", "0.53122467", "0.5306369", "0.53039914", "0.53021765", "0.52985954", "0.5294909", "0.5284019", "0.5281666", "0.5278386", "0.52772087", "0.5271012" ]
0.8484297
0
GetUsersHandler lista todos los usuarios
GetUsersHandler выводит список всех пользователей
func GetUsersHandler(w http.ResponseWriter, r *http.Request) { var users []User for _, v := range Listusers { users = append(users, v) } w.Header().Set("Content-Type", "application/json") j, err := json.Marshal(users) if err != nil { panic(err) } w.WriteHeader(http.StatusOK) w.Write(j) }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func getUsersHandler(c *gin.Context) {\n\tuser, _ := c.Get(JwtIdentityKey)\n\n\t// Role check.\n\tif !isAdmin(user) {\n\t\tc.JSON(http.StatusUnauthorized, gin.H{\"message\": \"unauthorized\"})\n\t\treturn\n\t}\n\n\tpage := c.DefaultQuery(\"page\", \"1\")\n\tcount := c.DefaultQuery(\"count\", \"10\")\n\tpageInt, _ := strconv.Atoi(page)\n\tcountInt, _ := strconv.Atoi(count)\n\n\tif page == \"0\" {\n\t\tpageInt = 1\n\t}\n\n\tvar wg sync.WaitGroup\n\tvar users *[]types.User\n\tvar usersCount int\n\n\tdb := data.New()\n\twg.Add(1)\n\tgo func() {\n\t\tusers = db.Users.GetUsers((pageInt-1)*countInt, countInt)\n\t\twg.Done()\n\t}()\n\n\twg.Add(1)\n\tgo func() {\n\t\tusersCount = db.Users.GetUsersCount()\n\t\twg.Done()\n\t}()\n\twg.Wait()\n\n\tc.JSON(http.StatusOK, gin.H{\n\t\t\"code\": http.StatusOK,\n\t\t\"users\": users,\n\t\t\"count\": usersCount,\n\t})\n}", "func GetUsers(clients *common.ClientContainer, handler common.HandlerInterface) http.HandlerFunc {\n\treturn func(w http.ResponseWriter, r *http.Request) {\n\t\tusers, err := handler.GetUsers(clients)\n\t\tif err != nil {\n\t\t\tlog.Logger.Error(err)\n\t\t\tcommon.WriteErrorToResponse(w, http.StatusInternalServerError,\n\t\t\t\thttp.StatusText(http.StatusInternalServerError),\n\t\t\t\t\"Internal server error occured\")\n\t\t\treturn\n\t\t}\n\t\tw.Write(users)\n\t}\n}", "func (auh *AdminUserHandler) GetUsers(w http.ResponseWriter,\n\tr *http.Request, _ httprouter.Params) {\n\n\tvar apiKey = r.Header.Get(\"api-key\")\n\tif apiKey == \"\" || apiKey != adminApiKey {\n\t\tw.Header().Set(\"Content-Type\", \"application/json\")\n\t\thttp.Error(w, http.StatusText(http.StatusForbidden), http.StatusForbidden)\n\t\treturn\n\t}\n\tusers, errs := auh.userService.Users()\n\n\tif len(errs) > 0 {\n\t\tw.Header().Set(\"Content-Type\", \"application/json\")\n\t\thttp.Error(w, http.StatusText(http.StatusNotFound), http.StatusNotFound)\n\t\treturn\n\t}\n\n\toutput, err := json.MarshalIndent(users, \"\", \"\\t\")\n\n\tif err != nil 
{\n\t\tw.Header().Set(\"Content-Type\", \"application/json\")\n\t\thttp.Error(w, http.StatusText(http.StatusNotFound), http.StatusNotFound)\n\t\treturn\n\t}\n\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\tw.Write(output)\n\treturn\n\n}", "func (api *API) getUsersHandler() service.Handler {\n\treturn func(ctx context.Context, w http.ResponseWriter, r *http.Request) error {\n\t\tusers, err := user.LoadAll(ctx, api.mustDB(), user.LoadOptions.WithOrganization)\n\t\tif err != nil {\n\t\t\treturn sdk.WrapError(err, \"cannot load user from db\")\n\t\t}\n\t\treturn service.WriteJSON(w, users, http.StatusOK)\n\t}\n}", "func (u *UserServiceHandler) List(ctx context.Context) ([]User, error) {\n\n\turi := \"/v1/user/list\"\n\n\treq, err := u.client.NewRequest(ctx, http.MethodGet, uri, nil)\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar users []User\n\terr = u.client.DoWithContext(ctx, req, &users)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn users, nil\n}", "func GetUsersHandler(w http.ResponseWriter, r *http.Request) {\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\tj, status := users.GetAllUsers()\n\tw.WriteHeader(status)\n\tw.Write(j)\n}", "func GetUsers(w http.ResponseWriter, r *http.Request) {\n\tvar users []UsersData\n\terr := model.FindAll(nil, &users)\n\tif err != nil {\n\t\tfmt.Println(\"err\", err)\n\t\tw.Write([]byte(\"Something wen't wrong!!\"))\n\t} else {\n\t\trender.JSON(w, 200, &users)\n\t}\n}", "func GetUsers(w http.ResponseWriter, r *http.Request) {\n\tloginOrName := strings.ToLower(r.URL.Query().Get(\"user\"))\n\n\tdb, err := database.Connect()\n\tif err != nil {\n\t\tresponses.Error(w, http.StatusInternalServerError, err)\n\t\treturn\n\t}\n\tdefer db.Close()\n\n\trepository := repository.NewRepositoryUser(db)\n\n\tusers, err := repository.SearchByLoginOrName(loginOrName)\n\tif err != nil {\n\t\tresponses.Error(w, http.StatusInternalServerError, err)\n\t\treturn\n\t}\n\n\tresponses.JSON(w, http.StatusOK, 
users)\n\n}", "func (uc UserController) GetUsers(w http.ResponseWriter, r *http.Request, p httprouter.Params) {\n\n\tvar ul []models.User\n\n\t// Fetch user\n\tif err := uc.session.DB(\"todos\").C(\"users\").Find(nil).All(&ul); err != nil {\n\t\tw.WriteHeader(404)\n\t\treturn\n\t}\n\n\t// Marshal provided interface into JSON structure\n\tuj, _ := json.Marshal(ul)\n\n\t// Write content-type, statuscode, payload\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\tw.WriteHeader(200)\n\tfmt.Fprintf(w, \"%s\", uj)\n}", "func (h *Handler) GetUsers(w http.ResponseWriter, r *http.Request) {\n\tvar users []User\n\tcur, err := h.Collection.Find(context.TODO(), bson.D{{}}, options.Find())\n\tif err != nil {\n\t\th.Logger.Errorf(\"err retrieving cursor item: %s\", err)\n\t\thttp.Error(w, http.StatusText(500), 500)\n\t\treturn\n\t}\n\tfor cur.Next(context.TODO()) {\n\t\tuser := &User{}\n\t\terr := cur.Decode(&user)\n\t\tif err != nil {\n\t\t\th.Logger.Errorf(\"err decoding item: %s\", err)\n\t\t\thttp.Error(w, http.StatusText(500), 500)\n\t\t\treturn\n\t\t}\n\t\tuser.Password = \"\" // Never return password hashes\n\t\tusers = append(users, *user)\n\t}\n\trender.JSON(w, r, users) // A chi router helper for serializing and returning json\n}", "func (h *handler) Users(w http.ResponseWriter, r *http.Request) {\n\tapiReq, err := http.NewRequest(\"GET\", h.serverAddress+\"/users\", nil)\n\tif err != nil {\n\t\tserverError(w, err)\n\t\treturn\n\t}\n\n\tclient := &http.Client{}\n\tres, err := client.Do(apiReq)\n\tif err != nil {\n\t\tserverError(w, err)\n\t\treturn\n\t}\n\tdefer res.Body.Close()\n\n\tvar uis []socialnet.UserItem\n\terr = json.NewDecoder(res.Body).Decode(&uis)\n\tif err != nil {\n\t\tserverError(w, err)\n\t\treturn\n\t}\n\n\terr = h.template.ExecuteTemplate(w, \"users.html\", uis)\n\tif err != nil {\n\t\tserverError(w, fmt.Errorf(\"failed to execute template users.html: %s\", err))\n\t\treturn\n\t}\n}", "func GetUsers(c *gin.Context) {\n\tvar user 
[]Models.User\n\tvar u Models.User\n\terr := Models.GetAllUsers(&user)\n\tif err != nil {\n\t\tc.JSON(http.StatusNotFound, gin.H{\n\t\t\t\"error\" : gin.H { \n\t\t\t\"status\": http.StatusNotFound,\n\t\t\t\"message\": err.Error(),\n\t\t}})\n\t\tc.AbortWithStatus(http.StatusNotFound)\n\t} else {\n\t\tlog.Println(\"====== Bind By Query String ======\")\n\t\tlog.Println(u.Nombre)\n\t\t//var rubro []Models.RubroUsuario\n\t \n\t\tfmt.Println(c.Request.URL.Query())\n\t\t page, _ := strconv.Atoi(c.DefaultQuery(\"page\", \"1\"))\n\t\t limit, _ := strconv.Atoi(c.DefaultQuery(\"limit\", \"50\"))\n\t\n\t\t paginator := pagination.Paging(&pagination.Param{\n\t\t\tDB: Config.DB.Preload(\"Rubros\").Preload(\"Unidades\"),\n\t\t\tPage: page,\n\t\t\tLimit: limit,\n\t\t\tOrderBy: []string{\"id\"},\n\t\t\tShowSQL: true,\n\t\t}, &user)\n \n\t\tc.JSON(200, paginator)\n\n\t}\n}", "func GetUsers(req *http.Request, render render.Render, account services.Account) {\n qs := req.URL.Query()\n userIDs := qs[\"userId\"]\n var users []models.User\n for _, userID := range userIDs {\n if user, err := account.GetUser(userID); err != nil {\n render.JSON(err.HttpCode, err)\n return\n } else {\n users = append(users, *user)\n }\n }\n render.JSON(http.StatusOK, users)\n}", "func (srv *UsersService) ListHandler(ctx *gin.Context) {\n\tlogger := srv.logger.New(\"action\", \"ListHandler\")\n\n\tcurrentUser := GetCurrentUser(ctx)\n\n\tlimitQuery := ctx.DefaultQuery(\"limit\", \"10\")\n\tpageQuery := ctx.DefaultQuery(\"page\", \"1\")\n\tparams := ctx.Request.URL.Query()\n\n\tvar adminsRoleIncluded = false\n\n\troles := params[\"filter[role_name]\"]\n\tif len(roles) > 0 {\n\t\tfor key, role := range roles {\n\t\t\t// remove root from role names if user is not root\n\t\t\t// only root can see root users\n\t\t\tif role == models.RoleRoot && currentUser.RoleName != models.RoleRoot {\n\t\t\t\tcopy(roles[key:], roles[key+1:])\n\t\t\t\troles[len(roles)-1] = \"\"\n\t\t\t\troles = 
roles[:len(roles)-1]\n\t\t\t}\n\t\t\tif role == models.RoleRoot || role == models.RoleAdmin {\n\t\t\t\tadminsRoleIncluded = true\n\t\t\t}\n\t\t}\n\t} else {\n\t\tadminsRoleIncluded = true\n\t}\n\n\tvar hasPerm bool\n\tif adminsRoleIncluded {\n\t\thasPerm = srv.PermissionsService.CanViewAdminProfile(currentUser.UID)\n\t} else {\n\t\thasPerm = srv.PermissionsService.CanViewUserProfile(currentUser.UID)\n\t}\n\n\tif !hasPerm {\n\t\tsrv.ResponseService.Forbidden(ctx)\n\t\treturn\n\t}\n\n\tquery := srv.Repository.GetUsersRepository().Filter(params)\n\n\tpagination, err := srv.Repository.GetUsersRepository().Paginate(query, pageQuery, limitQuery, serializers.NewUsers())\n\tif err != nil {\n\t\tlogger.Error(\"сan't load list of user\", \"error\", err)\n\t\t// Returns a \"400 StatusBadRequest\" response\n\t\tsrv.ResponseService.Error(ctx, responses.CannotRetrieveCollection, \"Can't load list of users\")\n\t\treturn\n\t}\n\n\t// Returns a \"200 OK\" response\n\tsrv.ResponseService.OkResponse(ctx, pagination)\n}", "func (h *HTTPClientHandler) getAllUsersHandler(w http.ResponseWriter, r *http.Request) {\n\n\tuserid, _ := r.URL.Query()[\"q\"]\n\t// looking for specific user\n\tif len(userid) > 0 {\n\t\tlog.WithFields(log.Fields{\n\t\t\t\"userid\": userid[0],\n\t\t}).Info(\"Looking for user..\")\n\n\t\tuser, err := h.db.getUser(userid[0])\n\n\t\tif err == nil {\n\t\t\t// Marshal provided interface into JSON structure\n\t\t\tresponse := UserResource{Data: user}\n\t\t\tuj, _ := json.Marshal(response)\n\n\t\t\t// Write content-type, statuscode, payload\n\t\t\tw.Header().Set(\"Content-Type\", \"application/json\")\n\t\t\tw.WriteHeader(200)\n\t\t\tfmt.Fprintf(w, \"%s\", uj)\n\t\t\treturn\n\t\t} else {\n\t\t\tlog.WithFields(log.Fields{\n\t\t\t\t\"error\": err.Error(),\n\t\t\t}).Warn(\"Failed to insert..\")\n\n\t\t\tcontent, code := responseDetailsFromMongoError(err)\n\n\t\t\t// Marshal provided interface into JSON structure\n\t\t\tuj, _ := json.Marshal(content)\n\n\t\t\t// Write 
content-type, statuscode, payload\n\t\t\twriteJsonResponse(w, &uj, code)\n\t\t\treturn\n\n\t\t}\n\t}\n\n\tlog.Warn(len(userid))\n\t// displaying all users\n\tresults, err := h.db.getUsers()\n\tif err != nil {\n\t\tlog.WithFields(log.Fields{\n\t\t\t\"error\": err.Error(),\n\t\t}).Error(\"Got error when tried to get all users\")\n\t}\n\n\tlog.WithFields(log.Fields{\n\t\t\"count\": len(results),\n\t}).Info(\"number of users\")\n\n\t// Marshal provided interface into JSON structure\n\tresponse := UsersResource{Data: results}\n\tuj, _ := json.Marshal(response)\n\n\t// Write content-type, statuscode, payload\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\tw.WriteHeader(200)\n\tfmt.Fprintf(w, \"%s\", uj)\n}", "func (h *Handler) getAllUsers(c *gin.Context) handlerResponse {\n\n\tusers, err := h.service.User.GetAll()\n\tif err != nil {\n\t\treturn handleError(err)\n\t}\n\tremovePasswords(users)\n\treturn handleOK(StringMap{\"users\": users})\n}", "func (a *Server) ListUsers(w http.ResponseWriter, r *http.Request) {\n\tfmt.Println(\"lists all users\")\n}", "func (ac *ApiConfig) GetAllUsersHandler(w http.ResponseWriter, r *http.Request) {\n\tlmt := r.URL.Query().Get(\"limit\")\n\toff := r.URL.Query().Get(\"offset\")\n\n\tlimit, err := strconv.Atoi(lmt)\n\tif err != nil {\n\t\tzerolog.Error().Msg(err.Error())\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\toffset, err := strconv.Atoi(off)\n\tif err != nil {\n\t\tzerolog.Error().Msg(err.Error())\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\tusers, err := ac.DHolder.GetAllUsers(limit, offset)\n\tif err != nil {\n\t\tzerolog.Error().Msg(err.Error())\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\terr = dResponseWriter(w, users, http.StatusOK)\n\tif err != nil {\n\t\tzerolog.Error().Msg(err.Error())\n\t\treturn\n\t}\n\n\treturn\n}", "func (s *ServerState) getUsers(c *gin.Context) {\n\tvar u []User\n\tif 
err := s.DB.Select(&u, \"select * from users\"); err != nil {\n\t\tc.JSON(http.StatusBadRequest, gin.H{\"status\": err})\n\t\treturn\n\t}\n\n\tc.JSON(http.StatusOK, gin.H{\"user\": u})\n}", "func GetAllUsers(w http.ResponseWriter, r *http.Request) {\n\tw.Write([]byte(\"Getting all users\"))\n}", "func UsersGet(w http.ResponseWriter, r *http.Request, ps httprouter.Params) {\n\tlog.Println(\"starting retrieval\")\n\tstart := 0\n\tlimit := 10\n\n\tnext := start + limit\n\n\tw.Header().Set(\"Pragma\", \"no-cache\")\n\tw.Header().Set(\"Link\", \"<http://localhost:8080/api/users?start=\"+string(next)+\"; rel=\\\"next\\\"\")\n\n\trows, _ := database.Query(\"SELECT * FROM users LIMIT 10\")\n\n\tusers := Users{}\n\n\tfor rows.Next() {\n\t\tuser := User{}\n\t\trows.Scan(&user.ID, &user.Username, &user.First, &user.Last, &user.Email)\n\t\tusers.Users = append(users.Users, user)\n\t}\n\n\toutput, err := json.Marshal(users)\n\tif err != nil {\n\t\tfmt.Fprintln(w, \"Something went wrong while processing your request: \", err)\n\t}\n\n\tfmt.Fprintln(w, string(output))\n}", "func GetUsers(c *gin.Context, client *statsd.Client) {\n\tlog.Info(\"getting all users\")\n\tvar users []entity.User\n\terr := model.GetAllUsers(&users, client)\n\tif err != nil {\n\t\tlog.Error(err)\n\t\tc.JSON(http.StatusNotFound, gin.H{\n\t\t\t\"error\": err.Error(),\n\t\t})\n\t\treturn\n\t}\n\n\tfor _, user := range users {\n\t\tc.JSON(http.StatusOK, gin.H{\n\t\t\t\"id\": user.ID,\n\t\t\t\"first_name\": user.FirstName,\n\t\t\t\"last_name\": user.LastName,\n\t\t\t\"username\": user.Username,\n\t\t\t\"account_created\": user.AccountCreated,\n\t\t\t\"account_updated\": user.AccountUpdated,\n\t\t})\n\t}\n}", "func GetUsers(c *gin.Context) {\n\tvar users []models.User\n\tlog.Println(\"GetUsers from db\")\n\tdb := db.GetDB()\n\tdb.Find(&users)\n\tc.JSON(http.StatusOK, users)\n}", "func GetUsers(c *fiber.Ctx) {\n\tvar users []User\n\tdatabase.DBConn.Find(&users)\n\tc.JSON(users)\n}", "func (u *UserService) 
List(ctx context.Context) ([]*User, *http.Response, error) {\n\treq, err := u.client.newRequest(\"GET\", \"user.list\", nil)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tvar users []*User\n\tresp, err := u.client.do(ctx, req, &users)\n\tif err != nil {\n\t\treturn nil, resp, err\n\t}\n\treturn users, resp, nil\n}", "func (h *Handler) GetUsers() ([]models.User, error) {\n\tquery := \"SELECT id, first_name, last_name, email, password FROM users;\"\n\trows, err := h.DB.Query(query)\n\tif err != nil {\n\t\tfmt.Printf(\"user_service-GetUsers-query: %s\\n\", err)\n\t\treturn nil, err\n\t}\n\n\tvar users []models.User\n\n\tfor rows.Next(){\n\t\tuser := models.User{}\n\t\t\n\t\terr := rows.Scan(\n\t\t\t&user.ID,\n\t\t\t&user.FirstName,\n\t\t\t&user.LastName,\n\t\t\t&user.Email,\n\t\t\t&user.Password,\n\t\t)\n\t\tif err != nil {\n\t\t\tfmt.Printf(\"user_service-GetUsers-Scan: %s \\n\",err)\n\t\t}\n\t\t\n\t\tusers = append(users, user)\n\t}\n\n\treturn users, nil\n}", "func getUsers(types int) {\n\treq, _ := http.NewRequest(\"GET\", cfg.Main.Server+\"users\", nil)\n\treq.Header.Set(\"Content-Type\", \"application/xml\")\n\treq.Header.Set(\"Authorization\", cfg.Main.Key)\n\tclient := &http.Client{}\n\tres, err := client.Do(req)\n\tif err != nil {\n\t\tp(\"Couldn't connect to Openfire server: %s\", err.Error())\n\t\treturn\n\t}\n\tdefer res.Body.Close()\n\tif res.StatusCode != 200 {\n\t\tp(\"Error requesting userlist from the server.\")\n\t\treturn\n\t}\n\tbody, _ := ioutil.ReadAll(res.Body)\n\tvar users XMLUsers\n\txml.Unmarshal(body, &users)\n\tfor _, e := range users.User {\n\t\tn := e.Username + \",\"\n\t\tif e.Name != \"\" {\n\t\t\tn = e.Username + \",\" + e.Name\n\t\t}\n\t\tswitch types {\n\t\tcase 0:\n\t\t\tm := \"<missing e-mail>\"\n\t\t\tif e.Email != \"\" {\n\t\t\t\tm = e.Email\n\t\t\t}\n\t\t\tp(\"%s,%s\", n, m)\n\t\tcase 1:\n\t\t\tif e.Email != \"\" {\n\t\t\t\tp(\"%s,%s\", n, e.Email)\n\t\t\t}\n\t\tcase 2:\n\t\t\tif e.Email == \"\" {\n\t\t\t\tp(\"%s\", 
n)\n\t\t\t}\n\t\t}\n\t}\n}", "func (handler *UserHandler) GetAllUsers(w http.ResponseWriter, req *http.Request, _ httprouter.Params) {\n\tvar users []*User\n\tul, err := handler.UserService.GetAllUsers()\n\n\tfor _, user := range ul {\n\t\tusers = append(users, user.hidePassword())\n\t}\n\n\tif err != nil {\n\t\thandler.Formatter.JSON(w, http.StatusBadRequest, util.NewError(\"1008\", \"Missing user privileges\", err.Error()))\n\t} else {\n\t\thandler.Formatter.JSON(w, http.StatusOK, users)\n\t}\n\n}", "func GetUsers(c *gin.Context) {\n\n\tlog := logger.WithFields(logrus.Fields{\"tag\": \"GetUsers\"})\n\tlog.Info(\"Fetching users\")\n\n\torganization := auth.GetCurrentOrganization(c.Request)\n\n\tidParam := c.Param(\"id\")\n\tid, err := strconv.ParseUint(idParam, 10, 32)\n\tif idParam != \"\" && err != nil {\n\t\tmessage := fmt.Sprintf(\"error parsing user id: %s\", err)\n\t\tlog.Info(message)\n\t\tc.JSON(http.StatusBadRequest, components.ErrorResponse{\n\t\t\tCode: http.StatusBadRequest,\n\t\t\tMessage: message,\n\t\t\tError: message,\n\t\t})\n\t\treturn\n\t}\n\n\tvar users []auth.User\n\tdb := model.GetDB()\n\terr = db.Model(organization).Related(&users, \"Users\").Error\n\tif err != nil {\n\t\tmessage := \"failed to fetch users\"\n\t\tlog.Info(message + \": \" + err.Error())\n\t\tc.AbortWithStatusJSON(http.StatusInternalServerError, components.ErrorResponse{\n\t\t\tCode: http.StatusInternalServerError,\n\t\t\tMessage: message,\n\t\t\tError: message,\n\t\t})\n\t} else if id == 0 {\n\t\tc.JSON(http.StatusOK, users)\n\t} else if len(users) == 1 {\n\t\tc.JSON(http.StatusOK, users[0])\n\t} else if len(users) > 1 {\n\t\tmessage := fmt.Sprintf(\"multiple users found with id: %d\", id)\n\t\tlog.Info(message)\n\t\tc.AbortWithStatusJSON(http.StatusConflict, components.ErrorResponse{\n\t\t\tCode: http.StatusConflict,\n\t\t\tMessage: message,\n\t\t\tError: message,\n\t\t})\n\t} else {\n\t\tmessage := fmt.Sprintf(\"user not found with id: %d\", 
id)\n\t\tlog.Info(message)\n\t\tc.AbortWithStatusJSON(http.StatusNotFound, components.ErrorResponse{\n\t\t\tCode: http.StatusNotFound,\n\t\t\tMessage: message,\n\t\t\tError: message,\n\t\t})\n\t}\n}", "func ListUsersHandle(service iface.Service) http.HandlerFunc {\n\treturn func(w http.ResponseWriter, r *http.Request) {\n\t\tlimit := 100\n\t\trawLimit := r.URL.Query()[\"limit\"]\n\t\tif len(rawLimit) > 0 {\n\t\t\tvar err error\n\t\t\tlimit, err = strconv.Atoi(rawLimit[0])\n\t\t\tif err != nil || limit <= 0 {\n\t\t\t\tw.WriteHeader(http.StatusBadRequest)\n\t\t\t\tfmt.Fprintf(w, \"invalid limit \\\"%s\\\"\", rawLimit[0])\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\n\t\tusers, err := service.FilterUsers(r.Context(), iface.FilterUsers{Limit: uint(limit)})\n\t\tif err != nil {\n\t\t\tlog.Log(err)\n\t\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\t\tfmt.Fprintf(w, \"service failed\")\n\t\t\treturn\n\t\t}\n\n\t\tJSON(w, r, map[string]interface{}{\n\t\t\t\"users\": users,\n\t\t})\n\t}\n}", "func GetUsers(c *gin.Context) {\n\tvar users []models.User\n\tdb := db.GetDB()\n\tdb.Find(&users)\n\tc.JSON(200, users)\n}", "func (h UserHTTP) List(w http.ResponseWriter, r *http.Request) {\n\tlistRequest := listRequestDecoder(r)\n\tusers, err := h.svc.ListUsers(r.Context(), listRequest)\n\tif err != nil {\n\t\th.logger.With(r.Context()).Errorf(\"list users error : %s\", err)\n\t\trender.Render(w, r, e.BadRequest(err, \"bad request\"))\n\t\treturn\n\t}\n\trender.Respond(w, r, users)\n}", "func ListUsers(w http.ResponseWriter, r *http.Request) {\n\tusers, err := dal.GetUsers(\"\")\n\tif err != nil {\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\treturn\n\t}\n\tcommon.WriteResponse(w, users)\n}", "func (uh UserHandler) GetAllUsers(w http.ResponseWriter, r *http.Request) {\n\tlog.Print(utils.InfoLog + \"UserHandler:GetAllUsers called\")\n\n\tvar results *[]models.User\n\tresults, err := uh.UserManager.GetUsers(); if err != nil {\n\t\tutils.ReturnWithErrorLong(w, 
*err)\n\t\tlog.Println(utils.ErrorLog + \"Insert body here\") // TODO ??\n\t\treturn\n\t}\n\tjson.NewEncoder(w).Encode(results)\n\tw.WriteHeader(http.StatusOK)\n}", "func (g Graph) GetUsers(w http.ResponseWriter, r *http.Request) {\n\tcon, err := g.initLdap()\n\tif err != nil {\n\t\tg.logger.Error().Err(err).Msg(\"Failed to initialize ldap\")\n\t\terrorcode.ServiceNotAvailable.Render(w, r, http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\t// TODO make filter configurable\n\tresult, err := g.ldapSearch(con, \"(objectClass=posixAccount)\", g.config.Ldap.BaseDNUsers)\n\n\tif err != nil {\n\t\tg.logger.Error().Err(err).Msg(\"Failed search ldap with filter: '(objectClass=posixAccount)'\")\n\t\terrorcode.ServiceNotAvailable.Render(w, r, http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\tusers := make([]*msgraph.User, 0, len(result.Entries))\n\n\tfor _, user := range result.Entries {\n\t\tusers = append(\n\t\t\tusers,\n\t\t\tcreateUserModelFromLDAP(\n\t\t\t\tuser,\n\t\t\t),\n\t\t)\n\t}\n\n\trender.Status(r, http.StatusOK)\n\trender.JSON(w, r, &listResponse{Value: users})\n}", "func (ur *UserResource) handleGetUsers(c *gin.Context) {\n\tusers, err := ur.Store.GetAllUsers()\n\tif err != nil {\n\t\tlogging.Logger.Errorln(\"[API] Failed to get all users\", err)\n\t}\n\n\tc.JSON(http.StatusOK, users)\n}", "func (cs *UserService) List() ([]UsersResponse, error) {\n\n\treq, err := cs.client.NewRequest(\"GET\", \"/users\", nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tresp, err := cs.client.client.Do(req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer resp.Body.Close()\n\n\tif err := validateResponse(resp); err != nil {\n\t\treturn nil, err\n\t}\n\n\tbodyBytes, _ := ioutil.ReadAll(resp.Body)\n\tbodyString := string(bodyBytes)\n\n\tu := &listUsersJSONResponse{}\n\terr = json.Unmarshal([]byte(bodyString), &u)\n\n\treturn u.Users, err\n}", "func getAllUsers(c *fiber.Ctx) error {\n\tcollection := mg.Db.Collection(\"users\")\n\tquery := bson.D{{}}\n\tcursor, err 
:= collection.Find(c.Context(), &query)\n\tif err != nil {\n\t\treturn c.Status(500).SendString(err.Error())\n\t}\n\tvar records []User = make([]User, 0)\n\t// iterate the cursor and decode the values\n\tif err := cursor.All(c.Context(), &records); err != nil {\n\t\treturn c.Status(404).SendString(\"There isnt any\")\n\t}\n\tvar users []User = make([]User, 0)\n\tfor i, s := range records {\n\t\ts.Password = \"\"\n\t\ts.TaskCode = \"\"\n\t\tusers = append(users, s)\n\t\tfmt.Println(i)\n\t}\n\n\treturn c.JSON(users)\n}", "func userList(w http.ResponseWriter, r *http.Request) {}", "func (g Graph) GetUsers(w http.ResponseWriter, r *http.Request) {\n\tsanitizedPath := strings.TrimPrefix(r.URL.Path, \"/graph/v1.0/\")\n\todataReq, err := godata.ParseRequest(r.Context(), sanitizedPath, r.URL.Query())\n\tif err != nil {\n\t\tg.logger.Err(err).Interface(\"query\", r.URL.Query()).Msg(\"query error\")\n\t\terrorcode.InvalidRequest.Render(w, r, http.StatusBadRequest, err.Error())\n\t\treturn\n\t}\n\tusers, err := g.identityBackend.GetUsers(r.Context(), r.URL.Query())\n\tif err != nil {\n\t\tvar errcode errorcode.Error\n\t\tif errors.As(err, &errcode) {\n\t\t\terrcode.Render(w, r)\n\t\t} else {\n\t\t\terrorcode.GeneralException.Render(w, r, http.StatusInternalServerError, err.Error())\n\t\t}\n\t\treturn\n\t}\n\n\tusers, err = sortUsers(odataReq, users)\n\tif err != nil {\n\t\tvar errcode errorcode.Error\n\t\tif errors.As(err, &errcode) {\n\t\t\terrcode.Render(w, r)\n\t\t} else {\n\t\t\terrorcode.GeneralException.Render(w, r, http.StatusInternalServerError, err.Error())\n\t\t}\n\t\treturn\n\t}\n\trender.Status(r, http.StatusOK)\n\trender.JSON(w, r, &listResponse{Value: users})\n}", "func GetUsers(c *gin.Context) {\n\tusers := []models.User{}\n\tif err := database.DBCon.Find(&users).Error; err != nil {\n\t\tc.JSON(http.StatusNotFound, structs.Error{Code: http.StatusNotFound, Error: err.Error()})\n\t\treturn\n\t}\n\n\tc.JSON(http.StatusOK, users)\n}", "func (c *UsersClient) 
List(ctx context.Context, filter string) (*[]models.User, int, error) {\n\tparams := url.Values{}\n\tif filter != \"\" {\n\t\tparams.Add(\"$filter\", filter)\n\t}\n\tresp, status, _, err := c.BaseClient.Get(ctx, base.GetHttpRequestInput{\n\t\tValidStatusCodes: []int{http.StatusOK},\n\t\tUri: base.Uri{\n\t\t\tEntity: \"/users\",\n\t\t\tParams: params,\n\t\t\tHasTenantId: true,\n\t\t},\n\t})\n\tif err != nil {\n\t\treturn nil, status, err\n\t}\n\tdefer resp.Body.Close()\n\trespBody, _ := ioutil.ReadAll(resp.Body)\n\tvar data struct {\n\t\tUsers []models.User `json:\"value\"`\n\t}\n\tif err := json.Unmarshal(respBody, &data); err != nil {\n\t\treturn nil, status, err\n\t}\n\treturn &data.Users, status, nil\n}", "func GetAllUsers(w http.ResponseWriter, r *http.Request) {\n\tusers, err := models.GetAllUsers()\n\tif err != nil {\n\t\tfmt.Println(\"Caught an error\")\n\t\tfmt.Fprintln(w, http.StatusInternalServerError)\n\t} else {\n\t\tfmt.Println(users)\n\t\tjson.NewEncoder(w).Encode(users)\n\t}\n}", "func GetUsers(c *gin.Context) {\n\trequestID := c.GetString(\"x-request-id\")\n\thelper.Logger(requestID, \"\").Infoln(\"RequestID= \", requestID)\n\t// cacheTest := helper.CacheExists(\"xxxxxxxxxx\")\n\t// helper.Logger(requestID, \"\").Infoln(\"cacheTest= \", cacheTest)\n\n\thttpCode, body, erro := helper.MakeHTTPRequest(\"GET\", \"https://api-101.glitch.me/customers\", \"\", nil, true)\n\thelper.Logger(requestID, \"\").Infoln(\"httpCode= \", httpCode)\n\thelper.Logger(requestID, \"\").Infoln(\"body= \", fmt.Sprintf(\"%s\", body))\n\thelper.Logger(requestID, \"\").Infoln(\"error= \", erro)\n\n\tvar user []models.User\n\terr := models.GetAllUsers(&user)\n\tif err != nil {\n\t\tc.AbortWithStatus(http.StatusNotFound)\n\t} else {\n\t\tc.JSON(http.StatusOK, user)\n\t}\n}", "func (h *ServiceUsersHandler) List(ctx context.Context, project, serviceName string) ([]*ServiceUser, error) {\n\t// Aiven API does not provide list operation for service users, need to get them via service 
info instead\n\tservice, err := h.client.Services.Get(ctx, project, serviceName)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn service.Users, nil\n}", "func GetUsers() {\n\tvar users []User\n\t_, err := orm.NewOrm().QueryTable(\"t_user\").Filter(\"name__contains\", \"awd\").All(&users)\n\tif err == nil {\n\t\tfor _, user := range users {\n\t\t\tfmt.Println(user.ToString())\n\t\t}\n\t}\n}", "func (h *userHandler) showUsers(ctx context.Context, rw http.ResponseWriter) {\n\n\tusers, err := h.serv.DB.UserCol.FindAll(ctx)\n\n\tif err != nil {\n\n\t\th.serv.writeResponse(ctx, rw, err.Error(), http.StatusBadRequest, nil)\n\n\t\treturn\n\t}\n\n\th.serv.writeResponsePlus(ctx, rw, \"users\", http.StatusOK, nil, users)\n}", "func (svc *Service) getAllUsersHandler(w http.ResponseWriter, r *http.Request) error {\n\t// TODO: Check errors in encoding\n\n\tusers, err := svc.invitationsAPI.GetAllUsers(svc.invitationsSource.GetAllPeople)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn json.NewEncoder(w).Encode(users)\n}", "func GetUsers(write http.ResponseWriter, request *http.Request) {\n\n\ttypeUser := request.URL.Query().Get(\"type\")\n\tpage := request.URL.Query().Get(\"page\")\n\tsearch := request.URL.Query().Get(\"search\")\n\n\tpagTemp, err := strconv.Atoi(page)\n\tif err != nil {\n\t\thttp.Error(write, \"Page value should be greater than 0\", http.StatusBadRequest)\n\t\treturn\n\t}\n\n\tpag := int64(pagTemp)\n\n\tresult, status := bd.GetUsers(IDUser, pag, search, typeUser)\n\tif status == false {\n\t\thttp.Error(write, \"Error GetUsers\", http.StatusBadRequest)\n\t\treturn\n\t}\n\twrite.Header().Set(\"Content-Type\", \"application/json\")\n\twrite.WriteHeader(http.StatusCreated)\n\tjson.NewEncoder(write).Encode(result)\n}", "func getAllUsers(w http.ResponseWriter, r *http.Request) {\n\tctx := r.Context()\n\tusers, err := users.GetAllUsers(ctx)\n\tif err != nil {\n\t\tlog.Error(ctx, \"database problem\", \"error\", 
err)\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\tfmt.Fprintf(w, \"null\")\n\t\treturn\n\t}\n\tdata, err := json.Marshal(users)\n\tif err != nil {\n\t\tlog.Error(ctx, \"json marshaling problem\", \"error\", err)\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\tfmt.Fprintf(w, \"null\")\n\t\treturn\n\t}\n\tfmt.Fprintf(w, string(data))\n}", "func GetUsers(c *gin.Context) {\n\tdb := dbConn()\n\tselDB, err := db.Query(\"CALL read_users()\")\n\tif err != nil {\n\t\tpanic(err.Error)\n\t}\n\n\tuser := User{}\n\tusers := []User{}\n\tfor selDB.Next() {\n\t\tvar id, username, useremail, fname, lname, password, passwordchange, passwordexpired, lastlogon, accountlocked string\n\t\terr = selDB.Scan(&id, &username, &useremail, &fname, &lname, &password, &passwordchange, &passwordexpired, &lastlogon, &accountlocked)\n\t\tif err != nil {\n\t\t\tlog.Println(err)\n\t\t\tc.JSON(500, gin.H{\n\t\t\t\t\"error\": err.Error(),\n\t\t\t})\n\t\t}\n\t\tuser.ID = id\n\t\tuser.UserName = username\n\t\tuser.UserEmail = useremail\n\t\tuser.FName = fname\n\t\tuser.LName = lname\n\t\tuser.Password = password\n\t\tuser.PasswordChange = passwordchange\n\t\tuser.PasswordExpired = passwordexpired\n\t\tuser.LastLogon = lastlogon\n\t\tuser.AccountLocked = accountlocked\n\t\tiid, err := strconv.Atoi(id)\n\t\tif err != nil {\n\t\t\tpanic(err.Error)\n\t\t}\n\t\tselDB02, err := db.Query(\"CALL read_access_userid(?)\", iid)\n\t\tif err != nil {\n\t\t\tpanic(err.Error)\n\t\t}\n\t\taccess := Access{}\n\t\taccessList := []Access{}\n\t\tfor selDB02.Next() {\n\t\t\tvar accessid, userid, courtid, caseaccess, personaccess, accountingaccess, juryaccess, attorneyaccess, configaccess, securitylevel, sealedcase string\n\t\t\terr := selDB02.Scan(&accessid, &userid, &courtid, &caseaccess, &personaccess, &accountingaccess, &juryaccess, &attorneyaccess, &configaccess, &securitylevel, &sealedcase)\n\t\t\tif err != nil {\n\t\t\t\tlog.Println(err)\n\t\t\t\tc.JSON(500, gin.H{\n\t\t\t\t\t\"error\": 
err.Error(),\n\t\t\t\t})\n\t\t\t}\n\t\t\taccess.AccessID = accessid\n\t\t\taccess.IDUser = userid\n\t\t\taccess.IDCourt = courtid\n\t\t\taccess.CaseAccess = caseaccess\n\t\t\taccess.PersonAccess = personaccess\n\t\t\taccess.AccountingAccess = accountingaccess\n\t\t\taccess.JuryAccess = juryaccess\n\t\t\taccess.AttorneyAccess = attorneyaccess\n\t\t\taccess.ConfigAccess = configaccess\n\t\t\taccess.SecurityLevel = securitylevel\n\t\t\taccess.SealedCase = sealedcase\n\t\t\taccessList = append(accessList, access)\n\t\t}\n\t\tuser.AccessList = accessList\n\t\tusers = append(users, user)\n\t}\n\n\tc.JSON(200, gin.H{\n\t\t\"result\": users,\n\t})\n\n\tdefer db.Close()\n}", "func ListAllUsers(w http.ResponseWriter, r *http.Request){\n\n\trows, err:= db.Query(\"SELECT * FROM users LIMIT 20\")\n\n\tif err != nil {\n\t\tfmt.Println(err.Error())\n\t\tpanic(\"failed to connect database\")\n\t}\n\n\tlistUsers := Users{}\n\tfor rows.Next() {\n\t\tp := User{}\n\t\tif err := rows.Scan(&p.ID, &p.Name, &p.Score); err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\t\tlistUsers = append(listUsers, p)\n\n\t}\n\tdefer rows.Close()\n\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\tw.WriteHeader(200)\n\tjson.NewEncoder(w).Encode(listUsers)\n}", "func GetUsers(c *gin.Context) {\n\tvar users []models.User\n\tpagination := models.GeneratePaginationFromRequest(c)\n\terr := repository.GetAllUsersPaged(&users, &pagination)\n\tif err != nil {\n\t\tc.AbortWithStatus(http.StatusNotFound)\n\t} else {\n\t\tc.JSON(http.StatusOK, users)\n\t}\n}", "func (s *AutograderService) GetUsers(ctx context.Context, in *pb.Void) (*pb.Users, error) {\n\tusr, err := s.getCurrentUser(ctx)\n\tif err != nil {\n\t\ts.logger.Errorf(\"GetUsers failed: authentication error: %w\", err)\n\t\treturn nil, ErrInvalidUserInfo\n\t}\n\tif !usr.IsAdmin {\n\t\ts.logger.Error(\"GetUsers failed: user is not admin\")\n\t\treturn nil, status.Errorf(codes.PermissionDenied, \"only admin can access other users\")\n\t}\n\tusrs, err := 
s.getUsers()\n\tif err != nil {\n\t\ts.logger.Errorf(\"GetUsers failed: %w\", err)\n\t\treturn nil, status.Errorf(codes.NotFound, \"failed to get users\")\n\t}\n\treturn usrs, nil\n}", "func AllUsersGet(c *gin.Context) {\n\tmeta := model.TableMetaFromQuery(c)\n\tginutils.WriteGinJSON(c, http.StatusOK, model.AllUsers(meta))\n}", "func (s *Shell) ListUsers(_ *cli.Context) (err error) {\n\tresp, err := s.HTTP.Get(\"/v2/users/\", nil)\n\tif err != nil {\n\t\treturn s.errorOut(err)\n\t}\n\tdefer func() {\n\t\tif cerr := resp.Body.Close(); cerr != nil {\n\t\t\terr = multierr.Append(err, cerr)\n\t\t}\n\t}()\n\n\treturn s.renderAPIResponse(resp, &AdminUsersPresenters{})\n}", "func (c *MysqlUserController) GetUsers(w http.ResponseWriter, r *http.Request) {\n\tapiKey := r.Header.Get(\"apiKey\")\n\tresult, err := c.service.GetUsers(apiKey)\n\tif err != nil {\n\t\tstatusCode := int(http.StatusInternalServerError)\n\t\topenapi.EncodeJSONResponse(result, &statusCode, w)\n\t\treturn\n\t}\n\topenapi.EncodeJSONResponse(result, nil, w)\n}", "func GetUsers(c echo.Context) error {\n\tu := []*models.User{}\n\tfor _, v := range users {\n\t\tu = append(u, v)\n\t}\n\n\treturn c.JSON(http.StatusOK, u)\n}", "func ListAllUsers(w http.ResponseWriter, r *http.Request) {\n\tdefer func() {\n\t\tif err := recover(); err != nil {\n\t\t\thelper.WriteHTTPResponse(r.Context(), w, http.StatusBadRequest, \"Mohomaaf ...dsb\", nil, nil)\n\t\t}\n\t}()\n\n\tfLog := userMgmtLogger.WithField(\"func\", \"ListAllUsers\").WithField(\"RequestID\", r.Context().Value(constants.RequestID)).WithField(\"path\", r.URL.Path).WithField(\"method\", r.Method)\n\n\tiauthctx := r.Context().Value(constants.HansipAuthentication)\n\tif iauthctx == nil {\n\t\thelper.WriteHTTPResponse(r.Context(), w, http.StatusUnauthorized, \"You are not authorized to access this resource\", nil, nil)\n\t\treturn\n\t}\n\n\tfLog.Trace(\"Listing Users\")\n\tpageRequest, err := helper.NewPageRequestFromRequest(r)\n\tif err != nil 
{\n\t\tfLog.Errorf(\"helper.NewPageRequestFromRequest got %s\", err.Error())\n\t\thelper.WriteHTTPResponse(r.Context(), w, http.StatusBadRequest, err.Error(), nil, nil)\n\t\treturn\n\t}\n\tusers, page, err := UserRepo.ListUser(r.Context(), pageRequest)\n\tif err != nil {\n\t\tfLog.Errorf(\"UserRepo.ListUser got %s\", err.Error())\n\t\thelper.WriteHTTPResponse(r.Context(), w, http.StatusInternalServerError, err.Error(), nil, nil)\n\t\treturn\n\t}\n\tsusers := make([]*SimpleUser, len(users))\n\tfor i, v := range users {\n\t\tsusers[i] = &SimpleUser{\n\t\t\tRecID: v.RecID,\n\t\t\tEmail: v.Email,\n\t\t\tEnabled: v.Enabled,\n\t\t\tSuspended: v.Suspended,\n\t\t}\n\t}\n\tret := make(map[string]interface{})\n\tret[\"users\"] = susers\n\tret[\"page\"] = page\n\thelper.WriteHTTPResponse(r.Context(), w, http.StatusOK, \"List of all user paginated\", nil, ret)\n}", "func (ctlr *userServiceController) GetUsers(ctx context.Context, req *mygrpc.GetUsersRequest) (*mygrpc.GetUsersResponse, error) {\n\tresultMap := ctlr.userService.GetUsersByIDs(req.GetIds())\n\n\tresp := &mygrpc.GetUsersResponse{}\n\tfor _, u := range resultMap {\n\t\tresp.Users = append(resp.Users, marshalUser(u))\n\t}\n\treturn resp, nil\n}", "func GetUsers() UsersResponse {\n\tvar users UsersResponse\n\tresponse := network.Get(\"admin/users\")\n\tjson.Unmarshal(response, &users)\n\n\treturn users\n}", "func (c *Client) ListUsers() (*http.Response, error) {\n\treturn c.get(\"/user/listusers\", nil)\n}", "func hGetUsers(c echo.Context) error {\n\tvar e httpError\n\t// read from token user id\n\tvar tokenUserID int64\n\ttokenUserID = 2\n\n\tusers, errGetUsers := blog.GetAllUsers(tokenUserID, 50)\n\tif errGetUsers != nil {\n\t\te.TheError = errGetUsers.Error()\n\t\treturn c.JSON(http.StatusInternalServerError, e)\n\t}\n\treturn c.JSON(http.StatusOK, users)\n}", "func HandleUserGetAll(c *gin.Context) {\n\tvar u User\n\tusers, err := u.GetAll()\n\tif err != nil {\n\t\tc.JSON(http.StatusInternalServerError, 
gin.H{\n\t\t\t\"code\": 1,\n\t\t\t\"msg\": err.Error(),\n\t\t\t\"data\": gin.H{},\n\t\t})\n\t\treturn\n\t}\n\tc.JSON(http.StatusOK, gin.H{\n\t\t\"code\": 0,\n\t\t\"msg\": \"ok\",\n\t\t\"data\": users,\n\t})\n}", "func GetAllUsers(res http.ResponseWriter, req *http.Request) {\n\tres.Header().Set(\"Content-Type\", \"application/json\")\n\tquery := bson.M{}\n\tselector := bson.M{\n\t\t\"_id\": 1,\n\t\t\"name\": 1,\n\t\t\"email\": 1,\n\t}\n\n\tusers, err := db.GetAllUsers(query, selector)\n\tif err != nil {\n\t\tif err.Error() == mgo.ErrNotFound.Error() {\n\t\t\tmsg := \"User not found\"\n\n\t\t\tutils.ReturnErrorResponse(http.StatusNotFound, msg, \"\", nil, nil, res)\n\t\t\treturn\n\t\t}\n\n\t\tmsg := \"Error occurred while getting user details\"\n\t\tutils.ReturnErrorResponse(http.StatusBadRequest, msg, \"\", nil, nil, res)\n\t\treturn\n\t}\n\n\tmsg := \"Your request processed successfully\"\n\tutils.ReturnSuccessReponse(http.StatusOK, msg, users, res)\n}", "func (u *UserCtr) GetUserAll(c *gin.Context) {\n\tusers, err := model.UserAll(u.DB)\n\tif err != nil {\n\t\tresp := errors.New(err.Error())\n\t\tc.JSON(http.StatusInternalServerError, resp)\n\t\treturn\n\t}\n\n\tif len(users) == 0 {\n\t\tc.JSON(http.StatusOK, make([]*model.User, 0))\n\t\treturn\n\t}\n\n\tc.JSON(http.StatusOK, gin.H{\n\t\t\"result\": users,\n\t})\n\treturn\n}", "func (uc UserController) getUsers(response http.ResponseWriter, request *http.Request, p httprouter.Params) {\n\tresponse.Header().Add(\"content-type\", \"application/json\")\n\tvar UserArray []Users\n\tctx, _ := context.WithTimeout(context.Background(), 10*time.Second)\n\tcursor, err := uc.collection.Find(ctx, bson.M{})\n\tif err != nil {\n\t\tresponse.WriteHeader(http.StatusInternalServerError)\n\t\tresponse.Write([]byte(`{\"message: \"` + err.Error() + `\"}\"`))\n\t\treturn\n\t}\n\tdefer cursor.Close(ctx)\n\n\tfor cursor.Next(ctx) {\n\t\tvar user Users\n\t\tcursor.Decode(&user)\n\t\tUserArray = append(UserArray, user)\n\t}\n\n\tif err 
:= cursor.Err(); err != nil {\n\t\tresponse.WriteHeader(http.StatusInternalServerError)\n\t\tresponse.Write([]byte(`{\"message: \"` + err.Error() + `\"}\"`))\n\t\treturn\n\t}\n\tjson.NewEncoder(response).Encode(UserArray)\n}", "func (h *Handler) list() http.HandlerFunc {\n\treturn func(w http.ResponseWriter, r *http.Request) {\n\t\tentities, err := h.UserDAO.FetchAll(r.Context())\n\t\tswitch {\n\t\tcase errors.Is(err, errorx.ErrNoUser):\n\t\t\tmsg := &errorMessage{\n\t\t\t\tMessage: fmt.Sprintf(\"no users exist\"),\n\t\t\t}\n\t\t\tresponse.JSON(w, http.StatusNotFound, msg)\n\t\t\treturn\n\t\tcase err != nil:\n\t\t\tmsg := &errorMessage{\n\t\t\t\tError: err.Error(),\n\t\t\t\tMessage: \"user datastore error\",\n\t\t\t}\n\t\t\tresponse.JSON(w, http.StatusInternalServerError, msg)\n\t\t\treturn\n\t\tdefault:\n\t\t\tresponse.JSON(w, http.StatusOK, entities)\n\t\t}\n\t}\n}", "func (s *UsersService) GetAll(limit, page int64) (*Users, error) {\n\tvar params = map[string]string{}\n\tparams[\"limit\"] = strconv.FormatInt(limit, 10)\n\tparams[\"page\"] = strconv.FormatInt(page, 10)\n\n\tvar data Users\n\terr := s.client.get(\"/users\", params, nil, &data)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &data, err\n}", "func (h *User) List(w http.ResponseWriter, r *http.Request) {\n\tlimit, offset := utils.GetPaginationParams(r.URL.Query())\n\tresp, err := h.Storage.GetUserList(limit, offset)\n\tif err != nil {\n\t\tlog.Println(err)\n\t\tR.JSON500(w)\n\t\treturn\n\t}\n\n\tif len(resp) < 1 {\n\t\tR.JSON404(w)\n\t\treturn\n\t}\n\n\tR.JSON200(w, resp)\n}", "func GetUsers(writer http.ResponseWriter, r *http.Request) {\n\tusers := []models.AssocUser{}\n\n\tif !initDB {\n\t\tdb = utils.GetConnection()\n\t}\n\n\tsqlDB, err := db.DB()\n\tif err != nil {\n\t\tlog.Fatal(\"Error clossing the DB\")\n\t} else {\n\t\tdefer sqlDB.Close()\n\t}\n\tdb.Find(&users)\n\tjUsers, _ := json.Marshal(users)\n\tutils.SendResponse(writer, http.StatusOK, jUsers)\n}", "func GetUserList(w 
http.ResponseWriter, r *http.Request) {\n\n\tusers, err := user.GetUserList(r)\n\n\tif err != nil {\n\t\thttpext.AbortAPI(w, err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\thttpext.SuccessDataAPI(w, \"Ok\", users)\n}", "func GetAllUser(w http.ResponseWriter, r *http.Request) {\n\temail, err := getEmailFromTokenHeader(r)\n\tif err != nil || email == \"\" {\n\t\thttp.Error(w, \"Invalid Token\", http.StatusUnauthorized)\n\t\treturn\n\t}\n\tw.Header().Set(\"Context-Type\", \"application/x-www-form-urlencoded\")\n\tw.Header().Set(\"Access-Control-Allow-Origin\", \"*\")\n\t// get all the users in the db\n\tusers, err := database.GetAllUsers()\n\tif err != nil {\n\t\tlogrus.Errorf(\"Unable to get all user. %v\", err)\n\t\thttp.Error(w, \"\", http.StatusInternalServerError)\n\t\treturn\n\t}\n\t// send all the users as response\n\terr = json.NewEncoder(w).Encode(&models.UserList{Users: users})\n\tif err != nil {\n\t\tlogrus.Errorf(err.Error())\n\t\treturn\n\t}\n}", "func (a *App) GetAllUsers(w http.ResponseWriter, r *http.Request) {\n\tusers, err := models.GetAllUsers(a.DB)\n\tif err != nil {\n\t\tresponses.ERROR(w, http.StatusInternalServerError, err)\n\t\treturn\n\t}\n\tresponses.JSON(w, http.StatusOK, users)\n\treturn\n}", "func GetAllUsers() (users []User, err error) {\n\trows, err := db.DbClient.Query(\"Select * from reg_users;\")\n\tif err != nil {\n\t\treturn users, err\n\t}\n\tdefer rows.Close()\n\tfor rows.Next() {\n\t\tu := User{}\n\t\tif err := rows.Scan(&u.ID, &u.Name, &u.Password, &u.Email, &u.Registered, &u.Activated); err != nil {\n\t\t\tlog.Error(err)\n\t\t\treturn users, err\n\t\t}\n\t\tusers = append(users, u)\n\t}\n\treturn\n}", "func (w *ServerInterfaceWrapper) GetUsers(ctx echo.Context) error {\n\tvar err error\n\n\tctx.Set(\"OAuth.Scopes\", []string{\"\"})\n\n\t// Parameter object where we will unmarshal all parameters from the context\n\tvar params GetUsersParams\n\t// ------------- Optional query parameter \"page_size\" 
-------------\n\n\terr = runtime.BindQueryParameter(\"form\", true, false, \"page_size\", ctx.QueryParams(), &params.PageSize)\n\tif err != nil {\n\t\treturn echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf(\"Invalid format for parameter page_size: %s\", err))\n\t}\n\n\t// ------------- Optional query parameter \"page_number\" -------------\n\n\terr = runtime.BindQueryParameter(\"form\", true, false, \"page_number\", ctx.QueryParams(), &params.PageNumber)\n\tif err != nil {\n\t\treturn echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf(\"Invalid format for parameter page_number: %s\", err))\n\t}\n\n\t// Invoke the callback with all the unmarshalled arguments\n\terr = w.Handler.GetUsers(ctx, params)\n\treturn err\n}", "func (a *App) UsersGet(w http.ResponseWriter, r *http.Request) {\n\tusername := chi.URLParam(r, \"username\")\n\tctx := r.Context()\n\tuserID, ok := ctx.Value(middleware.UserCtxKeys(0)).(int64)\n\tquserID, err := a.validateUsernameAndGetID(username)\n\tif err != nil {\n\t\tif err == sql.ErrNoRows {\n\t\t\tutils.RespondWithError(w, http.StatusNotFound, \"User not found\")\n\t\t\treturn\n\t\t}\n\t\tutils.RespondWithError(w, http.StatusBadRequest, err.Error())\n\t\treturn\n\t}\n\tlog.Println(quserID)\n\tvar user *User\n\tif ok {\n\t\tuser, err = a.dbAuthenticatedGetUser(userID, quserID)\n\t} else {\n\t\tuser, err = a.dbGetUser(quserID)\n\t}\n\tif err != nil {\n\t\tutils.RespondWithError(w, http.StatusInternalServerError, err.Error())\n\t\treturn\n\t}\n\tutils.RespondWithJSON(w, http.StatusOK, &user)\n}", "func (a *API) listUsers(w http.ResponseWriter, req *http.Request) {\n\tvar (\n\t\tpath string\n\t\tusers cmap.ConcurrentMap\n\t\twg sync.WaitGroup\n\t)\n\t// Force reload policy to get the newest\n\t_ = a.policyEngine.LoadPolicy()\n\tif username := req.FormValue(\"name\"); username != \"\" {\n\t\tpath = common.Path(model.DefaultUsersPrefix, common.Hash(username, crypto.MD5))\n\t} else {\n\t\tpath = 
common.Path(model.DefaultUsersPrefix)\n\t}\n\tresp, err := a.etcdcli.DoGet(path, etcdv3.WithPrefix(),\n\t\tetcdv3.WithSort(etcdv3.SortByKey, etcdv3.SortAscend))\n\tif err != nil {\n\t\ta.respondError(w, apiError{\n\t\t\tcode: http.StatusInternalServerError,\n\t\t\terr: err,\n\t\t})\n\t\treturn\n\t}\n\tusers = cmap.New()\n\tfor _, ev := range resp.Kvs {\n\t\twg.Add(1)\n\t\tgo func(evv []byte) {\n\t\t\tdefer wg.Done()\n\t\t\tvar (\n\t\t\t\tu model.User\n\t\t\t\tp [][]string\n\t\t\t)\n\t\t\t_ = json.Unmarshal(evv, &u)\n\t\t\tp = a.policyEngine.GetFilteredPolicy(0, u.Username)\n\t\t\tfor i, v := range p {\n\t\t\t\t// The first element is username, so just remove it.\n\t\t\t\tp[i] = v[1:]\n\t\t\t}\n\t\t\tusers.Set(u.Username, p)\n\t\t}(ev.Value)\n\t}\n\twg.Wait()\n\ta.respondSuccess(w, http.StatusOK, users)\n}", "func (c *Client) GetUsers(queryParams ...string) (map[string]interface{}, error) {\n\tlog.info(\"========== GET CLIENT USERS ==========\")\n\turl := buildURL(path[\"users\"])\n\n\treturn c.do(\"GET\", url, \"\", queryParams)\n}", "func (uc UserController) GetAllUsers(c *doze.Context) doze.ResponseSender {\n\treturn doze.NewOKJSONResponse(users)\n}", "func (mgr *UserMgr) GetUsers() []User {\n\tusers := []User{}\n\tmgr.db.Select(&users, \"SELECT username, email, password, role FROM users ORDER BY id desc\")\n\treturn users\n}", "func GetUsers(w http.ResponseWriter, r *http.Request, db *gorp.DbMap, c *businesscontext.Ctx) error {\n\tusers, err := user.LoadUsers(db)\n\tif err != nil {\n\t\treturn sdk.WrapError(err, \"GetUsers: Cannot load user from db\")\n\t}\n\treturn WriteJSON(w, r, users, http.StatusOK)\n}", "func (u UserController) GetUsers(w http.ResponseWriter, r *http.Request, p httprouter.Params) {\n\tusers, err := u.userRepository.GetAll()\n\tif err != nil {\n\t\thttp.Error(w, \"service unavailable\", http.StatusServiceUnavailable)\n\t\treturn\n\t}\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\tjson.NewEncoder(w).Encode(users)\n}", "func (u 
*UserHandler) GetAllUsers(c *gin.Context) {\n\tusers, err := (*u.UserService).GetAll()\n\tif err != nil {\n\t\t_ = c.Error(err).SetType(gin.ErrorTypePublic)\n\t\treturn\n\t}\n\n\tvar userList []*dto.User\n\tfor _, user := range users {\n\t\tuserList = append(userList, adapter.UserDomainToDTO(user))\n\t}\n\n\tc.JSON(http.StatusOK, userList)\n}", "func (serv *AppServer) GetUsers() []*User {\n\tret := []*User{}\n\tlines := strings.Split(serv.ServerRequest([]string{\"GetUsers\"}), \"[|]\")\n\tfor _, line := range lines[:len(lines)-1] {\n\t\tret = append(ret, ParseUser(line))\n\t}\n\treturn ret\n}", "func (cli *OpsGenieUserV2Client) List(req userv2.ListUsersRequest) (*userv2.ListUsersResponse, error) {\n\tvar response userv2.ListUsersResponse\n\terr := cli.sendGetRequest(&req, &response)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &response, nil\n}", "func GetAllUsers(w http.ResponseWriter, req *http.Request) {\n\trs, err := db.GetAll()\n\tif err != nil {\n\t\thandleError(err, \"Failed to load database Users: %v\", w)\n\t\treturn\n\t}\n\n\tbs, err := json.Marshal(rs)\n\tif err != nil {\n\t\thandleError(err, \"Failed to load marshal data: %v\", w)\n\t\treturn\n\t}\n\n\tw.Write(bs)\n}", "func (u *User) List(ctx context.Context, w http.ResponseWriter, r *http.Request, params map[string]string) error {\n\tctx, span := trace.StartSpan(ctx, \"handlers.User.List\")\n\tdefer span.End()\n\n\tusers, err := user.List(ctx, u.db)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn web.Respond(ctx, w, users, http.StatusOK)\n}", "func GetAllUsers(dbmap *gorp.DbMap) func(w http.ResponseWriter, r *http.Request) {\n\n return usersHandler(nil, func(r *http.Request) *[]models.User {\n var users []models.User\n _, dbError := dbmap.Select(&users, \"select * from \\\"user\\\"\")\n if dbError != nil {\n log.Print(dbError)\n }\n\n return &users\n })\n\n}", "func GetAllUsers(context *fiber.Ctx) error {\n\tvar users = repository.GetAllUsers()\n\n\tif users == nil 
{\n\t\tlog.Printf(\"database is empty\")\n\t\treturn context.Status(404).JSON(&fiber.Map{\"response\": \"not found\"})\n\t} else {\n\t\treturn context.Status(200).JSON(users)\n\t}\n}", "func (uc UserController) GetAllUsers(c rest.Context) rest.ResponseSender {\n\treturn rest.NewOKJSONResponse(users)\n}", "func (s *userService) GetAll(ctx context.Context, req *pb.Request) (rsp *pb.Response, err error) {\n\tif req.PageNum == 0 || req.PageSize == 0 {\n\t\treq.PageNum = 1\n\t\treq.PageSize = 10\n\t}\n\tusers, err := s.dao.GetAllUsers(ctx, req)\n\tif err != nil {\n\t\treturn\n\t}\n\trsp = &pb.Response{\n\t\tUsers: users,\n\t}\n\treturn\n}", "func (UserService) List(ctx context.Context, gdto dto.GeneralListDto) ([]model.User, int64) {\n\tcols := \"*\"\n\tgdto.Q, cols = dataPermService.DataPermFilter(ctx, \"users\", gdto)\n\treturn userDao.List(gdto, cols)\n}", "func GetUsers(db *sql.DB) ([]models.UserResponse, error) {\n\n\trows, err := db.Query(\"SELECT id, username, email, createdAt FROM users\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tpersons := make([]models.UserResponse, 0)\n\n\tfor rows.Next() {\n\t\tvar id int\n\t\tvar username string\n\t\tvar email string\n\t\tvar createdAt time.Time\n\t\terr = rows.Scan(&id, &username, &email, &createdAt)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tpersons = append(persons, models.UserResponse{ID: id, Username: username, CreatedAt: createdAt})\n\t}\n\treturn persons, nil\n}", "func userIndex(w http.ResponseWriter, r *http.Request) {\n w.Header().Set(\"Access-Control-Allow-Origin\", \"*\")\n\tdb := co.DbConnection(dbc)\n\t// DB query to get all the users\n\tresults, err := db.Query(\"SELECT user_name FROM members\")\n\tif err != nil {\n\t\tpanic(err.Error()) // proper error handling instead of panic in your app\n\t}\n\tvar members []string\n\tfor results.Next() {\n\t\tvar name string\n\t\terr = results.Scan(&name)\n\t\tif err != nil {\n\t\t\tpanic(err.Error()) // proper error handling instead of panic in your 
app\n\t\t}\n\t\tmembers = append(members, name)\n\t}\n\tresults.Close()\n\tdb.Close()\n\tjsonPrint(w, members)\n return\n}", "func allUsers(w http.ResponseWriter, r *http.Request) {\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\tvar users []User\n\tdb.Scopes(Paginate(r)).Find(&users)\n\terr := json.NewEncoder(w).Encode(users)\n\tlog.ErrorHandler(err)\n\tlog.AccessHandler(r, 200)\n\treturn\n}", "func (u *usecase) GetAll(ctx context.Context) ([]*User, error) {\n\tusers, err := u.repository.GetAll(ctx)\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"error fetching all users\")\n\t}\n\treturn users, nil\n}", "func GetUsers(c router.Context) (interface{}, error) {\n\t// get the data from the request and parse it as structure\n\tdata := c.Param(`data`).(UserId)\n\n\t// Validate the inputed data\n\terr := data.Validate()\n\tif err != nil {\n\t\tif _, ok := err.(validation.InternalError); ok {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, status.ErrStatusUnprocessableEntity.WithValidationError(err.(validation.Errors))\n\t}\n\tstub := c.Stub()\n\tqueryString := fmt.Sprintf(\"{\\\"selector\\\":{\\\"_id\\\":{\\\"$ne\\\":\\\"%s\\\"},\\\"doc_type\\\":\\\"%s\\\"}}\", data.ID, utils.DocTypeUser)\n\tresultsIterator, err := stub.GetQueryResult(queryString)\n\tif err != nil {\n\t\tfmt.Println(err)\n\t\treturn nil, status.ErrInternal.WithError(err)\n\t}\n\n\tdefer resultsIterator.Close()\n\n\t// buffer is a JSON array containing QueryResults\n\tvar buffer bytes.Buffer\n\tbuffer.WriteString(\"{\")\n\tbuffer.WriteString(\"\\\"users\\\": [\")\n\taArrayMemberAlreadyWritten := false\n\tfor resultsIterator.HasNext() {\n\t\tqueryResponse, err2 := resultsIterator.Next()\n\t\tif err2 != nil {\n\t\t\treturn nil, status.ErrInternal.WithError(err2)\n\t\t}\n\n\t\t// Add a comma before array members, suppress it for the first array member\n\t\tif aArrayMemberAlreadyWritten == true {\n\t\t\tbuffer.WriteString(\",\")\n\t\t}\n\t\tuserData := UserResponse{}\n\t\terr3 := 
json.Unmarshal(queryResponse.Value, &userData)\n\t\tif err3 != nil {\n\t\t\treturn nil, status.ErrInternal.WithError(err3)\n\t\t}\n\n\t\tuserData.ID = queryResponse.Key\n\t\tuserDataBytes, _ := json.Marshal(userData)\n\n\t\tbuffer.WriteString(string(userDataBytes))\n\t\taArrayMemberAlreadyWritten = true\n\t}\n\tbuffer.WriteString(\"]}\")\n\n\t//return the response\n\treturn buffer.Bytes(), nil\n}", "func (c *EcomClient) GetUsers(ctx context.Context) ([]*UserResponse, error) {\n\turi := c.endpoint + \"/users\"\n\tres, err := c.request(http.MethodGet, uri, nil)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"request failed: %w\", err)\n\t}\n\tdefer res.Body.Close()\n\n\tif res.StatusCode >= 400 {\n\t\treturn nil, fmt.Errorf(\"%s: %w\", res.Status, err)\n\t}\n\n\tvar userContainer UserContainer\n\tif err := json.NewDecoder(res.Body).Decode(&userContainer); err != nil {\n\t\treturn nil, errors.Wrapf(err, \"json decode url=%q\", uri)\n\t}\n\treturn userContainer.Data, nil\n}", "func UserListAll(w http.ResponseWriter, r *http.Request) {\n\n\tvar err error\n\tvar pageSize int\n\tvar paginatedUsers auth.PaginatedUsers\n\n\t// Init output\n\toutput := []byte(\"\")\n\n\t// Add content type header to the response\n\tcontentType := \"application/json\"\n\tcharset := \"utf-8\"\n\tw.Header().Add(\"Content-Type\", fmt.Sprintf(\"%s; charset=%s\", contentType, charset))\n\n\t// Grab context references\n\trefStr := gorillaContext.Get(r, \"str\").(stores.Store)\n\trefRoles := gorillaContext.Get(r, \"auth_roles\").([]string)\n\n\t// Grab url path variables\n\turlValues := r.URL.Query()\n\tpageToken := urlValues.Get(\"pageToken\")\n\tstrPageSize := urlValues.Get(\"pageSize\")\n\tprojectName := urlValues.Get(\"project\")\n\tprojectUUID := \"\"\n\n\tif projectName != \"\" {\n\t\tprojectUUID = projects.GetUUIDByName(projectName, refStr)\n\t\tif projectUUID == \"\" {\n\t\t\terr := APIErrorNotFound(\"ProjectUUID\")\n\t\t\trespondErr(w, err)\n\t\t\treturn\n\t\t}\n\t}\n\n\tif strPageSize != 
\"\" {\n\t\tif pageSize, err = strconv.Atoi(strPageSize); err != nil {\n\t\t\tlog.Errorf(\"Pagesize %v produced an error while being converted to int: %v\", strPageSize, err.Error())\n\t\t\terr := APIErrorInvalidData(\"Invalid page size\")\n\t\t\trespondErr(w, err)\n\t\t\treturn\n\t\t}\n\t}\n\n\t// check that user is indeed a service admin in order to be priviledged to see full user info\n\tpriviledged := auth.IsServiceAdmin(refRoles)\n\n\t// Get Results Object - call is always priviledged because this handler is only accessible by service admins\n\tif paginatedUsers, err = auth.PaginatedFindUsers(pageToken, int32(pageSize), projectUUID, priviledged, refStr); err != nil {\n\t\terr := APIErrorInvalidData(\"Invalid page token\")\n\t\trespondErr(w, err)\n\t\treturn\n\t}\n\n\t// Output result to JSON\n\tresJSON, err := paginatedUsers.ExportJSON()\n\n\tif err != nil {\n\t\terr := APIErrExportJSON()\n\t\trespondErr(w, err)\n\t\treturn\n\t}\n\n\t// Write response\n\toutput = []byte(resJSON)\n\trespondOK(w, output)\n\n}" ]
[ "0.77413064", "0.76049715", "0.75767225", "0.7560748", "0.7475642", "0.74546", "0.74237984", "0.73957825", "0.7393577", "0.73627543", "0.73378384", "0.72897553", "0.72844577", "0.7261622", "0.72614956", "0.7254002", "0.72333294", "0.7209193", "0.7197647", "0.71920323", "0.713491", "0.71262527", "0.7123355", "0.70965743", "0.7064692", "0.70629674", "0.7050243", "0.70381474", "0.70371675", "0.70284104", "0.70257306", "0.7024374", "0.70189476", "0.7017125", "0.7014344", "0.7003844", "0.70016146", "0.6999788", "0.6992649", "0.6992396", "0.6986773", "0.6981682", "0.6966427", "0.6964424", "0.6956028", "0.695575", "0.69538933", "0.69463867", "0.69463235", "0.6946047", "0.6942803", "0.6935161", "0.69258964", "0.6923909", "0.6922941", "0.6906425", "0.69058704", "0.6898161", "0.6888485", "0.6885382", "0.6875799", "0.6873976", "0.6869259", "0.68668354", "0.686286", "0.68512875", "0.68381137", "0.68295324", "0.68294466", "0.68245655", "0.68191177", "0.68157667", "0.6813053", "0.68035334", "0.6796124", "0.6789523", "0.67876416", "0.6784302", "0.6782266", "0.67685735", "0.676751", "0.67673934", "0.6744833", "0.673032", "0.6728018", "0.67265326", "0.67237747", "0.6716131", "0.6714956", "0.67139983", "0.6712402", "0.67074966", "0.6707187", "0.6688783", "0.66762954", "0.66694957", "0.66659707", "0.6655547", "0.66455907", "0.6630085" ]
0.77894056
0
PostUserHandler crea un usuario en la base de datos
PostUserHandler создает пользователя в базе данных
func PostUserHandler(w http.ResponseWriter, r *http.Request) { var user User err := json.NewDecoder(r.Body).Decode(&user) if err != nil { panic(err) } user.CreateAt = time.Now() id++ k := strconv.Itoa(id) Listusers[k] = user w.Header().Set("Content-Type", "application/json") j, err := json.Marshal(user) if err != nil { panic(err) } w.WriteHeader(http.StatusCreated) w.Write(j) }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func PostUserHandler(w http.ResponseWriter, r *http.Request) {\n\tvar user models.User\n\terr := json.NewDecoder(r.Body).Decode(&user)\n\tif err != nil {\n\t\tlog.Println(\"Error al parsear usuario\")\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\treturn\n\t}\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\tstatus := users.CreateUser(user)\n\tw.WriteHeader(status)\n}", "func (a *api) h_POST_users(c *gin.Context) {\n\tusr := &User{}\n\tif a.errorResponse(c, bindAppJson(c, usr)) {\n\t\treturn\n\t}\n\ta.logger.Info(\"Creating new user \", usr)\n\tmu := a.user2muser(usr)\n\tif a.errorResponse(c, a.Dc.CreateUser(mu)) {\n\t\treturn\n\t}\n\n\tif usr.Password != nil {\n\t\tif err := a.Dc.SetUserPasswd(usr.Login, ptr2string(usr.Password, \"\")); err != nil {\n\t\t\ta.logger.Warn(\"Could not set user password for new user \", usr.Login, \", err=\", err, \". Will leave it intact\")\n\t\t}\n\t}\n\n\tw := c.Writer\n\turi := composeURI(c.Request, usr.Login)\n\tw.Header().Set(\"Location\", uri)\n\tc.Status(http.StatusCreated)\n}", "func (h *UserHandler) handlePostUser(w http.ResponseWriter, r *http.Request, _ httprouter.Params) {\n //Decode request\n var req postUserRequest\n if err := json.NewDecoder(r.Body).Decode(&req.User); err != nil {\n Error(w, ErrInvalidJSON, http.StatusBadRequest, h.Logger)\n return\n }\n u := req.User\n\n //create a new user\n err := h.UserService.CreateUser(u)\n if err != nil {\n Error(w, err, http.StatusBadRequest, h.Logger)\n }\n w.Header().Set(\"Content-Type\", \"application/json\")\n json.NewEncoder(w).Encode(&postUserResponse{User: u})\n}", "func PostUser(w http.ResponseWriter, req *http.Request, app *App) {\n\tif models.UserCount(app.Db) == 0 {\n\t\temail, password := req.FormValue(\"email\"), req.FormValue(\"password\")\n\t\tuser := models.NewUser(email, password)\n\t\terr := user.Save(app.Db)\n\t\tif err != nil {\n\t\t\thttp.Redirect(w, req, app.Config.General.Prefix+\"/register\", http.StatusFound)\n\t\t} else 
{\n\t\t\thttp.Redirect(w, req, app.Config.General.Prefix+\"/login\", http.StatusFound)\n\t\t}\n\t}\n}", "func (auh *AdminUserHandler) PostUser(w http.ResponseWriter, r *http.Request, ps httprouter.Params) {\n\tvar apiKey = r.Header.Get(\"api-key\")\n\tif apiKey == \"\" || (apiKey != adminApiKey && apiKey != userApiKey) {\n\t\tw.Header().Set(\"Content-Type\", \"application/json\")\n\t\thttp.Error(w, http.StatusText(http.StatusForbidden), http.StatusForbidden)\n\t\treturn\n\t}\n\tl := r.ContentLength\n\tbody := make([]byte, l)\n\tr.Body.Read(body)\n\n\tuser := &entity.User{}\n\n\terr := json.Unmarshal(body, user)\n\n\tif err != nil {\n\t\tw.Header().Set(\"Content-Type\", \"application/json\")\n\t\thttp.Error(w, http.StatusText(http.StatusNotFound), http.StatusNotFound)\n\t\treturn\n\t}\n\n\tuser, errs := auh.userService.StoreUser(user)\n\n\tif len(errs) > 0 {\n\t\tw.Header().Set(\"Content-Type\", \"application/json\")\n\t\thttp.Error(w, http.StatusText(http.StatusNotFound), http.StatusNotFound)\n\t\treturn\n\t}\n\toutput, err := json.MarshalIndent(user, \"\", \"\\t\")\n\tw.WriteHeader(http.StatusCreated)\n\tw.Write(output)\n\treturn\n}", "func CreateUserHandler(w http.ResponseWriter, req *http.Request) {\n // Validate internal token.\n if internalToken := req.Header.Get(app.Config.AuthHeaderName); internalToken != app.Config.RestApiToken {\n respond.Error(w, errmsg.Unauthorized())\n return\n }\n\n // Parse & validate payload.\n var pl payload.CreateUserPayload\n\n if !pl.Validate(req) {\n respond.Error(w, errmsg.InvalidPayload())\n return\n }\n\n // Check if the executor is using the USER_CREATION_HASH to create this user.\n usingUserCreationPw := pl.ExecutorEmail == \"\" && app.Config.UserCreationHash != \"\" &&\n crypt.VerifySha256(pl.ExecutorPassword, app.Config.UserCreationHash)\n\n // If not using USER_CREATION_HASH for auth, verify executor exists using email/pw.\n if !usingUserCreationPw {\n // Get executor user by email.\n executorUser, err := 
usersvc.FromEmail(pl.ExecutorEmail)\n\n if err != nil {\n app.Log.Errorln(err.Error())\n respond.Error(w, errmsg.UserNotFound())\n return\n }\n\n // Ensure executor user's password is correct.\n if !crypt.VerifyBcrypt(pl.ExecutorPassword, executorUser.HashedPw) {\n app.Log.Errorln(\"error creating new User: invalid executor user password\")\n respond.Error(w, errmsg.Unauthorized())\n return\n }\n\n // Only admin users can create other users.\n if !executorUser.Admin {\n app.Log.Errorln(\"error creating new User: executor user must be an admin\")\n respond.Error(w, errmsg.Unauthorized())\n return\n }\n }\n\n // Hash provided user password.\n hashedPw, err := crypt.BcryptHash(pl.NewPassword)\n\n if err != nil {\n app.Log.Errorf(\"error creating new User: bcrypt password hash failed with %s\\n\", err.Error())\n respond.Error(w, errmsg.ISE())\n return\n }\n\n // Create new User.\n newUser, err := usersvc.Create(pl.NewEmail, hashedPw, pl.Admin)\n\n if err != nil {\n app.Log.Errorln(err.Error())\n pqError, ok := err.(*pq.Error)\n\n if ok && pqError.Code.Name() == \"unique_violation\" {\n respond.Error(w, errmsg.EmailNotAvailable())\n } else {\n respond.Error(w, errmsg.UserCreationFailed())\n }\n\n return\n }\n\n // Create response payload and respond.\n respData := successmsg.UserCreationSuccess\n respData[\"uid\"] = newUser.Uid\n\n respond.Created(w, respData)\n}", "func CreateUserHandler(connection *sql.DB, cnf config.Config) negroni.HandlerFunc {\n\treturn negroni.HandlerFunc(func(w http.ResponseWriter, r *http.Request, next http.HandlerFunc) {\n\t\tuser := &models.UserCreate{}\n\t\terr := util.RequestToJSON(r, user)\n\t\tif err != nil {\n\t\t\tutil.SendBadRequest(w, errors.New(\"Bad json\"))\n\t\t\treturn\n\t\t}\n\n\t\tif err := user.Validate(); err == nil {\n\t\t\tif err := user.ValidatePassword(); err == nil {\n\n\t\t\t\thash, _ := bcrypt.GenerateFromPassword([]byte(user.Password), 10)\n\t\t\t\tuser.Hash = string(hash)\n\n\t\t\t\tcreatedID, err := 
db.CreateUser(connection, user)\n\n\t\t\t\tif err != nil {\n\t\t\t\t\tutil.SendBadRequest(w, err)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tcreatedUser, err := db.GetUserByID(connection, createdID)\n\t\t\t\tif err != nil {\n\t\t\t\t\tutil.SendBadRequest(w, err)\n\t\t\t\t\treturn\n\t\t\t\t}\n\n\t\t\t\t// create JWT object with claims\n\t\t\t\texpiration := time.Now().Add(time.Hour * 24 * 31).Unix()\n\t\t\t\ttoken := jwt.NewWithClaims(jwt.SigningMethodHS256, jwt.MapClaims{\n\t\t\t\t\t\"sub\": createdUser.ID,\n\t\t\t\t\t\"iat\": time.Now().Unix(),\n\t\t\t\t\t\"exp\": expiration,\n\t\t\t\t})\n\n\t\t\t\t// Load secret key from config and generate a signed token\n\t\t\t\tsecretKey := cnf.SecretKey\n\t\t\t\ttokenString, err := token.SignedString([]byte(secretKey))\n\t\t\t\tif err != nil {\n\t\t\t\t\tutil.SendError(w, err)\n\t\t\t\t\treturn\n\t\t\t\t}\n\n\t\t\t\ttype Token struct {\n\t\t\t\t\tToken string `json:\"token\"`\n\t\t\t\t\tExpiresOn string `json:\"expires_on\"`\n\t\t\t\t\tUser *models.UserResponse `json:\"user\"`\n\t\t\t\t}\n\n\t\t\t\tutil.SendOK(w, &Token{\n\t\t\t\t\tToken: tokenString,\n\t\t\t\t\tExpiresOn: strconv.Itoa(int(expiration)),\n\t\t\t\t\tUser: &createdUser,\n\t\t\t\t})\n\n\t\t\t} else {\n\t\t\t\tutil.SendBadRequest(w, err)\n\t\t\t}\n\t\t} else {\n\t\t\tutil.SendBadRequest(w, err)\n\t\t}\n\t})\n}", "func (h *userHandler) createUser(ctx context.Context, rw http.ResponseWriter, r *http.Request) {\n\n\tvar user = &model.User{}\n\n\terr := json.NewDecoder(r.Body).Decode(user)\n\tif err != nil {\n\n\t\th.serv.writeResponse(ctx, rw, err.Error(), http.StatusBadRequest, nil)\n\n\t\treturn\n\n\t}\n\n\tif user.Login == \"\" || user.Password == \"\" {\n\n\t\th.serv.writeResponse(ctx, rw, \"Login or password are empty\", http.StatusBadRequest, nil)\n\n\t\treturn\n\t}\n\n\terr = h.registerUser(ctx, user)\n\tif err != nil {\n\n\t\th.serv.writeResponse(ctx, rw, err.Error(), http.StatusBadRequest, nil)\n\n\t\treturn\n\t}\n\n\th.serv.writeResponse(ctx, rw, \"user was 
created: \"+user.Login, http.StatusCreated, user)\n\n}", "func (e *env) UserSignupPostHandler(w http.ResponseWriter, r *http.Request) {\n\tswitch r.Method {\n\tcase \"GET\":\n\tcase \"POST\":\n\t\tusername := r.FormValue(\"username\")\n\t\tpassword := r.FormValue(\"password\")\n\n\t\terr := e.authState.NewUser(username, password)\n\t\tif err != nil {\n\t\t\tlog.Println(\"Error adding user:\", err)\n\t\t\te.authState.SetFlash(\"Error adding user. Check logs.\", r)\n\t\t\thttp.Redirect(w, r, r.Referer(), http.StatusInternalServerError)\n\t\t\treturn\n\t\t}\n\n\t\t// Login the recently added user\n\t\tif e.authState.Auth(username, password) {\n\t\t\te.authState.Login(username, r)\n\t\t}\n\n\t\te.authState.SetFlash(\"Successfully added '\"+username+\"' user.\", r)\n\t\thttp.Redirect(w, r, \"/\", http.StatusSeeOther)\n\n\tcase \"PUT\":\n\t\t// Update an existing record.\n\tcase \"DELETE\":\n\t\t// Remove the record.\n\tdefault:\n\t\t// Give an error message.\n\t}\n}", "func CreateUserHandler(w http.ResponseWriter, r *http.Request) {\n\n\tuser := &models.User{}\n\terr := json.NewDecoder(r.Body).Decode(user) //decode the request body into struct and fail if any error occur\n\tif err != nil {\n\t\tfmt.Println(\"Debug user CreateUserHandler:\", err)\n\t\tutils.Respond(w, utils.Message(false, \"Invalid request\"))\n\t\treturn\n\t}\n\n\tresp := user.Create() //Create user\n\tutils.Respond(w, resp)\n}", "func (handler *Handler) handleUserCreate(w http.ResponseWriter, r *http.Request) {\n\n\t//Create an empty new user\n\tnewUser := handler.userHelper.NewEmptyUser()\n\n\t/**\n\tDefine a struct for just updating password\n\t*/\n\ttype newUserStruct struct {\n\t\tEmail string `json:\"email\"`\n\t\tPassword string `json:\"password\"`\n\t}\n\n\t//Create the new user\n\tnewUserInfo := &newUserStruct{}\n\n\t//decode the request body into struct and failed if any error occur\n\terr := json.NewDecoder(r.Body).Decode(newUserInfo)\n\tif err != nil {\n\t\tutils.ReturnJsonStatus(w, 
http.StatusUnprocessableEntity, false, err.Error())\n\t\treturn\n\n\t}\n\n\t//Copy over the new user data\n\tnewUser.SetEmail(newUserInfo.Email)\n\tnewUser.SetPassword(newUserInfo.Password)\n\n\t//Now create the new suer\n\terr = handler.userHelper.createUser(newUser)\n\n\tif err != nil {\n\t\tutils.ReturnJsonStatus(w, http.StatusUnprocessableEntity, false, err.Error())\n\t\treturn\n\t}\n\n\t//Check to see if the user was created\n\tif err == nil {\n\t\tutils.ReturnJsonStatus(w, http.StatusCreated, true, \"create_user_added\")\n\t} else {\n\t\tutils.ReturnJsonStatus(w, http.StatusUnprocessableEntity, false, err.Error())\n\t}\n\n}", "func (h *Handler) PostUser(w http.ResponseWriter, r *http.Request) {\n\tdecoder := json.NewDecoder(r.Body)\n\n\tvar newUser User\n\tif err := decoder.Decode(&newUser); err != nil {\n\t\tif err.Error() == \"EOF\" {\n\t\t\thttp.Error(w, \"Empty user request\", 400)\n\t\t\treturn\n\t\t}\n\t\th.Logger.Errorf(\"err decoding user request: %s\", err)\n\t\thttp.Error(w, http.StatusText(500), 500)\n\t\treturn\n\t}\n\n\tif newUser.Email == \"\" || newUser.Password == \"\" {\n\t\thttp.Error(w, \"Invalid email or password\", 400)\n\t\treturn\n\t}\n\n\t// Hash password\n\tbytes, err := bcrypt.GenerateFromPassword([]byte(newUser.Password), bcrypt.DefaultCost)\n\tif err != nil {\n\t\th.Logger.Errorf(\"err hashing password: %s\", err)\n\t}\n\tnewUser.Password = string(bytes)\n\n\t_, err = h.Collection.InsertOne(context.TODO(), newUser)\n\tif err != nil {\n\t\tif strings.Contains(err.Error(), \"duplicate key error\") {\n\t\t\thttp.Error(w, \"User already exists\", 400)\n\t\t\treturn\n\t\t}\n\t\th.Logger.Errorf(\"error creating user: %s\", err)\n\t\thttp.Error(w, http.StatusText(500), 500)\n\t\treturn\n\t}\n\n\tnewUser.Password = \"\"\n\n\trender.JSON(w, r, newUser)\n}", "func UserCreate(w http.ResponseWriter, r *http.Request) {\n\terr := json.NewDecoder(r.Body).Decode(&user)\n\tif err != nil {\n\t\tm.Message = fmt.Sprintf(\"Error al leer el usuario a 
registrarse: %s\", err)\n\t\tm.Code = http.StatusBadRequest\n\t\tcommons.DisplayMessage(w, m)\n\t\treturn\n\t}\n\tif user.Password != user.ConfirmPassword {\n\t\tm.Message = \"Las contraseña no coinciden\"\n\t\tm.Code = http.StatusBadRequest\n\t\tcommons.DisplayMessage(w, m)\n\t\treturn\n\t}\n\tuser.Password = password\n\tavatarmd5 := md5.Sum([]byte(user.Password))\n\tavatarstr := fmt.Sprintf(\"%x\", avatarmd5)\n\tuser.Avatar = \"https://gravatar.com/avatar/\" + avatarstr + \"?s=100\"\n\tdatabase := configuration.GetConnection()\n\tdefer database.Close()\n\terr = database.Create(&user).Error\n\tif err != nil {\n\t\tm.Message = fmt.Sprintf(\"Error al crear el registro: %s\", err)\n\t\tm.Code = http.StatusBadRequest\n\t\tcommons.DisplayMessage(w, m)\n\t\treturn\n\t}\n\tm.Message = \"Usuario creado con éxito\"\n\tm.Code = http.StatusCreated\n\tcommons.DisplayMessage(w, m)\n}", "func (uh UserHandler) CreateUser(w http.ResponseWriter, r *http.Request) {\n\tlog.Print(utils.InfoLog + \"UserHandler:CreateUser called\")\n\tvar newUser models.User\n\n\treqBody, genErr := ioutil.ReadAll(r.Body); if genErr != nil {\n\t\tutils.ReturnWithError(w, http.StatusBadRequest, http.StatusText(http.StatusBadRequest), genErr.Error())\n\t\tlog.Println(utils.ErrorLog + \"Unable to read request body\")\n\t\treturn\n\t}\n\n\tjson.Unmarshal(reqBody, &newUser)\n\t_, genErr = valid.ValidateStruct(&newUser) ; if genErr != nil {\n\t\tutils.ReturnWithError(w, http.StatusBadRequest, http.StatusText(http.StatusBadRequest), genErr.Error())\n\t\tlog.Println(utils.ErrorLog + \"Request body data invalid\")\n\t\treturn\n\t}\n\terr := models.ValidateUser(&newUser); if err != nil {\n\t\tutils.ReturnWithErrorLong(w, *err)\n\t\tlog.Println(utils.ErrorLog + \"Request body data invalid\") // TODO ??\n\t\treturn\n\t}\n\n\terr = uh.UserManager.CreateUser(&newUser); if err != nil {\n\t\tutils.ReturnWithErrorLong(w, *err)\n\t\tlog.Println(utils.ErrorLog + \"Insert body here\") // TODO 
??\n\t\treturn\n\t}\n\n\tw.WriteHeader(http.StatusCreated)\n\tjson.NewEncoder(w).Encode(newUser)\n}", "func PostUser(w http.ResponseWriter, req *http.Request) {\n\tID := req.FormValue(\"id\")\n\tnameStr := req.FormValue(\"name\")\n\tname := string(nameStr)\n\n\tuser := db.User{ID: ID, Name: name}\n\n\tdb.Save(user)\n\n\tw.Write([]byte(\"OK\"))\n}", "func (a *API) userSignupPostHandler(w http.ResponseWriter, r *http.Request) {\n\t// Validate user input\n\tvar u model.User\n\terr := json.NewDecoder(r.Body).Decode(&u)\n\tif err != nil {\n\t\tresponse.Errorf(w, r, err, http.StatusInternalServerError, \"Internal Server Error\")\n\t\treturn\n\t} else if u.Name == \"\" {\n\t\tresponse.Errorf(w, r, nil, http.StatusBadRequest, \"Name is missing\")\n\t\treturn\n\t} else if u.Lastname == \"\" {\n\t\tresponse.Errorf(w, r, nil, http.StatusBadRequest, \"Lastname is missing\")\n\t\treturn\n\t} else if u.Email == \"\" {\n\t\tresponse.Errorf(w, r, nil, http.StatusBadRequest, \"Email address is missing\")\n\t\treturn\n\t} else if u.Password == \"\" {\n\t\tresponse.Errorf(w, r, nil, http.StatusBadRequest, \"Password is missing\")\n\t\treturn\n\t}\n\t// Always use the lower case email address\n\tu.Email = strings.ToLower(u.Email)\n\n\t// Create jwt token\n\tu.Token, err = a.createJWT(jwt.MapClaims{\n\t\t\"email\": u.Email,\n\t\t\"name\": u.Name,\n\t\t\"lastname\": u.Lastname,\n\t\t\"password\": u.Password,\n\t\t\"exp\": time.Now().Add(time.Hour * 24 * 7).Unix(),\n\t})\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t}\n\t// Hash the user password\n\terr = u.HashPassword()\n\tif err != nil {\n\t\tresponse.Errorf(w, r, err, http.StatusInternalServerError, \"Internal Server Error\")\n\t\treturn\n\t}\n\t// Save user to database\n\tInsertedUserID, err := a.db.CreateUser(&u)\n\tif err != nil {\n\t\tif err.Error() == \"email_address_already_exists\" {\n\t\t\tresponse.Errorf(w, r, err, http.StatusBadRequest, \"Email address already 
exists\")\n\t\t\treturn\n\t\t}\n\t\tresponse.Errorf(w, r, err, http.StatusInternalServerError, \"Internal Server Error\")\n\t\treturn\n\t}\n\treturnUser := model.ViewUser{\n\t\tID: InsertedUserID,\n\t\tName: u.Name,\n\t\tLastname: u.Lastname,\n\t\tEmail: u.Email,\n\t\tPassword: u.Password,\n\t\tToken: u.Token,\n\t}\n\t//\thttp.SetCookie(w, &http.Cookie{\n\t//\t\tName: \"token\",\n\t//\t\tValue: u.Token,\n\t//\t\tPath: \"/\",\n\t//\t})\n\n\t// Omit password\n\t//\tu.ID, err = primitive.ObjectIDFromHex(InsertedUserID)\n\t//\tif err != nil {\n\t//\t\tresponse.Errorf(w, r, err, http.StatusInternalServerError, \"Internal Server Error\")\n\t//\t\treturn\n\t//\t}\n\t//\tu.Password = \"\"\n\tresponse.Write(w, r, returnUser)\n}", "func createHandler(w http.ResponseWriter, r *http.Request) {\n\tusername := r.FormValue(\"user\")\n\tpass := r.FormValue(\"pass\")\n\tuser := User{}\n\terr := userDB.Find(bson.M{\"username\": username}).One(&user)\n\n\tif user.UserName == username {\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\tfmt.Fprintln(w, \"User already exists\")\n\t\treturn\n\t}\n\n\tif len(pass) < 8 {\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\tfmt.Fprintln(w, \"Password must contain at least 8 characters\")\n\t\treturn\n\t}\n\n\thash, _ := bcrypt.GenerateFromPassword([]byte(pass), 10)\n\tuser.UserName = username\n\tuser.Hash = hash\n\terr = userDB.Insert(&user)\n\tif err != nil {\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\tfmt.Fprintln(w, \"Error creating user.\")\n\t\treturn\n\t}\n\n\tr.URL.Path = \"/login\"\n}", "func PostUser(w http.ResponseWriter, r *http.Request) {\n\tvar user models.User\n\tjson.NewDecoder(r.Body).Decode(&user)\n\tmodels.CreateUser(user)\n}", "func (h Handler) CreateUser(w http.ResponseWriter, r *http.Request) {\n\n\tbuf := new(bytes.Buffer)\n\tbuf.ReadFrom(r.Body)\n\tdefer r.Body.Close()\n\n\tvar usersRequestDTO model.UserRequestDTO\n\n\t//Unmarshall body\n\n\tvar err error\n\tif err = json.Unmarshal(buf.Bytes(), &usersRequestDTO); err != nil 
{\n\t\thttp.Error(w, \"Invalid body\", http.StatusBadRequest)\n\t\treturn\n\t}\n\n\t//Validate dto\n\n\tif !users.ValidateUsersRequestDTO(usersRequestDTO) {\n\t\thttp.Error(w, \"Invalid body\", http.StatusBadRequest)\n\t\treturn\n\t}\n\n\t//Check If user exists\n\n\tvar found bool\n\tif _, found, err = h.Db.GetUser(usersRequestDTO.Username); found && err == nil {\n\t\thttp.Error(w, \"Username already exists\", http.StatusConflict)\n\t\treturn\n\t}\n\n\tif !found && err != nil {\n\t\thttp.Error(w, \"Error generating user\", http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\t//Create User\n\n\tvar user model.User\n\tif user, err = users.CreateUser(usersRequestDTO.Username, usersRequestDTO.Password); err != nil {\n\t\thttp.Error(w, \"Error generating user\", http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\t//Post user\n\n\tuser, err = h.Db.InsertUser(user)\n\tif err != nil {\n\t\thttp.Error(w, \"Error generating user\", http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\tif err := json.NewEncoder(w).Encode(model.UserResponseDTO{user.Userid}); err != nil {\n\t\thttp.Error(w, \"Write error\", http.StatusInternalServerError)\n\t}\n}", "func createHandler(w http.ResponseWriter, r *http.Request) {\n\tuser := new(User)\n\tuser.Token = validateToken(r.FormValue(\"token\"))\n\tuser.PasswordHash = validatePassHash(r.FormValue(\"passHash\"))\n\tuser.PublicKey = validatePublicKey(r.FormValue(\"publicKey\"))\n\tuser.PublicHash = computePublicHash(user.PublicKey)\n\tuser.CipherPrivateKey = validateHex(r.FormValue(\"cipherPrivateKey\"))\n\n\tlog.Printf(\"Woot! 
New user %s %s\\n\", user.Token, user.PublicHash)\n\n\tif !SaveUser(user) {\n\t\thttp.Error(w, \"That username is taken\", http.StatusBadRequest)\n\t}\n}", "func postRegistrationHandler(w http.ResponseWriter, r *http.Request, _ map[string]string) {\n\tif database.RetrieveUsersCount() == 0 { // TODO: Or check if authenticated user is admin when adding users from inside the admin area\n\t\tname := r.FormValue(\"name\")\n\t\temail := r.FormValue(\"email\")\n\t\tpassword := r.FormValue(\"password\")\n\t\tif name != \"\" && password != \"\" {\n\t\t\thashedPassword, err := authentication.EncryptPassword(password)\n\t\t\tif err != nil {\n\t\t\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tuser := structure.User{Name: []byte(name), Slug: slug.Generate(name, \"users\"), Email: []byte(email), Image: []byte(filenames.DefaultUserImageFilename), Cover: []byte(filenames.DefaultUserCoverFilename), Role: 4}\n\t\t\terr = methods.SaveUser(&user, hashedPassword, 1)\n\t\t\tif err != nil {\n\t\t\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\t\t\treturn\n\t\t\t}\n\t\t\thttp.Redirect(w, r, \"/admin\", http.StatusFound)\n\t\t\treturn\n\t\t}\n\t\thttp.Redirect(w, r, \"/admin\", http.StatusFound)\n\t\treturn\n\t}\n\t// TODO: Handle creation of other users (not just the first one)\n\thttp.Error(w, \"Not implemented yet.\", http.StatusInternalServerError)\n\treturn\n}", "func CreateUser(response http.ResponseWriter, request *http.Request) {\n\n\t\n\t\trequest.ParseForm()\n\t\tdecoder := json.NewDecoder(request.Body)\n\t\tvar newUser User\n\t\t\n\t\terr := decoder.Decode(&newUser)\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t\t\n newUser.Password=hashAndSalt([]byte(newUser.Password))\n\t\t\n\t\tinsertUser(newUser)\n\t\n}", "func createUserHandler(res http.ResponseWriter, req *http.Request) {\n\tvar user MongoUserSchema\n\tjson.NewDecoder(req.Body).Decode(&user)\n\t// fmt.Println(hash(user.Password))\n\tif 
checkEmailValidity(user.Email) == false {\n\t\tres.WriteHeader(http.StatusInternalServerError)\n\t\tres.Write([]byte(\"Invalid e-mail id!\"))\n\t\treturn\n\t}\n\n\tusersCol := client.Database(\"Aviroop_Nandy_Appointy\").Collection(\"users\")\n\tctx, _ := context.WithTimeout(context.Background(), 15*time.Second)\n\tcursor, err := usersCol.Find(ctx, bson.M{})\n\n\tfor cursor.Next(ctx) {\n\t\tvar backlogUser MongoUserSchema\n\t\tcursor.Decode(&backlogUser)\n\t\tif backlogUser.Email == user.Email {\n\t\t\tres.WriteHeader(http.StatusInternalServerError)\n\t\t\tres.Write([]byte(`{\"This e-mail is already registered!\":\"` + err.Error() + `\"}`))\n\t\t\treturn\n\t\t}\n\t}\n\n\thashedPswd := hashPassword(user.Password)\n\tuser.Password = hashedPswd\n\n\tuserResult, insertErrorUser := usersCol.InsertOne(ctx, user)\n\tif insertErrorUser != nil {\n\t\tfmt.Println(\"Error while creating user: \", insertErrorUser)\n\t} else {\n\t\tjson.NewEncoder(res).Encode(userResult)\n\t\tuserID := userResult.InsertedID\n\t\tfmt.Println(\"New user id: \", userID)\n\t}\n\n\tres.Header().Add(\"content-type\", \"application/json\")\n\tres.WriteHeader(http.StatusOK)\n}", "func NewUserHandler() *UserHandler {\n h := &UserHandler{\n Router: httprouter.New(),\n Logger: log.New(os.Stderr, \"\", log.LstdFlags),\n }\n h.POST(\"/api/user\", h.handlePostUser)\n return h\n}", "func createUser(c *gin.Context) {\n password,_ := HashPassword(c.PostForm(\"password\"))\n\tuser := user{Login: c.PostForm(\"login\"), Password: password}\n\tdb.Save(&user)\n\tc.JSON(http.StatusCreated, gin.H{\"status\": http.StatusCreated, \"message\": \"User item created successfully!\"})\n}", "func (db *DB) PostUser(w http.ResponseWriter, r *http.Request) {\n\n\tcontentType := r.Header.Get(\"content-type\")\n\n\tif contentType == \"application/json\" {\n\t\tvar user User\n\n\t\terr := json.NewDecoder(r.Body).Decode(&user)\n\t\tif err != nil {\n\t\t\tlog.Println(\"error in encoding the post body\", err)\n\t\t}\n\n\t\t// store 
id\n\t\tuser.ID = bson.NewObjectId()\n\t\t\n\t\tcollection := db.Database.C(\"users\")\n\t\terr = collection.Insert(user)\n\t\tif err != nil {\n\t\t\tw.WriteHeader(http.StatusNotFound)\n\t\t\tw.Write([]byte(\"Cannot insert user into database\"))\n\n\t\t\tlog.Fatalln(err.Error())\n\t\t} else {\n\t\t\tw.Header().Set(\"Content-type\", \"application/json\")\n\t\t\tresponse, err := json.Marshal(user)\n\t\t\tif err != nil {\n\t\t\t\tlog.Println(\"error in converting struct to json\", err)\n\t\t\t}\n\t\n\t\t\tw.Write(response)\n\t\t}\n\t} else {\n\t\tw.WriteHeader(http.StatusNotAcceptable)\n\t\tw.Write([]byte(\"unsupported format\"))\n\t}\n\n}", "func createNewUser(w http.ResponseWriter, r *http.Request) {\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\tparams := mux.Vars(r)\n\tvar userInfo UserBody\n\t//decode the json object and store the values in userInfo\n\terr := json.NewDecoder(r.Body).Decode(&userInfo)\n\tif err != nil {\n\t\tfmt.Println(\"ERROR DECODING JSON OBJ FROM CREATE NEW USER\")\n\t}\n\tresult := post.CreateUser(params[\"id\"], userInfo.FirstName, userInfo.LastName, userInfo.Email)\n\tjson.NewEncoder(w).Encode(map[string]bool{\n\t\t\"result\": result,\n\t})\n}", "func UserRegisterPostHandler(w http.ResponseWriter, r *http.Request) {\n\tb := form.RegistrationForm{}\n\terr := form.NewErrors()\n\tif !captcha.Authenticate(captcha.Extract(r)) {\n\t\terr[\"errors\"] = append(err[\"errors\"], \"Wrong captcha!\")\n\t}\n\tif len(err) == 0 {\n\t\tif len(r.PostFormValue(\"email\")) > 0 {\n\t\t\t_, err = form.EmailValidation(r.PostFormValue(\"email\"), err)\n\t\t}\n\t\t_, err = form.ValidateUsername(r.PostFormValue(\"username\"), err)\n\t\tif len(err) == 0 {\n\t\t\tmodelHelper.BindValueForm(&b, r)\n\t\t\terr = modelHelper.ValidateForm(&b, err)\n\t\t\tif len(err) == 0 {\n\t\t\t\t_, errorUser := userService.CreateUser(w, r)\n\t\t\t\tif errorUser != nil {\n\t\t\t\t\terr[\"errors\"] = append(err[\"errors\"], errorUser.Error())\n\t\t\t\t}\n\t\t\t\tif len(err) 
== 0 {\n\t\t\t\t\tlanguages.SetTranslationFromRequest(viewRegisterSuccessTemplate, r, \"en-us\")\n\t\t\t\t\tu := model.User{\n\t\t\t\t\t\tEmail: r.PostFormValue(\"email\"), // indicate whether user had email set\n\t\t\t\t\t}\n\t\t\t\t\thtv := UserRegisterTemplateVariables{b, err, NewSearchForm(), Navigation{}, &u, r.URL, mux.CurrentRoute(r)}\n\t\t\t\t\terrorTmpl := viewRegisterSuccessTemplate.ExecuteTemplate(w, \"index.html\", htv)\n\t\t\t\t\tif errorTmpl != nil {\n\t\t\t\t\t\thttp.Error(w, errorTmpl.Error(), http.StatusInternalServerError)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\tif len(err) > 0 {\n\t\tb.CaptchaID = captcha.GetID()\n\t\tlanguages.SetTranslationFromRequest(viewRegisterTemplate, r, \"en-us\")\n\t\thtv := UserRegisterTemplateVariables{b, err, NewSearchForm(), Navigation{}, GetUser(r), r.URL, mux.CurrentRoute(r)}\n\t\terrorTmpl := viewRegisterTemplate.ExecuteTemplate(w, \"index.html\", htv)\n\t\tif errorTmpl != nil {\n\t\t\thttp.Error(w, errorTmpl.Error(), http.StatusInternalServerError)\n\t\t}\n\t}\n}", "func CreateUser(c *gin.Context) {\n\tvar user Models.User\n\tc.BindJSON(&user)\n\n\tauth := c.Request.Header.Get(\"Authorization\")\n if auth != Utils.GetAuthToken() {\n\t\tc.JSON(http.StatusUnauthorized, gin.H{\n\t\t\t\"error\" : gin.H { \n\t\t\t\"status\": http.StatusUnauthorized,\n\t\t\t\"message\": \"Invalid Token\",\n\t\t}})\n\t\tc.Abort()\n\t\treturn\n\t}\n\tvar now = time.Now().Unix()\n\tnur := Models.User(user)\n\t err_password := validator.Validate(nur)\n\t if err_password != nil {\n\t\tc.JSON(http.StatusNotFound, gin.H{\n\t\t\t\"error\" : gin.H { \n\t\t\t\"status\": http.StatusNotFound,\n\t\t\t\"message\": err_password.Error(),\n\t\t}})\n\t\tfmt.Println(err_password.Error())\n\t\tc.AbortWithStatus(http.StatusNotFound)\n\t\treturn\n\t}\n\n\t//user.Password = Utils.EncodeBase64(user.Password)\n\tuser.Date_created = Utils.ConvertTimestampToDate(int64(now))\n\terr := Models.CreateUser(&user)\n\tif err != nil 
{\n\t\tc.JSON(http.StatusNotFound, gin.H{\n\t\t\t\"error\" : gin.H { \n\t\t\t\"status\": http.StatusNotFound,\n\t\t\t\"message\": err.Error(),\n\t\t}})\n\t\tfmt.Println(err.Error())\n\t\tc.AbortWithStatus(http.StatusNotFound)\n\t} else {\n\t\tc.JSON(http.StatusOK, user)\n\t\tfmt.Println(\"usuario_creado\", user.Id)\n\t}\n}", "func CreateUser(w http.ResponseWriter, r *http.Request) {\n\t// create an empty user of type models.User\n\tvar user models.User\n\n\t// decode the json request to user\n\terr := json.NewDecoder(r.Body).Decode(&user)\n\n\tif err != nil {\n\t\tlogrus.Errorf(\"Unable to decode the request body. %v\", err)\n\t\thttp.Error(w, \"Bad Request\", http.StatusBadRequest)\n\t\treturn\n\t}\n\tif !user.Valid() {\n\t\thttp.Error(w, \"Invalid User\", http.StatusBadRequest)\n\t\treturn\n\t}\n\t// set the header to content type x-www-form-urlencoded\n\t// Allow all origin to handle cors issue\n\tw.Header().Set(\"Context-Type\", \"application/x-www-form-urlencoded\")\n\tw.Header().Set(\"Access-Control-Allow-Origin\", \"*\")\n\tw.Header().Set(\"Access-Control-Allow-Methods\", \"POST\")\n\tw.Header().Set(\"Access-Control-Allow-Headers\", \"Content-Type\")\n\t//set the hash\n\thashedPass, err := user.HashPassword()\n\tif err != nil {\n\t\tlogrus.Errorf(\"Unable to create hash of the given password. %v\", err)\n\t\thttp.Error(w, \"Bad Request\", http.StatusBadRequest)\n\t\treturn\n\t}\n\t// call insert user function and pass the user\n\terr = database.InsertUser(user.Email, hashedPass, user.FirstName, user.LastName)\n\tif err != nil {\n\t\tlogrus.Errorf(\"Unable to insert user. %v\", err)\n\t\thttp.Error(w, \"\", http.StatusBadRequest)\n\t\treturn\n\t}\n\ttkn, err := models.CreateToken(user.Email)\n\tif err != nil {\n\t\tlogrus.Errorf(\"Unable to create token. 
%v\", err)\n\t\thttp.Error(w, \"\", http.StatusInternalServerError)\n\t\treturn\n\t}\n\t// format a response object\n\tres := models.TokenResponse{\n\t\tToken: tkn,\n\t}\n\t// send the response\n\terr = json.NewEncoder(w).Encode(res)\n\tif err != nil {\n\t\tlogrus.Errorf(err.Error())\n\t\treturn\n\t}\n}", "func CreateUserHandler() gin.HandlerFunc {\n\treturn func(c *gin.Context) {\n\t\trole, _ := c.Get(\"Role\")\n\n\t\tif role != \"admin\" {\n\t\t\tc.JSON(http.StatusBadRequest, gin.H{\n\t\t\t\t\"status\": \"error\",\n\t\t\t\t\"message\": \"forbidden\",\n\t\t\t})\n\t\t\treturn\n\t\t}\n\n\t\tvar data entity.RegistrationUserEntity\n\t\trepositoryService := repository.UserRepository{}\n\t\tuserService := UserService{repositoryService}\n\n\t\t_ = c.ShouldBindJSON(&data)\n\n\t\tuserData, err := userService.CreateUser(data)\n\t\tif err != nil {\n\t\t\tc.JSON(http.StatusBadRequest, gin.H{\n\t\t\t\t\"status\": \"error\",\n\t\t\t\t\"message\": err.Error(),\n\t\t\t})\n\t\t\treturn\n\t\t}\n\n\t\tc.JSON(http.StatusOK, gin.H{\n\t\t\t\"status\": \"ok\",\n\t\t\t\"data\": userData,\n\t\t})\n\t}\n}", "func NewUserCreateHandler(db *gorm.DB) echo.HandlerFunc {\n\treturn func(ctx echo.Context) error {\n\t\tuser := &model.User{}\n\t\tif err := ctx.Bind(user); err != nil {\n\t\t\treturn echo.NewHTTPError(http.StatusInternalServerError, err)\n\t\t}\n\n\t\tif err := user.Validate(); err != nil {\n\t\t\treturn echo.NewHTTPError(http.StatusUnprocessableEntity, err)\n\t\t}\n\n\t\thashBytes, err := bcrypt.GenerateFromPassword([]byte(user.Password), 10)\n\t\tif err != nil {\n\t\t\treturn echo.NewHTTPError(http.StatusInternalServerError, err)\n\t\t}\n\n\t\tuser.Password = \"\"\n\t\tuser.PasswordDigest = hashBytes\n\t\tuser.ResetJWTToken()\n\n\t\tif err := db.Create(user).Error; err != nil {\n\t\t\treturn echo.NewHTTPError(http.StatusInternalServerError, err.Error())\n\t\t}\n\n\t\treturn ctx.JSON(http.StatusCreated, user)\n\t}\n}", "func UserRegisterPost(w http.ResponseWriter, r *http.Request) 
{\n\t// Get session\n\tsess := session.Instance(r)\n\n\t// Prevent brute force login attempts by not hitting MySQL and pretending like it was invalid :-)\n\tif sess.Values[\"register_attempt\"] != nil && sess.Values[\"register_attempt\"].(int) >= 5 {\n\t\tlog.Println(\"Brute force register prevented\")\n\t\thttp.Redirect(w, r, \"/not_found\", http.StatusFound)\n\t\treturn\n\t}\n\n\tbody, readErr := ioutil.ReadAll(r.Body)\n\tif readErr != nil {\n\t\tlog.Println(readErr)\n\t\tReturnError(w, readErr)\n\t\treturn\n\t}\n\n\tvar regResp webpojo.UserCreateResp\n\tif len(body) == 0 {\n\t\tlog.Println(\"Empty json payload\")\n\t\tRecordRegisterAttempt(sess)\n\t\tsess.Save(r, w)\n\t\tregResp = webpojo.UserCreateResp{constants.StatusCode_400, constants.Msg_400}\n\t\tbs, err := json.Marshal(regResp)\n\t\tif err != nil {\n\t\t\tReturnError(w, err)\n\t\t\treturn\n\t\t}\n\t\tfmt.Fprint(w, string(bs))\n\t\treturn\n\t}\n\n\t//log.Println(\"r.Body\", string(body))\n\tregReq := webpojo.UserCreateReq{}\n\tjsonErr := json.Unmarshal(body, &regReq)\n\tif jsonErr != nil {\n\t\tlog.Println(jsonErr)\n\t\tReturnError(w, jsonErr)\n\t\treturn\n\t}\n\tlog.Println(regReq.Email)\n\n\t// Validate with required fields\n\tif validate, _ := validateRegisterInfo(r, &regReq, constants.DefaultRole); !validate {\n\t\tlog.Println(\"Invalid reg request! 
Missing field\")\n\t\tRecordRegisterAttempt(sess)\n\t\tsess.Save(r, w)\n\t\tregResp = webpojo.UserCreateResp{constants.StatusCode_400, constants.Msg_400}\n\t\tbs, err := json.Marshal(regResp)\n\t\tif err != nil {\n\t\t\tReturnError(w, err)\n\t\t\treturn\n\t\t}\n\t\tfmt.Fprint(w, string(bs))\n\t\treturn\n\t}\n\n\tpassword, errp := passhash.HashString(regReq.Password)\n\n\t// If password hashing failed\n\tif errp != nil {\n\t\tlog.Println(errp)\n\t\tRecordRegisterAttempt(sess)\n\t\tsess.Save(r, w)\n\t\tregResp = webpojo.UserCreateResp{constants.StatusCode_500, constants.Msg_500}\n\t\tbs, err := json.Marshal(regResp)\n\t\tif err != nil {\n\t\t\tReturnError(w, err)\n\t\t\treturn\n\t\t}\n\t\tfmt.Fprint(w, string(bs))\n\t\treturn\n\t}\n\n\t// Get database result\n\t_, err := model.UserByEmail(regReq.Email)\n\n\tif err == model.ErrNoResult { // If success (no user exists with that email)\n\t\tex := model.UserCreate(regReq.FirstName, regReq.LastName, regReq.Email, password)\n\t\t// Will only error if there is a problem with the query\n\t\tif ex != nil {\n\t\t\tlog.Println(ex)\n\t\t\tRecordRegisterAttempt(sess)\n\t\t\tsess.Save(r, w)\n\t\t\tregResp = webpojo.UserCreateResp{constants.StatusCode_500, constants.Msg_500}\n\t\t\tbs, err := json.Marshal(regResp)\n\t\t\tif err != nil {\n\t\t\t\tReturnError(w, err)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tfmt.Fprint(w, string(bs))\n\t\t} else {\n\t\t\tlog.Println(\"Account created successfully for: \" + regReq.Email)\n\t\t\tRecordRegisterAttempt(sess)\n\t\t\tsess.Save(r, w)\n\t\t\tregResp = webpojo.UserCreateResp{constants.StatusCode_200, constants.Msg_200}\n\t\t\tbs, err := json.Marshal(regResp)\n\t\t\tif err != nil {\n\t\t\t\tReturnError(w, err)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tfmt.Fprint(w, string(bs))\n\t\t}\n\t} else if err != nil { // Catch all other errors\n\t\tlog.Println(err)\n\t\tRecordRegisterAttempt(sess)\n\t\tsess.Save(r, w)\n\t\tregResp = webpojo.UserCreateResp{constants.StatusCode_500, constants.Msg_500}\n\t\tbs, err := 
json.Marshal(regResp)\n\t\tif err != nil {\n\t\t\tReturnError(w, err)\n\t\t\treturn\n\t\t}\n\t\tfmt.Fprint(w, string(bs))\n\t} else { // Else the user already exists\n\t\tlog.Println(\"User already existed!!!\")\n\t\tRecordRegisterAttempt(sess)\n\t\tsess.Save(r, w)\n\t\tregResp = webpojo.UserCreateResp{constants.StatusCode_400, constants.Msg_400}\n\t\tbs, err := json.Marshal(regResp)\n\t\tif err != nil {\n\t\t\tReturnError(w, err)\n\t\t\treturn\n\t\t}\n\t\tfmt.Fprint(w, string(bs))\n\t}\n}", "func Post(w http.ResponseWriter, r *http.Request) {\n\tvar errs []string\n\tvar gocqlUuid gocql.UUID\n\n\tuser, errs := FormToUser(r)\n\n\tcreated := false\n\n\tif len(errs) == 0 {\n\t\tfmt.Println(\"creating a new user\")\n\n\t\t// generate a UUID for the user\n\t\tgocqlUuid = gocql.TimeUUID()\n\n\t\t// write data to Cassandra\n\t\tif err := cassandra.Session.Query(\n\t\t\t`INSERT INTO users (id, firstname, lastname, email, city, age) VALUES (?, ?, ?, ?, ?, ?)`,\n\t\t\tgocqlUuid,\n\t\t\tuser.FirstName,\n\t\t\tuser.LastName,\n\t\t\tuser.Email,\n\t\t\tuser.City,\n\t\t\tuser.Age,\n\t\t).Exec(); err != nil {\n\t\t\terrs = append(errs, err.Error())\n\t\t} else {\n\t\t\tcreated = true\n\t\t}\n\t}\n\n\tif created {\n\t\tfmt.Println(\"user_id\", gocqlUuid)\n\t\tjson.NewEncoder(w).Encode(NewUserResponse{ID: gocqlUuid})\n\t} else {\n\t\tfmt.Println(\"errors\", errs)\n\t\tjson.NewEncoder(w).Encode(ErrorResponse{Errors: errs})\n\t}\n}", "func (h *Handler) createUser(c *gin.Context) handlerResponse {\n\n\tvar newUser types.User\n\tif err := c.ShouldBindJSON(&newUser); err != nil {\n\t\treturn handleBadRequest(err)\n\t}\n\tstoredUser, err := h.service.User.Create(newUser, h.who(c))\n\tif err != nil {\n\t\treturn handleError(err)\n\t}\n\t// Remove password so we do not show in response\n\tstoredUser.Password = \"\"\n\treturn handleCreated(storedUser)\n}", "func (h *userHandler) CreateUserHandler(c *gin.Context) {\n\tvar inputUser entity.UserInput\n\n\tif err := c.ShouldBindJSON(&inputUser); 
err != nil {\n\t\tsplitError := helper.SplitErrorInformation(err)\n\t\tresponseError := helper.APIResponse(\"input data required\", 400, \"bad request\", gin.H{\"errors\": splitError})\n\n\t\tc.JSON(400, responseError)\n\t\treturn\n\t}\n\n\tnewUser, err := h.userService.SaveNewUser(inputUser)\n\tif err != nil {\n\t\tresponseError := helper.APIResponse(\"internal server error\", 500, \"error\", gin.H{\"error\": err.Error()})\n\n\t\tc.JSON(500, responseError)\n\t\treturn\n\t}\n\n\tresponse := helper.APIResponse(\"success create new User\", 201, \"status Created\", newUser)\n\tc.JSON(201, response)\n}", "func CreateUser(w http.ResponseWriter, req *http.Request) {\n\tbody, err := ioutil.ReadAll(io.LimitReader(req.Body, 1048576))\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\tif err := req.Body.Close(); err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\tvar user User\n\terr = json.Unmarshal(body, &user)\n\tif err != nil {\n\t\tw.WriteHeader(422)\n\t\tlog.Println(err.Error())\n\t}\n\n\tInsertUser(user)\n\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\tw.WriteHeader(http.StatusCreated)\n\tif err := json.NewEncoder(w).Encode(user); err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n}", "func agregarUsuario(w http.ResponseWriter, r *http.Request) {\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\tvar User usr\n\treqBody, err := ioutil.ReadAll(r.Body)\n\tif err != nil {\n\t\tfmt.Fprintf(w, \"Insert a Valid Task Data\")\n\t}\n\tjson.Unmarshal(reqBody, &User)\n\tfmt.Println(User)\n\tpol := newCn()\n\tpol.abrir()\n\trows, err := pol.db.Query(\"insert into usuario(username, password, nombre, apellido, fecha_nacimiento, correo) values(:1,:2,:3,:4,to_date(:5, 'yyyy/mm/dd'),:6)\", User.User, User.Contrasena, User.Nombre, User.Apellido, User.Fechanacimiento, User.Correo)\n\tpol.cerrar()\n\tif err != nil 
{\n\t\tfmt.Println(\"Error running query\")\n\t\tfmt.Println(err)\n\t\tfmt.Fprintf(w, \"usuario ya existe o correo invalido\")\n\t\treturn\n\t} else {\n\t\tfmt.Fprintf(w, \"registro exitos\")\n\t}\n\tdefer rows.Close()\n\n}", "func CreateUser(w http.ResponseWriter, r *http.Request) {\n\trequestBody, err := ioutil.ReadAll(r.Body)\n\tif err != nil {\n\t\tlog.Fatal(\"Error\")\n\t}\n\n\tvar user models.User\n\tif err = json.Unmarshal(requestBody, &user); err != nil {\n\t\tlog.Fatal(\"Error\")\n\t}\n\n\tdb, err := database.OpenDbConnection()\n\tif err != nil {\n\t\tlog.Fatal(\"error\")\n\t}\n\n\trepository := repositories.UserRepository(db)\n\trepository.Create(user)\n}", "func UserHandler(w http.ResponseWriter, r *http.Request) {\n\tswitch r.Method {\n\n\tcase \"GET\":\n\n\t\tusersJSON, err := json.Marshal(db.Users)\n\n\t\tif err != nil {\n\t\t\thttp.Error(w, err.Error(), http.StatusBadRequest)\n\t\t\treturn\n\t\t}\n\t\tw.Write(usersJSON)\n\n\tcase \"POST\":\n\n\t\terr := utils.IsJsonValid(w, r)\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\n\t\tuserPayload := db.UserPayload{}\n\t\tb, err := ioutil.ReadAll(r.Body)\n\t\tdefer r.Body.Close()\n\t\tif err != nil {\n\t\t\thttp.Error(w, err.Error(), http.StatusBadRequest)\n\t\t\treturn\n\t\t}\n\n\t\terr = json.Unmarshal(b, &userPayload)\n\t\tif err != nil {\n\t\t\thttp.Error(w, err.Error(), http.StatusBadRequest)\n\t\t\treturn\n\t\t}\n\n\t\t//validate email\n\t\tvalidEmail := utils.IsEmailValid(userPayload.UserEmail)\n\t\tif !validEmail {\n\t\t\tmsg := \"Email address is not valid\"\n\t\t\thttp.Error(w, msg, http.StatusBadRequest)\n\t\t\treturn\n\t\t}\n\n\t\taccessToken := utils.String(50)\n\n\t\tcookie := http.Cookie{\n\t\t\tName: \"userToken\",\n\t\t\tValue: accessToken,\n\t\t\tExpires: time.Time{},\n\t\t\tMaxAge: 86400,\n\t\t\tSecure: false,\n\t\t\tHttpOnly: false,\n\t\t\tSameSite: 0,\n\t\t}\n\t\thttp.SetCookie(w, &cookie)\n\n\t\tcookie = http.Cookie{\n\t\t\tName: \"userEmail\",\n\t\t\tValue: 
userPayload.UserEmail,\n\t\t\tExpires: time.Time{},\n\t\t\tMaxAge: 86400,\n\t\t\tSecure: false,\n\t\t\tHttpOnly: false,\n\t\t\tSameSite: 0,\n\t\t}\n\t\thttp.SetCookie(w, &cookie)\n\n\t\tuser := db.User{}\n\t\tuser.Token = accessToken\n\t\tuser.Devices = make(map[string]*db.Device)\n\n\t\tdb.Users[userPayload.UserEmail] = &user\n\n\t\tw.Write([]byte(accessToken))\n\t}\n}", "func CreateUser(w http.ResponseWriter, r *http.Request){\n\n\t\tu := User{}\n\n\t\terr:= json.NewDecoder(r.Body).Decode(&u)\n\n\t\tif err != nil {\n\t\t\thttp.Error(w, err.Error(), http.StatusBadRequest)\n\t\t\treturn\n\t\t}\n\n\t\t// Checks if name is Empty\n\t\tfmt.Printf(\"name: [%+v]\\n\", u.Name)\n\t\tif u.Name == \"\" {\n\t\t\tfmt.Println(\"Empty string\")\n\t\t\tw.Write([]byte(`{\"status\":\"Invalid Name\"}`))\n\t\t\treturn\n\t\t}\n\n\n\t\t//start validation for username\n\t\tvar isStringAlphabetic = regexp.MustCompile(`^[A-Za-z][A-Za-z0-9]*$`).MatchString\n\t\tif !isStringAlphabetic(u.Name){\n\t\t\tfmt.Println(\"is not alphanumeric\")\n\t\t\tw.Write([]byte(`{\"status\":\"Invalid Name\"}`))\n\t\t\treturn\n\t\t}\n\n\t\t//make the Name Uppercase\n\t\tu.Name = strings.ToUpper(u.Name)\n\n\t\t// check if username already exists\n\t\tuser := userExist(u.Name)\n\t\tif user != (User{}) {\n\t\t\tfmt.Println(\"Name already exists\")\n\t\t\tw.Write([]byte(`{\"status\":\"Name Exists\"}`))\n\t\t\treturn\n\t\t}\n\n\t\t//if it does exist create the user with a random ID and score = 0\n\t\tuuid, err := uuid.NewV4()\n\t\tu.ID = uuid.String()\n\t\tu.Score = 0\n\n\t\tquery := \"INSERT INTO users (id, name, score) VALUES ($1, $2, $3);\"\n\t\t_, err = db.Exec(query, u.ID, u.Name, u.Score);\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\t\tw.Header().Set(\"Content-Type\", \"application/json\")\n\t\tw.WriteHeader(201)\n\t\tjson.NewEncoder(w).Encode(u)\n\n}", "func CreateUser(w http.ResponseWriter, r *http.Request) {\r\n\tdefer r.Body.Close()\r\n\tvar user model.User\r\n\r\n\tif err := 
json.NewDecoder(r.Body).Decode(&user); err != nil {\r\n\t\tlog.Println(err)\r\n\t\tu.RespondWithError(w, http.StatusBadRequest, \"Invalid request payload\")\r\n\t\treturn\r\n\t}\r\n\r\n\tif resp, ok := validate(&user); !ok {\r\n\t\tlog.Println(resp)\r\n\t\tu.RespondWithError(w, http.StatusBadRequest, resp)\r\n\t\treturn\r\n\t}\r\n\r\n\thashedPassword, _ := bcrypt.GenerateFromPassword([]byte(user.Password), bcrypt.DefaultCost)\r\n\tuser.Password = string(hashedPassword)\r\n\r\n\tif err := dao.DBConn.InsertUser(user); err != nil {\r\n\t\tlog.Println(err)\r\n\t\tu.RespondWithError(w, http.StatusInternalServerError, err.Error())\r\n\t\treturn\r\n\t}\r\n\r\n\tuser.Token = model.GenerateToken(user.Email)\r\n\r\n\t// Delete password before response\r\n\tuser.Password = \"\"\r\n\r\n\tu.RespondWithJSON(w, http.StatusOK, user)\r\n}", "func (r *UsersResource) PostUser(c *gin.Context) {\n\tvar nu newUser\n\n\t// validate form input\n\tif err := c.Bind(&nu); err != nil {\n\t\tc.JSON(http.StatusBadRequest, strings.Split(err.Error(), \"\\n\"))\n\t\treturn\n\t}\n\n\t// verify a user with ID does not already exist\n\t// TODO: try to put this in middleware\n\tif existing, _ := r.userStore.User(nu.ID); existing != nil {\n\t\tc.Status(http.StatusConflict)\n\t\treturn\n\t}\n\n\t// create new user\n\tu := fountain.User{\n\t\tID: nu.ID,\n\t\tFullName: nu.FullName,\n\t\tEmail: nu.Email,\n\t}\n\n\t// insert new user into store\n\tif err := r.userStore.PutUser(&u); err != nil {\n\t\tc.Error(err)\n\t\tc.Status(http.StatusInternalServerError)\n\t} else {\n\t\tloc := fmt.Sprintf(\"%s/%s\", c.Request.URL.Path, u.ID)\n\t\tc.Redirect(http.StatusSeeOther, loc)\n\t}\n}", "func (h *UserHandler) Create(w http.ResponseWriter, r *http.Request) {\n\tlog.Print(\"/users POST handled\")\n\n\treq := &CreateRequest{}\n\tif err := util.ScanRequest(r, req); err != nil {\n\t\tlog.Println(err)\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\treturn\n\t}\n\n\tuser := &schema.User{\n\t\tName: req.Name,\n\t}\n\n\tif 
err := h.model.Validate(user); err != nil {\n\t\tlog.Println(err)\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\treturn\n\t}\n\n\tres, err := h.model.Create(user)\n\tif err != nil {\n\t\tlog.Println(err)\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\tif err := util.JSONWrite(w, res, http.StatusCreated); err != nil {\n\t\tlog.Println(err)\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t}\n}", "func (server Server) CreateNewUser(w http.ResponseWriter, r *http.Request) {\n\tvar user models.User // make a user\n\tvar res models.APIResponse // make a response\n\n\terr := json.NewDecoder(r.Body).Decode(&user) //decode the user\n\tif err != nil {\n\t\tlog.Printf(\"Unable to decode the request body. %v\", err)\n\t\tres = models.BuildAPIResponseFail(\"Unable to decode the request body\", nil)\n\t}\n\tif user.Name == \"\" || user.Email == \"\" || user.Password == \"\" {\n\t\tres = models.BuildAPIResponseFail(\"Blank users cannot be created\", nil)\n\t} else {\n\t\tinsertID := insertUser(user, server.db) // call insert user function and pass the note\n\t\tres = models.BuildAPIResponseSuccess(fmt.Sprintf(\"User Created with %d id\", insertID), nil) // format a response object\n\t}\n\tjson.NewEncoder(w).Encode(res)\n\n}", "func (a *API) userLoginPostHandler(w http.ResponseWriter, r *http.Request) {\n\t// Validate user input\n\tvar u model.User\n\terr := json.NewDecoder(r.Body).Decode(&u)\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t} else if u.Email == \"\" {\n\t\tresponse.Errorf(w, r, nil, http.StatusBadRequest, \"Email address is missing\")\n\t\treturn\n\t} else if u.Password == \"\" {\n\t\tresponse.Errorf(w, r, nil, http.StatusBadRequest, \"Password is missing\")\n\t\treturn\n\t}\n\t// Always use the lower case email address\n\tu.Email = strings.ToLower(u.Email)\n\t// Get the user database entry\n\tuser, err := a.db.GetUserByEmail(u.Email)\n\tif err != nil {\n\t\tresponse.Errorf(w, r, err, 
http.StatusInternalServerError, \"Internal Server Error\")\n\t\treturn\n\t} else if user == nil {\n\t\tresponse.Errorf(w, r, err, http.StatusBadRequest, \"Invalid email address or password\")\n\t\treturn\n\t}\n\t// Check the password\n\tif !user.MatchPassword(u.Password) {\n\t\tresponse.Errorf(w, r, err, http.StatusBadRequest, \"Invalid email address or password\")\n\t\treturn\n\t}\n\t// Create jwt token\n\tuser.Token, err = a.createJWT(jwt.MapClaims{\n\t\t\"email\": user.Email,\n\t\t\"name\": user.Name,\n\t\t\"lastname\": user.Lastname,\n\t\t\"password\": user.Password,\n\t\t\"exp\": time.Now().Add(time.Hour * 24 * 7).Unix(),\n\t})\n\tif err != nil {\n\t\tresponse.Errorf(w, r, err, http.StatusInternalServerError, \"Internal Server Error\")\n\t\treturn\n\t}\n\t//\thttp.SetCookie(w, &http.Cookie{\n\t//\t\tName: \"token\",\n\t//\t\tValue: u.Token,\n\t//\t\tPath: \"/\",\n\t//\t})\n\tresponse.Write(w, r, user)\n}", "func CreateUserHandler(ur UserRepo) func(c *gin.Context) {\n\treturn func(c *gin.Context) {\n\t\tswitch c.DefaultQuery(\"type\", \"direct\") {\n\t\tcase \"direct\":\n\t\t\tcreateUserDirectly(ur, c)\n\t\t\treturn\n\t\tcase \"facebook\":\n\t\t\tcreateUserByFacebook(ur, c)\n\t\t\treturn\n\t\tcase \"account-kit\":\n\t\t\tcreateUserByAccountKit(ur, c)\n\t\t\treturn\n\t\tcase \"gmail\":\n\t\t\tcreateUserByGmail(ur, c)\n\t\t\treturn\n\t\tcase \"apple\":\n\t\t\tcreateUserByApple(ur, c)\n\t\t\treturn\n\t\t}\n\t}\n}", "func (h *HTTPClientHandler) addUserHandler(w http.ResponseWriter, r *http.Request) {\n\tw.Header().Add(\"Access-Control-Allow-Origin\", ServerName)\n\t// adding new user to database\n\tvar userRequest User\n\n\tdefer r.Body.Close()\n\tbody, err := ioutil.ReadAll(r.Body)\n\n\tif err != nil {\n\t\t// failed to read response body\n\t\tlog.WithFields(log.Fields{\n\t\t\t\"error\": err.Error(),\n\t\t}).Error(\"Could not read response body!\")\n\t\thttp.Error(w, \"Failed to read request body.\", 400)\n\t\treturn\n\t}\n\n\terr = json.Unmarshal(body, 
&userRequest)\n\n\tif err != nil {\n\t\tw.Header().Set(\"Content-Type\", \"application/json; charset=UTF-8\")\n\t\tw.WriteHeader(422) // can't process this entity\n\t\treturn\n\t}\n\n\tlog.WithFields(log.Fields{\n\t\t\"firstName\": userRequest.FirstName,\n\t\t\"lastName\": userRequest.LastName,\n\t\t\"userID\": userRequest.UserID,\n\t\t\"profilePicUrl\": userRequest.ProfilePicUrl,\n\t\t\"gender\": userRequest.Gender,\n\t\t\"body\": string(body),\n\t}).Info(\"Got user info\")\n\n\t// adding user\n\terr = h.db.addUser(userRequest)\n\n\tif err == nil {\n\t\tw.Header().Set(\"Content-Type\", \"application/json; charset=UTF-8\")\n\t\tw.WriteHeader(201) // user inserted\n\t\treturn\n\t} else {\n\t\tlog.WithFields(log.Fields{\n\t\t\t\"error\": err.Error(),\n\t\t}).Warn(\"Failed to insert..\")\n\n\t\tcontent, code := responseDetailsFromMongoError(err)\n\n\t\t// Marshal provided interface into JSON structure\n\t\tuj, _ := json.Marshal(content)\n\n\t\t// Write content-type, statuscode, payload\n\t\twriteJsonResponse(w, &uj, code)\n\n\t}\n\n}", "func CreateUser(w http.ResponseWriter, r *http.Request) {\n\tu := User{}\n\terr := json.NewDecoder(r.Body).Decode(&u)\n\n\tif err != nil {\n\t\thttp.Error(w, http.StatusText(500), http.StatusInternalServerError)\n\t\tfmt.Println(err)\n\t\treturn\n\t}\n\n\terr = SaveUser(u.FullName, u.NickName, u.Email, u.Balance)\n\n\tif err != nil {\n\t\thttp.Error(w, http.StatusText(500), http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\tw.WriteHeader(http.StatusCreated)\n}", "func (userHandler UserHandler) CreateUser() http.HandlerFunc {\n\treturn func(w http.ResponseWriter, r *http.Request) {\n\t\tvar u user\n\n\t\tif err := json.NewDecoder(r.Body).Decode(&u); err != nil {\n\t\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\t\t_, _ = w.Write([]byte(\"Something went wrong while parsing the user from request body\"))\n\t\t\treturn\n\t\t}\n\n\t\tcreateUserCmd := application.CreateUserCommand{\n\t\t\tUsername: u.Username,\n\t\t\tPassword: 
u.Password,\n\t\t}\n\n\t\tid, err := userHandler.Cmd.Handle(r.Context(), createUserCmd)\n\n\t\tif err != nil {\n\t\t\thttpError, _ := err.(helpers.HttpError)\n\t\t\thttp.Error(w, httpError.Message, httpError.Code)\n\t\t\treturn\n\t\t}\n\n\t\tw.WriteHeader(http.StatusOK)\n\t\tjson.NewEncoder(w).Encode(id)\n\t}\n}", "func CreateUser(w http.ResponseWriter, r *http.Request) {\n\tvar user models.User\n\tbodyRequest, err := ioutil.ReadAll(r.Body)\n\n\tif err != nil {\n\t\tresponses.Error(w, http.StatusUnprocessableEntity, err)\n\t\treturn\n\t}\n\n\tif err = json.Unmarshal(bodyRequest, &user); err != nil {\n\t\tresponses.Error(w, http.StatusBadRequest, err)\n\t\treturn\n\t}\n\n\tif err := user.Prepare(true); err != nil {\n\t\tresponses.Error(w, http.StatusBadRequest, err)\n\t\treturn\n\t}\n\n\tif err := validateUniqueDataUser(user, true); err != nil {\n\t\tresponses.Error(w, http.StatusBadRequest, err)\n\t\treturn\n\t}\n\n\tdb, err := database.Connect()\n\tif err != nil {\n\t\tresponses.Error(w, http.StatusInternalServerError, err)\n\t}\n\tdefer db.Close()\n\n\trepository := repository.NewRepositoryUser(db)\n\n\tuser.Id, err = repository.Insert(user)\n\tif err != nil {\n\t\tresponses.Error(w, http.StatusInternalServerError, err)\n\t\treturn\n\t}\n\n\tresponses.JSON(w, http.StatusCreated, user)\n\n}", "func (ctx *Context) UserHandler(w http.ResponseWriter, r *http.Request) {\n\tswitch r.Method {\n\tcase \"POST\":\n\t\tdecoder := json.NewDecoder(r.Body)\n\t\tnewuser := &users.NewUser{}\n\t\tif err := decoder.Decode(newuser); err != nil {\n\t\t\thttp.Error(w, \"Invalid JSON\", http.StatusBadRequest)\n\t\t\treturn\n\t\t}\n\t\terr := newuser.Validate()\n\t\tif err != nil {\n\t\t\thttp.Error(w, \"User not valid\", http.StatusBadRequest)\n\t\t\treturn\n\t\t}\n\t\tusr, _ := ctx.UserStore.GetByEmail(newuser.Email)\n\t\tif usr != nil {\n\t\t\thttp.Error(w, \"Email Already Exists\", http.StatusBadRequest)\n\t\t\treturn\n\t\t}\n\t\tuser, err := 
ctx.UserStore.Insert(newuser)\n\t\tstate := &SessionState{\n\t\t\tBeganAt: time.Now(),\n\t\t\tClientAddr: r.RequestURI,\n\t\t\tUser: user,\n\t\t}\n\t\t_, err = sessions.BeginSession(ctx.SessionKey, ctx.SessionStore, state, w)\n\n\t\t_, err = ctx.UserStore.CreateLikesList(user)\n\t\t_, err = ctx.UserStore.CreateGroceryList(user)\n\n\t\tw.Header().Add(\"Content-Type\", contentTypeJSONUTF8)\n\t\tencoder := json.NewEncoder(w)\n\t\tencoder.Encode(user)\n\tcase \"GET\":\n\t\tusers, err := ctx.UserStore.GetAll()\n\t\tif err != nil {\n\t\t\thttp.Error(w, \"Error fetching users\", http.StatusInternalServerError)\n\t\t}\n\t\tw.Header().Add(\"Content-Type\", contentTypeJSONUTF8)\n\t\tencoder := json.NewEncoder(w)\n\t\tencoder.Encode(users)\n\t}\n}", "func HandleUserCreate(c *gin.Context) {\n\tvar user User\n\terr := c.ShouldBindJSON(&user)\n\tif err != nil {\n\t\tc.JSON(http.StatusInternalServerError, gin.H{\n\t\t\t\"code\": 1,\n\t\t\t\"msg\": err.Error(),\n\t\t\t\"data\": gin.H{},\n\t\t})\n\t\treturn\n\t}\n\n\tuid, err := user.Create()\n\tif err != nil {\n\t\tc.JSON(http.StatusInternalServerError, gin.H{\n\t\t\t\"code\": 1,\n\t\t\t\"msg\": err.Error(),\n\t\t\t\"data\": gin.H{},\n\t\t})\n\t\treturn\n\t}\n\n\tc.JSON(http.StatusOK, gin.H{\n\t\t\"code\": 0,\n\t\t\"msg\": \"ok\",\n\t\t\"data\": gin.H{\n\t\t\t\"uid\": uid,\n\t\t},\n\t})\n}", "func CreateUser(w http.ResponseWriter, r *http.Request) {\n\tdata := authInfo{}\n\terr := json.NewDecoder(r.Body).Decode(&data)\n\tif err != nil {\n\t\tutils.JSONRespnseWithErr(w, &utils.ErrPostDataNotCorrect)\n\t\treturn\n\t}\n\tmessage := models.SignUp(data.Email, data.Password, data.RoleID)\n\tutils.JSONResonseWithMessage(w, message)\n}", "func createHandler(w http.ResponseWriter, r *http.Request) {\n user := new(User)\n user.Token = validateToken(r.FormValue(\"token\"))\n user.PasswordHash = validateHash(r.FormValue(\"passHash\"))\n user.PublicKey = validatePublicKey(r.FormValue(\"publicKey\"))\n user.PublicHash = 
computePublicHash(user.PublicKey)\n user.CipherPrivateKey = validateHex(r.FormValue(\"cipherPrivateKey\"))\n\n log.Printf(\"Woot! New user %s %s\\n\", user.Token, user.PublicHash)\n\n if !SaveUser(user) {\n http.Error(w, \"That username is taken\", http.StatusBadRequest)\n }\n}", "func CreateUserHandler(w http.ResponseWriter, r *http.Request) {\n\tlog.Debug(\"userservice.CreateUserHandler called\")\n\tusername, err := apiserver.Authn(apiserver.CREATE_USER_PERM, w, r)\n\tif err != nil {\n\t\treturn\n\t}\n\n\tw.Header().Set(\"WWW-Authenticate\", `Basic realm=\"Restricted\"`)\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\tw.WriteHeader(http.StatusOK)\n\n\tvar request msgs.CreateUserRequest\n\t_ = json.NewDecoder(r.Body).Decode(&request)\n\n\tresp := msgs.CreateUserResponse{}\n\n\tvar ns string\n\tns, err = apiserver.GetNamespace(apiserver.Clientset, username, request.Namespace)\n\tif err != nil {\n\t\tresp.Status.Code = msgs.Error\n\t\tresp.Status.Msg = apiserver.VERSION_MISMATCH_ERROR\n\t\tjson.NewEncoder(w).Encode(resp)\n\t\treturn\n\t}\n\n\tif request.ClientVersion != msgs.PGO_VERSION {\n\t\tresp.Status.Code = msgs.Error\n\t\tresp.Status.Msg = apiserver.VERSION_MISMATCH_ERROR\n\t\tjson.NewEncoder(w).Encode(resp)\n\t\treturn\n\t}\n\n\tresp = CreateUser(&request, ns)\n\tjson.NewEncoder(w).Encode(resp)\n\n}", "func NewUser(c *fiber.Ctx) {\n\t// create new user\n\tuser := new(User)\n\t// put post req body onto user struct\n\tif err := c.BodyParser(user); err != nil {\n\t\tc.Status(503).Send(err)\n\t\treturn\n\t}\n\t// put into db\n\tdatabase.DBConn.Create(&user)\n\tc.JSON(user)\n}", "func CreateUser(c *gin.Context) {}", "func (ac *ApiConfig) AddUserHandler(w http.ResponseWriter, r *http.Request) {\n\tif r.Method != http.MethodPost {\n\t\thttp.Error(w, r.Method+\" is not available\", http.StatusInternalServerError)\n\t\tzerolog.Error().Msg(r.Method + \" is not available\")\n\t\treturn\n\t}\n\n\tvar user *models.Users\n\terr := 
json.NewDecoder(r.Body).Decode(&user)\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\thashedPass, err := bcrypt.GenerateFromPassword([]byte(user.Password), 12)\n\tif err != nil {\n\t\tzerolog.Error().Msg(err.Error())\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n\tuser.Password = string(hashedPass)\n\n\terr = ac.DHolder.AddUser(user)\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\tstat := &models.StatusIdentifier{\n\t\tOk: true,\n\t\tMessage: \"User Added\",\n\t}\n\n\terr = dResponseWriter(w, stat, http.StatusOK)\n\tif err != nil {\n\t\tzerolog.Error().Msg(err.Error())\n\t\treturn\n\t}\n\n\treturn\n}", "func CreateUser(clients *common.ClientContainer, handler common.HandlerInterface) http.HandlerFunc {\n\treturn func(w http.ResponseWriter, r *http.Request) {\n\t\tuser := User{}\n\t\terr := json.NewDecoder(r.Body).Decode(&user)\n\t\tif err != nil {\n\t\t\tcommon.WriteErrorToResponse(w, http.StatusBadRequest,\n\t\t\t\thttp.StatusText(http.StatusBadRequest),\n\t\t\t\terr.Error())\n\t\t\treturn\n\t\t}\n\t\tif len(user.Email) == 0 {\n\t\t\tif len(user.Name) == 0 {\n\t\t\t\tcommon.WriteErrorToResponse(w, http.StatusInternalServerError,\n\t\t\t\t\thttp.StatusText(http.StatusInternalServerError),\n\t\t\t\t\t\"provide Either Name or Email in parameters\")\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t\tif len(user.Email) != 0 {\n\t\t\tif !(emailValid.MatchString(user.Email)) {\n\t\t\t\tcommon.WriteErrorToResponse(w, http.StatusInternalServerError,\n\t\t\t\t\thttp.StatusText(http.StatusInternalServerError),\n\t\t\t\t\t\"Email Invalid\")\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t\tif len(user.Login) == 0 {\n\t\t\tcommon.WriteErrorToResponse(w, http.StatusInternalServerError,\n\t\t\t\thttp.StatusText(http.StatusInternalServerError),\n\t\t\t\t\"provide Login in parameters\")\n\t\t\treturn\n\t\t}\n\t\tif len(user.Password) == 0 
{\n\t\t\tcommon.WriteErrorToResponse(w, http.StatusInternalServerError,\n\t\t\t\thttp.StatusText(http.StatusInternalServerError),\n\t\t\t\t\"provide Password in parameters\")\n\t\t\treturn\n\t\t}\n\n\t\t// Create user if no error\n\t\thelperCreateUser(clients, handler, w, user)\n\t}\n}", "func UsersRegisterPost(c buffalo.Context) error {\n\t// Allocate an empty User\n\tuser := &models.User{}\n\t// Bind user to the html form elements\n\tif err := c.Bind(user); err != nil {\n\t\treturn errors.WithStack(err)\n\t}\n\t// Get the DB connection from the context\n\ttx := c.Value(\"tx\").(*pop.Connection)\n\t// Validate the data from the html form\n\tverrs, err := user.Create(tx)\n\tif err != nil {\n\t\treturn errors.WithStack(err)\n\t}\n\tif verrs.HasAny() {\n\t\t// Make user available inside the html template\n\t\tc.Set(\"user\", user)\n\t\t// Make the errors available inside the html template\n\t\tc.Set(\"errors\", verrs.Errors)\n\t\t// Render again the register.html template that the user can\n\t\t// correct the input.\n\t\treturn c.Render(422, r.HTML(\"users/register.html\"))\n\t}\n\t// If there are no errors set a success message\n\tc.Flash().Add(\"success\", \"Account created successfully.\")\n\t// and redirect to the home page\n\treturn c.Redirect(302, \"/\")\n}", "func (env *Env) RegisterUser(c *gin.Context) {\n\n\ttype registerRequest struct {\n\t\tUsername string `json:\"username\"`\n\t\tPassword string `json:\"password\"`\n\t\tDeviceID string `json:\"device_id\"`\n\t}\n\n\ttype registerResponse struct {\n\t\tAccessToken string `json:\"access_token\"`\n\t\tRefreshToken string `json:\"refresh_token\"`\n\t\tUser mysql.User `json:\"user\"`\n\t\tResetCode string `json:\"reset_code\"`\n\t}\n\n\t//decode request body\n\tjsonData, err := ioutil.ReadAll(c.Request.Body)\n\tif err != nil {\n\t\tLog.WithField(\"module\", \"handler\").WithError(err)\n\t\tc.AbortWithStatusJSON(http.StatusBadRequest, errs.RQST001)\n\t\treturn\n\t}\n\n\tvar request registerRequest\n\terr = 
json.Unmarshal(jsonData, &request)\n\tif err != nil {\n\t\tLog.WithField(\"module\", \"handler\").WithError(err)\n\t\tc.AbortWithStatusJSON(http.StatusBadRequest, errs.RQST001)\n\t\treturn\n\t}\n\n\tif request.Username == \"\" || request.Password == \"\" || request.DeviceID == \"\" {\n\t\tLog.WithField(\"module\", \"handler\").Error(\"Empty Fields in Request Body\")\n\t\tc.AbortWithStatusJSON(http.StatusBadRequest, errs.RQST002)\n\t\treturn\n\t}\n\n\tvar empty int64\n\tresult := env.db.Model(&mysql.User{}).Count(&empty)\n\tif result.Error != nil {\n\t\tLog.WithField(\"module\", \"handler\").WithError(result.Error)\n\t\tc.AbortWithStatusJSON(http.StatusInternalServerError, errs.DBSQ001)\n\t\treturn\n\t}\n\n\tuser := mysql.User{}\n\tperms := mysql.Permissions{}\n\tdefaultGroup := mysql.UserGroup{}\n\n\tif empty == 0 {\n\n\t\tperms.Admin = true\n\t\tperms.CanEdit = true\n\n\t\tdefaultGroupPerms := mysql.Permissions{CanEdit: false, Admin: false}\n\n\t\tdefaultGroup.Name = \"default\"\n\n\t\tresult = env.db.Save(&defaultGroupPerms)\n\t\tif result.Error != nil {\n\t\t\tLog.WithField(\"module\", \"handler\").WithError(result.Error)\n\t\t\tc.AbortWithStatusJSON(http.StatusInternalServerError, errs.DBSQ001)\n\t\t\treturn\n\t\t}\n\n\t\tdefaultGroup.Permissions = defaultGroupPerms\n\n\t\tresult = env.db.Save(&defaultGroup)\n\t\tif result.Error != nil {\n\t\t\tLog.WithField(\"module\", \"handler\").WithError(result.Error)\n\t\t\tc.AbortWithStatusJSON(http.StatusInternalServerError, errs.DBSQ001)\n\t\t\treturn\n\t\t}\n\n\t} else {\n\t\tvar exists int64\n\t\t//Check if Username already exists in Database\n\t\tresult = env.db.Model(&user).Where(\"upper(username) = upper(?)\", user.Username).Count(&exists)\n\t\tif result.Error != nil {\n\t\t\tLog.WithField(\"module\", \"handler\").WithError(result.Error)\n\t\t\tc.AbortWithStatusJSON(http.StatusInternalServerError, errs.DBSQ001)\n\t\t\treturn\n\t\t}\n\t\tLog.WithField(\"module\", \"handler\").Debug(\"Users found: \", 
exists)\n\n\t\tif exists != 0 {\n\t\t\tLog.WithField(\"module\", \"handler\").Error(\"Username already exists in Database\")\n\t\t\tc.AbortWithStatusJSON(http.StatusForbidden, errs.AUTH004)\n\t\t\treturn\n\t\t}\n\n\t\tperms.Admin = false\n\t\tperms.CanEdit = false\n\n\t\tdefaultGroup.Name = \"default\"\n\t\tresult = env.db.Model(&defaultGroup).Find(&defaultGroup)\n\t\tif result.Error != nil {\n\t\t\tLog.WithField(\"module\", \"handler\").WithError(result.Error)\n\t\t\tc.AbortWithStatusJSON(http.StatusInternalServerError, errs.DBSQ001)\n\t\t\treturn\n\t\t}\n\n\t}\n\n\t//Create permission entry for new user in permissions table\n\tresult = env.db.Save(&perms)\n\tif result.Error != nil {\n\t\tLog.WithField(\"module\", \"sql\").WithError(result.Error)\n\t\tc.AbortWithStatusJSON(http.StatusInternalServerError, errs.DBSQ001)\n\t\treturn\n\t}\n\n\tuser.Username = request.Username\n\tuser.Password = request.Password\n\tuser.AvatarID = \"default\"\n\tuser.PermID = perms.ID\n\tuser.UserGroups = append(user.UserGroups, &defaultGroup)\n\tuser.ResetCode = utils.GenerateCode()\n\n\t//Save new user to users database\n\tresult = env.db.Save(&user)\n\tif result.Error != nil {\n\t\tLog.WithField(\"module\", \"sql\").WithError(result.Error)\n\t\tc.AbortWithStatusJSON(http.StatusInternalServerError, errs.DBSQ001)\n\t\treturn\n\t}\n\n\t//Generate JWT AccessToken\n\taccessToken, err := utils.JWTAuthService(config.JWTAccessSecret).GenerateToken(user.ID, request.DeviceID, time.Hour*24)\n\tif err != nil {\n\t\tLog.WithField(\"module\", \"jwt\").WithError(err)\n\t\tc.AbortWithStatusJSON(http.StatusInternalServerError, errs.AUTH002)\n\t\treturn\n\t}\n\n\t//Add AccessToken to Redis\n\terr = env.rdis.AddPair(fmt.Sprint(user.ID), accessToken, time.Hour*24)\n\tif err != nil {\n\t\tLog.WithField(\"module\", \"redis\").WithError(err).Error(\"Error adding AccessToken to Redis.\")\n\t\terr = nil\n\t}\n\n\t//Generate RefreshToken\n\trefreshToken, err := 
utils.JWTAuthService(config.JWTRefreshSecret).GenerateToken(user.ID, request.DeviceID, time.Hour*24)\n\tif err != nil {\n\t\tLog.WithField(\"module\", \"jwt\").WithError(err)\n\t\tc.AbortWithStatusJSON(http.StatusInternalServerError, errs.AUTH002)\n\t\treturn\n\t}\n\n\tuser.RefreshToken = refreshToken\n\n\t//Save RefreshToken to Database\n\tresult = env.db.Save(&user)\n\tif result.Error != nil {\n\t\tLog.WithField(\"module\", \"sql\").WithError(result.Error)\n\t\tc.AbortWithStatusJSON(http.StatusInternalServerError, errs.DBSQ002)\n\t\treturn\n\t}\n\n\tc.JSON(200, registerResponse{AccessToken: accessToken, RefreshToken: refreshToken, User: user, ResetCode: user.ResetCode})\n}", "func handleSignUp(w http.ResponseWriter, r *http.Request) {\n\tif parseFormErr := r.ParseForm(); parseFormErr != nil {\n\t\thttp.Error(w, \"Sent invalid form\", 400)\n\t}\n\n\tname := r.FormValue(\"name\")\n\tuserHandle := r.FormValue(\"userHandle\")\n\temail := r.FormValue(\"email\")\n\tpassword := r.FormValue(\"password\")\n\n\tif !verifyUserHandle(userHandle) {\n\t\thttp.Error(w, \"Invalid userHandle\", http.StatusBadRequest)\n\t\treturn\n\t}\n\n\tif !verifyEmail(email) {\n\t\thttp.Error(w, \"Invalid email\", http.StatusBadRequest)\n\t\treturn\n\t}\n\n\tif !verifyPassword(password) {\n\t\thttp.Error(w, \"Password does not meet complexity requirements\", http.StatusBadRequest)\n\t\treturn\n\t}\n\n\thashed, _ := bcrypt.GenerateFromPassword([]byte(password), bcrypt.DefaultCost)\n\n\turChannel := make(chan *database.InsertResponse)\n\tgo createUser(\n\t\tmodel.User{Name: name, UserHandle: userHandle, Email: email, Password: hashed},\n\t\turChannel,\n\t)\n\tcreatedUser := <-urChannel\n\n\tif createdUser.Err != nil {\n\t\tlog.Println(createdUser.Err)\n\n\t\tif strings.Contains(createdUser.Err.Error(), \"E11000\") {\n\t\t\tif strings.Contains(createdUser.Err.Error(), \"index: userHandle_1\") {\n\t\t\t\thttp.Error(w, \"Userhandle \"+userHandle+\" already registered\", 
http.StatusConflict)\n\t\t\t} else {\n\t\t\t\thttp.Error(w, \"Email \"+email+\" already registered\", http.StatusConflict)\n\t\t\t}\n\t\t} else {\n\t\t\tcommon.SendInternalServerError(w)\n\t\t}\n\n\t} else {\n\t\tlog.Println(\"Created user with ID \" + createdUser.ID)\n\t\tw.WriteHeader(http.StatusOK)\n\t\t_, wError := w.Write([]byte(\"Created user with ID \" + createdUser.ID))\n\n\t\tif wError != nil {\n\t\t\tlog.Println(\"Error while writing: \" + wError.Error())\n\t\t}\n\t}\n\n}", "func postUser(w http.ResponseWriter, r *http.Request) {\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\tvar user User\n\terr := json.NewDecoder(r.Body).Decode(&user)\n\tlog.ErrorHandler(err)\n\tvar (\n\t\temail = strings.ToLower(user.Email)\n\t\talias = user.Alias\n\t\tuserName = user.UserName\n\t\tpassword = user.Password\n\t\tfullName = user.FullName\n\t\tsafeNames bool\n\t\tsafeEmail = emailValidator(email)\n\t\tsafePassword = passwordValidator(password)\n\t\tsimilarToUser = similarToUser(fullName, alias, userName, password)\n\t)\n\n\tduplicateEmail := DuplicateCheck(email)\n\n\tif duplicateEmail {\n\t\tw.WriteHeader(http.StatusConflict)\n\t\terr := json.NewEncoder(w).Encode(core.FourONine)\n\t\tlog.ErrorHandler(err)\n\t\tlog.AccessHandler(r, 409)\n\t\treturn\n\t}\n\n\tsafeNames = userDetails(fullName, alias, userName)\n\n\tif safeNames {\n\t\t// Some or all of the details in the body are empty\n\t\t//\tAll fields are required\n\t\tw.WriteHeader(http.StatusUnprocessableEntity)\n\t\terr := json.NewEncoder(w).Encode(core.FourTwoTwo)\n\t\tlog.ErrorHandler(err)\n\t\tlog.AccessHandler(r, 422)\n\t\treturn\n\t}\n\n\tif !safeEmail {\n\t\t// Issue with Email\n\t\t//Email couldn't be verified or invalid email\n\t\tw.WriteHeader(http.StatusUnprocessableEntity)\n\t\terr := json.NewEncoder(w).Encode(core.FourTwoTwo)\n\t\tlog.ErrorHandler(err)\n\t\tlog.AccessHandler(r, 422)\n\t\treturn\n\t}\n\n\tif similarToUser {\n\t\t// Issue with Password\n\t\t// Password is similar to user 
information\n\t\tw.WriteHeader(http.StatusUnprocessableEntity)\n\t\terr := json.NewEncoder(w).Encode(core.FourTwoTwo)\n\t\tlog.ErrorHandler(err)\n\t\tlog.AccessHandler(r, 422)\n\t\treturn\n\t}\n\n\tif !safePassword {\n\t\t// Issue with Password\n\t\t//\tPassword doesn't go through the validator successfully\n\t\tw.WriteHeader(http.StatusUnprocessableEntity)\n\t\terr := json.NewEncoder(w).Encode(core.FourTwoTwo)\n\t\tlog.ErrorHandler(err)\n\t\tlog.AccessHandler(r, 422)\n\t\treturn\n\t}\n\n\tpasswordHash, err := generatePasswordHash(password)\n\tlog.ErrorHandler(err)\n\n\tuser = User{\n\t\tUserName: userName,\n\t\tFullName: fullName,\n\t\tAlias: alias,\n\t\tEmail: email,\n\t\tIsAdmin: false,\n\t\tPassword: passwordHash,\n\t\tLastLogin: time.Time{},\n\t\tIsActive: false,\n\t\tIsEmailVerified: false,\n\t}\n\n\t//\tfmt.Println(\"Create The Fucking User Here\")\n\n\tdb.Create(&user)\n\terr = json.NewEncoder(w).Encode(user)\n\tlog.ErrorHandler(err)\n\n\t// Create OTP to verify email by\n\t// OTP expires in 30 minutes\n\t// Stored in Redis with key new_user_otp_email\n\tverifiableToken := generateOTP()\n\terr = redisClient.Set(ctx, \"new_user_otp_\"+email, verifiableToken, 30*time.Minute).Err()\n\tlog.ErrorHandler(err)\n\n\t//payload := struct {\n\t//\tToken string\n\t//}{\n\t//\tToken: verifiableToken,\n\t//}\n\t//\n\t//var status bool\n\t//\n\t////status, err = core.SendEmailNoAttachment(email, \"OTP for Verification\", payload, \"token.txt\")\n\t//if !status {\n\t//\tw.WriteHeader(http.StatusInternalServerError)\n\t//\terr = json.NewEncoder(w).Encode(core.FiveHundred)\n\t//\tlog.ErrorHandler(err)\n\t//\tlog.AccessHandler(r, 500)\n\t//\treturn\n\t//}\n\tlog.ErrorHandler(err)\n\tlog.AccessHandler(r, 200)\n\treturn\n}", "func SignUpUser(c *gin.Context) {\n\tvar db = models.InitDB()\n\tvar userData models.User\n\terr := c.Bind(&userData)\n\tif err != nil {\n\t\treturn\n\t}\n\n\tfmt.Println(userData)\n\tif err := db.Create(&userData).Error; err != nil {\n\t\tc.JSON(200, 
gin.H{\n\t\t\t\"creation\": \"false\",\n\t\t})\n\t\treturn\n\t}\n\tc.JSON(200, gin.H{\n\t\t\"creation\": \"true\",\n\t})\n}", "func UserCreate(w http.ResponseWriter, r *http.Request) {\n\n\t// Init output\n\toutput := []byte(\"\")\n\n\t// Add content type header to the response\n\tcontentType := \"application/json\"\n\tcharset := \"utf-8\"\n\tw.Header().Add(\"Content-Type\", fmt.Sprintf(\"%s; charset=%s\", contentType, charset))\n\n\t// Grab url path variables\n\turlVars := mux.Vars(r)\n\turlUser := urlVars[\"user\"]\n\n\t// Grab context references\n\trefStr := gorillaContext.Get(r, \"str\").(stores.Store)\n\trefUserUUID := gorillaContext.Get(r, \"auth_user_uuid\").(string)\n\n\t// Read POST JSON body\n\tbody, err := ioutil.ReadAll(r.Body)\n\tif err != nil {\n\t\terr := APIErrorInvalidRequestBody()\n\t\trespondErr(w, err)\n\t\treturn\n\t}\n\n\t// Parse pull options\n\tpostBody, err := auth.GetUserFromJSON(body)\n\tif err != nil {\n\t\terr := APIErrorInvalidArgument(\"User\")\n\t\trespondErr(w, err)\n\t\tlog.Error(string(body[:]))\n\t\treturn\n\t}\n\n\tuuid := uuid.NewV4().String() // generate a new uuid to attach to the new project\n\ttoken, err := auth.GenToken() // generate a new user token\n\tcreated := time.Now().UTC()\n\t// Get Result Object\n\tres, err := auth.CreateUser(uuid, urlUser, postBody.FirstName, postBody.LastName, postBody.Organization, postBody.Description,\n\t\tpostBody.Projects, token, postBody.Email, postBody.ServiceRoles, created, refUserUUID, refStr)\n\n\tif err != nil {\n\t\tif err.Error() == \"exists\" {\n\t\t\terr := APIErrorConflict(\"User\")\n\t\t\trespondErr(w, err)\n\t\t\treturn\n\t\t}\n\n\t\tif strings.HasPrefix(err.Error(), \"duplicate\") {\n\t\t\terr := APIErrorInvalidData(err.Error())\n\t\t\trespondErr(w, err)\n\t\t\treturn\n\t\t}\n\n\t\tif strings.HasPrefix(err.Error(), \"invalid\") {\n\t\t\terr := APIErrorInvalidData(err.Error())\n\t\t\trespondErr(w, err)\n\t\t\treturn\n\t\t}\n\n\t\terr := 
APIErrGenericInternal(err.Error())\n\t\trespondErr(w, err)\n\t\treturn\n\t}\n\n\t// Output result to JSON\n\tresJSON, err := res.ExportJSON()\n\tif err != nil {\n\t\terr := APIErrExportJSON()\n\t\trespondErr(w, err)\n\t\treturn\n\t}\n\n\t// Write response\n\toutput = []byte(resJSON)\n\trespondOK(w, output)\n\n}", "func (e *env) UserSignupTokenPostHandler(w http.ResponseWriter, r *http.Request) {\n\tswitch r.Method {\n\tcase \"GET\":\n\tcase \"POST\":\n\t\tusername := r.FormValue(\"username\")\n\t\tpassword := r.FormValue(\"password\")\n\t\tgivenToken := r.FormValue(\"register_key\")\n\n\t\tisValid, userRole := e.authState.ValidateRegisterToken(givenToken)\n\n\t\tif isValid {\n\n\t\t\t// Delete the token so it cannot be reused if the token is not blank\n\t\t\t// The first user can signup without a token and is granted admin rights\n\t\t\tif givenToken != \"\" {\n\t\t\t\te.authState.DeleteRegisterToken(givenToken)\n\t\t\t}\n\n\t\t\tif userRole == auth.RoleAdmin {\n\t\t\t\terr := e.authState.NewAdmin(username, password)\n\t\t\t\tif err != nil {\n\t\t\t\t\tlog.Println(\"Error adding admin:\", err)\n\t\t\t\t\te.authState.SetFlash(\"Error adding user. Check logs.\", r)\n\t\t\t\t\thttp.Redirect(w, r, r.Referer(), http.StatusInternalServerError)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t} else if userRole == auth.RoleUser {\n\t\t\t\terr := e.authState.NewUser(username, password)\n\t\t\t\tif err != nil {\n\t\t\t\t\tlog.Println(\"Error adding user:\", err)\n\t\t\t\t\te.authState.SetFlash(\"Error adding user. 
Check logs.\", r)\n\t\t\t\t\thttp.Redirect(w, r, r.Referer(), http.StatusInternalServerError)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t}\n\n\t\t\t// Login the recently added user\n\t\t\tif e.authState.Auth(username, password) {\n\t\t\t\te.authState.Login(username, r)\n\t\t\t}\n\n\t\t\te.authState.SetFlash(\"Successfully added '\"+username+\"' user.\", r)\n\t\t\thttp.Redirect(w, r, \"/\", http.StatusSeeOther)\n\t\t} else {\n\t\t\te.authState.SetFlash(\"Registration token is invalid.\", r)\n\t\t\thttp.Redirect(w, r, \"/\", http.StatusInternalServerError)\n\t\t}\n\n\tcase \"PUT\":\n\t\t// Update an existing record.\n\tcase \"DELETE\":\n\t\t// Remove the record.\n\tdefault:\n\t\t// Give an error message.\n\t}\n}", "func CreateUser(db *gorm.DB) gin.HandlerFunc {\n\treturn func(c *gin.Context) {\n\t\t// Get the mandatory query parameters.\n\t\tname, ok := c.GetPostForm(\"name\")\n\t\tif !ok {\n\t\t\terrors.Apply(c, errors.MissingParameters)\n\t\t\treturn\n\t\t}\n\t\tusername, ok := c.GetPostForm(\"username\")\n\t\tif !ok {\n\t\t\terrors.Apply(c, errors.MissingParameters)\n\t\t\treturn\n\t\t}\n\t\tif !usernameRegexp.MatchString(username) {\n\t\t\terrors.Apply(c, errors.BadParameters)\n\t\t\treturn\n\t\t}\n\t\tpassword, ok := c.GetPostForm(\"password\")\n\t\tif !ok {\n\t\t\terrors.Apply(c, errors.MissingParameters)\n\t\t\treturn\n\t\t}\n\n\t\t// Try getting type.\n\t\tuserType, ok := c.GetPostForm(\"type\")\n\t\tif !ok {\n\t\t\tuserType = models.General\n\t\t}\n\t\tif userType != models.Admin && userType != models.Writer && userType != models.General {\n\t\t\terrors.Apply(c, errors.BadParameters)\n\t\t\treturn\n\t\t}\n\t\tif _, ok := c.Get(\"user\"); userType != models.General && !ok {\n\t\t\terrors.Apply(c, errors.NoPermission)\n\t\t\treturn\n\t\t}\n\n\t\t// Check if any users have the same username.\n\t\tvar checkUsers []models.User\n\t\terr := db.Where(\"user_name = ?\", username).\n\t\t\tFind(&checkUsers).\n\t\t\tError\n\t\tif err != nil && err != gorm.ErrRecordNotFound 
{\n\t\t\terrors.Apply(c, err)\n\t\t\treturn\n\t\t}\n\t\tif len(checkUsers) != 0 {\n\t\t\terrors.Apply(c, errors.UserExists)\n\t\t\treturn\n\t\t}\n\n\t\t// Create the user.\n\t\tuser := &models.User{\n\t\t\tType: userType,\n\t\t\tName: name,\n\t\t\tUserName: username,\n\t\t}\n\t\tif err := user.SetPassword(password); err != nil {\n\t\t\terrors.Apply(c, err)\n\t\t\treturn\n\t\t}\n\t\tif err := db.Create(user).Error; err != nil {\n\t\t\terrors.Apply(c, err)\n\t\t\treturn\n\t\t}\n\n\t\t// Respond with the user's JSON.\n\t\tc.JSON(200, user)\n\t}\n}", "func CreateUser(w http.ResponseWriter, r *http.Request) {\n\tvar user User\n\tvar b []byte\n\tr.Body.Read(b)\n\terr := json.Unmarshal(b, &user)\n\tif err != nil {\n\t\tjson.NewEncoder(w).Encode(err)\n\t}\n}", "func CreateUser(w http.ResponseWriter, r *http.Request) {\n\tr.ParseForm()\n\n\tuser, err := json.Marshal(map[string]string{\n\t\t\"name\": r.FormValue(\"name\"),\n\t\t\"email\": r.FormValue(\"email\"),\n\t\t\"nick\": r.FormValue(\"nick\"),\n\t\t\"password\": r.FormValue(\"password\"),\n\t})\n\tif err != nil {\n\t\tresponses.JSON(w, http.StatusBadRequest, responses.ErrorAPI{Err: err.Error()})\n\t\treturn\n\t}\n\n\turl := fmt.Sprintf(\"%s/users\", config.APIURL)\n\tresponse, err := http.Post(url, \"application/json\", bytes.NewBuffer(user))\n\tif err != nil {\n\t\tresponses.JSON(w, http.StatusInternalServerError, responses.ErrorAPI{Err: err.Error()})\n\t\treturn\n\t}\n\tdefer response.Body.Close()\n\n\tif response.StatusCode >= 400 {\n\t\tresponses.TreatStatusCode(w, response)\n\t\treturn\n\t}\n\n\tresponses.JSON(w, response.StatusCode, nil)\n}", "func (_this *UserHandler) CreateUser() echo.HandlerFunc {\n\treturn func(c echo.Context) error {\n\t\tvar request dtos.CreateUserRequest\n\t\tif err := c.Bind(&request); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tresp, err := _this.userService.CreateUser(&request)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\treturn c.JSON(http.StatusOK, resp)\n\t}\n}", "func (h 
*userHandler) CreatePost(w http.ResponseWriter, r *http.Request) {\r\n\tvar u post\r\n\tif err := json.NewDecoder(r.Body).Decode(&u); err != nil {\r\n\t\tinternalServerError(w, r)\r\n\t\treturn\r\n\t}\r\n\th.store.Lock()\r\n\th.store.m[u.ID] = u\r\n\th.store.Unlock()\r\n\tjsonBytes, err := json.Marshal(u)\r\n\tif err != nil {\r\n\t\tinternalServerError(w, r)\r\n\t\treturn\r\n\t}\r\n\tw.WriteHeader(http.StatusOK)\r\n\tw.Write(jsonBytes)\r\n}", "func (handler *UserHandler) Create(w http.ResponseWriter, req *http.Request, _ httprouter.Params) {\n\tpayload := &User{}\n\n\tif err := json.NewDecoder(req.Body).Decode(payload); err != nil {\n\t\thandler.Formatter.JSON(w, http.StatusBadRequest, util.NewError(\"1001\",\n\t\t\t\"Invalid JSON payload supplied.\", err.Error()))\n\t\treturn\n\t}\n\n\tif err := payload.Validate(); err != nil {\n\t\thandler.Formatter.JSON(w, http.StatusBadRequest, util.NewError(\"1002\",\n\t\t\t\"Unable to validate the payload provided.\", err.Error()))\n\t\treturn\n\t}\n\n\tuser, err := handler.UserService.CreateUser(payload)\n\n\tif err != nil {\n\t\thandler.Formatter.JSON(w, http.StatusBadRequest, util.NewError(\"1003\",\n\t\t\t\"Unable to create a new user.\", err.Error()))\n\t\treturn\n\t}\n\n\thandler.Formatter.JSON(w, http.StatusCreated, user.hidePassword())\n}", "func createUser(w http.ResponseWriter, r *http.Request) {\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\n\tvar user schema.User\n\n\t// we decode our body request params in JSON\n\t_ = json.NewDecoder(r.Body).Decode(&user)\n\n\tresult, err := users.InsertOne(context.TODO(), user)\n\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\t// we decode the recieved params in JSON\n\tjson.NewEncoder(w).Encode(result)\n}", "func (uc UserController) CreateUser(w http.ResponseWriter, r *http.Request, p httprouter.Params) {\n\t// Stub an user to be populated from the body\n\tu := models.User{}\n\n\t// Populate the user data\n\tjson.NewDecoder(r.Body).Decode(&u)\n\n\t// Add an 
Id\n\tu.Id = bson.NewObjectId()\n\n\thPass, err := bcrypt.GenerateFromPassword([]byte(u.Password), bcrypt.DefaultCost)\n\tif err != nil {\n\t\tw.WriteHeader(500)\n\t\treturn\n\t}\n\tu.HashPassword = hPass\n\t// clear the incoming text password\n\tu.Password = \"\"\n\n\t// Write the user to mongo\n\terr = uc.session.DB(\"todos\").C(\"users\").Insert(&u)\n\n\t// clear hashed password\n\tu.HashPassword = nil\n\n\t// Marshal provided interface into JSON structure\n\tuj, _ := json.Marshal(u)\n\n\t// Write content-type, statuscode, payload\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\tw.WriteHeader(201)\n\tfmt.Fprintf(w, \"%s\", uj)\n}", "func CreateUser(res http.ResponseWriter, req *http.Request) {\n\tres.Header().Set(\"Content-Type\", \"application/json\")\n\n\tvar user models.User\n\tif err := json.NewDecoder(req.Body).Decode(&user); err != nil {\n\t\tmsg := \"Error while reading input body\"\n\n\t\tutils.ReturnErrorResponse(http.StatusBadRequest, msg, \"\", nil, nil, res)\n\t\treturn\n\t}\n\n\t// Helper function to generate encrypted password hash\n\tpasswordHash := helpers.GeneratePasswordHash(user.Password)\n\n\tif passwordHash == \"\" {\n\t\tmsg := \"Error occurred while hashing the password\"\n\n\t\tutils.ReturnErrorResponse(http.StatusBadRequest, msg, \"\", nil, nil, res)\n\t\treturn\n\t}\n\n\tuser.ID = bson.NewObjectId()\n\tuser.Password = passwordHash\n\n\terr := db.CreateUser(user)\n\tif err != nil {\n\t\tmsg := \"Error occurred while creating user\"\n\n\t\tutils.ReturnErrorResponse(http.StatusBadRequest, msg, \"\", nil, nil, res)\n\t\treturn\n\t}\n\n\tmsg := \"User created successfully\"\n\tutils.ReturnSuccessReponse(http.StatusCreated, msg, user.ID, res)\n\n}", "func HandleUserRegister(context *gin.Context) {\n\n\tuserAcc := context.PostForm(\"user_acc\")\n\tuserAvatar := context.PostForm(\"user_avatar\")\n\tuserNickName := context.PostForm(\"user_nick_name\")\n\tuserPassword := context.PostForm(\"user_password\")\n\tuserPhone := 
context.PostForm(\"user_phone\")\n\tuserEmail := context.PostForm(\"user_email\")\n\tuserGender := context.PostForm(\"user_gender\")\n\tuserSign := context.PostForm(\"user_sign\")\n\n\tuserType := context.PostForm(\"user_type\")\n\tuserTypeInt, _ := strconv.Atoi(userType)\n\n\tif userAcc == \"\" || userNickName == \"\" || userPassword == \"\"{\n\t\tcontext.JSON(http.StatusBadRequest, gin.H{\n\t\t\t\"status\": \"invalid\",\n\t\t\t\"code\": http.StatusBadRequest,\n\t\t\t\"msg\": \"user_acc, user_nick_name, user_password must not be none\",\n\t\t\t\"data\": \"\",\n\t\t})\n\t}\n\tuser := models.User{\n\t\tUserAcc:userAcc,\n\t\tUserAvatar:userAvatar,\n\t\tUserNickName:userNickName,\n\t\tUserPassword:userPassword,\n\t\tUserPhone:userPhone,\n\t\tUserEmail:userEmail,\n\t\tUserGender:userGender,\n\t\tUserSign:userSign,\n\t\tUserType:models.UserType(userTypeInt),\n\t}\n\tuserTry := models.User{}\n\tif db.DB.Where(\"user_acc=?\", userAcc).First(&userTry).RecordNotFound(){\n\t\t// user not found, create it\n\t\tdb.DB.Create(&user)\n\t\tuAddr := utils.GenAddr(user.ID)\n\t\tuser.UserAddr = \"usr\" + uAddr\n\n\t\tlog.Infof(\"FUCK GenAddr: %s gened: %s\", user.UserAddr, uAddr)\n\t\tdb.DB.Save(&user)\n\n\t\t// should return a token to user, as well as login\n\t\tclaims := make(map[string]interface{})\n\t\tclaims[\"id\"] = user.ID\n\t\tclaims[\"msg\"] = \"hiding egg\"\n\t\tclaims[\"user_addr\"] = user.UserAddr\n\t\ttoken, _ := utils.Encrypt(claims)\n\t\tlog.Infof(\"Request new user: %s, it is new.\", user)\n\t\tdata := map[string]interface{}{\"token\": token, \"id\": user.ID, \"user_addr\": user.UserAddr}\n\t\tcontext.JSON(200, gin.H{\n\t\t\t\"status\": \"success\",\n\t\t\t\"code\": http.StatusOK,\n\t\t\t\"msg\": \"user register succeed.\",\n\t\t\t\"data\": data,\n\t\t})\n\t}else{\n\t\tlog.Info(\"user exist.\")\n\t\tcontext.JSON(200, gin.H{\n\t\t\t\"status\": \"conflict\",\n\t\t\t\"code\": http.StatusConflict,\n\t\t\t\"msg\": \"user already exist.\",\n\t\t\t\"data\": 
nil,\n\t\t})\n\t}\n}", "func (srv *UsersService) CreateHandler(ctx *gin.Context) {\n\tlogger := srv.logger.New(\"action\", \"CreateHandler\")\n\n\t// Checks if the query entry is valid\n\tvalidator := validators.CreateUserValidator{}\n\tif err := validator.BindJSON(ctx); err != nil {\n\t\t// Returns a \"422 StatusUnprocessableEntity\" response\n\t\tsrv.ResponseService.ValidatorErrorResponse(ctx, responses.UnprocessableEntity, err)\n\t\treturn\n\t}\n\n\t// Check permissions\n\tcurrentUser := GetCurrentUser(ctx)\n\tif hasPerm := srv.PermissionsService.CanCreateProfile(currentUser.UID, &validator.UserModel); !hasPerm {\n\t\tsrv.ResponseService.Forbidden(ctx)\n\t\treturn\n\t}\n\n\ttmpPassword := validator.UserModel.Password\n\n\t// Create new user\n\tcreatedUser, err := srv.userCreator.Create(&validator.UserModel, true, false, nil)\n\tif err != nil {\n\t\tlogger.Error(\"сan't create a user\", \"error\", err)\n\t\t// Returns a \"500 StatusInternalServerError\" response\n\t\tsrv.ResponseService.Error(ctx, responses.CanNotCreateUser, \"Can't create a user\")\n\t\treturn\n\t}\n\n\tif nil != currentUser {\n\t\tsrv.SystemLogsService.LogCreateUserProfileAsync(createdUser, currentUser.UID)\n\t}\n\t// TODO: refactor - use events, move above functionality to the event subscriber\n\tconfirmationCode, err := srv.confirmationCodeService.GenerateSetPasswordCode(createdUser)\n\tif err != nil {\n\t\tlogger.Error(\"unable to generate set_password confirmation code\")\n\t\treturn\n\t}\n\n\tif _, err = srv.notificationsService.ProfileCreated(createdUser.UID, tmpPassword, confirmationCode.Code); nil != err {\n\t\tlogger.Error(\"сan't send notification\", \"error\", err)\n\t\treturn\n\t}\n\n\t// Returns a \"201 Created\" response\n\tsrv.ResponseService.SuccessResponse(ctx, http.StatusCreated, validator.UserModel)\n}", "func (u *UserHandler) Create(c *fiber.Ctx) error {\n\tuser := models.User{}\n\terr := c.BodyParser(&user)\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = 
u.Repo.Create(user)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn c.Status(fiber.StatusOK).JSON(user)\n}", "func signupHandler(usr string, pass string) string {\n\t// query for the number of users with the passed in username\n\tquery := \"SELECT count(*) FROM users WHERE username = ?;\"\n\thashed_usr := Hash1(usr)\n\trows := QueryDB(query, hashed_usr)\n\trows.Next()\n\tvar count int\n\t// scan the result\n\terr = rows.Scan(&count)\n\tif err != nil {\n\t\tfmt.Println(\"ERROR: querying for number of users with a given username (my_server.go: signupHandler)\")\n\t\tfmt.Println(err)\n\t\treturn \"\";\n\t}\n\trows.Close()\n\t// make sure that username is unique\n\tif count == 0 {\n\t\t// generate a per user salt\n\t\tsalt := GenerateRandomString()\n\t\t// hash the password with the generated salt\n\t\thashed_pass := Hash256(pass, salt)\n\t\t// make the RC4 key for the user\n\t\tkey := GenerateRandomString()\n\t\tfor KeyExists(key) {\n\t\t\tkey = GenerateRandomString()\n\t\t}\n\t\t// insert the information into the DB\n\t\tquery := \"INSERT INTO users VALUES (?,?,?,?);\"\n\t\t// make a call to execute the query\n\t\tExecDB(query, hashed_usr, hashed_pass, salt, key)\n\t\t// mkdir for new user\n\t\tencrypted := EncryptString(hashed_usr, key)\n\t\tpath := GetBaseDir(encrypted)\n\t\tusr_info := []string{hashed_usr, hashed_pass, salt, key}\n\t\ttoken := NewUserSignIn(path, usr, usr_info)\n\t\treturn token\n\t}\n\treturn \"\"\n}", "func (a *Server) CreateUser(w http.ResponseWriter, r *http.Request) {\n\tfmt.Println(\"create a new user\")\n}", "func (a *UserApiService) UserCreatePost(ctx context.Context, body UserCreateReq) (UserResp, *http.Response, error) {\n\tvar (\n\t\tlocalVarHttpMethod = strings.ToUpper(\"Post\")\n\t\tlocalVarPostBody interface{}\n\t\tlocalVarFileName string\n\t\tlocalVarFileBytes []byte\n\t \tsuccessPayload UserResp\n\t)\n\n\t// validate body params\n\tif err := body.Validate(); err != nil {\n\t\treturn successPayload, nil, err\n\t}\n\t\n\t// create 
path and map variables\n\tlocalVarPath := a.client.cfg.BasePath + \"/user/create\"\n\n\tlocalVarHeaderParams := make(map[string]string)\n\tlocalVarQueryParams := url.Values{}\n\tlocalVarFormParams := url.Values{}\n\n\n\t// to determine the Content-Type header\n\tlocalVarHttpContentTypes := []string{ \"application/json\", }\n\n\t// set Content-Type header\n\tlocalVarHttpContentType := selectHeaderContentType(localVarHttpContentTypes)\n\tif localVarHttpContentType != \"\" {\n\t\tlocalVarHeaderParams[\"Content-Type\"] = localVarHttpContentType\n\t}\n\n\t// to determine the Accept header\n\tlocalVarHttpHeaderAccepts := []string{\n\t\t\"application/json\",\n\t\t}\n\n\t// set Accept header\n\tlocalVarHttpHeaderAccept := selectHeaderAccept(localVarHttpHeaderAccepts)\n\tif localVarHttpHeaderAccept != \"\" {\n\t\tlocalVarHeaderParams[\"Accept\"] = localVarHttpHeaderAccept\n\t}\n\t// body params\n\tlocalVarPostBody = &body\n\tif ctx != nil {\n\t\t// API Key Authentication\n\t\tif auth, ok := ctx.Value(ContextAPIKey).(APIKey); ok {\n\t\t\tvar key string\n\t\t\tif auth.Prefix != \"\" {\n\t\t\t\tkey = auth.Prefix + \" \" + auth.Key\n\t\t\t} else {\n\t\t\t\tkey = auth.Key\n\t\t\t}\n\t\t\tlocalVarHeaderParams[\"X-Auth-Token\"] = key\n\t\t}\n\t}\n\tr, err := a.client.prepareRequest(ctx, localVarPath, localVarHttpMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFileName, localVarFileBytes)\n\tif err != nil {\n\t\treturn successPayload, nil, err\n\t}\n\n\tlocalVarHttpResponse, err := a.client.callAPI(r)\n\tif err != nil || localVarHttpResponse == nil {\n\t\treturn successPayload, localVarHttpResponse, err\n\t}\n\tdefer localVarHttpResponse.Body.Close()\n\tif localVarHttpResponse.StatusCode >= 300 {\n\t\tbodyBytes, _ := ioutil.ReadAll(localVarHttpResponse.Body)\n\t\treturn successPayload, localVarHttpResponse, reportError(\"Status: %v, Body: %s\", localVarHttpResponse.Status, bodyBytes)\n\t}\n\n\tif err = 
json.NewDecoder(localVarHttpResponse.Body).Decode(&successPayload); err != nil {\n\t\treturn successPayload, localVarHttpResponse, err\n\t}\n\n\n\treturn successPayload, localVarHttpResponse, err\n}", "func (h *Handler) CreateUser(c *fiber.Ctx) error {\n\tvar service = services.NewUserService()\n\tvar usr = &user.User{}\n\tif err := c.BodyParser(usr); err != nil {\n\t\treturn c.Status(422).JSON(fiber.Map{\"status\": \"error\", \"message\": err})\n\t}\n\n\tnewUser, err := service.CreateUser(usr)\n\tif err != nil {\n\t\treturn c.Status(400).JSON(fiber.Map{\"status\": \"error\", \"message\": err.Error()})\n\t}\n\n\treturn c.JSON(fiber.Map{\"status\": \"success\", \"message\": \"Created usr\", \"data\": newUser})\n}", "func (h *Handler) CreateHandler(w http.ResponseWriter, r *http.Request) {\n\tvar u user.User\n\n\terr := json.NewDecoder(r.Body).Decode(&u)\n\tif err != nil {\n\t\th.log.Error(err)\n\t\t_ = response.HTTPError(w, http.StatusBadRequest, response.ErrorParsingUser.Error())\n\t\treturn\n\t}\n\n\tu.Role = user.Client\n\n\tctx, cancel := context.WithCancel(r.Context())\n\tdefer cancel()\n\n\tselect {\n\tcase <-ctx.Done():\n\t\t_ = response.HTTPError(w, http.StatusBadGateway, response.ErrTimeout.Error())\n\t\treturn\n\tdefault:\n\t\terr = h.service.Create(ctx, &u)\n\t}\n\n\tif err != nil {\n\t\th.log.Error(err)\n\t\t_ = response.HTTPError(w, http.StatusBadRequest, err.Error())\n\t\treturn\n\t}\n\n\tw.Header().Add(\"Location\", r.URL.String()+u.ID.Hex())\n\t_ = response.JSON(w, http.StatusCreated, response.Map{\"user\": u})\n}", "func (e *Example) PostUserAuth(ctx context.Context, req *example.Request, rsp *example.Response) error {\n\tlog.Log(\"POST /api/v1.0/user/name PutUersinfo()\")\n\n\t//创建返回空间\n\trsp.Errno= utils.RECODE_OK\n\trsp.Errmsg = utils.RecodeText(rsp.Errno)\n\n\t/*从从sessionid获取当前的userid*/\n\t//连接redis\n\tredis_config_map := map[string]string{\n\t\t\"key\": utils.G_server_name,\n\t\t\"conn\": utils.G_redis_addr + \":\" + 
utils.G_redis_port,\n\t\t\"dbNum\": utils.G_redis_dbnum,\n\t}\n\tredis_config , _ := json.Marshal(redis_config_map)\n\t//连接redis数据库 创建句柄\n\tbm, err := cache.NewCache(\"redis\", string(redis_config) )\n\tif err != nil {\n\t\tlog.Log(\"缓存创建失败\",err)\n\t\trsp.Errno = utils.RECODE_DBERR\n\t\trsp.Errmsg = utils.RecodeText(rsp.Errno)\n\t\treturn nil\n\t}\n\t//拼接key\n\tsessioniduserid := req.Sessionid + \"user_id\"\n\t//获取userid\n\tvalue_id := bm.Get(sessioniduserid)\n\tid := int(value_id.([]uint8)[0])\n\n\t//创建表对象\n\tuser := models.User{ Id: id, Real_name: req.Realname, Id_card: req.Idcard }\n\n\t//创建数据库句柄\n\to:= orm.NewOrm()\n\n\t//更新\n\t_ , err = o.Update(&user ,\"real_name\", \"id_card\")\n\tif err !=nil{\n\t\trsp.Errno= utils.RECODE_DBERR\n\t\trsp.Errmsg = utils.RecodeText(rsp.Errno)\n\t\treturn nil\n\t}\n\n\t// 更新缓存\n\tbm.Put(sessioniduserid, string(user.Id), time.Second * 600)\n\n\tlog.Log(\"更新实名认证信息成功\")\n\treturn nil\n}", "func (ctx *HandlerContext) SignUpHandler(w http.ResponseWriter, r *http.Request) {\n\tif r.Method != \"POST\" {\n\t\thttp.Error(w, \"that method is not allowed\", http.StatusMethodNotAllowed)\n\t\treturn\n\t}\n\tcontentType := r.Header.Get(\"Content-Type\")\n\tif contentType != \"application/json\" {\n\t\thttp.Error(w, \"request body must be in JSON\", http.StatusUnsupportedMediaType)\n\t\treturn\n\t}\n\tnewUser := &users.NewUser{}\n\tdec := json.NewDecoder(r.Body)\n\terr := dec.Decode(newUser)\n\tif err != nil {\n\t\thttp.Error(w, \"Bad request\", http.StatusBadRequest)\n\t\treturn\n\t}\n\n\tif err := newUser.Validate(); err != nil {\n\t\thttp.Error(w, \"Bad request\", http.StatusBadRequest)\n\t\treturn\n\t}\n\n\tuser, err := newUser.ToUser()\n\tif err != nil {\n\t\thttp.Error(w, \"Bad request\", http.StatusBadRequest)\n\t\treturn\n\t}\n\n\taddedUser, err := ctx.Users.Insert(user)\n\tif err != nil {\n\t\thttp.Error(w, \"Bad request\", http.StatusBadRequest)\n\t\treturn\n\t}\n\n\tstate := sessions.SessionState{SessionTime: time.Now(), User: 
addedUser}\n\n\t_, err2 := sessions.BeginSession(ctx.SigningKey, ctx.Store, state, w)\n\tif err2 != nil {\n\t\thttp.Error(w, \"Bad request\", http.StatusBadRequest)\n\t\treturn\n\t}\n\n\tw.WriteHeader(http.StatusCreated)\n\tw.Header().Add(\"Content-Type\", \"application/json\")\n\n\terr = json.NewEncoder(w).Encode(addedUser)\n\n\tif err != nil {\n\t\thttp.Error(w, \"Bad request\", http.StatusBadRequest)\n\t}\n\n}", "func createUser(w http.ResponseWriter, r *http.Request) {\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\tvar user User\n\tif err := json.NewDecoder(r.Body).Decode(&user); err != nil {\n\t\tpanic(err)\n\t}\n\t//Todo (Farouk): Mock ID - not safe\n\tuser.ID = strconv.Itoa(rand.Intn(1000000))\n\tusers = append(users, user)\n}", "func Handler(w http.ResponseWriter, r *http.Request) {\n\tu := user.Create()\n\n\tj, _ := json.Marshal(u)\n\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\tw.WriteHeader(http.StatusOK)\n\tw.Write([]byte(j))\n}", "func (app *App) createUser(w http.ResponseWriter, r *http.Request) {\n\tvar user users.User\n\tdecoder := json.NewDecoder(r.Body)\n\tif err := decoder.Decode(&user); err != nil {\n\t\trespondWithError(w, http.StatusInternalServerError, err.Error())\n\t\treturn\n\t}\n\tdefer r.Body.Close()\n\n\tif (user.Email == \"\") || (user.FName == \"\") || (user.LName == \"\") || (user.Role == \"\") || (user.Password == \"\") {\n\t\trespondWithError(w, http.StatusBadRequest, \"Missing fields\")\n\t\treturn\n\t}\n\n\tif !inArray(user.Role, []string{\"base\", \"admin\"}) {\n\t\trespondWithError(w, http.StatusBadRequest, \"The 'Role' field must be one of: base, admin\")\n\t\treturn\n\t}\n\n\terr := user.CreateUser(app.Db)\n\tif err != nil {\n\t\trespondWithError(w, http.StatusInternalServerError, err.Error())\n\t\treturn\n\t}\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\trespondWithJSON(w, http.StatusOK, user)\n}", "func CreateUser(c *gin.Context) {\n\n\tfmt.Println(\"Endpoint Hit: Create A new 
User\")\n\n\tuser := model.Users{}\n\n\terr := c.Bind(&user)\n\n\tif err != nil {\n\t\tfmt.Print(err)\n\t}\n\n\tfmt.Println(user.Name)\n\n\tfmt.Println(user.Email)\n\tfmt.Println(user.Password)\n\tdb, err := sql.Open(\"mysql\", \"root:password@tcp(127.0.0.1:3306)/twitter\")\n\n\tif err != nil {\n\t\tpanic(err.Error())\n\t}\n\n\t_, errQ := db.Query(\"INSERT INTO users(name, email, password) VALUES (?,?,?)\", user.Name, user.Email, user.Password)\n\n\tif errQ != nil {\n\t\tpanic(err.Error())\n\t}\n\n\tc.JSON(http.StatusOK, gin.H{\n\t\t\"Name\": user.Name,\n\t\t\"Email\": user.Email,\n\t\t\"Password\": user.Password,\n\t})\n\n\tdefer db.Close()\n\n}", "func (a *App) CreateUser(w http.ResponseWriter, r *http.Request) {\n\thandler.CreateUser(a.DB, w, r)\n}", "func (ur *UserResource) handleCreateUser(c *gin.Context) {\n\tvar u model.User\n\n\tif err := c.ShouldBindJSON(&u); err != nil {\n\t\tc.JSON(http.StatusUnprocessableEntity, errUserCreateInvalidFields)\n\t\treturn\n\t}\n\n\thash, err := auth.GeneratePassword(u.Password)\n\tif err != nil {\n\t\tlogging.Logger.Errorln(\"[API] Failed to generate hash from password\", err)\n\t\tc.JSON(http.StatusBadRequest, errUserCreateGeneric)\n\t\treturn\n\t}\n\tu.Password = hash\n\n\tnewUser, err := ur.Store.CreateUser(u)\n\tif err != nil {\n\t\tc.JSON(http.StatusBadRequest, errUserCreateGeneric)\n\t\treturn\n\t}\n\n\tnewUser.Password = \"\"\n\tc.JSON(http.StatusCreated, newUser)\n}", "func (e *Example) PostUserAuth(ctx context.Context, req *example.Request, rsp *example.Response) error {\n\tbeego.Info(\" 实名认证 Postuserauth api/v1.0/user/auth \")\n\n\t//创建返回空间\n\trsp.Errno = utils.RECODE_OK\n\trsp.Errmsg = utils.RecodeText(rsp.Errno)\n\n\t/*从session中获取我们的user_id*/\n\t//连接redis数据库\n\tbm, err := utils.GetRedisConnector()\n\n\tuserInfo_redis := bm.Get(req.SessionId)\n\tuserInfo_string, _ := redis.String(userInfo_redis, nil)\n\tuserOld := models.User{}\n\tjson.Unmarshal([]byte(userInfo_string), &userOld)\n\n\t//创建user对象\n\tuser := 
models.User{Uid: userOld.Uid, Real_name: req.RealName, Id_card: req.IdCard}\n\t/*更新user表中的 姓名和 身份号*/\n\to := orm.NewOrm()\n\t//更新表\n\t_, err = o.Update(&user, \"real_name\", \"id_card\")\n\tif err != nil {\n\t\trsp.Errno = utils.RECODE_DBERR\n\t\trsp.Errmsg = utils.RecodeText(rsp.Errno)\n\t\treturn nil\n\t}\n\n\t//更新缓存\n\tuserOld.Real_name = req.RealName\n\tuserOld.Id_card = req.IdCard\n\tuserInfo, _ := json.Marshal(userOld)\n\tbm.Put(req.SessionId, userInfo, time.Second*600)\n\treturn nil\n}", "func RegisterHandler(w http.ResponseWriter, r *http.Request) {\n\tr.ParseForm()\n\tusername := r.PostFormValue(\"username\")\n\temail := r.PostFormValue(\"email\")\n\tpassword := r.PostFormValue(\"password\")\n\tuser, err := models.RegisterUser(username, email, password)\n\tif err != nil {\n\t\tlog.Print(err)\n\t} else {\n\t\tlog.Print(user)\n\t}\n}", "func Register(w http.ResponseWriter, r *http.Request) {\n\tt:= models.Users{}\n\n\terr := json.NewDecoder(r.Body).Decode(&t)\n\n\tif err != nil {\n\t\thttp.Error(w, \"Error en los datos recibidos \"+err.Error(), 400)\n\t\treturn\n\t}\n\tif len(t.Login) < 6 {\n\t\thttp.Error(w, \"Error en los datos recibidos, ingrese un login mayor a 5 digitos \", 400)\n\t\treturn\n\t}\n\tif len(t.Password) < 6 {\n\t\thttp.Error(w, \"Ingrese una contraseña mayor a 5 digitos \", 400)\n\t\treturn\n\t}\n\n\t_, found, _ := bd.CheckUser(t.Login)\n\tif found == true {\n\t\thttp.Error(w, \"Ya existe un usuario registrado con ese login\", 400)\n\t\treturn\n\t}\n\n\tif t.Id_role == 3 {\n\t\tcod := bd.CodFamiliar(t.Cod_familiar)\n\t\tif cod == false {\n\t\t\thttp.Error(w, \"Debe ingresar un codigo de familia correcto\", 400)\n\t\t\treturn\n\t\t}\n\t}\n\n\tif t.Id_role == 1 {\n\t\thttp.Error(w, \"Usted no esta autorizado para crear este tipo de usuario\", 400)\n\t\treturn\n\t}\n\n\t_, status, err := bd.InsertRegister(t)\n\tif err != nil {\n\t\thttp.Error(w, \"Ocurrió un error al intentar realizar el registro de usuario \"+err.Error(), 
400)\n\t\treturn\n\t}\n\n\tif status == false {\n\t\thttp.Error(w, \"No se ha logrado insertar el registro del usuario\", 400)\n\t\treturn\n\t}\n\n\tw.WriteHeader(http.StatusCreated)\n}", "func signupHandler(w http.ResponseWriter, r *http.Request) {\n\tif r.Method == \"POST\" {\n\t\tdecoder := json.NewDecoder(r.Body)\n\t\tvar u user\n\t\terr := decoder.Decode(&u)\n\t\tif err != nil {\n\t\t\tlog.Fatalln(err.Error())\n\t\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\t\tfmt.Fprintf(w, err.Error())\n\t\t} else {\n\t\t\tdb.Table(\"user\").Insert(u).Run(session)\n\t\t\tfmt.Fprintf(w, \"Hello from api.\")\n\t\t}\n\t} else {\n\t\tw.WriteHeader(http.StatusNotFound)\n\t\tfmt.Fprintf(w, \"no such method\")\n\t}\n}", "func (uh *UserHandler) Register(w http.ResponseWriter, r *http.Request) {\n\n\tvar userHolder *entity.User\n\tvar password string\n\n\tif r.Method == http.MethodGet {\n\t\tuh.CSRF, _ = stringTools.GenerateRandomBytes(30)\n\t\ttoken, err := stringTools.CSRFToken(uh.CSRF)\n\t\tif err != nil {\n\t\t\thttp.Error(w, http.StatusText(http.StatusInternalServerError), http.StatusInternalServerError)\n\t\t}\n\t\tinputContainer := InputContainer{CSRF: token}\n\t\tuh.Temp.ExecuteTemplate(w, \"SignUp.html\", inputContainer)\n\t\treturn\n\t}\n\n\tif r.Method == http.MethodPost {\n\n\t\tthirdParty := r.FormValue(\"thirdParty\")\n\t\tvar identification entity.Identification\n\t\tfirstname := r.FormValue(\"firstname\")\n\t\tlastname := r.FormValue(\"lastname\")\n\t\temail := r.FormValue(\"email\")\n\t\tidentification.ConfirmPassword = r.FormValue(\"confirmPassword\")\n\n\t\tif thirdParty == \"true\" {\n\n\t\t\tif r.FormValue(\"serverAUT\") != ServerAUT {\n\t\t\t\thttp.Error(w, \"Invalid server key\", http.StatusBadRequest)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tidentification.From = r.FormValue(\"from\")\n\t\t\tidentification.TpFlag = true\n\t\t} else {\n\t\t\tpassword = r.FormValue(\"password\")\n\t\t\tidentification.ConfirmPassword = 
r.FormValue(\"confirmPassword\")\n\t\t}\n\n\t\t// Validating CSRF Token\n\t\tcsrfToken := r.FormValue(\"csrf\")\n\t\tok, errCRFS := stringTools.ValidCSRF(csrfToken, uh.CSRF)\n\n\t\tuserHolder = entity.NewUserFR(firstname, lastname, email, password)\n\t\terrMap := uh.UService.Verification(userHolder, identification)\n\t\tif !ok || errCRFS != nil {\n\t\t\tif len(errMap) == 0 {\n\t\t\t\terrMap = make(map[string]string)\n\t\t\t}\n\t\t\terrMap[\"csrf\"] = \"Invalid token used!\"\n\t\t}\n\t\tif len(errMap) > 0 {\n\t\t\tuh.CSRF, _ = stringTools.GenerateRandomBytes(30)\n\t\t\ttoken, _ := stringTools.CSRFToken(uh.CSRF)\n\t\t\tinputContainer := InputContainer{Error: errMap, CSRF: token}\n\t\t\tuh.Temp.ExecuteTemplate(w, \"SignUp.html\", inputContainer)\n\t\t\treturn\n\t\t}\n\n\t\tif identification.TpFlag {\n\n\t\t\tnewSession := uh.configSess()\n\t\t\tclaims := stringTools.Claims(email, newSession.Expires)\n\t\t\tsession.Create(claims, newSession, w)\n\t\t\t_, err := uh.SService.StoreSession(newSession)\n\n\t\t\tif err != nil {\n\t\t\t\thttp.Error(w, http.StatusText(http.StatusInternalServerError), http.StatusInternalServerError)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\thttp.Redirect(w, r, \"/Dashboard\", http.StatusSeeOther)\n\t\t}\n\n\t\tuh.Temp.ExecuteTemplate(w, \"CheckEmail.html\", nil)\n\t\treturn\n\t}\n}", "func CreateUser(db *gorm.DB, w http.ResponseWriter, r *http.Request) {\n\tuser := models.AppUser{}\n\tdecoder := json.NewDecoder(r.Body)\n\tdefer r.Body.Close()\n\tdecoder.DisallowUnknownFields()\n\tif err := decoder.Decode(&user); err != nil {\n\t\trespondJSON(w, http.StatusBadRequest, JSONResponse{models.AppUser{}, \"Erro interno del servidor\"})\n\t\treturn\n\t}\n\tuserTemp := getUserOrNull(db, user.AppUserID, w, r)\n\tif userTemp != nil {\n\t\trespondJSON(w, http.StatusBadRequest, JSONResponse{models.AppUser{}, \"Ya existe un usuario con este ID\"})\n\t\treturn\n\t}\n\t//hashing the password\n\tpass := user.AppUserPassword\n\thashPass, err := 
bcrypt.GenerateFromPassword([]byte(pass), 10)\n\tif err != nil {\n\t\trespondJSON(w, http.StatusInternalServerError, JSONResponse{models.AppUser{}, \"Error Interno del servidor\"})\n\t\treturn\n\t}\n\ts := bytes.NewBuffer(hashPass).String()\n\tuser.AppUserPassword = s\n\t//end hashing\n\n\tif result := db.Create(&user); result.Error != nil || result.RowsAffected == 0 {\n\t\tif result.Error != nil {\n\t\t\trespondJSON(w, http.StatusBadRequest, JSONResponse{models.AppUser{}, err.Error()})\n\t\t\treturn\n\t\t}\n\t\trespondJSON(w, http.StatusInternalServerError, JSONResponse{models.AppUser{}, \"Error No se pudo realizar el registro\"})\n\t\treturn\n\t}\n\trespondJSON(w, http.StatusCreated, JSONResponse{user, \"Registro realizado\"})\n}", "func CreateUser(c *gin.Context) {\n\ttype result struct {\n\t\tFirstName string `json:\"first_name\"`\n\t\tLastName string `json:\"last_name\"`\n\t\tEmail string `json:\"email\"`\n\t\tPassword string `json:\"password\"`\n\t\tDateOfBirth string `json:\"birth_date\"`\n\t}\n\tUserParams := result{}\n\n\terr := c.ShouldBindJSON(&UserParams)\n\tlayout := \"2006-01-02\"\n\tstr := UserParams.DateOfBirth\n\tt, er := time.Parse(layout, str)\n\n\tif er != nil {\n\t\tfmt.Println(er)\n\t}\n\n\tvar user model.User\n\tif err != nil {\n\t\tc.JSON(http.StatusBadRequest, err)\n\t\treturn\n\t}\n\tif len(UserParams.Password) == 0 {\n\t\tfmt.Println(\"err2\")\n\t\tlog.Println(err)\n\t\tc.JSON(http.StatusBadRequest, \"No given password\")\n\t\treturn\n\t}\n\tif age.Age(t) < 18 {\n\t\tlog.Println(err)\n\t\tc.JSON(http.StatusBadRequest, \"You are not adult!\")\n\t\treturn\n\t}\n\tif !db.Where(\"email = ?\", UserParams.Email).Find(&user).RecordNotFound() {\n\t\tc.JSON(http.StatusBadRequest, \"User with this email already exist\")\n\t\treturn\n\t}\n\tid := uuid.NewV4()\n\t// 1 = single user; 2 = admin\n\tuser.AccessLevel = 1\n\tuser.UUID = id.String()\n\tvar hash = hashPassword(UserParams.Password)\n\tuser.Password = hash\n\tuser.FirstName = 
UserParams.FirstName\n\tuser.LastName = UserParams.LastName\n\tuser.Email = UserParams.Email\n\tuser.DateOfBirth = t\n\tdb.Create(&user)\n\tuser.Password = \"\"\n\tc.JSON(200, &user)\n}", "func CreateNewUser(w http.ResponseWriter, r *http.Request) {\n\tfLog := userMgmtLogger.WithField(\"func\", \"CreateNewUser\").WithField(\"RequestID\", r.Context().Value(constants.RequestID)).WithField(\"path\", r.URL.Path).WithField(\"method\", r.Method)\n\n\tiauthctx := r.Context().Value(constants.HansipAuthentication)\n\tif iauthctx == nil {\n\t\thelper.WriteHTTPResponse(r.Context(), w, http.StatusUnauthorized, \"You are not authorized to access this resource\", nil, nil)\n\t\treturn\n\t}\n\n\tfLog.Trace(\"Creating new user\")\n\treq := &CreateNewUserRequest{}\n\tbody, err := ioutil.ReadAll(r.Body)\n\tif err != nil {\n\t\tfLog.Errorf(\"ioutil.ReadAll got %s\", err.Error())\n\t\thelper.WriteHTTPResponse(r.Context(), w, http.StatusInternalServerError, err.Error(), nil, nil)\n\t\treturn\n\t}\n\terr = json.Unmarshal(body, req)\n\tif err != nil {\n\t\tfLog.Errorf(\"json.Unmarshal got %s\", err.Error())\n\t\thelper.WriteHTTPResponse(r.Context(), w, http.StatusBadRequest, err.Error(), nil, nil)\n\t\treturn\n\t}\n\tisValidPassphrase := passphrase.Validate(req.Passphrase, config.GetInt(\"security.passphrase.minchars\"), config.GetInt(\"security.passphrase.minwords\"), config.GetInt(\"security.passphrase.mincharsinword\"))\n\tif !isValidPassphrase {\n\t\tfLog.Errorf(\"Passphrase invalid\")\n\t\tinvalidMsg := fmt.Sprintf(\"Invalid passphrase. 
Passphrase must at least has %d characters and %d words and for each word have minimum %d characters\", config.GetInt(\"security.passphrase.minchars\"), config.GetInt(\"security.passphrase.minwords\"), config.GetInt(\"security.passphrase.mincharsinword\"))\n\t\thelper.WriteHTTPResponse(r.Context(), w, http.StatusBadRequest, \"invalid passphrase\", nil, invalidMsg)\n\t\treturn\n\t}\n\tuser, err := UserRepo.CreateUserRecord(r.Context(), req.Email, req.Passphrase)\n\tif err != nil {\n\t\tfLog.Errorf(\"UserRepo.CreateUserRecord got %s\", err.Error())\n\t\thelper.WriteHTTPResponse(r.Context(), w, http.StatusBadRequest, err.Error(), nil, nil)\n\t\treturn\n\t}\n\tresp := &CreateNewUserResponse{\n\t\tRecordID: user.RecID,\n\t\tEmail: user.Email,\n\t\tEnabled: user.Enabled,\n\t\tSuspended: user.Suspended,\n\t\tLastSeen: user.LastSeen,\n\t\tLastLogin: user.LastLogin,\n\t\tTotpEnabled: user.Enable2FactorAuth,\n\t}\n\tfLog.Warnf(\"Sending email\")\n\tmailer.Send(r.Context(), &mailer.Email{\n\t\tFrom: config.Get(\"mailer.from\"),\n\t\tFromName: config.Get(\"mailer.from.name\"),\n\t\tTo: []string{user.Email},\n\t\tCc: nil,\n\t\tBcc: nil,\n\t\tTemplate: \"EMAIL_VERIFY\",\n\t\tData: user,\n\t})\n\n\thelper.WriteHTTPResponse(r.Context(), w, http.StatusOK, \"Success creating user\", nil, resp)\n\treturn\n}" ]
[ "0.78383803", "0.7456648", "0.74130136", "0.73975974", "0.7373872", "0.73664296", "0.73215455", "0.7290208", "0.728816", "0.7268887", "0.7192617", "0.71799153", "0.71711975", "0.71696395", "0.71660227", "0.7104086", "0.710336", "0.7103203", "0.7039367", "0.7038086", "0.70310396", "0.70294183", "0.69984925", "0.6976975", "0.69390804", "0.69234383", "0.69204587", "0.6880102", "0.68657106", "0.68530154", "0.6817111", "0.6809694", "0.6792934", "0.67825264", "0.6779558", "0.6770838", "0.67498994", "0.6748949", "0.67466027", "0.67413694", "0.67213947", "0.6718605", "0.6714131", "0.6705692", "0.66797507", "0.6666807", "0.66649276", "0.6662693", "0.6630949", "0.6617591", "0.6590869", "0.6587597", "0.65794027", "0.65771884", "0.6576558", "0.657552", "0.6573201", "0.6571512", "0.6570066", "0.65632975", "0.6555752", "0.655369", "0.6553161", "0.65505695", "0.65497804", "0.65485877", "0.6545178", "0.65358114", "0.65348804", "0.6526257", "0.6511398", "0.6508399", "0.6497926", "0.64954615", "0.6485888", "0.6478527", "0.6477599", "0.646827", "0.644814", "0.64429504", "0.64381456", "0.64326304", "0.6430453", "0.6421207", "0.6417862", "0.6411681", "0.6408126", "0.6401768", "0.63938856", "0.6393648", "0.6389857", "0.63872993", "0.6383413", "0.637845", "0.6366463", "0.63662153", "0.6362731", "0.6349646", "0.6343867", "0.63395303" ]
0.75723433
1
PutUserHandler Actualiza un usuario en base al id
PutUserHandler Обновляет пользователя по идентификатору
func PutUserHandler(w http.ResponseWriter, r *http.Request) { params := mux.Vars(r) k := params["id"] var userupdate User err := json.NewDecoder(r.Body).Decode(&userupdate) if err != nil { panic(err) } if user, ok := Listusers[k]; ok { userupdate.CreateAt = user.CreateAt delete(Listusers, k) Listusers[k] = userupdate } else { log.Printf("No encontramos el id %s", k) } w.WriteHeader(http.StatusNoContent) }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func PutUserHandler(w http.ResponseWriter, r *http.Request) {\n\tid := mux.Vars(r)[\"id\"]\n\tvar userUpdate models.User\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\n\terr := json.NewDecoder(r.Body).Decode(&userUpdate)\n\tif err != nil {\n\t\tlog.Printf(\"Error al parsear usuario con el id %s\", id)\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\treturn\n\t}\n\tstatus := users.PutUser(id, userUpdate)\n\tw.WriteHeader(status)\n}", "func (auh *AdminUserHandler) PutUser(w http.ResponseWriter, r *http.Request, ps httprouter.Params) {\n\n\tvar apiKey = r.Header.Get(\"api-key\")\n\tif apiKey == \"\" || (apiKey != adminApiKey && apiKey != userApiKey) {\n\t\tw.Header().Set(\"Content-Type\", \"application/json\")\n\t\thttp.Error(w, http.StatusText(http.StatusForbidden), http.StatusForbidden)\n\t\treturn\n\t}\n\tid, err := strconv.Atoi(ps.ByName(\"id\"))\n\tif err != nil {\n\t\tw.Header().Set(\"Content-Type\", \"application/json\")\n\t\thttp.Error(w, http.StatusText(http.StatusNotFound), http.StatusNotFound)\n\t\treturn\n\t}\n\n\tuser, errs := auh.userService.User(uint(id))\n\n\tif len(errs) > 0 {\n\t\tw.Header().Set(\"Content-Type\", \"application/json\")\n\t\thttp.Error(w, http.StatusText(http.StatusNotFound), http.StatusNotFound)\n\t\treturn\n\t}\n\n\tl := r.ContentLength\n\n\tbody := make([]byte, l)\n\n\tr.Body.Read(body)\n\n\tjson.Unmarshal(body, &user)\n\n\tuser, errs = auh.userService.UpdateUser(user)\n\n\tif len(errs) > 0 {\n\t\tw.Header().Set(\"Content-Type\", \"application/json\")\n\t\thttp.Error(w, http.StatusText(http.StatusNotFound), http.StatusNotFound)\n\t\treturn\n\t}\n\n\toutput, err := json.MarshalIndent(user, \"\", \"\\t\")\n\n\tif err != nil {\n\t\tw.Header().Set(\"Content-Type\", \"application/json\")\n\t\thttp.Error(w, http.StatusText(http.StatusNotFound), http.StatusNotFound)\n\t\treturn\n\t}\n\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\tw.Write(output)\n\treturn\n}", "func updateUserByIDHandler(c *gin.Context) 
{\n\tid, _ := strconv.Atoi(c.Param(\"id\"))\n\tuser, _ := c.Get(JwtIdentityKey)\n\n\t// Role check.\n\tif !isAdmin(user) {\n\t\tc.JSON(http.StatusUnauthorized, gin.H{\"message\": \"unauthorized\"})\n\t\treturn\n\t}\n\n\t// Decode json.\n\tvar json userUpdateRequest\n\tif err := c.ShouldBindJSON(&json); err != nil {\n\t\tc.JSON(http.StatusBadRequest, gin.H{\"error\": err.Error()})\n\t\treturn\n\t}\n\n\tdb := data.New()\n\tu, err := db.Users.GetUserByID(id)\n\tif err != nil {\n\t\tc.JSON(http.StatusNotFound, gin.H{\n\t\t\t\"status\": http.StatusNotFound,\n\t\t\t\"message\": \"User does not exist\",\n\t\t})\n\t\treturn\n\t}\n\n\t// Disallow updates on master user.\n\tif id != 1 {\n\t\t// Set role.\n\t\tif json.Role != \"\" {\n\t\t\tu.Role = json.Role\n\t\t}\n\n\t\t// Set active status.\n\t\tu.Active = json.Active\n\t}\n\n\tupdatedUser, _ := db.Users.UpdateUserByID(id, u)\n\tc.JSON(http.StatusOK, updatedUser)\n}", "func UpdateUserHandler(connection *sql.DB, cnf config.Config) negroni.HandlerFunc {\n\treturn negroni.HandlerFunc(func(w http.ResponseWriter, r *http.Request, next http.HandlerFunc) {\n\t\tvar queryToken = r.URL.Query().Get(\"token\")\n\n\t\tif len(queryToken) < 1 {\n\t\t\tqueryToken = r.Header.Get(\"token\")\n\t\t}\n\n\t\tif len(queryToken) < 1 {\n\t\t\tw.WriteHeader(http.StatusBadRequest)\n\t\t\tw.Write([]byte(string(\"token is mandatory\")))\n\t\t\treturn\n\t\t}\n\n\t\tuser := &models.UserResponse{}\n\t\terr := util.RequestToJSON(r, user)\n\t\tif err != nil {\n\t\t\tutil.SendBadRequest(w, errors.New(\"bad json\"))\n\t\t\treturn\n\t\t}\n\n\t\tsecretKey := cnf.SecretKey\n\t\ttok, err := jwt.Parse(queryToken, func(t *jwt.Token) (interface{}, error) {\n\t\t\treturn []byte(secretKey), nil\n\t\t})\n\t\tif err != nil {\n\t\t\tutil.SendBadRequest(w, err)\n\t\t\treturn\n\t\t}\n\n\t\tclaims := tok.Claims.(jwt.MapClaims)\n\t\tvar ID = claims[\"sub\"].(float64) // gets the ID\n\n\t\tif int64(ID) != user.ID {\n\t\t\tutil.SendBadRequest(w, errors.New(\"you can only 
change your own user object\"))\n\t\t\treturn\n\t\t}\n\n\t\tif err := user.Validate(); err == nil {\n\n\t\t\tdb.UpdateUser(connection, user)\n\n\t\t\tutil.SendOK(w, user)\n\n\t\t} else {\n\t\t\tutil.SendBadRequest(w, err)\n\t\t}\n\t})\n}", "func UpdateUser(w http.ResponseWriter, r *http.Request) {\n\tparams := mux.Vars(r)\n\tuserID, err := strconv.ParseInt(params[\"id\"], 10, 64)\n\tif err != nil {\n\t\tresponses.Error(w, http.StatusInternalServerError, err)\n\t\treturn\n\t}\n\n\tuserIDToken, err := authentication.ExtractUserId(r)\n\tif err != nil {\n\t\tresponses.Error(w, http.StatusUnauthorized, err)\n\t\treturn\n\t}\n\n\tif userIDToken != userID {\n\t\tresponses.Error(w, http.StatusForbidden, errors.New(\"não é possível manipular usuário de terceiros\"))\n\t\treturn\n\t}\n\n\tbodyRequest, err := ioutil.ReadAll(r.Body)\n\tif err != nil {\n\t\tresponses.Error(w, http.StatusUnprocessableEntity, err)\n\t\treturn\n\t}\n\n\tvar user models.User\n\tif err := json.Unmarshal(bodyRequest, &user); err != nil {\n\t\tresponses.Error(w, http.StatusInternalServerError, err)\n\t\treturn\n\t}\n\n\tif err := user.Prepare(false); err != nil {\n\t\tresponses.Error(w, http.StatusBadRequest, err)\n\t\treturn\n\t}\n\n\tuser.Id = userID\n\tif err := validateUniqueDataUser(user, false); err != nil {\n\t\tresponses.Error(w, http.StatusBadRequest, err)\n\t\treturn\n\t}\n\n\tdb, err := database.Connect()\n\tif err != nil {\n\t\tresponses.Error(w, http.StatusInternalServerError, err)\n\t\treturn\n\t}\n\tdefer db.Close()\n\n\trepository := repository.NewRepositoryUser(db)\n\tif err = repository.UpdateUser(userID, user); err != nil {\n\t\tresponses.Error(w, http.StatusInternalServerError, err)\n\t\treturn\n\t}\n\n\tresponses.JSON(w, http.StatusNoContent, nil)\n\n}", "func (h *UserHandler) Update(w http.ResponseWriter, r *http.Request) {\n\tvars := mux.Vars(r)\n\tidStr := vars[\"id\"]\n\tid, err := strconv.ParseUint(idStr, 10, 64)\n\tif err != nil {\n\t\tlog.Println(errors.Wrapf(err, \"error 
parse uint:%v\", idStr))\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\treturn\n\t}\n\n\tlog.Printf(\"/users/%d PUT handled\", id)\n\n\treq := &UpdateRequest{}\n\tif err := util.ScanRequest(r, req); err != nil {\n\t\tlog.Println(err)\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\treturn\n\t}\n\n\tuser := &schema.User{\n\t\tID: id,\n\t\tName: req.Name,\n\t}\n\n\tif err := h.model.Validate(user); err != nil {\n\t\tlog.Println(err)\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\treturn\n\t}\n\n\tres, err := h.model.Update(user)\n\tif err != nil {\n\t\tlog.Println(err)\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\tif err := util.JSONWrite(w, res, http.StatusOK); err != nil {\n\t\tlog.Println(err)\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t}\n}", "func (u *UserResource) updateUser(request *restful.Request, response *restful.Response) {\n\tusr := new(User)\n\terr := request.ReadEntity(&usr)\n\tif err == nil {\n\t\tdb.WLock()\n\t\tdefer db.WUnlock() //unlock when exit this method\n\n\t\tif _, err = db.Engine.Id(usr.ID).Update(usr); err != nil {\n\t\t\tresponse.WriteHeaderAndEntity(http.StatusInternalServerError, UsersResponse{Error: err.Error()})\n\t\t} else {\n\t\t\tresponse.WriteEntity(UsersResponse{Success: true})\n\t\t}\n\t} else {\n\t\tresponse.WriteHeaderAndEntity(http.StatusInternalServerError, UsersResponse{Error: err.Error()})\n\t}\n}", "func UpdateUser(c *gin.Context) {}", "func UpdateHandler(db *sql.DB) http.HandlerFunc {\n\treturn func(w http.ResponseWriter, r *http.Request) {\n\t\tb, err := ioutil.ReadAll(r.Body)\n\t\tif err != nil {\n\t\t\tfmt.Fprintf(w, \"%v\\n\", err.Error())\n\t\t\treturn\n\t\t}\n\t\tdefer r.Body.Close()\n\n\t\tvar u User\n\t\tif err := json.Unmarshal(b, &u); err != nil {\n\t\t\tfmt.Fprintf(w, \"%v\\n\", err.Error())\n\t\t\treturn\n\t\t}\n\n\t\tvars := mux.Vars(r)\n\t\tu.ID = vars[\"id\"]\n\n\t\tuser, err := update(db, u)\n\t\tif err != nil {\n\t\t\tfmt.Fprintf(w, \"%v\\n\", 
err.Error())\n\t\t\treturn\n\t\t}\n\n\t\tjson.NewEncoder(w).Encode(user)\n\t\treturn\n\t}\n}", "func (h *Handler) EditUser(c *fiber.Ctx) error {\n\tservice := services.NewUserService()\n\tid, err := strconv.ParseInt(c.Params(\"id\"), 10, 32)\n\n\tif err != nil {\n\t\treturn c.Status(400).JSON(fiber.Map{\"status\": \"error\", \"message\": err.Error()})\n\t}\n\n\tvar usr user.User\n\tif err := c.BodyParser(&usr); err != nil {\n\t\treturn c.Status(422).JSON(fiber.Map{\"status\": \"error\", \"message\": \"Invalid fields\"})\n\t}\n\n\terr = service.UpdateUser(&usr, int(id))\n\n\tif err != nil {\n\t\treturn c.Status(500).JSON(fiber.Map{\"status\": \"error\", \"message\": err.Error()})\n\t}\n\n\treturn c.JSON(fiber.Map{\"status\": \"success\", \"message\": \"UpdatedUser\", \"data\": usr})\n}", "func UpdateUserHandler(w http.ResponseWriter, r *http.Request, db *gorp.DbMap, c *businesscontext.Ctx) error {\n\tvars := mux.Vars(r)\n\tusername := vars[\"username\"]\n\n\tif !c.User.Admin && username != c.User.Username {\n\t\treturn WriteJSON(w, r, nil, http.StatusForbidden)\n\t}\n\n\tuserDB, errload := user.LoadUserWithoutAuth(db, username)\n\tif errload != nil {\n\t\treturn sdk.WrapError(errload, \"getUserHandler: Cannot load user from db\")\n\t}\n\n\tvar userBody sdk.User\n\tif err := UnmarshalBody(r, &userBody); err != nil {\n\t\treturn err\n\t}\n\n\tuserBody.ID = userDB.ID\n\n\tif !user.IsValidEmail(userBody.Email) {\n\t\treturn sdk.WrapError(sdk.ErrWrongRequest, \"updateUserHandler: Email address %s is not valid\", userBody.Email)\n\t}\n\n\tif err := user.UpdateUser(db, userBody); err != nil {\n\t\treturn sdk.WrapError(err, \"updateUserHandler: Cannot update user table\")\n\t}\n\n\treturn WriteJSON(w, r, userBody, http.StatusOK)\n}", "func (handler *Handler) handleUserActivationPut(w http.ResponseWriter, r *http.Request) {\n\n\t//Define a local struct to get the email out of the request\n\ttype ActivationGet struct {\n\t\tEmail string `json:\"email\"`\n\t\tActToken string 
`json:\"activation_token\"`\n\t}\n\n\t//Create a new password change object\n\tinfo := ActivationGet{}\n\n\t//Now get the json info\n\terr := json.NewDecoder(r.Body).Decode(&info)\n\tif err != nil {\n\t\tutils.ReturnJsonError(w, http.StatusUnprocessableEntity, err)\n\t\treturn\n\n\t}\n\n\t//Lookup the user id\n\tuser, err := handler.userHelper.GetUserByEmail(info.Email)\n\n\t//Return the error\n\tif err != nil {\n\t\tutils.ReturnJsonStatus(w, http.StatusForbidden, false, \"activation_forbidden\")\n\t\treturn\n\t}\n\n\t//Try to use the token\n\trequestId, err := handler.userHelper.CheckForActivationToken(user.Id(), info.ActToken)\n\n\t//Return the error\n\tif err != nil {\n\t\tutils.ReturnJsonStatus(w, http.StatusForbidden, false, \"activation_forbidden\")\n\t\treturn\n\t}\n\t//Now activate the user\n\terr = handler.userHelper.ActivateUser(user)\n\n\t//Return the error\n\tif err != nil {\n\t\tutils.ReturnJsonError(w, http.StatusForbidden, err)\n\t\treturn\n\t}\n\t//Mark the request as used\n\terr = handler.userHelper.UseToken(requestId)\n\n\t//Check to see if the user was created\n\tif err == nil {\n\t\tutils.ReturnJsonStatus(w, http.StatusAccepted, true, \"user_activated\")\n\t} else {\n\t\tutils.ReturnJsonError(w, http.StatusForbidden, err)\n\t}\n}", "func (h *HTTPClientHandler) addUserHandler(w http.ResponseWriter, r *http.Request) {\n\tw.Header().Add(\"Access-Control-Allow-Origin\", ServerName)\n\t// adding new user to database\n\tvar userRequest User\n\n\tdefer r.Body.Close()\n\tbody, err := ioutil.ReadAll(r.Body)\n\n\tif err != nil {\n\t\t// failed to read response body\n\t\tlog.WithFields(log.Fields{\n\t\t\t\"error\": err.Error(),\n\t\t}).Error(\"Could not read response body!\")\n\t\thttp.Error(w, \"Failed to read request body.\", 400)\n\t\treturn\n\t}\n\n\terr = json.Unmarshal(body, &userRequest)\n\n\tif err != nil {\n\t\tw.Header().Set(\"Content-Type\", \"application/json; charset=UTF-8\")\n\t\tw.WriteHeader(422) // can't process this 
entity\n\t\treturn\n\t}\n\n\tlog.WithFields(log.Fields{\n\t\t\"firstName\": userRequest.FirstName,\n\t\t\"lastName\": userRequest.LastName,\n\t\t\"userID\": userRequest.UserID,\n\t\t\"profilePicUrl\": userRequest.ProfilePicUrl,\n\t\t\"gender\": userRequest.Gender,\n\t\t\"body\": string(body),\n\t}).Info(\"Got user info\")\n\n\t// adding user\n\terr = h.db.addUser(userRequest)\n\n\tif err == nil {\n\t\tw.Header().Set(\"Content-Type\", \"application/json; charset=UTF-8\")\n\t\tw.WriteHeader(201) // user inserted\n\t\treturn\n\t} else {\n\t\tlog.WithFields(log.Fields{\n\t\t\t\"error\": err.Error(),\n\t\t}).Warn(\"Failed to insert..\")\n\n\t\tcontent, code := responseDetailsFromMongoError(err)\n\n\t\t// Marshal provided interface into JSON structure\n\t\tuj, _ := json.Marshal(content)\n\n\t\t// Write content-type, statuscode, payload\n\t\twriteJsonResponse(w, &uj, code)\n\n\t}\n\n}", "func (handler *Handler) handleUserUpdate(w http.ResponseWriter, r *http.Request) {\n\n\t//We have gone through the auth, so we should know the id of the logged in user\n\tloggedInUser := r.Context().Value(\"user\").(int) //Grab the id of the user that send the request\n\n\t//Now load the current user from the repo\n\tuser, err := handler.userHelper.GetUser(loggedInUser)\n\n\t//Check for an error\n\tif err != nil {\n\t\tutils.ReturnJsonError(w, http.StatusUnprocessableEntity, err)\n\t\treturn\n\t}\n\n\t//decode the request body into struct with all of the info specified and failed if any error occur\n\terr = json.NewDecoder(r.Body).Decode(user)\n\tif err != nil {\n\t\tutils.ReturnJsonError(w, http.StatusUnprocessableEntity, err)\n\t\treturn\n\n\t}\n\n\t//Now update the user\n\tuser, err = handler.userHelper.updateUser(loggedInUser, user)\n\n\t//Check to see if the user was created\n\tif err == nil {\n\t\tutils.ReturnJson(w, http.StatusAccepted, user)\n\t} else {\n\t\tutils.ReturnJsonError(w, http.StatusForbidden, err)\n\t}\n\n}", "func updateUser(w http.ResponseWriter, r *http.Request) 
{\r\n\tparams := mux.Vars(r)\r\n\tstmt, err := db.Prepare(\"UPDATE users SET name = ? WHERE id = ?\")\r\n\tif err != nil {\r\n\t\tpanic(err.Error())\r\n\t}\r\n\tbody, err := ioutil.ReadAll(r.Body)\r\n\tif err != nil {\r\n\t\tpanic(err.Error())\r\n\t}\r\n\tkeyVal := make(map[string]string)\r\n\tjson.Unmarshal(body, &keyVal)\r\n\tnewName := keyVal[\"name\"]\r\n\t_, err = stmt.Exec(newName, params[\"id\"])\r\n\tif err != nil {\r\n\t\tpanic(err.Error())\r\n\t}\r\n\tfmt.Fprintf(w, \"User with id = %s was updated\", params[\"id\"])\r\n}", "func UpdateUser(w http.ResponseWriter, r *http.Request) {\n\n\t// w.Header().Set(\"Content-Type\", \"application/x-www-form-urlencoded\")\n\t// w.Header().Set(\"Access-Control-Allow-Origin\", \"*\")\n\t// w.Header().Set(\"Access-Control-Allow-Methods\", \"PUT\")\n\t// w.Header().Set(\"Access-Control-Allow-Headers\", \"Content-Type\")\n\n\t// get the userid from the request params, key is \"id\"\n\tparams := mux.Vars(r)\n\n\t// convert the id type from string to int\n\tid, err := strconv.Atoi(params[\"id\"])\n\n\tif err != nil {\n\t\tlog.Fatalf(\"Unable to convert the string into int. %v\", err)\n\t}\n\n\t// create an empty user of type models.User\n\tvar user TempUsers\n\n\t// decode the json request to user\n\terr = json.NewDecoder(r.Body).Decode(&user)\n\n\tif err != nil {\n\t\tlog.Fatalf(\"Unable to decode the request body. %v\", err)\n\t}\n\n\tdb := createConnection()\n\t// close the db connection\n\tdefer db.Close()\n\n\t// create the update sql query\n\tsqlStatement := `UPDATE users SET full_name=$2, email=$3, mobile_no=$4, username=$5, passwd=$6, created_at=$7 WHERE userid=$1`\n\n\t// execute the sql statement\n\tres, err := db.Exec(sqlStatement, id, user.FullName, user.Email, user.MobileNo, user.UserName, user.Password, time.Now())\n\n\tif err != nil {\n\t\tlog.Fatalf(\"Unable to execute the query. 
%v\", err)\n\t}\n\n\t// check how many rows affected\n\trowsAffected, err := res.RowsAffected()\n\n\tif err != nil {\n\t\tlog.Fatalf(\"Error while checking the affected rows. %v\", err)\n\t}\n\n\tif rowsAffected > 0 {\n\t\tmsg := map[string]string{\"msg\": \"Updated Successfully.\"}\n\t\tjson.NewEncoder(w).Encode(msg)\n\t} else {\n\t\tmsg := map[string]string{\"msg\": \"Unable to Update, ID does not exists.\"}\n\t\tjson.NewEncoder(w).Encode(msg)\n\t}\n}", "func (ctl *controller) APIUserPutAction(ctx *gin.Context) {\n\tctl.logger.Info(\"[PUT] UserPutAction\")\n\n\tvar userRequest UserRequest\n\tuserID, err := validateUserRequestUpdate(ctx, &userRequest)\n\tif err != nil {\n\t\tctx.AbortWithError(http.StatusBadRequest, err)\n\t\treturn\n\t}\n\n\taffected, err := ctl.updateUser(&userRequest, userID)\n\tif err != nil {\n\t\tctx.AbortWithError(http.StatusInternalServerError, err)\n\t\treturn\n\t}\n\tif affected == 0 {\n\t\tctl.logger.Debug(\"nothing updated\")\n\t}\n\n\t// json response\n\tctx.JSON(http.StatusOK, jsonresp.CreateUserJSON(userID))\n}", "func (app *application) EditUser(w http.ResponseWriter, r *http.Request) {\n\tid := chi.URLParam(r, \"id\")\n\tuserID, _ := strconv.Atoi(id)\n\n\tvar user models.User\n\n\terr := app.readJSON(w, r, &user)\n\tif err != nil {\n\t\tapp.badRequest(w, r, err)\n\t\treturn\n\t}\n\n\tif userID > 0 { // For an existing user, update the user record\n\t\terr = app.DB.EditUser(user)\n\t\tif err != nil {\n\t\t\tapp.badRequest(w, r, err)\n\t\t\treturn\n\t\t}\n\n\t\tif user.Password != \"\" {\n\t\t\tnewHash, err := bcrypt.GenerateFromPassword([]byte(user.Password), 12)\n\t\t\tif err != nil {\n\t\t\t\tapp.badRequest(w, r, err)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\terr = app.DB.UpdatePasswordForUser(user, string(newHash))\n\t\t\tif err != nil {\n\t\t\t\tapp.badRequest(w, r, err)\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\n\t} else { // For a new user, simply add the user to the users table\n\t\tnewHash, err := 
bcrypt.GenerateFromPassword([]byte(user.Password), 12)\n\t\tif err != nil {\n\t\t\tapp.badRequest(w, r, err)\n\t\t\treturn\n\t\t}\n\t\terr = app.DB.AddUser(user, string(newHash))\n\t\tif err != nil {\n\t\t\tapp.badRequest(w, r, err)\n\t\t\treturn\n\t\t}\n\t}\n\n\tvar resp struct {\n\t\tError bool `json:\"error\"`\n\t\tMessage string `json:\"message\"`\n\t}\n\n\tresp.Error = false\n\tapp.writeJSON(w, http.StatusOK, resp)\n}", "func setUser(ctx context.Context, data *User) error {\n\t// clear session_token and API_token for user\n\tk := datastore.NameKey(\"Users\", strings.ToLower(data.Username), nil)\n\n\t// New struct, to not add body, author etc\n\n\tif _, err := dbclient.Put(ctx, k, data); err != nil {\n\t\tlog.Println(err)\n\t\treturn err\n\t}\n\n\treturn nil\n}", "func (s *Service) PutUser(ctx context.Context, o *platform.User) error {\n\ts.userKV.Store(o.ID.String(), o)\n\treturn nil\n}", "func UpdateUser(w http.ResponseWriter, r *http.Request) {\n\n}", "func (h *User) Update(w http.ResponseWriter, r *http.Request) {\n\tdefer r.Body.Close()\n\tbody, err := ioutil.ReadAll(r.Body)\n\tif err != nil {\n\t\tlog.Println(err)\n\t\tR.JSON500(w)\n\t\treturn\n\t}\n\n\t// @todo we might want extra check that /users/id equals to user.ID received in body\n\tuser, err := validator.UserCreate(body)\n\tif err != nil || user.ID == 0 {\n\t\tlog.Println(err)\n\t\tR.JSON400(w)\n\t\treturn\n\t}\n\n\terr = h.Storage.UpdateUser(user)\n\tif err != nil {\n\t\tlog.Println(err)\n\t\tR.JSON500(w)\n\t\treturn\n\t}\n\n\tR.JSON200OK(w)\n}", "func updateUser(c *gin.Context) {\n\tvar user user\n\tuserID := c.Param(\"id\")\n\n\tdb.First(&user, userID)\n\n\tif user.Id == 0 {\n\t\tc.JSON(http.StatusNotFound, gin.H{\"status\": http.StatusNotFound, \"message\": \"No user found!\"})\n\t\treturn\n\t}\n\n\tdb.Model(&user).Update(\"login\", c.PostForm(\"login\"))\n password,_ := HashPassword(c.PostForm(\"password\"))\n\tdb.Model(&user).Update(\"password\", password)\n\tc.JSON(http.StatusOK, 
gin.H{\"status\": http.StatusOK, \"message\": \"User updated successfully!\"})\n}", "func (_obj *WebApiAuth) SysUser_Insert(req *SysUser, id *int32, _opt ...map[string]string) (err error) {\n\n\tvar length int32\n\tvar have bool\n\tvar ty byte\n\t_os := codec.NewBuffer()\n\terr = req.WriteBlock(_os, 1)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = _os.Write_int32((*id), 2)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tvar _status map[string]string\n\tvar _context map[string]string\n\tif len(_opt) == 1 {\n\t\t_context = _opt[0]\n\t} else if len(_opt) == 2 {\n\t\t_context = _opt[0]\n\t\t_status = _opt[1]\n\t}\n\t_resp := new(requestf.ResponsePacket)\n\ttarsCtx := context.Background()\n\n\terr = _obj.s.Tars_invoke(tarsCtx, 0, \"SysUser_Insert\", _os.ToBytes(), _status, _context, _resp)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t_is := codec.NewReader(tools.Int8ToByte(_resp.SBuffer))\n\terr = _is.Read_int32(&(*id), 2, true)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif len(_opt) == 1 {\n\t\tfor k := range _context {\n\t\t\tdelete(_context, k)\n\t\t}\n\t\tfor k, v := range _resp.Context {\n\t\t\t_context[k] = v\n\t\t}\n\t} else if len(_opt) == 2 {\n\t\tfor k := range _context {\n\t\t\tdelete(_context, k)\n\t\t}\n\t\tfor k, v := range _resp.Context {\n\t\t\t_context[k] = v\n\t\t}\n\t\tfor k := range _status {\n\t\t\tdelete(_status, k)\n\t\t}\n\t\tfor k, v := range _resp.Status {\n\t\t\t_status[k] = v\n\t\t}\n\n\t}\n\t_ = length\n\t_ = have\n\t_ = ty\n\treturn nil\n}", "func HandleUserUpdate(c *gin.Context) {\n\tuid := c.Param(\"uid\")\n\tvar u User\n\tu.Username = c.DefaultPostForm(\"username\", \"\")\n\tu.Password = c.DefaultPostForm(\"password\", \"\")\n\tu.Nickname = c.DefaultPostForm(\"nickname\", \"\")\n\n\tuser, err := u.Update(uid)\n\tif err != nil {\n\t\tc.JSON(http.StatusInternalServerError, gin.H{\n\t\t\t\"code\": 1,\n\t\t\t\"msg\": err.Error(),\n\t\t\t\"data\": gin.H{},\n\t\t})\n\t\treturn\n\t}\n\n\tc.JSON(http.StatusOK, gin.H{\n\t\t\"code\": 
0,\n\t\t\"msg\": \"ok\",\n\t\t\"data\": user,\n\t})\n}", "func UpdateUser(c *gin.Context) {\n\tvar user Models.User\n\tid := c.Params.ByName(\"id\")\n\tfmt.Println(\"id\", id)\n\terr := Models.GetUserByID(&user, id)\n\tif err != nil {\n\t\tc.JSON(http.StatusNotFound, gin.H{\n\t\t\t\"error\" : gin.H { \n\t\t\t\"status\": http.StatusNotFound,\n\t\t\t\"message\": \"Not Found\",\n\t\t}})\n\t\treturn\n\t} else {\n\tc.BindJSON(&user)\n\t\n\terr = Models.UpdateUser(&user, id)\n\tif err != nil {\n\t\tc.AbortWithStatus(http.StatusNotFound)\n\t\tc.JSON(http.StatusNotFound, gin.H{\n\t\t\t\"data\":gin.H { \n\t\t\t\"error\" : gin.H { \n\t\t\t\"status\": http.StatusBadRequest,\n\t\t\t\"message\": \"Can´t update user\",\n\t\t}}})\n\t} else {\n\t\tc.JSON(http.StatusOK, user)\n\t}\n}\n}", "func (ctx *Context) UserHandler(w http.ResponseWriter, r *http.Request) {\n\tswitch r.Method {\n\tcase \"POST\":\n\t\tdecoder := json.NewDecoder(r.Body)\n\t\tnewuser := &users.NewUser{}\n\t\tif err := decoder.Decode(newuser); err != nil {\n\t\t\thttp.Error(w, \"Invalid JSON\", http.StatusBadRequest)\n\t\t\treturn\n\t\t}\n\t\terr := newuser.Validate()\n\t\tif err != nil {\n\t\t\thttp.Error(w, \"User not valid\", http.StatusBadRequest)\n\t\t\treturn\n\t\t}\n\t\tusr, _ := ctx.UserStore.GetByEmail(newuser.Email)\n\t\tif usr != nil {\n\t\t\thttp.Error(w, \"Email Already Exists\", http.StatusBadRequest)\n\t\t\treturn\n\t\t}\n\t\tuser, err := ctx.UserStore.Insert(newuser)\n\t\tstate := &SessionState{\n\t\t\tBeganAt: time.Now(),\n\t\t\tClientAddr: r.RequestURI,\n\t\t\tUser: user,\n\t\t}\n\t\t_, err = sessions.BeginSession(ctx.SessionKey, ctx.SessionStore, state, w)\n\n\t\t_, err = ctx.UserStore.CreateLikesList(user)\n\t\t_, err = ctx.UserStore.CreateGroceryList(user)\n\n\t\tw.Header().Add(\"Content-Type\", contentTypeJSONUTF8)\n\t\tencoder := json.NewEncoder(w)\n\t\tencoder.Encode(user)\n\tcase \"GET\":\n\t\tusers, err := ctx.UserStore.GetAll()\n\t\tif err != nil {\n\t\t\thttp.Error(w, \"Error fetching 
users\", http.StatusInternalServerError)\n\t\t}\n\t\tw.Header().Add(\"Content-Type\", contentTypeJSONUTF8)\n\t\tencoder := json.NewEncoder(w)\n\t\tencoder.Encode(users)\n\t}\n}", "func AddUserHandler(w http.ResponseWriter, r *http.Request) {\n\terr := AddUserProcessor(w, r)\n\tif err != nil {\n\t\tdata, statusCode, _ := oauth2Svr.GetErrorData(err)\n\t\tdata[\"user_id\"] = username(r)\n\t\tHttpResponse(w, data, statusCode)\n\t\treturn\n\t}\n\tHttpResponse(w, defaultSuccessResponse(), http.StatusOK)\n\treturn\n}", "func SetUser(next echo.HandlerFunc) echo.HandlerFunc {\n\treturn func(c echo.Context) error {\n\t\tc.Set(\"userIdFromToken\", \"12345\")\n\t\treturn next(c)\n\t}\n}", "func (ah *AuthHandler) UpdateUsername(w http.ResponseWriter, r *http.Request) {\n\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\n\tuser := &data.User{}\n\terr := data.FromJSON(user, r.Body)\n\tif err != nil {\n\t\tah.logger.Error(\"unable to decode user json\", \"error\", err.Error())\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\t// data.ToJSON(&GenericError{Error: err.Error()}, w)\n\t\tdata.ToJSON(&GenericResponse{Status: false, Message: err.Error()}, w)\n\t\treturn\n\t}\n\n\tuser.ID = r.Context().Value(UserIDKey{}).(string)\n\tah.logger.Debug(\"udpating username for user : \", user)\n\n\terr = ah.repo.UpdateUsername(context.Background(), user)\n\tif err != nil {\n\t\tah.logger.Error(\"unable to update username\", \"error\", err)\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\t// data.ToJSON(&GenericError{Error: err.Error()}, w)\n\t\tdata.ToJSON(&GenericResponse{Status: false, Message: \"Unable to update username. 
Please try again later\"}, w)\n\t\treturn\n\t}\n\n\tw.WriteHeader(http.StatusOK)\n\t// data.ToJSON(&UsernameUpdate{Username: user.Username}, w)\n\tdata.ToJSON(&GenericResponse{\n\t\tStatus: true,\n\t\tMessage: \"Successfully updated username\",\n\t\tData: &UsernameUpdate{Username: user.Username},\n\t}, w)\n}", "func (h *Handler) updateUser(c *gin.Context) handlerResponse {\n\n\tvar updatedUser types.User\n\tif err := c.ShouldBindJSON(&updatedUser); err != nil {\n\t\treturn handleBadRequest(err)\n\t}\n\tif updatedUser.Name != c.Param(userParameter) {\n\t\treturn handleNameMismatch()\n\t}\n\tstoredUser, err := h.service.User.Update(updatedUser, h.who(c))\n\tif err != nil {\n\t\treturn handleError(err)\n\t}\n\t// Remove password so we do not show in response\n\tstoredUser.Password = \"\"\n\treturn handleOK(storedUser)\n}", "func putUserToKeyServer(cfg upspin.Config, ep *upspin.Endpoint) (upspin.Config, error) {\n\tcfg = config.SetStoreEndpoint(cfg, *ep)\n\tcfg = config.SetDirEndpoint(cfg, *ep)\n\tuser := &upspin.User{\n\t\tName: cfg.UserName(),\n\t\tDirs: []upspin.Endpoint{cfg.DirEndpoint()},\n\t\tStores: []upspin.Endpoint{cfg.StoreEndpoint()},\n\t\tPublicKey: cfg.Factotum().PublicKey(),\n\t}\n\tkey, err := bind.KeyServer(cfg, cfg.KeyEndpoint())\n\tif err != nil {\n\t\treturn cfg, err\n\t}\n\terr = key.Put(user)\n\treturn cfg, err\n}", "func PUT(ctx *web.Context) {\n\t// Deserialize request into PutRequest.\n\treq := &PutRequest{}\n\tvar err error\n\tif err = ctx.Decode(req); nil != err {\n\t\tctx.Respond().Status(http.StatusBadRequest).With(err).Do()\n\t\treturn\n\t}\n\n\t// Check that request is valid.\n\tif err = req.Err(); nil != err {\n\t\tctx.Respond().Status(http.StatusBadRequest).With(err).Do()\n\t\treturn\n\t}\n\n\t// Add new user.\n\tif err = model.User.Add(req.Username, req.Password); nil != err {\n\t\tctx.Respond().Status(http.StatusConflict).With(err).Do()\n\t\treturn\n\t}\n\n\t// Reply with success.\n\tresp := 
&PutResponse{}\n\tctx.Respond().With(resp).Do()\n}", "func PostUserHandler(w http.ResponseWriter, r *http.Request) {\n\tvar user User\n\terr := json.NewDecoder(r.Body).Decode(&user)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tuser.CreateAt = time.Now()\n\tid++\n\tk := strconv.Itoa(id)\n\tListusers[k] = user\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\tj, err := json.Marshal(user)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tw.WriteHeader(http.StatusCreated)\n\tw.Write(j)\n}", "func (a DefaultApi) UserPut(body User, xAuthToken string) (*APIResponse, error) {\n\n\tvar httpMethod = \"Put\"\n\t// create path and map variables\n\tpath := a.Configuration.BasePath + \"/user\"\n\n\n\theaderParams := make(map[string]string)\n\tqueryParams := url.Values{}\n\tformParams := make(map[string]string)\n\tvar postBody interface{}\n\tvar fileName string\n\tvar fileBytes []byte\n\t// add default headers if any\n\tfor key := range a.Configuration.DefaultHeader {\n\t\theaderParams[key] = a.Configuration.DefaultHeader[key]\n\t}\n\n\n\t// to determine the Content-Type header\n\tlocalVarHttpContentTypes := []string{ \"application/json\", }\n\n\t// set Content-Type header\n\tlocalVarHttpContentType := a.Configuration.APIClient.SelectHeaderContentType(localVarHttpContentTypes)\n\tif localVarHttpContentType != \"\" {\n\t\theaderParams[\"Content-Type\"] = localVarHttpContentType\n\t}\n\t// to determine the Accept header\n\tlocalVarHttpHeaderAccepts := []string{\n\t\t\"application/json\",\n\t}\n\n\t// set Accept header\n\tlocalVarHttpHeaderAccept := a.Configuration.APIClient.SelectHeaderAccept(localVarHttpHeaderAccepts)\n\tif localVarHttpHeaderAccept != \"\" {\n\t\theaderParams[\"Accept\"] = localVarHttpHeaderAccept\n\t}\n\n\t// header params \"X-Auth-Token\"\n\theaderParams[\"X-Auth-Token\"] = xAuthToken\n\n\t// body params\n\tpostBody = &body\n\n\n\thttpResponse, err := a.Configuration.APIClient.CallAPI(path, httpMethod, postBody, headerParams, queryParams, formParams, fileName, 
fileBytes)\n\tif err != nil {\n\t\treturn NewAPIResponse(httpResponse.RawResponse), err\n\t}\n\n\treturn NewAPIResponse(httpResponse.RawResponse), err\n}", "func PutUserInGroupHandler(c *gin.Context) {\n\tsrc := c.Param(\"source\")\n\tdst := c.Param(\"destination\")\n\tif(PutUserInGroup(src, dst)){\n\t\tc.AbortWithStatus(200);\n\t}else{\n\t\tc.AbortWithStatus(http.StatusInternalServerError);\n\t}\n}", "func (ac *ApiConfig) AddUserHandler(w http.ResponseWriter, r *http.Request) {\n\tif r.Method != http.MethodPost {\n\t\thttp.Error(w, r.Method+\" is not available\", http.StatusInternalServerError)\n\t\tzerolog.Error().Msg(r.Method + \" is not available\")\n\t\treturn\n\t}\n\n\tvar user *models.Users\n\terr := json.NewDecoder(r.Body).Decode(&user)\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\thashedPass, err := bcrypt.GenerateFromPassword([]byte(user.Password), 12)\n\tif err != nil {\n\t\tzerolog.Error().Msg(err.Error())\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n\tuser.Password = string(hashedPass)\n\n\terr = ac.DHolder.AddUser(user)\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\tstat := &models.StatusIdentifier{\n\t\tOk: true,\n\t\tMessage: \"User Added\",\n\t}\n\n\terr = dResponseWriter(w, stat, http.StatusOK)\n\tif err != nil {\n\t\tzerolog.Error().Msg(err.Error())\n\t\treturn\n\t}\n\n\treturn\n}", "func UpdateUserProfileHandler(w http.ResponseWriter, r *http.Request) {\n\n}", "func (h *Handler) UpdateHandler(w http.ResponseWriter, r *http.Request) {\n\tvar u, updatedUser user.User\n\terr := json.NewDecoder(r.Body).Decode(&u)\n\tif err != nil {\n\t\th.log.Error(err)\n\t\t_ = response.HTTPError(w, http.StatusBadRequest, response.ErrorParsingUser.Error())\n\t\treturn\n\t}\n\n\tid := chi.URLParam(r, \"id\")\n\n\tcu, err := auth.GetID(r)\n\tif err != nil {\n\t\t_ = response.HTTPError(w, http.StatusBadRequest, 
response.ErrorBadRequest.Error())\n\t\treturn\n\t}\n\trole, err := auth.GetRole(r)\n\tif err != nil {\n\t\t_ = response.HTTPError(w, http.StatusBadRequest, response.ErrorBadRequest.Error())\n\t\treturn\n\t}\n\n\tctx, cancel := context.WithCancel(r.Context())\n\tdefer cancel()\n\n\tselect {\n\tcase <-ctx.Done():\n\t\t_ = response.HTTPError(w, http.StatusBadGateway, response.ErrTimeout.Error())\n\t\treturn\n\tdefault:\n\t\tupdatedUser, err = h.service.Update(ctx, id, cu, role, &u)\n\t}\n\n\tif err != nil {\n\t\th.log.Error(err)\n\t\t_ = response.HTTPError(w, http.StatusNotFound, err.Error())\n\t\treturn\n\t}\n\n\trender.JSON(w, r, render.M{\"user\": updatedUser})\n}", "func (c *UserController) PutBy(id int) string {\n\t// Update user by ID == $id\n\treturn \"User updated\"\n}", "func (client IdentityClient) updateUser(ctx context.Context, request common.OCIRequest, binaryReqBody *common.OCIReadSeekCloser, extraHeaders map[string]string) (common.OCIResponse, error) {\n\n\thttpRequest, err := request.HTTPRequest(http.MethodPut, \"/users/{userId}\", binaryReqBody, extraHeaders)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar response UpdateUserResponse\n\tvar httpResponse *http.Response\n\thttpResponse, err = client.Call(ctx, &httpRequest)\n\tdefer common.CloseBodyIfValid(httpResponse)\n\tresponse.RawResponse = httpResponse\n\tif err != nil {\n\t\treturn response, err\n\t}\n\n\terr = common.UnmarshalResponse(httpResponse, &response)\n\treturn response, err\n}", "func PutUsermetaViaUserId(UserId_ int64, iUsermeta *Usermeta) (int64, error) {\n\trow, err := Engine.Update(iUsermeta, &Usermeta{UserId: UserId_})\n\treturn row, err\n}", "func UpdateUser(w http.ResponseWriter, r *http.Request) {\n\temail, err := getEmailFromTokenHeader(r)\n\tif err != nil || email == \"\" {\n\t\thttp.Error(w, \"Invalid Token\", http.StatusUnauthorized)\n\t\treturn\n\t}\n\tw.Header().Set(\"Content-Type\", 
\"application/x-www-form-urlencoded\")\n\tw.Header().Set(\"Access-Control-Allow-Origin\", \"*\")\n\tw.Header().Set(\"Access-Control-Allow-Methods\", \"PUT\")\n\tw.Header().Set(\"Access-Control-Allow-Headers\", \"Content-Type\")\n\n\t// create an empty user of type models.User\n\tvar user models.User\n\t// decode the json request to user\n\terr = json.NewDecoder(r.Body).Decode(&user)\n\tif err != nil {\n\t\tlogrus.Debugf(\"Unable to decode the request body. %v\", err)\n\t\thttp.Error(w, \"Invalid Request\", http.StatusBadRequest)\n\t\treturn\n\t}\n\t// call update user to update the user\n\tupdatedRows, err := database.UpdateUser(email, user.FirstName, user.LastName)\n\tif err != nil {\n\t\tlogrus.Errorf(\"failed updating user. %v\", err)\n\t\thttp.Error(w, \"Invalid Request\", http.StatusInternalServerError)\n\t\treturn\n\t}\n\tlogrus.Debugf(\"User updated successfully. Total rows/record affected %v\", updatedRows)\n}", "func (s *Server) registerUserWithEnrollID(id string, enrollID string, attr []*pb.Attribute) (string, error) {\n\tmutex.Lock()\n\tdefer mutex.Unlock()\n\n\tlog.Debug(\"Registering user: \", id)\n\n\tvar tok string\n\ttok = randomString(12)\n\n\t// TODO: Update db with registered user\n\n\treturn tok, nil\n}", "func (e *Example) PostUserAuth(ctx context.Context, req *example.Request, rsp *example.Response) error {\n\tlog.Log(\"POST /api/v1.0/user/name PutUersinfo()\")\n\n\t//创建返回空间\n\trsp.Errno= utils.RECODE_OK\n\trsp.Errmsg = utils.RecodeText(rsp.Errno)\n\n\t/*从从sessionid获取当前的userid*/\n\t//连接redis\n\tredis_config_map := map[string]string{\n\t\t\"key\": utils.G_server_name,\n\t\t\"conn\": utils.G_redis_addr + \":\" + utils.G_redis_port,\n\t\t\"dbNum\": utils.G_redis_dbnum,\n\t}\n\tredis_config , _ := json.Marshal(redis_config_map)\n\t//连接redis数据库 创建句柄\n\tbm, err := cache.NewCache(\"redis\", string(redis_config) )\n\tif err != nil {\n\t\tlog.Log(\"缓存创建失败\",err)\n\t\trsp.Errno = utils.RECODE_DBERR\n\t\trsp.Errmsg = 
utils.RecodeText(rsp.Errno)\n\t\treturn nil\n\t}\n\t//拼接key\n\tsessioniduserid := req.Sessionid + \"user_id\"\n\t//获取userid\n\tvalue_id := bm.Get(sessioniduserid)\n\tid := int(value_id.([]uint8)[0])\n\n\t//创建表对象\n\tuser := models.User{ Id: id, Real_name: req.Realname, Id_card: req.Idcard }\n\n\t//创建数据库句柄\n\to:= orm.NewOrm()\n\n\t//更新\n\t_ , err = o.Update(&user ,\"real_name\", \"id_card\")\n\tif err !=nil{\n\t\trsp.Errno= utils.RECODE_DBERR\n\t\trsp.Errmsg = utils.RecodeText(rsp.Errno)\n\t\treturn nil\n\t}\n\n\t// 更新缓存\n\tbm.Put(sessioniduserid, string(user.Id), time.Second * 600)\n\n\tlog.Log(\"更新实名认证信息成功\")\n\treturn nil\n}", "func (handler *Handler) handleUserCreate(w http.ResponseWriter, r *http.Request) {\n\n\t//Create an empty new user\n\tnewUser := handler.userHelper.NewEmptyUser()\n\n\t/**\n\tDefine a struct for just updating password\n\t*/\n\ttype newUserStruct struct {\n\t\tEmail string `json:\"email\"`\n\t\tPassword string `json:\"password\"`\n\t}\n\n\t//Create the new user\n\tnewUserInfo := &newUserStruct{}\n\n\t//decode the request body into struct and failed if any error occur\n\terr := json.NewDecoder(r.Body).Decode(newUserInfo)\n\tif err != nil {\n\t\tutils.ReturnJsonStatus(w, http.StatusUnprocessableEntity, false, err.Error())\n\t\treturn\n\n\t}\n\n\t//Copy over the new user data\n\tnewUser.SetEmail(newUserInfo.Email)\n\tnewUser.SetPassword(newUserInfo.Password)\n\n\t//Now create the new suer\n\terr = handler.userHelper.createUser(newUser)\n\n\tif err != nil {\n\t\tutils.ReturnJsonStatus(w, http.StatusUnprocessableEntity, false, err.Error())\n\t\treturn\n\t}\n\n\t//Check to see if the user was created\n\tif err == nil {\n\t\tutils.ReturnJsonStatus(w, http.StatusCreated, true, \"create_user_added\")\n\t} else {\n\t\tutils.ReturnJsonStatus(w, http.StatusUnprocessableEntity, false, err.Error())\n\t}\n\n}", "func (s MockStore) Put(u User) error {\n\ts.id[u.ID] = u\n\ts.name[u.Name] = u\n\ts.email[u.Email] = u\n\n\treturn nil\n}", "func UpdateUser(db 
*gorm.DB, w http.ResponseWriter, r *http.Request) {\n\tvars := mux.Vars(r)\n\n\tid := vars[\"id\"]\n\tuser := getUserByID(db, id, w, r)\n\tif user == nil {\n\t\treturn\n\t}\n\n\tdecoder := json.NewDecoder(r.Body)\n\tif err := decoder.Decode(&user); err != nil {\n\t\tRespondError(w, http.StatusBadRequest, \"\")\n\t\treturn\n\t}\n\tdefer r.Body.Close()\n\n\tif err := db.Save(&user).Error; err != nil {\n\t\tRespondError(w, http.StatusInternalServerError, \"\")\n\t\treturn\n\t}\n\tRespondJSON(w, http.StatusOK, user)\n}", "func (handler *Handler) handleUserGet(w http.ResponseWriter, r *http.Request) {\n\n\t//We have gone through the auth, so we should know the id of the logged in user\n\tloggedInUser := r.Context().Value(\"user\").(int) //Grab the id of the user that send the request\n\n\t//Get the user\n\tuser, err := handler.userHelper.GetUser(loggedInUser)\n\n\t//Make sure we null the password\n\t//Blank out the password before returning\n\tuser.SetPassword(\"\")\n\n\t//Check to see if the user was created\n\tif err == nil {\n\t\tutils.ReturnJson(w, http.StatusOK, user)\n\t} else {\n\t\tutils.ReturnJsonStatus(w, http.StatusUnsupportedMediaType, false, err.Error())\n\t}\n\n}", "func SetUserOnContext(param string) gin.HandlerFunc {\n\n\treturn func(c *gin.Context) {\n\n\t\tuid := bson.ObjectIdHex(c.Params.ByName(param))\n\t\tu, err := service.ReadUserByID(uid)\n\t\tif err != nil {\n\t\t\tif err == mgo.ErrNotFound {\n\t\t\t\terrors.Send(c, errors.NotFound())\n\t\t\t} else {\n\t\t\t\tlogger.Error(err)\n\t\t\t\terrors.Send(c, fmt.Errorf(\"failed to get a user\"))\n\t\t\t}\n\t\t\treturn\n\t\t}\n\t\tutils.SetTargetUser(c, u)\n\t}\n}", "func patchAPIUserHandler(w http.ResponseWriter, r *http.Request, _ map[string]string) {\n\tuserName := sessionHandler.GetUserName(r)\n\tuserID, err := getUserID(userName)\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n\tdecoder := json.NewDecoder(r.Body)\n\tvar json JSONUser\n\terr = 
decoder.Decode(&json)\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n\t// Make sure user id is over 0\n\tif json.ID < 1 {\n\t\thttp.Error(w, \"Wrong user id.\", http.StatusInternalServerError)\n\t\treturn\n\t} else if userID != json.ID { // Make sure the authenticated user is only changing his/her own data. TODO: Make sure the user is admin when multiple users have been introduced\n\t\thttp.Error(w, \"You don't have permission to change this data.\", http.StatusInternalServerError)\n\t\treturn\n\t}\n\t// Get old user data to compare\n\ttempUser, err := database.RetrieveUser(json.ID)\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n\t// Make sure user email is provided\n\tif json.Email == \"\" {\n\t\tjson.Email = string(tempUser.Email)\n\t}\n\t// Make sure user name is provided\n\tif json.Name == \"\" {\n\t\tjson.Name = string(tempUser.Name)\n\t}\n\t// Make sure user slug is provided\n\tif json.Slug == \"\" {\n\t\tjson.Slug = tempUser.Slug\n\t}\n\t// Check if new name is already taken\n\tif json.Name != string(tempUser.Name) {\n\t\t_, err = database.RetrieveUserByName([]byte(json.Name))\n\t\tif err == nil {\n\t\t\t// The new user name is already taken. Assign the old name.\n\t\t\t// TODO: Return error that will be displayed in the admin interface.\n\t\t\tjson.Name = string(tempUser.Name)\n\t\t}\n\t}\n\t// Check if new slug is already taken\n\tif json.Slug != tempUser.Slug {\n\t\t_, err = database.RetrieveUserBySlug(json.Slug)\n\t\tif err == nil {\n\t\t\t// The new user slug is already taken. 
Assign the old slug.\n\t\t\t// TODO: Return error that will be displayed in the admin interface.\n\t\t\tjson.Slug = tempUser.Slug\n\t\t}\n\t}\n\tuser := structure.User{ID: json.ID, Name: []byte(json.Name), Slug: json.Slug, Email: []byte(json.Email), Image: []byte(json.Image), Cover: []byte(json.Cover), Bio: []byte(json.Bio), Website: []byte(json.Website), Location: []byte(json.Location)}\n\terr = methods.UpdateUser(&user, userID)\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n\tif json.Password != \"\" && (json.Password == json.PasswordRepeated) { // Update password if a new one was submitted\n\t\tencryptedPassword, err := authentication.EncryptPassword(json.Password)\n\t\tif err != nil {\n\t\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\t\treturn\n\t\t}\n\t\terr = database.UpdateUserPassword(user.ID, encryptedPassword, date.GetCurrentTime(), json.ID)\n\t\tif err != nil {\n\t\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\t\treturn\n\t\t}\n\t}\n\t// Check if the user name was changed. 
If so, update the session cookie to the new user name.\n\tif json.Name != string(tempUser.Name) {\n\t\tlogInUser(json.Name, w)\n\t}\n\tw.WriteHeader(http.StatusOK)\n\tw.Write([]byte(\"User settings updated!\"))\n\treturn\n}", "func RegisterUser(w http.ResponseWriter, r *http.Request) {\n\tbody, err := ioutil.ReadAll(r.Body)\n\tif err != nil {\n\t\thttp.Error(w, \"No input found!\", http.StatusBadRequest)\n\t\treturn\n\t}\n\tvar newReq User\n\terr = json.Unmarshal(body, &newReq)\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n\tvar username = newReq.UserID\n\tif _, ok := userData[username]; ok {\n\t\thttp.Error(w, \"User already exists!\", http.StatusBadRequest)\n\t\treturn\n\t}\n\n\t// log.Println(util.StringWithCharset(random.Intn(20)+10, charset))\n\tpreHashString := newReq.UserID + util.StringWithCharset(random.Intn(20)+10, util.Charset)\n\thashedString := crypto.CreateSHA256Hash(preHashString)\n\tuserData[username] = hashedString\n\thashOutput := UserHash{hashedString}\n\tlog.Println(userData)\n\toutJSON, err := json.Marshal(hashOutput)\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\tw.Write(outJSON)\n}", "func (h *Handler) update() http.HandlerFunc {\n\treturn func(w http.ResponseWriter, r *http.Request) {\n\t\tuser := &model.User{}\n\t\tif err := json.NewDecoder(r.Body).Decode(user); err != nil {\n\t\t\tmsg := &errorMessage{\n\t\t\t\tError: err.Error(),\n\t\t\t\tMessage: \"user json decode error\",\n\t\t\t}\n\t\t\tresponse.JSON(w, http.StatusBadRequest, msg)\n\t\t\treturn\n\t\t}\n\t\tif len(user.FirstName) == 0 && len(user.LastName) == 0 {\n\t\t\tmsg := &errorMessage{\n\t\t\t\tMessage: \"user must have fields to update\",\n\t\t\t}\n\t\t\tresponse.JSON(w, http.StatusBadRequest, msg)\n\t\t\treturn\n\t\t}\n\n\t\tvars := mux.Vars(r)\n\t\tid := vars[userID]\n\t\tentity, err := 
h.UserDAO.Update(r.Context(), id, user)\n\t\tswitch {\n\t\tcase errors.Is(err, errorx.ErrNoUser):\n\t\t\tmsg := &errorMessage{\n\t\t\t\tMessage: fmt.Sprintf(\"user %s does not exist\", id),\n\t\t\t}\n\t\t\tresponse.JSON(w, http.StatusNotFound, msg)\n\t\t\treturn\n\t\tcase errors.Is(err, errorx.ErrDeleteUser):\n\t\t\tmsg := &errorMessage{\n\t\t\t\tMessage: fmt.Sprintf(\"user %s has been deleted\", id),\n\t\t\t}\n\t\t\tresponse.JSON(w, http.StatusGone, msg)\n\t\t\treturn\n\t\tcase err != nil:\n\t\t\tmsg := &errorMessage{\n\t\t\t\tError: err.Error(),\n\t\t\t\tMessage: \"user datastore error\",\n\t\t\t}\n\t\t\tresponse.JSON(w, http.StatusInternalServerError, msg)\n\t\t\treturn\n\t\tdefault:\n\t\t\tresponse.JSON(w, http.StatusOK, entity)\n\t\t}\n\n\t}\n}", "func PutUser(u structs.User) error {\n\tuserexists := false\n\tcuru := &structs.User{}\n\tif u.Username != \"\" {\n\t\terr := User([]byte(u.Username), curu)\n\t\tif err == nil {\n\t\t\tuserexists = true\n\t\t} else {\n\t\t\tlog.Errorw(\"PutUser userexists lookup error\",\n\t\t\t\t\"error\", err.Error(),\n\t\t\t\t\"userexists\", userexists,\n\t\t\t\t\"u\", u,\n\t\t\t\t\"curu\", curu,\n\t\t\t)\n\t\t}\n\t}\n\n\treturn Db.Update(func(tx *bolt.Tx) error {\n\t\tb := getBucket(tx, userBucket)\n\n\t\tu.LastUpdate = time.Now().Unix()\n\t\tif userexists {\n\t\t\tlog.Debugf(\"userexists.. keeping time at %v\", curu.CreatedOn)\n\t\t\tu.CreatedOn = curu.CreatedOn\n\t\t} else {\n\t\t\tu.CreatedOn = u.LastUpdate\n\t\t\tid, _ := b.NextSequence()\n\t\t\tu.ID = int(id)\n\t\t\tlog.Debugf(\"new user.. 
setting created on to %v\", u.CreatedOn)\n\t\t}\n\n\t\teU, err := gobEncodeUser(&u)\n\t\tif err != nil {\n\t\t\tlog.Error(err)\n\t\t\treturn err\n\t\t}\n\n\t\terr = b.Put([]byte(u.Username), eU)\n\t\tif err != nil {\n\t\t\tlog.Error(err)\n\t\t\treturn err\n\t\t}\n\t\tlog.Debugf(\"user created %v\", u)\n\t\treturn nil\n\t})\n}", "func PostUserHandler(w http.ResponseWriter, r *http.Request) {\n\tvar user models.User\n\terr := json.NewDecoder(r.Body).Decode(&user)\n\tif err != nil {\n\t\tlog.Println(\"Error al parsear usuario\")\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\treturn\n\t}\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\tstatus := users.CreateUser(user)\n\tw.WriteHeader(status)\n}", "func updateUser(user UserID, params map[string]interface{}, client *Client) error {\n\treturn client.Put(params, \"/access/users/\"+user.ToString())\n}", "func (c UserController) handleUser(w http.ResponseWriter, r *http.Request) {\n\tif r.Method != http.MethodGet {\n\t\tw.WriteHeader(http.StatusMethodNotAllowed)\n\t\treturn\n\t}\n\n\tuserInContext := r.Context().Value(\"CustomUser\")\n\t//fmt.Println(\"added and fetched from context in controller: \", tester)\n\tresponse, err := json.Marshal(userInContext)\n\tif err != nil {\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\treturn\n\t}\n\tw.WriteHeader(http.StatusOK)\n\tw.Write([]byte(string(response)))\n}", "func UpdateUser(w http.ResponseWriter, r *http.Request) {\n\tfmt.Println(\"Update user endpoint hit\")\n\n\tvars := mux.Vars(r)\n\n\tid := vars[\"id\"]\n\n\temail := r.FormValue(\"email\")\n\n\tuser := &models.User{}\n\n\tuser.Update(id, email)\n\n\tjson.NewEncoder(w).Encode(user)\n}", "func (handler *Handler) handleUserLogin(w http.ResponseWriter, r *http.Request) {\n\n\t/**\n\tDefine a struct for just updating password\n\t*/\n\ttype loginUserStruct struct {\n\t\tEmail string `json:\"email\"`\n\t\tPassword string `json:\"password\"`\n\t}\n\n\tuserCred := &loginUserStruct{}\n\n\t//decode the request 
body into struct and failed if any error occur\n\terr := json.NewDecoder(r.Body).Decode(userCred)\n\tif err != nil {\n\t\tutils.ReturnJsonError(w, http.StatusUnprocessableEntity, err)\n\t\treturn\n\n\t}\n\n\t//Now look up the user\n\tuser, err := handler.userHelper.GetUserByEmail(strings.TrimSpace(strings.ToLower(userCred.Email)))\n\n\t//check for an error\n\tif err != nil {\n\t\t//There prob is not a user to return\n\t\tutils.ReturnJsonError(w, http.StatusForbidden, err)\n\t\treturn\n\t}\n\n\t//We have the user, try to login\n\tuser, err = handler.userHelper.login(userCred.Password, user)\n\n\t//If there is an error, don't login\n\tif err != nil {\n\t\t//There prob is not a user to return\n\t\tutils.ReturnJsonError(w, http.StatusForbidden, err)\n\t\treturn\n\t}\n\n\t//Check to see if the user was created\n\tif err == nil {\n\t\tutils.ReturnJson(w, http.StatusCreated, user)\n\t} else {\n\t\tutils.ReturnJsonError(w, http.StatusForbidden, err)\n\t}\n\n}", "func UserHandler(w http.ResponseWriter, r *http.Request) {\n\tconn := &sql.DB{}\n\tuserRepo := repository.NewUserRepository(conn)\n\tuserUsecase := usecase.NewUserUsecase(userRepo)\n\tc := NewUserController(userUsecase, w, r)\n\tswitch r.Method {\n\tcase \"GET\":\n\t\tc.List()\n\tcase \"POST\":\n\t\tc.Create()\n\tdefault:\n\t\tfmt.Println(\"404 error\")\n\t}\n}", "func (_obj *WebApiAuth) SysUser_Update(id int32, req *SysUser, res *SysUser, _opt ...map[string]string) (err error) {\n\n\tvar length int32\n\tvar have bool\n\tvar ty byte\n\t_os := codec.NewBuffer()\n\terr = _os.Write_int32(id, 1)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = req.WriteBlock(_os, 2)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = (*res).WriteBlock(_os, 3)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tvar _status map[string]string\n\tvar _context map[string]string\n\tif len(_opt) == 1 {\n\t\t_context = _opt[0]\n\t} else if len(_opt) == 2 {\n\t\t_context = _opt[0]\n\t\t_status = _opt[1]\n\t}\n\t_resp := 
new(requestf.ResponsePacket)\n\ttarsCtx := context.Background()\n\n\terr = _obj.s.Tars_invoke(tarsCtx, 0, \"SysUser_Update\", _os.ToBytes(), _status, _context, _resp)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t_is := codec.NewReader(tools.Int8ToByte(_resp.SBuffer))\n\terr = (*res).ReadBlock(_is, 3, true)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif len(_opt) == 1 {\n\t\tfor k := range _context {\n\t\t\tdelete(_context, k)\n\t\t}\n\t\tfor k, v := range _resp.Context {\n\t\t\t_context[k] = v\n\t\t}\n\t} else if len(_opt) == 2 {\n\t\tfor k := range _context {\n\t\t\tdelete(_context, k)\n\t\t}\n\t\tfor k, v := range _resp.Context {\n\t\t\t_context[k] = v\n\t\t}\n\t\tfor k := range _status {\n\t\t\tdelete(_status, k)\n\t\t}\n\t\tfor k, v := range _resp.Status {\n\t\t\t_status[k] = v\n\t\t}\n\n\t}\n\t_ = length\n\t_ = have\n\t_ = ty\n\treturn nil\n}", "func (this *UserController) Update() {\n\tflash \t := beego.ReadFromRequest(&this.Controller)\n\n\tid, _ := strconv.Atoi(this.Ctx.Input.Param(\":id\"))\n\tuser := &models.User{Id:id}\n\tuser.GetOne()\n\n\tnamesurname \t\t:= this.GetString(\"name_surname\")\n\tusername \t\t\t:= this.GetString(\"user_name\")\n\temail \t\t\t\t:= this.GetString(\"email\")\n\tpassword\t \t\t:= this.GetString(\"password\")\n\turl\t\t\t \t\t:= this.GetString(\"url\")\n\tinfo\t\t\t\t:= this.GetString(\"info\")\n\n\tvalid := validation.Validation{}\n\n\tvalid.Email(email, \"Email\")\n\n\tvalid.Required(username, \"Username\")\n\tvalid.Required(password, \"Password\")\n\n\tvalid.MaxSize(username, 20, \"Username\")\n\tvalid.MaxSize(password, 16, \"Password\")\n\n\tswitch {\n\tcase valid.HasErrors():\n\t\tfor _, err := range valid.Errors {\n\t\t\tlog.Println(err.Key, err.Message)\n\t\t}\n\t\tvalid.Error(\"Problem creating user!\")\n\t\tflash.Error(\"Problem creating user!\")\n\t\tflash.Store(&this.Controller)\n\tdefault:\n\t\tuser := 
&models.User{\n\t\t\tNameSurname\t\t:namesurname,\n\t\t\tUserName\t\t:username,\n\t\t\tEmail\t\t\t:email,\n\t\t\tPassword\t\t:Md5(password),\n\t\t\tUrl\t\t\t\t:url,\n\t\t\tInfo\t\t\t:info,\n\t\t\tRegisterTime \t:time.Now(),\n\t\t}\n\t\tswitch {\n\t\t\tcase user.ExistUserName():\n\t\t\t\tvalid.Error(\"This username is in use!\")\n\t\t\t\tflash.Error(\"This username is in use!\")\n\t\t\t\tflash.Store(&this.Controller)\n\t\t\tcase user.ExistEmail():\n\t\t\t\tvalid.Error(\"This email is in use!\")\n\t\t\t\tflash.Error(\"This email is in use!\")\n\t\t\t\tflash.Store(&this.Controller)\n\t\t\tdefault:\n\t\t\t\terr := user.Update()\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tvalid.Error(fmt.Sprintf(\"%v\", err))\n\t\t\t\tflash.Notice(\"User updated successfully!\")\n\t\t\t\tflash.Store(&this.Controller)\n\t\t\t\tthis.Redirect(\"/admin/users\", 302)\n\t\t\t\treturn\n\t\t}\n\n\t}\n\n\tredirectUrl := \"/admin/users/edit/\" + strconv.Itoa(id)\n\tthis.Redirect(redirectUrl, 302)\n\tthis.Abort(\"302\")\n\treturn\n}", "func (ch *Context) UserPatchHandler(ctx *fasthttp.RequestCtx) {\n\treq := &ctx.Request\n\tres := &ctx.Response\n\t_, err := ch.getUserBody(req)\n\tif err != nil {\n\t\tres.AppendBodyString(\"User not found\")\n\t\tres.SetStatusCode(400)\n\t}\n\tdata := make(map[string]interface{})\n\terr = json.Unmarshal(req.Body(), data)\n\tif err != nil {\n\t\tfmt.Println(\"Error unmarshalling\")\n\t}\n\n\t//更新现有的数据\n\t//for k, v := range data {\n\t//\tuserBody[k] = data[v]\n\t//}\n\t//res.AppendBody(userJson)\n\t//res.Header.SetContentType(\"application/json\")\n\tres.SetStatusCode(200)\n}", "func (auh *AdminUserHandler) PostUser(w http.ResponseWriter, r *http.Request, ps httprouter.Params) {\n\tvar apiKey = r.Header.Get(\"api-key\")\n\tif apiKey == \"\" || (apiKey != adminApiKey && apiKey != userApiKey) {\n\t\tw.Header().Set(\"Content-Type\", \"application/json\")\n\t\thttp.Error(w, http.StatusText(http.StatusForbidden), 
http.StatusForbidden)\n\t\treturn\n\t}\n\tl := r.ContentLength\n\tbody := make([]byte, l)\n\tr.Body.Read(body)\n\n\tuser := &entity.User{}\n\n\terr := json.Unmarshal(body, user)\n\n\tif err != nil {\n\t\tw.Header().Set(\"Content-Type\", \"application/json\")\n\t\thttp.Error(w, http.StatusText(http.StatusNotFound), http.StatusNotFound)\n\t\treturn\n\t}\n\n\tuser, errs := auh.userService.StoreUser(user)\n\n\tif len(errs) > 0 {\n\t\tw.Header().Set(\"Content-Type\", \"application/json\")\n\t\thttp.Error(w, http.StatusText(http.StatusNotFound), http.StatusNotFound)\n\t\treturn\n\t}\n\toutput, err := json.MarshalIndent(user, \"\", \"\\t\")\n\tw.WriteHeader(http.StatusCreated)\n\tw.Write(output)\n\treturn\n}", "func (srv *UsersService) UpdateHandler(ctx *gin.Context) {\n\tlogger := srv.logger.New(\"action\", \"UpdateHandler\")\n\tuser := GetRequestedUser(ctx)\n\tif user == nil {\n\t\t// Returns a \"404 StatusNotFound\" response\n\t\tsrv.ResponseService.NotFound(ctx)\n\t\treturn\n\t}\n\n\trawData, err := ctx.GetRawData()\n\tif err != nil {\n\t\tlogger.Error(\"cannot read body\", \"err\", err)\n\t\tsrv.ResponseService.Error(ctx, responses.CanNotUpdateUser, \"Can't update user.\")\n\t\treturn\n\t}\n\n\tcurrentUser := GetCurrentUser(ctx)\n\tif currentUser.UID == user.UID ||\n\t\tcurrentUser.RoleName == \"root\" ||\n\t\tcurrentUser.RoleName == \"admin\" {\n\n\t\terr = srv.userForm.Update(user, currentUser, rawData)\n\t\tif err != nil {\n\t\t\tlogger.Error(\"cannot update a user\", \"err\", err)\n\t\t\tsrv.ResponseService.ValidatorErrorResponse(ctx, responses.UnprocessableEntity, err)\n\t\t\treturn\n\t\t}\n\n\t\told, err := srv.Repository.GetUsersRepository().FindByUID(user.UID)\n\t\tif err != nil {\n\t\t\tlogger.Error(\"cannot found user\", \"err\", err)\n\t\t\tsrv.ResponseService.NotFound(ctx)\n\t\t\treturn\n\t\t}\n\n\t\terr = srv.userLoaderService.LoadUserCompletely(old)\n\t\tif err != nil {\n\t\t\tlogger.Error(\"cannot load user\", \"err\", 
err)\n\t\t\tsrv.ResponseService.Error(ctx, responses.CanNotUpdateUser, \"Can't update a user\")\n\t\t\treturn\n\t\t}\n\n\t\ttx := srv.Repository.GetUsersRepository().DB.Begin()\n\t\terr = srv.userCreator.Update(user, tx)\n\t\tif err != nil {\n\t\t\ttx.Rollback()\n\t\t\t// Returns a \"400 StatusBadRequest\" response\n\t\t\tsrv.ResponseService.Error(ctx, responses.CanNotUpdateUser, \"Can't update a user\")\n\t\t\treturn\n\t\t}\n\n\t\tif currentUser.UID != user.UID &&\n\t\t\t(currentUser.RoleName == \"admin\" || currentUser.RoleName == \"root\") {\n\t\t\tsrv.SystemLogsService.LogModifyUserProfileAsync(old, user, currentUser.UID)\n\t\t}\n\n\t\ttx.Commit()\n\t}\n\n\t// Returns a \"204 StatusNoContent\" response\n\tctx.JSON(http.StatusNoContent, nil)\n}", "func (_obj *WebApiAuth) SysUser_InsertWithContext(tarsCtx context.Context, req *SysUser, id *int32, _opt ...map[string]string) (err error) {\n\n\tvar length int32\n\tvar have bool\n\tvar ty byte\n\t_os := codec.NewBuffer()\n\terr = req.WriteBlock(_os, 1)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = _os.Write_int32((*id), 2)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tvar _status map[string]string\n\tvar _context map[string]string\n\tif len(_opt) == 1 {\n\t\t_context = _opt[0]\n\t} else if len(_opt) == 2 {\n\t\t_context = _opt[0]\n\t\t_status = _opt[1]\n\t}\n\t_resp := new(requestf.ResponsePacket)\n\n\terr = _obj.s.Tars_invoke(tarsCtx, 0, \"SysUser_Insert\", _os.ToBytes(), _status, _context, _resp)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t_is := codec.NewReader(tools.Int8ToByte(_resp.SBuffer))\n\terr = _is.Read_int32(&(*id), 2, true)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif len(_opt) == 1 {\n\t\tfor k := range _context {\n\t\t\tdelete(_context, k)\n\t\t}\n\t\tfor k, v := range _resp.Context {\n\t\t\t_context[k] = v\n\t\t}\n\t} else if len(_opt) == 2 {\n\t\tfor k := range _context {\n\t\t\tdelete(_context, k)\n\t\t}\n\t\tfor k, v := range _resp.Context {\n\t\t\t_context[k] = v\n\t\t}\n\t\tfor k := range 
_status {\n\t\t\tdelete(_status, k)\n\t\t}\n\t\tfor k, v := range _resp.Status {\n\t\t\t_status[k] = v\n\t\t}\n\n\t}\n\t_ = length\n\t_ = have\n\t_ = ty\n\treturn nil\n}", "func UpdateUser(res http.ResponseWriter, req *http.Request) {\n\tvar response responses.User\n\tuser := new(model.User)\n\tID := req.Context().Value(\"ID\").(string)\n\tdata := req.Context().Value(\"data\").(*validation.UpdateUser)\n\tnow := time.Now()\n\tdocKey, err := connectors.ReadDocument(\"users\", ID, user)\n\tif err != nil {\n\t\trender.Render(res, req, responses.NewHTTPError(http.StatusServiceUnavailable, constants.Unavailable))\n\t\treturn\n\t} else if len(docKey) == 0 {\n\t\trender.Render(res, req, responses.NewHTTPError(http.StatusBadRequest, constants.NotFoundResource))\n\t\treturn\n\t}\n\tcopier.Copy(&user, data)\n\tuser.UpdatedAt = now.Unix()\n\tdocKey, err = connectors.UpdateDocument(\"users\", docKey, user)\n\tif err != nil {\n\t\trender.Render(res, req, responses.NewHTTPError(http.StatusServiceUnavailable, constants.Unavailable))\n\t\treturn\n\t}\n\tresponse.ID = docKey\n\tcopier.Copy(&response, user)\n\trender.Render(res, req, responses.NewHTTPSucess(http.StatusOK, response))\n}", "func editUser(userID int, firstName string, MI string, lastName string, privLevel int) error {\n\n\tdb, err := sql.Open(\"mysql\", DB_USER_NAME+\":\"+DB_PASSWORD+\"@unix(/var/run/mysql/mysql.sock)/\"+DB_NAME)\n\tif err != nil {\n\t\treturn errors.New(\"No connection\")\n\t}\n\n\tres, err := db.Exec(\"update Users set FirstName=?, MiddleInitial=?, LastName=?, PrivLevel=? 
where UserID=?\", firstName, MI, lastName, privLevel, userID)\n\n\tif err != nil {\n\t\treturn errors.New(\"User update failed.\")\n\t}\n\n\trowsAffected, err := res.RowsAffected()\n\n\tif rowsAffected != 1 {\n\t\treturn errors.New(\"Query didn't match any users.\")\n\t}\n\n\treturn nil\n}", "func PutUsermetaViaUmetaId(UmetaId_ int64, iUsermeta *Usermeta) (int64, error) {\n\trow, err := Engine.Update(iUsermeta, &Usermeta{UmetaId: UmetaId_})\n\treturn row, err\n}", "func UpdateUser(person *Person, id string) (err error) {\n\tfmt.Println(person)\n\tConfig.DB.Save(person)\n\treturn nil\n}", "func db_update_user(username string, sessionid string, follow_username string, post Post){\n file_path := path.Join(\"db/users\", strings.ToLower(username)+\".json\")\n \n if _, err := os.Stat(file_path); os.IsNotExist(err) {\n return\n }\n user := db_JSON_to_user(username)\n \n if sessionid != \"\" {\n user.SessionID = sessionid\n }\n if follow_username != \"\" {\n user.Follows = append(user.Follows, follow_username)\n }\n if post.Content != \"\" {\n user.Posts = append(user.Posts, &post)\n }\n \n updated_user := db_user_to_JSON(user)\n \n writeerr := ioutil.WriteFile(file_path, updated_user, 0644)\n\n if writeerr != nil {\n panic(writeerr)\n }\n}", "func (h *Handler) Add(_ context.Context, usr *usersapi.User) (err error) {\n\treturn h.provider.Add(&users.User{\n\t\tUsername: usr.Username,\n\t\tPassword: usr.Password,\n\t})\n}", "func Update(c *gin.Context) {\n\tuserID, err := getUserID(c.Param(\"user_id\"))\n\tif err != nil {\n\t\tc.JSON(err.Status, err)\n\t\treturn\n\t}\n\n\tvar newUser users.User\n\tif err := c.ShouldBindJSON(&newUser); err != nil {\n\t\tbdErr := errors.NewBadRequestError(fmt.Sprintf(\"invalid json body %s\", err.Error()))\n\t\tc.JSON(bdErr.Status, bdErr)\n\t\treturn\n\t}\n\n\tnewUser.ID = userID\n\tisPartial := c.Request.Method == http.MethodPatch\n\n\tresult, updateErr := services.UserServ.UpdateUser(newUser, isPartial)\n\tif err != nil 
{\n\t\tc.JSON(updateErr.Status, updateErr)\n\t\treturn\n\t}\n\n\tisPublic := c.GetHeader(\"X-Public\") == \"true\"\n\tc.JSON(http.StatusOK, result.Marshall(isPublic))\n}", "func (s *Server) handleDashboardUserEdit() http.HandlerFunc {\n\tvar o sync.Once\n\tvar tpl *template.Template\n\n\t//steps on the page\n\tsteps := struct {\n\t\tStepDel string\n\t}{\n\t\tStepDel: \"stepDel\",\n\t}\n\treturn func(w http.ResponseWriter, r *http.Request) {\n\t\tctx, logger := GetLogger(s.getCtx(r))\n\t\to.Do(func() {\n\t\t\ttpl = s.loadWebTemplateDashboard(ctx, \"user-edit.html\")\n\t\t})\n\t\tctx, provider, data, _, ok := s.createTemplateDataDashboard(w, r.WithContext(ctx), tpl, true)\n\t\tif !ok {\n\t\t\treturn\n\t\t}\n\n\t\t//setup the breadcrumbs\n\t\tbreadcrumbs := []breadcrumb{\n\t\t\t{\"Users\", provider.GetURLUsers()},\n\t\t\t{\"Edit Team Member\", \"\"},\n\t\t}\n\t\tdata[TplParamBreadcrumbs] = breadcrumbs\n\t\tdata[TplParamActiveNav] = provider.GetURLUsers()\n\t\tdata[TplParamFormAction] = provider.GetURLUserEdit()\n\t\tdata[TplParamSteps] = steps\n\n\t\t//handle the input\n\t\tidStr := r.FormValue(URLParams.UserID)\n\t\tstep := r.FormValue(URLParams.Step)\n\n\t\t//prepare the data\n\t\tdata[TplParamUserID] = idStr\n\n\t\t//load the provider user\n\t\tid := uuid.FromStringOrNil(idStr)\n\t\tif id == uuid.Nil {\n\t\t\tlogger.Warnw(\"invalid uuid\", \"id\", idStr)\n\t\t\ts.SetCookieErr(w, Err)\n\t\t\thttp.Redirect(w, r.WithContext(ctx), provider.GetURLUsers(), http.StatusSeeOther)\n\t\t\treturn\n\t\t}\n\t\tctx, providerUser, err := LoadProviderUserByProviderIDAndID(ctx, s.getDB(), provider.ID, &id)\n\t\tif err != nil {\n\t\t\tlogger.Errorw(\"load provider user\", \"error\", err, \"id\", id)\n\t\t\tdata[TplParamErr] = GetErrText(Err)\n\t\t\ts.renderWebTemplate(w, r.WithContext(ctx), tpl, data)\n\t\t\treturn\n\t\t}\n\t\tif providerUser == nil {\n\t\t\tlogger.Errorw(\"no provider user\", \"id\", id)\n\t\t\tdata[TplParamErr] = GetErrText(Err)\n\t\t\ts.renderWebTemplate(w, 
r.WithContext(ctx), tpl, data)\n\t\t\treturn\n\t\t}\n\t\tdata[TplParamUser] = providerUser\n\n\t\t//prepare the confirmation modal\n\t\tdata[TplParamConfirmMsg] = GetMsgText(MsgUserDelConfirm)\n\t\tdata[TplParamConfirmSubmitName] = URLParams.Step\n\t\tdata[TplParamConfirmSubmitValue] = steps.StepDel\n\n\t\t//check the method\n\t\tif r.Method == http.MethodGet {\n\t\t\ts.renderWebTemplate(w, r.WithContext(ctx), tpl, data)\n\t\t\treturn\n\t\t}\n\n\t\t//execute the correct operation\n\t\tvar msgKey MsgKey\n\t\tswitch step {\n\t\tcase steps.StepDel:\n\t\t\t//delete the provider user\n\t\t\tctx, err := DeleteUserProvider(ctx, s.getDB(), provider.ID, providerUser.ID)\n\t\t\tif err != nil {\n\t\t\t\tlogger.Errorw(\"delete provider user\", \"error\", err, \"id\", providerUser.ID)\n\t\t\t\tdata[TplParamErr] = GetErrText(Err)\n\t\t\t\ts.renderWebTemplate(w, r.WithContext(ctx), tpl, data)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tmsgKey = MsgUserDel\n\t\tdefault:\n\t\t\tlogger.Errorw(\"invalid step\", \"id\", providerUser.ID, \"step\", step)\n\t\t\tdata[TplParamErr] = GetErrText(Err)\n\t\t\ts.renderWebTemplate(w, r.WithContext(ctx), tpl, data)\n\t\t\treturn\n\t\t}\n\n\t\t//success\n\t\ts.SetCookieMsg(w, msgKey, providerUser.Login)\n\t\thttp.Redirect(w, r.WithContext(ctx), provider.GetURLUsers(), http.StatusSeeOther)\n\t}\n}", "func UserHandler(w http.ResponseWriter, r *http.Request) {\n\tswitch r.Method {\n\n\tcase \"GET\":\n\n\t\tusersJSON, err := json.Marshal(db.Users)\n\n\t\tif err != nil {\n\t\t\thttp.Error(w, err.Error(), http.StatusBadRequest)\n\t\t\treturn\n\t\t}\n\t\tw.Write(usersJSON)\n\n\tcase \"POST\":\n\n\t\terr := utils.IsJsonValid(w, r)\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\n\t\tuserPayload := db.UserPayload{}\n\t\tb, err := ioutil.ReadAll(r.Body)\n\t\tdefer r.Body.Close()\n\t\tif err != nil {\n\t\t\thttp.Error(w, err.Error(), http.StatusBadRequest)\n\t\t\treturn\n\t\t}\n\n\t\terr = json.Unmarshal(b, &userPayload)\n\t\tif err != nil {\n\t\t\thttp.Error(w, 
err.Error(), http.StatusBadRequest)\n\t\t\treturn\n\t\t}\n\n\t\t//validate email\n\t\tvalidEmail := utils.IsEmailValid(userPayload.UserEmail)\n\t\tif !validEmail {\n\t\t\tmsg := \"Email address is not valid\"\n\t\t\thttp.Error(w, msg, http.StatusBadRequest)\n\t\t\treturn\n\t\t}\n\n\t\taccessToken := utils.String(50)\n\n\t\tcookie := http.Cookie{\n\t\t\tName: \"userToken\",\n\t\t\tValue: accessToken,\n\t\t\tExpires: time.Time{},\n\t\t\tMaxAge: 86400,\n\t\t\tSecure: false,\n\t\t\tHttpOnly: false,\n\t\t\tSameSite: 0,\n\t\t}\n\t\thttp.SetCookie(w, &cookie)\n\n\t\tcookie = http.Cookie{\n\t\t\tName: \"userEmail\",\n\t\t\tValue: userPayload.UserEmail,\n\t\t\tExpires: time.Time{},\n\t\t\tMaxAge: 86400,\n\t\t\tSecure: false,\n\t\t\tHttpOnly: false,\n\t\t\tSameSite: 0,\n\t\t}\n\t\thttp.SetCookie(w, &cookie)\n\n\t\tuser := db.User{}\n\t\tuser.Token = accessToken\n\t\tuser.Devices = make(map[string]*db.Device)\n\n\t\tdb.Users[userPayload.UserEmail] = &user\n\n\t\tw.Write([]byte(accessToken))\n\t}\n}", "func UpdateUser(c *gin.Context) {\n\tuserID, userErr := strconv.ParseInt(c.Param(\"user_id\"), 10, 64)\n\tif userErr != nil {\n\t\terr := errors.NewBadRequestError(\"user id should be a number\")\n\t\tc.JSON(err.Status, err)\n\t\treturn\n\t}\n\n\t//intialize\n\tvar user users.User\n\t//check whether the given json body is valid or not\n\tif err := c.ShouldBindJSON(&user); err != nil {\n\t\tinvalidErr := errors.NewInternalServerError(\"invalid json body\")\n\t\tc.JSON(invalidErr.Status, invalidErr)\n\t\treturn\n\t}\n\n\t//send the user struct to the services\n\tuser.ID = userID\n\t//check whether the request method is PATCH and PUT\n\tisPartial := c.Request.Method == http.MethodPatch\n\n\tresult, err := services.UsersService.UpdateUser(isPartial, user)\n\tif err != nil {\n\t\tc.JSON(err.Status, err)\n\t\treturn\n\t}\n\n\t//final implementation\n\tc.JSON(http.StatusOK, result.Marshall(c.GetHeader(\"X-Public\") == \"true\"))\n}", "func Update(user User) error {\n\n}", "func (ac 
*ApiConfig) UpdateUserHandler(w http.ResponseWriter, r *http.Request) {\n\tif r.Method != http.MethodPost {\n\t\thttp.Error(w, r.Method+\" is not available\", http.StatusInternalServerError)\n\t\tzerolog.Error().Msg(r.Method + \" is not available\")\n\t\treturn\n\t}\n\tvar user *models.Users = &models.Users{}\n\terr := json.NewDecoder(r.Body).Decode(user)\n\tif err != nil {\n\t\tzerolog.Error().Msg(err.Error())\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\tsPass, err := bcrypt.GenerateFromPassword([]byte(user.Password), 12)\n\tif err != nil {\n\t\tzerolog.Error().Msg(err.Error())\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n\tuser.Password = string(sPass)\n\n\terr = ac.DHolder.UpdateUser(user)\n\tif err != nil {\n\t\tzerolog.Error().Msg(err.Error())\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\tstat := &models.StatusIdentifier{\n\t\tOk: true,\n\t\tMessage: \"User Deleted\",\n\t}\n\n\terr = dResponseWriter(w, stat, http.StatusOK)\n\tif err != nil {\n\t\tzerolog.Error().Msg(err.Error())\n\t\treturn\n\t}\n\n\treturn\n}", "func getUserHandler(res http.ResponseWriter, req *http.Request) {\n\tres.Header().Add(\"content-type\", \"application/json\")\n\tparams := mux.Vars(req)\n\tid, _ := primitive.ObjectIDFromHex(params[\"id\"])\n\tvar user MongoUserSchema\n\tusersCol := client.Database(\"Aviroop_Nandy_Appointy\").Collection(\"users\")\n\tctx, _ := context.WithTimeout(context.Background(), 15*time.Second)\n\terr := usersCol.FindOne(ctx, MongoUserSchema{ID: id}).Decode(&user)\n\tif err != nil {\n\t\tres.WriteHeader(http.StatusInternalServerError)\n\t\tres.Write([]byte(`{\"Error message\":\"` + err.Error() + `\"}`))\n\t\treturn\n\t}\n\tjson.NewEncoder(res).Encode(user)\n}", "func (server Server) UpdateUser(w http.ResponseWriter, r *http.Request) {\n\tvars := mux.Vars(r) // mux params\n\tid, err := strconv.Atoi(vars[\"id\"]) // convert the id type from string 
to int\n\tvar res models.APIResponse // make a response\n\tvar user models.User // make a user\n\n\tif err != nil {\n\t\tlog.Printf(\"Unable to convert the string into an int. %v\", err)\n\t\tres = models.BuildAPIResponseFail(\"Unable to convert the string into an int.\", nil)\n\t} else {\n\t\terr = json.NewDecoder(r.Body).Decode(&user) // decode the json request to note\n\t\tif err != nil {\n\t\t\tlog.Printf(\"Unable to decode the request body. %v\", err)\n\t\t\tres = models.BuildAPIResponseFail(\"Unable to decode the request body.\", nil)\n\t\t} else {\n\t\t\tupdatedRows := updateUser(int64(id), user, server.db) // call update note to update the note\n\t\t\tres = models.BuildAPIResponseSuccess(\"User updated successfully.\", updatedRows)\n\t\t}\n\t}\n\tjson.NewEncoder(w).Encode(res) // send the response\n}", "func (a *API) addUser(w http.ResponseWriter, req *http.Request) {\n\t// NOTE(kiennt): Who can signup (create new user)?\n\tif err := req.ParseForm(); err != nil {\n\t\ta.respondError(w, apiError{\n\t\t\tcode: http.StatusBadRequest,\n\t\t\terr: err,\n\t\t})\n\t\treturn\n\t}\n\tusername := req.Form.Get(\"username\")\n\tpassword := req.Form.Get(\"password\")\n\tif username == \"\" || password == \"\" {\n\t\ta.respondError(w, apiError{\n\t\t\tcode: http.StatusBadRequest,\n\t\t\terr: errors.New(\"Incorrect sign up form\"),\n\t\t})\n\t\treturn\n\t}\n\n\t// Check to see if the user is already taken\n\tpath := common.Path(model.DefaultUsersPrefix, common.Hash(username, crypto.MD5))\n\tresp, err := a.etcdcli.DoGet(path)\n\tif err != nil {\n\t\ta.respondError(w, apiError{\n\t\t\tcode: http.StatusInternalServerError,\n\t\t\terr: err,\n\t\t})\n\t\treturn\n\t}\n\tif len(resp.Kvs) != 0 {\n\t\ta.respondError(w, apiError{\n\t\t\tcode: http.StatusBadRequest,\n\t\t\terr: errors.New(\"The username is already taken\"),\n\t\t})\n\t\treturn\n\t}\n\t// Do not store the plain text password, encrypt it!\n\thashed, err := common.GenerateBcryptHash(password, 
config.Get().PasswordHashingCost)\n\tif err != nil {\n\t\ta.respondError(w, apiError{\n\t\t\tcode: http.StatusInternalServerError,\n\t\t\terr: errors.Wrap(err, \"Something went wrong\"),\n\t\t})\n\t\treturn\n\t}\n\n\tuser := &model.User{\n\t\tUsername: username,\n\t\tPassword: hashed,\n\t}\n\t_ = user.Validate()\n\tr, _ := json.Marshal(&user)\n\t_, err = a.etcdcli.DoPut(path, string(r))\n\tif err != nil {\n\t\ta.respondError(w, apiError{\n\t\t\tcode: http.StatusInternalServerError,\n\t\t\terr: errors.Wrap(err, \"Unable to put a key-value pair into etcd\"),\n\t\t})\n\t\treturn\n\t}\n\t// Add user permission to view clouds\n\tif ok, err := a.policyEngine.AddPolicy(username, \"/clouds\", \"GET\"); !ok || err != nil {\n\t\ta.respondError(w, apiError{\n\t\t\tcode: http.StatusInternalServerError,\n\t\t\terr: errors.Wrap(err, \"Unable to add view cloud permission\"),\n\t\t})\n\t\treturn\n\t}\n\ta.respondSuccess(w, http.StatusOK, nil)\n}", "func (ctx *HandlerContext) SignUpHandler(w http.ResponseWriter, r *http.Request) {\n\tif r.Method != \"POST\" {\n\t\thttp.Error(w, \"that method is not allowed\", http.StatusMethodNotAllowed)\n\t\treturn\n\t}\n\tcontentType := r.Header.Get(\"Content-Type\")\n\tif contentType != \"application/json\" {\n\t\thttp.Error(w, \"request body must be in JSON\", http.StatusUnsupportedMediaType)\n\t\treturn\n\t}\n\tnewUser := &users.NewUser{}\n\tdec := json.NewDecoder(r.Body)\n\terr := dec.Decode(newUser)\n\tif err != nil {\n\t\thttp.Error(w, \"Bad request\", http.StatusBadRequest)\n\t\treturn\n\t}\n\n\tif err := newUser.Validate(); err != nil {\n\t\thttp.Error(w, \"Bad request\", http.StatusBadRequest)\n\t\treturn\n\t}\n\n\tuser, err := newUser.ToUser()\n\tif err != nil {\n\t\thttp.Error(w, \"Bad request\", http.StatusBadRequest)\n\t\treturn\n\t}\n\n\taddedUser, err := ctx.Users.Insert(user)\n\tif err != nil {\n\t\thttp.Error(w, \"Bad request\", http.StatusBadRequest)\n\t\treturn\n\t}\n\n\tstate := sessions.SessionState{SessionTime: time.Now(), 
User: addedUser}\n\n\t_, err2 := sessions.BeginSession(ctx.SigningKey, ctx.Store, state, w)\n\tif err2 != nil {\n\t\thttp.Error(w, \"Bad request\", http.StatusBadRequest)\n\t\treturn\n\t}\n\n\tw.WriteHeader(http.StatusCreated)\n\tw.Header().Add(\"Content-Type\", \"application/json\")\n\n\terr = json.NewEncoder(w).Encode(addedUser)\n\n\tif err != nil {\n\t\thttp.Error(w, \"Bad request\", http.StatusBadRequest)\n\t}\n\n}", "func (pk *JSONPasswordKeeper) Put(userinfo basicauth.Account) error {\n\tpk.mutex.Lock()\n\tdefer pk.mutex.Unlock()\n\tif _, exists := pk.userInfo[userinfo.UserName]; exists {\n\t\treturn ErrUserExists\n\t}\n\tpk.userInfo[userinfo.UserName] = userinfo\n\treturn pk.flushToDisk()\n}", "func UpdateUser(c *gin.Context) {\n\tid := c.Param(\"id\")\n\tlog.Printf(\"UpdateUser in db %v\", id)\n\tvar user models.User\n\n\tdb := db.GetDB()\n\tif err := db.Where(\"id = ?\", id).First(&user).Error; err != nil {\n\t\tc.AbortWithStatus(http.StatusNotFound)\n\t\tlog.Println(\"Failed to UpdateUser in db\")\n\t\treturn\n\t}\n\tc.BindJSON(&user)\n\tdb.Save(&user)\n\tc.JSON(http.StatusOK, &user)\n}", "func (ctx *Context) UsersMeHandler(w http.ResponseWriter, r *http.Request) {\n\tswitch r.Method {\n\tcase \"GET\":\n\t\tstate := &SessionState{}\n\t\ts, err := sessions.GetSessionID(r, ctx.SessionKey)\n\t\tif err != nil {\n\t\t\thttp.Error(w, \"Could not find user\", http.StatusInternalServerError)\n\t\t\treturn\n\t\t}\n\t\terr = ctx.SessionStore.Get(s, state)\n\t\tif err != nil {\n\t\t\thttp.Error(w, \"Error getting sessionID\", http.StatusInternalServerError)\n\t\t\treturn\n\t\t}\n\t\tencoder := json.NewEncoder(w)\n\t\tencoder.Encode(state.User)\n\tcase \"PATCH\":\n\t\t// get the current authenticated user\n\t\tstate := &SessionState{}\n\t\ts, err := sessions.GetSessionID(r, ctx.SessionKey)\n\t\tif err != nil {\n\t\t\thttp.Error(w, \"Could not find user\", http.StatusInternalServerError)\n\t\t\treturn\n\t\t}\n\t\terr = ctx.SessionStore.Get(s, state)\n\t\tif err != nil 
{\n\t\t\thttp.Error(w, \"Error getting sessionID\", http.StatusInternalServerError)\n\t\t\treturn\n\t\t}\n\t\t//get updates and apply to user\n\t\tdecoder := json.NewDecoder(r.Body)\n\t\tupdated := &users.UserUpdates{}\n\t\tif err := decoder.Decode(updated); err != nil {\n\t\t\thttp.Error(w, \"Invalid JSON\", http.StatusBadRequest)\n\t\t}\n\t\terr = ctx.UserStore.Update(updated, state.User)\n\t\tif err != nil {\n\t\t\thttp.Error(w, \"Error updating user\", http.StatusInternalServerError)\n\t\t}\n\t}\n}", "func (s *authService) saveUser(u *User) error {\n\tif err := u.Validate(); err != nil {\n\t\treturn err\n\t}\n\n\tvar sqlExec string\n\n\t// if id is nil then it is a new user\n\tif u.ID == uuid.Nil {\n\t\t// generate ID\n\t\tu.ID = uuid.NewV4()\n\t\tsqlExec = `INSERT INTO user \n\t\t(id, email, password, firstname, lastname, is_superuser, is_active, is_deleted, created_at, updated_at, deleted_at, avatar_url) \n\t\tVALUES (:id, :email, :password, :firstname, :lastname, :is_superuser, :is_active, :is_deleted, :created_at, :updated_at, :deleted_at, :avatar_url)`\n\t} else {\n\t\tsqlExec = `UPDATE user SET email=:email, password=:password, firstname=:firstname, lastname=:lastname, is_superuser=:is_superuser, \n\t\tis_active=:is_active, is_deleted=:is_deleted, created_at=:created_at, updated_at=:updated_at, deleted_at=:deleted_at, avatar_url=:avatar_url WHERE id=:id`\n\t}\n\n\ttx, err := s.db.Beginx()\n\tif err != nil {\n\t\treturn err\n\t}\n\t_, err = tx.NamedExec(sqlExec, &u)\n\tif err != nil {\n\t\ttx.Rollback()\n\t\treturn err\n\t}\n\terr = tx.Commit()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}", "func Update(c *gin.Context) {\n\tuserId, idErr := getUserID(c.Param(\"user_id\"))\n\tif idErr != nil {\n\t\tc.JSON(idErr.Status, idErr)\n\t\treturn\n\t}\n\n\tvar user models.User\n\tif err := c.ShouldBindJSON(&user); err != nil {\n\t\trestErr := rest_errors.NewBadRequestError(\"invalid json body\")\n\t\tc.JSON(restErr.Status, 
restErr)\n\t\treturn\n\t}\n\n\tuser.Id = userId\n\n\tisPartial := c.Request.Method == http.MethodPatch\n\n\tresult, err := services.UsersService.UpdateUser(isPartial, user)\n\tif err != nil {\n\t\tc.JSON(err.Status, err)\n\t\treturn\n\t}\n\tc.JSON(http.StatusOK, result.Marshal(c.GetHeader(\"X-Public\") == \"true\"))\n}", "func UpdateUser(userId int64, userData *UserEntry) error {\n _ , nerr := model.Database.Exec(\"UPDATE users SET username = ?, isadmin = ?, email = ? WHERE userid = ?\", userData.Username, userData.IsAdmin, userData.Email, userId)\n if nerr != nil {\n return nerr\n }\n return nil\n}", "func (f *Factory) UpdateUser(id string,firstname string, lastname string, age int) * domain.User {\n\treturn &domain.User{\n\t\tID:\t\t\tid,\t\t\n\t\tFirstname: firstname,\n\t\tLastname: lastname,\n\t\tAge: age,\n\t}\n\n}", "func (h *Host) SetUser(u string) {\n}", "func (h *userHandler) createUser(ctx context.Context, rw http.ResponseWriter, r *http.Request) {\n\n\tvar user = &model.User{}\n\n\terr := json.NewDecoder(r.Body).Decode(user)\n\tif err != nil {\n\n\t\th.serv.writeResponse(ctx, rw, err.Error(), http.StatusBadRequest, nil)\n\n\t\treturn\n\n\t}\n\n\tif user.Login == \"\" || user.Password == \"\" {\n\n\t\th.serv.writeResponse(ctx, rw, \"Login or password are empty\", http.StatusBadRequest, nil)\n\n\t\treturn\n\t}\n\n\terr = h.registerUser(ctx, user)\n\tif err != nil {\n\n\t\th.serv.writeResponse(ctx, rw, err.Error(), http.StatusBadRequest, nil)\n\n\t\treturn\n\t}\n\n\th.serv.writeResponse(ctx, rw, \"user was created: \"+user.Login, http.StatusCreated, user)\n\n}", "func UpdateUser(ctx iris.Context) {\n\tvar (\n\t\tuser model.User\n\t\tnewUser model.User\n\t\tresult iris.Map\n\t)\n\tid := ctx.Params().Get(\"id\") // get id by params\n\tdb := config.GetDatabaseConnection()\n\tdefer db.Close()\n\terr := db.First(&user, id).Error\n\tif err != nil {\n\t\tresult = iris.Map{\n\t\t\t\"error\": \"true\",\n\t\t\t\"status\": iris.StatusBadRequest,\n\t\t\t\"message\": 
\"user not found\",\n\t\t\t\"result\": nil,\n\t\t}\n\t}\n\tctx.ReadJSON(&newUser)\n\terr = db.Model(&user).Updates(newUser).Error\n\tif err != nil {\n\t\tresult = iris.Map{\n\t\t\t\"error\": \"true\",\n\t\t\t\"status\": iris.StatusBadRequest,\n\t\t\t\"message\": \"error when update user\",\n\t\t\t\"result\": err.Error(),\n\t\t}\n\t} else {\n\t\tresult = iris.Map{\n\t\t\t\"error\": \"false\",\n\t\t\t\"status\": iris.StatusOK,\n\t\t\t\"message\": \"success update user\",\n\t\t\t\"result\": newUser,\n\t\t}\n\t}\n\tctx.JSON(result)\n\treturn\n}", "func (a *Users) Update(w http.ResponseWriter, r *http.Request) {\n\tid := getUserID(r)\n\ta.l.Println(\"[DEBUG] get record id\", id)\n\n\t// fetch the user from the context\n\tacc := r.Context().Value(KeyUser{}).(*models.User)\n\tacc.ID = id\n\ta.l.Println(\"[DEBUG] updating user with id\", acc.ID)\n\n\terr := models.UpdateUser(acc)\n\n\tif err == models.ErrUserNotFound {\n\t\ta.l.Println(\"[ERROR] user not found\", err)\n\n\t\tw.WriteHeader(http.StatusNotFound)\n\t\tmodels.ToJSON(&GenericError{Message: \"User not found in database\"}, w)\n\t\treturn\n\t}\n\n\t// write the no content success header\n\tw.WriteHeader(http.StatusNoContent)\n}", "func CreateUserHandler(connection *sql.DB, cnf config.Config) negroni.HandlerFunc {\n\treturn negroni.HandlerFunc(func(w http.ResponseWriter, r *http.Request, next http.HandlerFunc) {\n\t\tuser := &models.UserCreate{}\n\t\terr := util.RequestToJSON(r, user)\n\t\tif err != nil {\n\t\t\tutil.SendBadRequest(w, errors.New(\"Bad json\"))\n\t\t\treturn\n\t\t}\n\n\t\tif err := user.Validate(); err == nil {\n\t\t\tif err := user.ValidatePassword(); err == nil {\n\n\t\t\t\thash, _ := bcrypt.GenerateFromPassword([]byte(user.Password), 10)\n\t\t\t\tuser.Hash = string(hash)\n\n\t\t\t\tcreatedID, err := db.CreateUser(connection, user)\n\n\t\t\t\tif err != nil {\n\t\t\t\t\tutil.SendBadRequest(w, err)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tcreatedUser, err := db.GetUserByID(connection, 
createdID)\n\t\t\t\tif err != nil {\n\t\t\t\t\tutil.SendBadRequest(w, err)\n\t\t\t\t\treturn\n\t\t\t\t}\n\n\t\t\t\t// create JWT object with claims\n\t\t\t\texpiration := time.Now().Add(time.Hour * 24 * 31).Unix()\n\t\t\t\ttoken := jwt.NewWithClaims(jwt.SigningMethodHS256, jwt.MapClaims{\n\t\t\t\t\t\"sub\": createdUser.ID,\n\t\t\t\t\t\"iat\": time.Now().Unix(),\n\t\t\t\t\t\"exp\": expiration,\n\t\t\t\t})\n\n\t\t\t\t// Load secret key from config and generate a signed token\n\t\t\t\tsecretKey := cnf.SecretKey\n\t\t\t\ttokenString, err := token.SignedString([]byte(secretKey))\n\t\t\t\tif err != nil {\n\t\t\t\t\tutil.SendError(w, err)\n\t\t\t\t\treturn\n\t\t\t\t}\n\n\t\t\t\ttype Token struct {\n\t\t\t\t\tToken string `json:\"token\"`\n\t\t\t\t\tExpiresOn string `json:\"expires_on\"`\n\t\t\t\t\tUser *models.UserResponse `json:\"user\"`\n\t\t\t\t}\n\n\t\t\t\tutil.SendOK(w, &Token{\n\t\t\t\t\tToken: tokenString,\n\t\t\t\t\tExpiresOn: strconv.Itoa(int(expiration)),\n\t\t\t\t\tUser: &createdUser,\n\t\t\t\t})\n\n\t\t\t} else {\n\t\t\t\tutil.SendBadRequest(w, err)\n\t\t\t}\n\t\t} else {\n\t\t\tutil.SendBadRequest(w, err)\n\t\t}\n\t})\n}", "func (m *SmsLogRow) SetUserId(value *string)() {\n err := m.GetBackingStore().Set(\"userId\", value)\n if err != nil {\n panic(err)\n }\n}", "func (s *Service) PutUser(ctx context.Context, u *influxdb.User) error {\n\treturn s.kv.Update(ctx, func(tx Tx) error {\n\t\treturn s.putUser(ctx, tx, u)\n\t})\n}", "func (context *HandlerContext) SpecificUserHandler(w http.ResponseWriter, r *http.Request) {\n\t// Get session state from session store.\n\n\tsessionState := &SessionState{}\n\tsessionID, err := sessions.GetState(r, context.SigningKey, context.SessionStore, sessionState)\n\tif err != nil {\n\t\thttp.Error(w, fmt.Sprintf(\"error getting session state: %v\", err), http.StatusUnauthorized)\n\t\treturn\n\t}\n\n\tpath := path.Base(r.URL.Path)\n\n\tvar givenID int64\n\n\tif path != \"me\" {\n\t\tgivenID, err = strconv.ParseInt(path, 10, 
64)\n\t}\n\n\tif err != nil {\n\t\thttp.Error(w, fmt.Sprintf(\"error parsing ID: %v\", err), http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\tswitch r.Method {\n\n\t// Get the current user from the session state and respond with that user encoded as JSON object.\n\tcase \"GET\":\n\n\t\tvar user *users.User\n\n\t\tif path == \"me\" {\n\t\t\tuser, err = sessionState.User, nil\n\t\t} else {\n\t\t\tuser, err = context.UserStore.GetByID(givenID)\n\t\t}\n\n\t\tif err != nil {\n\t\t\thttp.Error(w, fmt.Sprintf(\"no user is found with given ID: %v\", err), http.StatusNotFound)\n\t\t\treturn\n\t\t}\n\n\t\tw.Header().Add(headerContentType, contentTypeJSON)\n\t\tw.WriteHeader(http.StatusOK)\n\n\t\terr = json.NewEncoder(w).Encode(user)\n\t\tif err != nil {\n\t\t\thttp.Error(w, \"error encoding SessionState Struct to JSON\", http.StatusInternalServerError)\n\t\t\treturn\n\t\t}\n\n\t// Update the current user with the JSON in the request body,\n\t// and respond with the newly updated user, encoded as a JSON object.\n\tcase \"PATCH\":\n\t\t// Get Updates struct from request body.\n\t\tif path != \"me\" || givenID != sessionState.User.ID {\n\t\t\thttp.Error(w, \"User ID is not valid or does not match current-authenticaled user\", http.StatusForbidden)\n\t\t\treturn\n\t\t}\n\t\t// containJSON(r.Header.Get(headerContentType), w)\n\n\t\tif !strings.HasPrefix(r.Header.Get(headerContentType), contentTypeJSON) {\n\t\t\thttp.Error(w, \"request body must be in JSON\", http.StatusUnsupportedMediaType)\n\t\t\treturn\n\t\t}\n\n\t\t// Remove the user old fields from the trie.\n\t\tcontext.Trie.Remove(sessionState.User.FirstName, sessionState.User.ID)\n\t\tcontext.Trie.Remove(sessionState.User.LastName, sessionState.User.ID)\n\n\t\tupdates := &users.Updates{}\n\t\terr := json.NewDecoder(r.Body).Decode(updates)\n\t\tif err != nil {\n\t\t\thttp.Error(w, \"error decoding request body: invalid JSON in request body\", http.StatusBadRequest)\n\t\t\treturn\n\t\t}\n\t\t// Update session 
state.\n\t\tsessionState.User.FirstName = updates.FirstName\n\t\tsessionState.User.LastName = updates.LastName\n\n\t\t// Update session store.\n\t\terr = context.SessionStore.Save(sessionID, sessionState)\n\t\tif err != nil {\n\t\t\thttp.Error(w, fmt.Sprintf(\"error saving updated session state to session store: %s\", err), http.StatusInternalServerError)\n\t\t\treturn\n\t\t}\n\n\t\t// Insert the updated user fields into the trie.\n\t\tcontext.Trie.Insert(sessionState.User.FirstName, sessionState.User.ID)\n\t\tcontext.Trie.Insert(sessionState.User.LastName, sessionState.User.ID)\n\n\t\t// Update user store.\n\t\tuser, err := context.UserStore.Update(sessionState.User.ID, updates)\n\n\t\tif err != nil {\n\t\t\thttp.Error(w, fmt.Sprintf(\"error updating user store: %s\", err), http.StatusInternalServerError)\n\t\t\treturn\n\t\t}\n\n\t\tw.Header().Add(headerContentType, contentTypeJSON)\n\t\tw.WriteHeader(http.StatusOK)\n\n\t\t// err = json.NewEncoder(w).Encode(sessionState.User)\n\t\terr = json.NewEncoder(w).Encode(user)\n\t\tif err != nil {\n\t\t\thttp.Error(w, \"error encoding SessionState Struct to JSON\", http.StatusInternalServerError)\n\t\t\treturn\n\t\t}\n\n\tdefault:\n\t\thttp.Error(w, \"expect GET or PATCH method only\", http.StatusMethodNotAllowed)\n\t\treturn\n\t}\n}", "func (user *User) Put(field string, value interface{}) {\n\tdb.First(user).Update(field, value)\n}", "func (p *Processor) updateUser(user User) {\n\tp.users[user.Id] = user\n}", "func OnBoardUser(u *models.User) error {\n\to := GetOrmer()\n\tcreated, id, err := o.ReadOrCreate(u, \"Username\")\n\tif err != nil {\n\t\treturn err\n\t}\n\tif created {\n\t\tu.UserID = int(id)\n\t\t// current orm framework doesn't support to fetch a pointer or sql.NullString with QueryRow\n\t\t// https://github.com/astaxie/beego/issues/3767\n\t\tif len(u.Email) == 0 {\n\t\t\t_, err = o.Raw(\"update harbor_user set email = null where user_id = ? 
\", id).Exec()\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t} else {\n\t\texisting, err := GetUser(*u)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tu.Email = existing.Email\n\t\tu.SysAdminFlag = existing.SysAdminFlag\n\t\tu.Realname = existing.Realname\n\t\tu.UserID = existing.UserID\n\t}\n\treturn nil\n}" ]
[ "0.7637592", "0.7349026", "0.676662", "0.6664704", "0.64797777", "0.64533484", "0.6371254", "0.6328185", "0.62599856", "0.62352246", "0.62276334", "0.6226214", "0.6223639", "0.62121433", "0.6204188", "0.6164084", "0.6130815", "0.6123331", "0.6111087", "0.61034936", "0.60974777", "0.609597", "0.60953236", "0.6079551", "0.6048001", "0.6046353", "0.60435945", "0.60421264", "0.60281533", "0.60269654", "0.602096", "0.6015632", "0.5983673", "0.598316", "0.5950145", "0.59456414", "0.5927528", "0.5905189", "0.5893824", "0.5888378", "0.58848035", "0.5881218", "0.58448744", "0.58408195", "0.5836028", "0.5831159", "0.58236706", "0.5820083", "0.5817802", "0.5816679", "0.5808205", "0.58013475", "0.5790225", "0.5782465", "0.57712084", "0.5768169", "0.57642597", "0.576399", "0.5759173", "0.5749179", "0.5746776", "0.57455677", "0.5744283", "0.5733939", "0.57154197", "0.57103574", "0.5702464", "0.5699568", "0.56931716", "0.5691207", "0.5683006", "0.5675591", "0.5675211", "0.5661125", "0.5657748", "0.56566006", "0.5654865", "0.56528085", "0.56521326", "0.56440437", "0.5637734", "0.56348646", "0.5624609", "0.5622909", "0.5615753", "0.5612259", "0.56080484", "0.56035906", "0.55992705", "0.55986196", "0.55966985", "0.55921674", "0.5588647", "0.55882186", "0.5587865", "0.55871195", "0.5585392", "0.55825114", "0.5581896", "0.55707645" ]
0.7697687
0
DeleteUserHandler elimina un usuario en base al id
DeleteUserHandler удаляет пользователя по идентификатору
func DeleteUserHandler(w http.ResponseWriter, r *http.Request) { params := mux.Vars(r) k := params["id"] if _, ok := Listusers[k]; ok { delete(Listusers, k) } else { log.Printf("No encontramos el id %s", k) } w.WriteHeader(http.StatusNoContent) }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func DeleteUserHandler(connection *sql.DB, cnf config.Config) negroni.HandlerFunc {\n\treturn negroni.HandlerFunc(func(w http.ResponseWriter, r *http.Request, next http.HandlerFunc) {\n\t\tvar queryToken = r.URL.Query().Get(\"token\")\n\n\t\tif len(queryToken) < 1 {\n\t\t\tqueryToken = r.Header.Get(\"token\")\n\t\t}\n\n\t\tif len(queryToken) < 1 {\n\t\t\tutil.SendBadRequest(w, errors.New(\"token is mandatory\"))\n\t\t\treturn\n\t\t}\n\n\t\tuser := &models.UserResponse{}\n\t\terr := util.RequestToJSON(r, user)\n\t\tif err != nil {\n\t\t\tutil.SendBadRequest(w, errors.New(\"Bad json\"))\n\t\t\treturn\n\t\t}\n\n\t\tsecretKey := cnf.SecretKey\n\t\ttok, err := jwt.Parse(queryToken, func(t *jwt.Token) (interface{}, error) {\n\t\t\treturn []byte(secretKey), nil\n\t\t})\n\n\t\tclaims := tok.Claims.(jwt.MapClaims)\n\t\tvar ID = claims[\"sub\"].(float64)\n\n\t\tif int64(ID) != user.ID {\n\t\t\tutil.SendBadRequest(w, errors.New(\"you can only delete your own user object\"))\n\t\t\treturn\n\t\t}\n\n\t\tdb.DeleteUser(connection, user)\n\t\tif err != nil {\n\t\t\tutil.SendBadRequest(w, err)\n\t\t\treturn\n\t\t}\n\t\tutil.SendOK(w, string(\"\"))\n\n\t})\n}", "func (h *UserHandler) Delete(w http.ResponseWriter, r *http.Request) {\n\tvars := mux.Vars(r)\n\tidStr := vars[\"id\"]\n\tid, err := strconv.ParseUint(idStr, 10, 64)\n\tif err != nil {\n\t\tlog.Println(errors.Wrapf(err, \"error parse uint:%v\", idStr))\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\treturn\n\t}\n\n\tlog.Printf(\"/users/%d DELETE handled\", id)\n\n\tif err := h.model.Delete(id); err != nil {\n\t\tlog.Println(err)\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\tw.WriteHeader(http.StatusNoContent)\n}", "func deleteUser(w http.ResponseWriter, r *http.Request) {\r\n\tparams := mux.Vars(r)\r\n\tstmt, err := db.Prepare(\"DELETE FROM users WHERE id = ?\")\r\n\tif err != nil {\r\n\t\tpanic(err.Error())\r\n\t}\r\n\t_, err = stmt.Exec(params[\"id\"])\r\n\tif err != nil 
{\r\n\t\tpanic(err.Error())\r\n\t}\r\n\tfmt.Fprintf(w, \"User with id = %s was deleted\", params[\"id\"])\r\n}", "func DeleteUserHandler(w http.ResponseWriter, r *http.Request, db *gorp.DbMap, c *businesscontext.Ctx) error {\n\tvars := mux.Vars(r)\n\tusername := vars[\"username\"]\n\n\tif !c.User.Admin && username != c.User.Username {\n\t\treturn WriteJSON(w, r, nil, http.StatusForbidden)\n\t}\n\n\tu, errLoad := user.LoadUserWithoutAuth(db, username)\n\tif errLoad != nil {\n\t\treturn sdk.WrapError(errLoad, \"deleteUserHandler> Cannot load user from db\")\n\t}\n\n\ttx, errb := db.Begin()\n\tif errb != nil {\n\t\treturn sdk.WrapError(errb, \"deleteUserHandler> cannot start transaction\")\n\t}\n\tdefer tx.Rollback()\n\n\tif err := user.DeleteUserWithDependencies(tx, u); err != nil {\n\t\treturn sdk.WrapError(err, \"deleteUserHandler> cannot delete user\")\n\t}\n\n\tif err := tx.Commit(); err != nil {\n\t\treturn sdk.WrapError(err, \"deleteUserHandler> cannot commit transaction\")\n\t}\n\n\treturn nil\n}", "func (api *API) deleteUserHandler() service.Handler {\n\treturn func(ctx context.Context, w http.ResponseWriter, r *http.Request) error {\n\t\tvars := mux.Vars(r)\n\t\tusername := vars[\"permUsernamePublic\"]\n\n\t\tconsumer := getUserConsumer(ctx)\n\n\t\ttx, err := api.mustDB().Begin()\n\t\tif err != nil {\n\t\t\treturn sdk.WrapError(err, \"cannot start transaction\")\n\t\t}\n\t\tdefer tx.Rollback() // nolint\n\n\t\tvar u *sdk.AuthentifiedUser\n\t\tif username == \"me\" {\n\t\t\tu, err = user.LoadByID(ctx, tx, consumer.AuthConsumerUser.AuthentifiedUserID)\n\t\t} else {\n\t\t\tu, err = user.LoadByUsername(ctx, tx, username)\n\t\t}\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\t// We can't delete the last admin\n\t\tif u.Ring == sdk.UserRingAdmin {\n\t\t\tcount, err := user.CountAdmin(tx)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tif count < 2 {\n\t\t\t\treturn sdk.NewErrorFrom(sdk.ErrForbidden, \"can't remove the last 
admin\")\n\t\t\t}\n\t\t}\n\n\t\t// We can't delete a user if it's the last admin in a group\n\t\tvar adminGroupIDs []int64\n\t\tgus, err := group.LoadLinksGroupUserForUserIDs(ctx, tx, []string{u.ID})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tfor i := range gus {\n\t\t\tif gus[i].Admin {\n\t\t\t\tadminGroupIDs = append(adminGroupIDs, gus[i].GroupID)\n\t\t\t}\n\t\t}\n\t\tif len(adminGroupIDs) > 0 {\n\t\t\tgus, err := group.LoadLinksGroupUserForGroupIDs(ctx, tx, adminGroupIDs)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tadminLeftCount := make(map[int64]int)\n\t\t\tfor _, id := range adminGroupIDs {\n\t\t\t\tadminLeftCount[id] = 0\n\t\t\t}\n\t\t\tfor i := range gus {\n\t\t\t\tif gus[i].AuthentifiedUserID != u.ID && gus[i].Admin {\n\t\t\t\t\tadminLeftCount[gus[i].GroupID] += 1\n\t\t\t\t}\n\t\t\t}\n\t\t\tfor _, count := range adminLeftCount {\n\t\t\t\tif count < 1 {\n\t\t\t\t\treturn sdk.NewErrorFrom(sdk.ErrForbidden, \"cannot remove user because it is the last admin of a group\")\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\tif err := user.DeleteByID(tx, u.ID); err != nil {\n\t\t\treturn sdk.WrapError(err, \"cannot delete user\")\n\t\t}\n\n\t\tif err := tx.Commit(); err != nil {\n\t\t\treturn sdk.WithStack(err)\n\t\t}\n\n\t\treturn service.WriteJSON(w, nil, http.StatusOK)\n\t}\n}", "func DeleteUser(c *gin.Context) {}", "func (uh *UserHandler) Delete(c echo.Context) error {\n\tid_, err := strconv.Atoi(c.Param(\"id\"))\n\tid := uint(id_)\n\n\terr = uh.UserUseCase.Delete(id)\n\n\tif err != nil {\n\t\treturn c.JSON(GetStatusCode(err), ResponseError{Message: err.Error()})\n\t}\n\n\treturn c.NoContent(http.StatusNoContent)\n}", "func DeleteUser(w http.ResponseWriter, req *http.Request) {\n\tvars := mux.Vars(req)\n\tid := vars[\"id\"]\n\n\tif err := db.Remove(id); err != nil {\n\t\thandleError(err, \"Failed to remove User: %v\", w)\n\t\treturn\n\t}\n\n\tw.Write([]byte(\"OK\"))\n}", "func (app *App) deleteUser(w http.ResponseWriter, r *http.Request) {\n\tvars := 
mux.Vars(r)\n\tid, err := strconv.Atoi(vars[\"id\"])\n\tif err != nil {\n\t\trespondWithError(w, http.StatusInternalServerError, err.Error())\n\t\treturn\n\t}\n\n\tuser := &users.User{ID: int64(id)}\n\terr = user.DeleteUser(app.Db)\n\tif err != nil {\n\t\trespondWithError(w, http.StatusNotFound, err.Error())\n\t\treturn\n\t}\n\n\trespondWithJSON(w, http.StatusOK, map[string]string{\"message\": \"User deleted successfully\"})\n}", "func (ac *ApiConfig) DeleteUserHandler(w http.ResponseWriter, r *http.Request) {\n\tif r.Method != http.MethodGet {\n\t\thttp.Error(w, r.Method+\" is not available\", http.StatusInternalServerError)\n\t\tzerolog.Error().Msg(r.Method + \" is not available\")\n\t\treturn\n\t}\n\n\tuserID := r.URL.Query().Get(\"user_id\")\n\tif userID == \"\" {\n\t\thttp.Error(w, \"user_id is empty, fill it \", http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\tid, err := strconv.Atoi(userID)\n\tif err != nil {\n\t\thttp.Error(w, \"user_id is not an integer\", http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\terr = ac.DHolder.DeleteUser(id)\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\tstat := &models.StatusIdentifier{\n\t\tOk: true,\n\t\tMessage: \"User Deleted\",\n\t}\n\n\terr = dResponseWriter(w, stat, http.StatusOK)\n\tif err != nil {\n\t\tzerolog.Error().Msg(err.Error())\n\t\treturn\n\t}\n\n\treturn\n}", "func deleteUser(res http.ResponseWriter, req *http.Request, p httprouter.Params) {\n\t_, err := db.Exec(`\n\t\tDELETE FROM accounts\n\t\tWHERE username = $1;`, p.ByName(\"username\"),\n\t)\n\tif err != nil {\n\t\tlog.Println(\"deleteUser:\", err)\n\t}\n\n\twriteJSON(res, 200, jsMap{\"status\": \"OK\"})\n}", "func deleteUser(c *gin.Context) {\n\tvar user user\n\tuserID := c.Param(\"id\")\n\n\tdb.First(&user, userID)\n\n\tif user.Id == 0 {\n\t\tc.JSON(http.StatusNotFound, gin.H{\"status\": http.StatusNotFound, \"message\": \"No user 
found!\"})\n\t\treturn\n\t}\n\n\tdb.Delete(&user)\n\tc.JSON(http.StatusOK, gin.H{\"status\": http.StatusOK, \"message\": \"User deleted successfully!\"})\n}", "func DeleteUserHandler(w http.ResponseWriter, req *http.Request) {\n\n\t// Get session values or redirect to Login\n\tsession, err := sessions.Store.Get(req, \"session\")\n\n\tif err != nil {\n\t\tlog.Println(\"error identifying session\")\n\t\thttp.Redirect(w, req, \"/login/\", 302)\n\t\treturn\n\t\t// in case of error\n\t}\n\n\t// Prep for user authentication\n\tsessionMap := getUserSessionValues(session)\n\n\tusername := sessionMap[\"username\"]\n\tloggedIn := sessionMap[\"loggedin\"]\n\tisAdmin := sessionMap[\"isAdmin\"]\n\n\tvars := mux.Vars(req)\n\tidString := vars[\"id\"]\n\n\tpk, err := strconv.Atoi(idString)\n\tif err != nil {\n\t\tpk = 0\n\t\tlog.Println(err)\n\t}\n\n\tfmt.Println(session)\n\n\tif isAdmin != \"true\" {\n\t\thttp.Redirect(w, req, \"/\", 302)\n\t\treturn\n\t}\n\n\tuser, err := database.PKLoadUser(db, int64(pk))\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t\tfmt.Println(\"Unable to load User\")\n\t\thttp.Redirect(w, req, \"/\", http.StatusSeeOther)\n\t}\n\n\tuser.IsAdmin = true\n\n\terr = database.UpdateUser(db, user)\n\tif err != nil {\n\t\tlog.Println(err)\n\t}\n\n\twv := WebView{\n\t\tUser: user,\n\t\tIsLoggedIn: loggedIn,\n\t\tSessionUser: username,\n\t\tIsAdmin: isAdmin,\n\t\tUserFrame: false,\n\t\tArchitecture: baseArchitecture,\n\t}\n\n\tif req.Method == \"GET\" {\n\t\tRender(w, \"templates/delete_user.html\", wv)\n\t}\n\n\tif req.Method == \"POST\" {\n\n\t\terr := database.DeleteUser(db, user.ID)\n\t\tif err != nil {\n\t\t\tlog.Println(err)\n\t\t}\n\n\t\turl := \"/user_index/\"\n\n\t\thttp.Redirect(w, req, url, http.StatusFound)\n\t}\n\n}", "func (auh *AdminUserHandler) DeleteUser(w http.ResponseWriter, r *http.Request, ps httprouter.Params) {\n\n\tvar apiKey = r.Header.Get(\"api-key\")\n\tif apiKey == \"\" || (apiKey != adminApiKey && apiKey != userApiKey) 
{\n\t\tw.Header().Set(\"Content-Type\", \"application/json\")\n\t\thttp.Error(w, http.StatusText(http.StatusForbidden), http.StatusForbidden)\n\t\treturn\n\t}\n\tid, err := strconv.Atoi(ps.ByName(\"id\"))\n\n\tif err != nil {\n\t\tw.Header().Set(\"Content-Type\", \"application/json\")\n\t\thttp.Error(w, http.StatusText(http.StatusNotFound), http.StatusNotFound)\n\t\treturn\n\t}\n\n\t_, errs := auh.userService.DeleteUser(uint(id))\n\n\tif len(errs) > 0 {\n\t\tw.Header().Set(\"Content-Type\", \"application/json\")\n\t\thttp.Error(w, http.StatusText(http.StatusNotFound), http.StatusNotFound)\n\t\treturn\n\t}\n\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\tw.WriteHeader(http.StatusNoContent)\n\treturn\n}", "func (h *Handler) deleteUser(c *gin.Context) handlerResponse {\n\n\tdeletedUser, err := h.service.User.Delete(c.Param(userParameter), h.who(c))\n\tif err != nil {\n\t\treturn handleError(err)\n\t}\n\t// Remove password so we do not show in response\n\tdeletedUser.Password = \"\"\n\treturn handleOK(deletedUser)\n}", "func DeleteUser(w http.ResponseWriter, r *http.Request) {\n\tparams := mux.Vars(r)\n\n\tuserID, err := strconv.ParseInt(params[\"id\"], 10, 64)\n\tif err != nil {\n\t\tresponses.Error(w, http.StatusBadRequest, err)\n\t\treturn\n\t}\n\n\tuserIDToken, err := authentication.ExtractUserId(r)\n\tif err != nil {\n\t\tresponses.Error(w, http.StatusUnauthorized, err)\n\t\treturn\n\t}\n\n\tif userIDToken != userID {\n\t\tresponses.Error(w, http.StatusForbidden, errors.New(\"não é possível manipular usuário de terceiros\"))\n\t\treturn\n\t}\n\n\tdb, err := database.Connect()\n\tif err != nil {\n\t\tresponses.Error(w, http.StatusInternalServerError, err)\n\t\treturn\n\t}\n\tdefer db.Close()\n\n\trepository := repository.NewRepositoryUser(db)\n\n\tif err := repository.DeleteUser(userID); err != nil {\n\t\tresponses.Error(w, http.StatusInternalServerError, err)\n\t\treturn\n\t}\n\n\tresponses.JSON(w, http.StatusNoContent, nil)\n}", "func DeleteUser(id 
int) {\n\tvar i int\n\ti = GetIndexOfUser(id)\n\tDeleteUserFromDatabase(i)\n}", "func Delete(w http.ResponseWriter, r *http.Request) {\n\tuserID := context.Get(r, \"userID\").(int)\n\n\t// Excluindo usuário logado\n\terr := ServiceUser.Delete(userID)\n\n\tif err != nil {\n\t\tw.Write(util.MessageInfo(\"message\", err.Error()))\n\t\treturn\n\t}\n\n\tw.Write(util.MessageInfo(\"message\", \"Excluído com sucesso\"))\n}", "func (pc UserController) Delete(c *gin.Context) {\n\tid := c.Params.ByName(\"id\")\n\tvar u repository.UserRepository\n\tidInt, _ := strconv.Atoi(id)\n\tif err := u.DeleteByID(idInt); err != nil {\n\t\tc.AbortWithStatus(403)\n\t\tc.JSON(http.StatusBadRequest, gin.H{\"error\": err.Error()})\n\t\treturn\n\t}\n\n\tc.JSON(200, gin.H{\"success\": \"ID\" + id + \"のユーザーを削除しました\"})\n\treturn\n}", "func DeleteUser(w http.ResponseWriter, r *http.Request) {\r\n\tdefer r.Body.Close()\r\n\tuser := r.Context().Value(\"user\").(string)\r\n\r\n\tif err := dao.DBConn.RemoveUserByEmail(user); err != nil {\r\n\t\tlog.Println(err)\r\n\t\tu.RespondWithError(w, http.StatusBadRequest, \"User doesn't exist or has already been deleted\")\r\n\t\treturn\r\n\t}\r\n\r\n\tif err := dao.DBConn.RemoveUserExpenses(user); err != nil {\r\n\t\tlog.Println(err)\r\n\t\tu.RespondWithError(w, http.StatusBadRequest, \"User doesn't exist or has already been deleted\")\r\n\t\treturn\r\n\t}\r\n\r\n\tu.RespondWithJSON(w, http.StatusOK, \"User deleted\")\r\n}", "func DeleteUser(c *gin.Context) {\n\tuuid := c.Params.ByName(\"uuid\")\n\tvar user models.User\n\tdb := db.GetDB()\n\tif uuid != \"\" {\n\n\t\tjwtClaims := jwt.ExtractClaims(c)\n\t\tauthUserAccessLevel := jwtClaims[\"access_level\"].(float64)\n\t\tauthUserUUID := jwtClaims[\"uuid\"].(string)\n\t\tif authUserAccessLevel != 1 {\n\t\t\tif authUserUUID != uuid {\n\t\t\t\tc.AbortWithStatusJSON(http.StatusUnauthorized, gin.H{\n\t\t\t\t\t\"error\": \"Sorry but you can't delete user, ONLY admins 
can\",\n\t\t\t\t})\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t\t// DELETE FROM users WHERE uuid= user.uuid\n\t\t// exemple : UPDATE users SET deleted_at=date.now WHERE uuid = user.uuid;\n\t\tif err := db.Where(\"uuid = ?\", uuid).Delete(&user).Error; err != nil {\n\t\t\t// error handling...\n\t\t\tc.AbortWithStatusJSON(http.StatusBadRequest, gin.H{\n\t\t\t\t\"error\": err.Error(),\n\t\t\t})\n\t\t\treturn\n\t\t}\n\n\t\t// Display JSON result\n\t\t// c.JSON(200, gin.H{\"success\": \"User #\" + uuid + \" deleted\"})\n\t\tc.JSON(200, gin.H{\"success\": \"User successfully deleted\"})\n\t} else {\n\t\t// Display JSON error\n\t\tc.JSON(404, gin.H{\"error\": \"User not found\"})\n\t}\n\n}", "func DeleteUser(c *gin.Context) {\n\tvar user Models.User\n\tid := c.Params.ByName(\"id\")\n\terr := Models.DeleteUser(&user, id)\n\tif err != nil {\n\t\tc.AbortWithStatus(http.StatusNotFound)\n\t} else { \n\t\tc.JSON(http.StatusOK, gin.H{\"id\":\"is deleted\"})\n\t}\n}", "func HandleUserDelete(c *gin.Context) {\n\tuid := c.Param(\"uid\")\n\n\tvar u User\n\taffected, err := u.Delete(uid)\n\tif err != nil {\n\t\tc.JSON(http.StatusInternalServerError, gin.H{\n\t\t\t\"code\": 1,\n\t\t\t\"msg\": err.Error(),\n\t\t\t\"data\": gin.H{},\n\t\t})\n\t\treturn\n\t}\n\n\tc.JSON(http.StatusOK, gin.H{\n\t\t\"code\": 0,\n\t\t\"msg\": \"ok\",\n\t\t\"data\": gin.H{\n\t\t\t\"affected\": affected,\n\t\t},\n\t})\n}", "func DeleteUser(clients *common.ClientContainer, handler common.HandlerInterface) http.HandlerFunc {\n\treturn func(w http.ResponseWriter, r *http.Request) {\n\t\tuserID := chi.URLParam(r, \"userID\")\n\t\tID, err := strconv.Atoi(userID)\n\t\tif err != nil {\n\t\t\tcommon.WriteErrorToResponse(w, http.StatusUnprocessableEntity,\n\t\t\t\thttp.StatusText(http.StatusUnprocessableEntity),\n\t\t\t\t\"userID provided is not integer\")\n\t\t\treturn\n\t\t}\n\n\t\t// check if the ID exists\n\t\t_, err = handler.GetUserID(clients, ID)\n\t\tif err != nil {\n\t\t\tswitch err.(type) {\n\t\t\t// 
grafanaclient.NotFound means, that user provided the\n\t\t\t// ID of non existent user. We return 404\n\t\t\tcase grafanaclient.NotFound:\n\t\t\t\terrMsg := fmt.Sprintf(\"User Not Found\")\n\t\t\t\tcommon.WriteErrorToResponse(w, http.StatusNotFound,\n\t\t\t\t\terrMsg, err.Error())\n\t\t\t\treturn\n\t\t\t// If any other error happened -> return 500 error\n\t\t\tdefault:\n\t\t\t\tlog.Logger.Error(err)\n\t\t\t\tcommon.WriteErrorToResponse(w, http.StatusInternalServerError,\n\t\t\t\t\thttp.StatusText(http.StatusInternalServerError),\n\t\t\t\t\t\"Internal server error occured\")\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\n\t\t// if ID exists then delete that user\n\t\terr = handler.DeleteUser(clients, ID)\n\t\tif err != nil {\n\t\t\tlog.Logger.Error(err)\n\t\t\tcommon.WriteErrorToResponse(w, http.StatusInternalServerError,\n\t\t\t\thttp.StatusText(http.StatusInternalServerError),\n\t\t\t\t\"Internal server error occured\")\n\t\t\treturn\n\t\t}\n\t}\n}", "func (server Server) DeleteUser(w http.ResponseWriter, r *http.Request) {\n\tvars := mux.Vars(r) //mux params\n\tid, err := strconv.Atoi(vars[\"id\"]) // convert the id in string to int\n\tvar res models.APIResponse // make a response\n\n\tif err != nil {\n\t\tlog.Printf(\"Unable to convert the string into int. 
%v\", err)\n\t\tres = models.BuildAPIResponseFail(\"Unable to convert the string into int\", nil)\n\t} else {\n\t\tdeletedRows := deleteUser(int64(id), server.db) // call the deleteUser, convert the int to int64\n\t\tres = models.BuildAPIResponseSuccess(\"User updated successfully.\", deletedRows)\n\t}\n\t// send the response\n\tjson.NewEncoder(w).Encode(res)\n}", "func (u *UserCtr) DeleteUser(c *gin.Context) {\n\tid,err := strconv.Atoi(c.Param(\"id\"))\n\tif err != nil {\n\t\tresp := errors.New(err.Error())\n\t\tc.JSON(http.StatusInternalServerError, resp)\n\t\treturn\n\t}\n\n\terr = model.DeleteUser(u.DB,id)\n\tif err != nil {\n\t\tresp := errors.New(err.Error())\n\t\tc.JSON(http.StatusInternalServerError, resp)\n\t\treturn\n\t}\n\n\tc.JSON(http.StatusOK, gin.H{\n\t})\n\treturn\n}", "func UserDelete(w http.ResponseWriter, r *http.Request) {\n\n\t// Init output\n\toutput := []byte(\"\")\n\n\t// Add content type header to the response\n\tcontentType := \"application/json\"\n\tcharset := \"utf-8\"\n\tw.Header().Add(\"Content-Type\", fmt.Sprintf(\"%s; charset=%s\", contentType, charset))\n\n\t// Grab context references\n\trefStr := gorillaContext.Get(r, \"str\").(stores.Store)\n\t// Grab url path variables\n\turlVars := mux.Vars(r)\n\turlUser := urlVars[\"user\"]\n\n\tuserUUID := auth.GetUUIDByName(urlUser, refStr)\n\n\terr := auth.RemoveUser(userUUID, refStr)\n\tif err != nil {\n\t\tif err.Error() == \"not found\" {\n\t\t\terr := APIErrorNotFound(\"User\")\n\t\t\trespondErr(w, err)\n\t\t\treturn\n\t\t}\n\t\terr := APIErrGenericInternal(err.Error())\n\t\trespondErr(w, err)\n\t\treturn\n\t}\n\n\t// Write empty response if anything ok\n\trespondOK(w, output)\n\n}", "func DeleteUser(db *gorm.DB, w http.ResponseWriter, r *http.Request) {\n\tvars := mux.Vars(r)\n\n\tid := vars[\"id\"]\n\tuser := getUserByID(db, id, w, r)\n\tif user == nil {\n\t\treturn\n\t}\n\tif err := db.Delete(&user).Error; err != nil {\n\t\tRespondError(w, http.StatusInternalServerError, 
\"\")\n\t\treturn\n\t}\n\tRespondJSON(w, http.StatusNoContent, nil)\n}", "func DeleteUserHandler(w http.ResponseWriter, r *http.Request) {\n\tid := mux.Vars(r)[\"id\"]\n\tstatus := users.DeleteUser(id)\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\tw.WriteHeader(status)\n}", "func DeleteUser(c *gin.Context) {\n\tid := c.Param(\"id\")\n\tlog.Printf(\"DeleteUser in db %v\", id)\n\tvar user models.User\n\tdb := db.GetDB()\n\n\tif err := db.Where(\"id = ?\", id).First(&user).Error; err != nil {\n\t\tc.AbortWithStatus(http.StatusNotFound)\n\t\tlog.Println(\"Failed to DeleteUser in db\")\n\t\treturn\n\t}\n\n\tdb.Delete(&user)\n}", "func DeleteUser(ctx iris.Context) {\n\tvar (\n\t\tuser model.User\n\t\tresult iris.Map\n\t)\n\tid := ctx.Params().Get(\"id\") // get id by params\n\tdb := config.GetDatabaseConnection()\n\tdefer db.Close()\n\terr := db.First(&user, id).Error\n\tif err != nil {\n\t\tresult = iris.Map{\n\t\t\t\"error\": \"true\",\n\t\t\t\"status\": iris.StatusBadRequest,\n\t\t\t\"message\": \"User not found\",\n\t\t\t\"result\": nil,\n\t\t}\n\t}\n\n\terr = db.Where(\"id = ?\", id).Delete(&user, id).Error\n\tif err != nil {\n\t\tresult = iris.Map{\n\t\t\t\"error\": \"true\",\n\t\t\t\"status\": iris.StatusBadRequest,\n\t\t\t\"message\": \"Failed Delete user\",\n\t\t\t\"result\": err.Error(),\n\t\t}\n\t} else {\n\t\tresult = iris.Map{\n\t\t\t\"error\": \"false\",\n\t\t\t\"status\": iris.StatusOK,\n\t\t\t\"message\": \"Failed Delete user\",\n\t\t\t\"result\": nil,\n\t\t}\n\t}\n\tctx.JSON(result)\n\treturn\n}", "func (s *Server) deleteUser(request *restful.Request, response *restful.Response) {\n\t// Authorize\n\tif !s.auth(request, response) {\n\t\treturn\n\t}\n\t// get user-id and put into temp\n\tuserId := request.PathParameter(\"user-id\")\n\tif err := s.dataStore.DeleteUser(userId); err != nil {\n\t\tinternalServerError(response, err)\n\t\treturn\n\t}\n\tok(response, Success{RowAffected: 1})\n}", "func DeleteUser(c *gin.Context) {\n\tvar user 
models.User\n\tid := c.Params.ByName(\"id\")\n\terr := models.DeleteUser(&user, id)\n\tif err != nil {\n\t\tc.AbortWithStatus(http.StatusNotFound)\n\t} else {\n\t\tc.JSON(http.StatusOK, gin.H{\"id\" + id: \"is deleted\"})\n\t}\n}", "func DeleteUser(w http.ResponseWriter, r *http.Request) {\n\tfmt.Println(\"Delete user endpoint hit\")\n\t\n\tvars := mux.Vars(r)\n\n\tid := vars[\"id\"]\n\n\tvar user models.User\n\n\tmessage := user.Destroy(id)\n\n json.NewEncoder(w).Encode(message)\n}", "func (h *userHandler) deleteUser(ctx context.Context, rw http.ResponseWriter, r *http.Request) {\n\n\tlogin := fmt.Sprint(r.URL.Query().Get(\"login\"))\n\n\terr := h.serv.DB.UserCol.UpdateActive(ctx, login)\n\n\tif err != nil {\n\n\t\th.serv.writeResponse(ctx, rw, err.Error(), http.StatusBadRequest, nil)\n\n\t\treturn\n\t}\n\n\tuser, _ := h.serv.getUserFromClaimsFromCookie(ctx, r)\n\n\tif login == user.Login {\n\n\t\th.serv.writeResponse(ctx, rw, \"user active was updated and logged out\", http.StatusOK, user)\n\n\t} else {\n\n\t\th.serv.writeResponse(ctx, rw, \"user active was updated\", http.StatusOK, nil)\n\n\t}\n}", "func (h *Handler) delete() http.HandlerFunc {\n\treturn func(w http.ResponseWriter, r *http.Request) {\n\t\tvars := mux.Vars(r)\n\t\tid := vars[userID]\n\t\terr := h.UserDAO.Delete(r.Context(), id)\n\t\tswitch {\n\t\tcase errors.Is(err, errorx.ErrNoUser):\n\t\t\tmsg := &errorMessage{\n\t\t\t\tMessage: fmt.Sprintf(\"user %s does not exist\", id),\n\t\t\t}\n\t\t\tresponse.JSON(w, http.StatusNotFound, msg)\n\t\t\treturn\n\t\tcase errors.Is(err, errorx.ErrDeleteUser):\n\t\t\tmsg := &errorMessage{\n\t\t\t\tMessage: fmt.Sprintf(\"user %s has been deleted\", id),\n\t\t\t}\n\t\t\tresponse.JSON(w, http.StatusGone, msg)\n\t\t\treturn\n\t\tcase err != nil:\n\t\t\tmsg := &errorMessage{\n\t\t\t\tError: err.Error(),\n\t\t\t\tMessage: \"user datastore error\",\n\t\t\t}\n\t\t\tresponse.JSON(w, http.StatusInternalServerError, msg)\n\t\t\treturn\n\t\tdefault:\n\t\t\tresponse.JSON(w, 
http.StatusNoContent, nil)\n\t\t}\n\t}\n\n}", "func DeleteUser(c *gin.Context) {\n\t// Get model if exist\n\tvar user models.User\n\tif err := models.DB.Where(\"id = ?\", c.Param(\"id\")).First(&user).Error; err != nil {\n\t\tc.JSON(http.StatusBadRequest, gin.H{\"error\": \"Record not found!\"})\n\t\treturn\n\t}\n\n\tmodels.DB.Delete(&user)\n\n\tc.JSON(http.StatusOK, gin.H{\"data\": true})\n}", "func DeleteUser(user *models.User, id string) (err error) {\n\tconfig.DB.Where(\"id = ?\", id).Delete(user)\n\treturn nil\n}", "func (s *Server) deleteUser(request *restful.Request, response *restful.Response) {\n\t// Authorize\n\tif !s.auth(request, response) {\n\t\treturn\n\t}\n\t// get user-id and put into temp\n\tuserId := request.PathParameter(\"user-id\")\n\tif err := s.DataStore.DeleteUser(userId); err != nil {\n\t\tinternalServerError(response, err)\n\t\treturn\n\t}\n\tok(response, Success{RowAffected: 1})\n}", "func _delete(context echo.Context, user *User) error {\n\tdeleteErr := Remove(user.Key)\n\tif deleteErr != nil {\n\t\tlog.Printf(\"Cannot delete user %v\", deleteErr)\n\t\treturn context.JSON(http.StatusInternalServerError, errors.New(\"Cannot delete user with ID: \"+user.ID))\n\t}\n\treturn context.NoContent(http.StatusNoContent)\n}", "func Delete(c *gin.Context) {\n\tuserID, err := getUserID(c.Param(\"user_id\"))\n\tif err != nil {\n\t\tc.JSON(err.Status, err)\n\t\treturn\n\t}\n\n\tif err := services.UserServ.DeleteUser(userID); err != nil {\n\t\tc.JSON(err.Status, err)\n\t\treturn\n\t}\n\tc.JSON(http.StatusOK, map[string]string{\"status\": \"deleted\"})\n}", "func DeleteUser(user *entity.User, id string, client *statsd.Client) (err error) {\n\tt := client.NewTiming()\n\tif config.DB.Where(\"id = ?\", id).First(&user); user.ID == \"\" {\n\t\treturn errors.New(\"the user doesn't exist!!!\")\n\t}\n\tconfig.DB.Where(\"id = ?\", id).Delete(&user)\n\tt.Send(\"delete_user.query_time\")\n\treturn nil\n}", "func Delete(c *gin.Context) {\n\tuserId, idErr := 
getUserID(c.Param(\"user_id\"))\n\tif idErr != nil {\n\t\tc.JSON(idErr.Status, idErr)\n\t\treturn\n\t}\n\n\tif err := services.UsersService.DeleteUser(userId); err != nil {\n\t\tc.JSON(err.Status, err)\n\t\treturn\n\t}\n\tc.JSON(http.StatusOK, map[string]string{\"status\": \"deleted\"})\n}", "func (serv *AppServer) DeleteUser(delID int) {\n\tserv.ServerRequest([]string{\"DeleteUser\", strconv.Itoa(delID)})\n}", "func DeleteUser(c *gin.Context, client *statsd.Client) {\n\tlog.Info(\"deleting user\")\n\tvar user entity.User\n\tid := c.Params.ByName(\"id\")\n\terr := model.DeleteUser(&user, id, client)\n\n\tif err != nil {\n\t\tlog.Error(err)\n\t\tc.JSON(http.StatusNotFound, gin.H{\n\t\t\t\"error\": err.Error(),\n\t\t})\n\t\treturn\n\t} else {\n\t\tc.JSON(http.StatusOK, gin.H{\"id\" + id: \"is deleted\"})\n\t}\n\tlog.Info(\"user deleted\")\n}", "func DeleteUser(person *Person, id string) (err error) {\n\tConfig.DB.Where(\"id = ?\", id).Delete(person)\n\treturn nil\n}", "func DeleteUser(w http.ResponseWriter, r *http.Request) {\n\n\thttpext.SuccessAPI(w, \"ok\")\n}", "func deleteUser(w http.ResponseWriter, r *http.Request) {\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\tparams := mux.Vars(r)\n\tresult := delete.DeleteUserData(params[\"id\"])\n\tjson.NewEncoder(w).Encode(map[string]string{\n\t\t\"result\": result,\n\t})\n}", "func (a *Server) DeleteUser(w http.ResponseWriter, r *http.Request) {\n\tfmt.Println(\"delete a user\")\n}", "func (s *peerRESTServer) DeleteUserHandler(w http.ResponseWriter, r *http.Request) {\n\tif !s.IsValid(w, r) {\n\t\ts.writeErrorResponse(w, errors.New(\"Invalid request\"))\n\t\treturn\n\t}\n\n\tobjAPI := newObjectLayerFn()\n\tif objAPI == nil {\n\t\ts.writeErrorResponse(w, errServerNotInitialized)\n\t\treturn\n\t}\n\n\tvars := mux.Vars(r)\n\taccessKey := vars[peerRESTUser]\n\tif accessKey == \"\" {\n\t\ts.writeErrorResponse(w, errors.New(\"username is missing\"))\n\t\treturn\n\t}\n\n\tif err := 
globalIAMSys.DeleteUser(accessKey); err != nil {\n\t\ts.writeErrorResponse(w, err)\n\t\treturn\n\t}\n\n\tw.(http.Flusher).Flush()\n}", "func (uv *userValidator) Delete(id uint) error{\n\tvar user User\n\tuser.ID = id\n\terr := runUserValidatorFunction(&user, uv.idGreaterThan(0))\n\tif err != nil{\n\t\treturn err\n\t}\n\treturn uv.UserDB.Delete(id)\n}", "func DeleteUser(c *gin.Context) {\n\tuserID := c.Param(\"userID\")\n\tuser := &userModel.User{ID: userID}\n\n\terr := dbConnect.Delete(user)\n\tif err != nil {\n\t\tlog.Printf(\"Error while deleting a single user, Reason: %v\\n\", err)\n\t\tc.JSON(http.StatusInternalServerError, gin.H{\n\t\t\t\"status\": http.StatusInternalServerError,\n\t\t\t\"message\": \"Something went wrong\",\n\t\t})\n\t\treturn\n\t}\n\n\tc.JSON(http.StatusOK, gin.H{\n\t\t\"status\": http.StatusOK,\n\t\t\"message\": \"User deleted successfully\",\n\t})\n\treturn\n}", "func DeleteUser(dbmap *gorp.DbMap, id string) error {\n\tvar u User\n\terr := dbmap.SelectOne(&u, \"SELECT * FROM user WHERE object_id = ?\", id)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\ttx, err := dbmap.Begin()\n\tif err != nil {\n\t\treturn nil\n\t}\n\n\t_, err = tx.Exec(\"DELETE FROM user_session WHERE user_id = ?;\", u.PK)\n\tif err != nil {\n\t\ttx.Rollback()\n\t\treturn err\n\t}\n\t_, err = tx.Exec(\"DELETE FROM user_role WHERE user_id = ?;\", u.PK)\n\tif err != nil {\n\t\ttx.Rollback()\n\t\treturn err\n\t}\n\t_, err = tx.Exec(\"DELETE FROM domain_user WHERE user_id = ?;\", u.PK)\n\tif err != nil {\n\t\ttx.Rollback()\n\t\treturn err\n\t}\n\t_, err = tx.Delete(&u)\n\tif err != nil {\n\t\ttx.Rollback()\n\t\treturn err\n\t}\n\treturn tx.Commit()\n}", "func (uc UserController) DeleteUser(w http.ResponseWriter, r *http.Request, p httprouter.Params) {\n\tid := p.ByName(\"id\")\n\n\tif _, ok := users[id]; !ok {\n\t\tw.WriteHeader(404)\n\t\treturn\n\t}\n\n\t// Delete user\n\tdelete(users, id)\n\n\tw.WriteHeader(http.StatusOK) // 200\n\tfmt.Fprint(w, \"Deleted user\", id, 
\"\\n\")\n}", "func DeleteUser(id int) (err error) {\n\to := orm.NewOrm()\n\tv := User{Id: id}\n\t// ascertain id exists in the database\n\tif err = o.Read(&v); err == nil {\n\t\tvar num int64\n\t\tif num, err = o.Delete(&User{Id: id}); err == nil {\n\t\t\tfmt.Println(\"Number of records deleted in database:\", num)\n\t\t}\n\t}\n\treturn\n}", "func DeleteUser(userid int64) error {\n _, err := model.Database.Exec(\"DELETE FROM users WHERE userid = ? AND isadmin = ?\", userid, false)\n if err != nil {\n return err\n }\n return nil\n}", "func DeleteUser(db *pg.DB, pk int64) error {\n\n\tuser := models.User{ID: pk}\n\n\tfmt.Println(\"Deleting User...\")\n\n\terr := db.Delete(&user)\n\n\treturn err\n}", "func (w *ServerInterfaceWrapper) DeleteUser(ctx echo.Context) error {\n\tvar err error\n\t// ------------- Path parameter \"id\" -------------\n\tvar id int\n\n\terr = runtime.BindStyledParameter(\"simple\", false, \"id\", ctx.Param(\"id\"), &id)\n\tif err != nil {\n\t\treturn echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf(\"Invalid format for parameter id: %s\", err))\n\t}\n\n\tctx.Set(\"OAuth.Scopes\", []string{\"\"})\n\n\t// Invoke the callback with all the unmarshalled arguments\n\terr = w.Handler.DeleteUser(ctx, id)\n\treturn err\n}", "func DeleteUser(id int) error {\n\tuser := User{ID: id}\n\t_, err := db.Model(&user).WherePK().Delete()\n\treturn err\n}", "func (uv *userValidator) Delete(id uint) error {\n\tvar user User\n\tuser.ID = id\n\terr := runUserValFuncs(&user, uv.idGreaterThan(0))\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn uv.UserDB.Delete(id)\n}", "func (uv *userValidator) Delete(id uint) error {\n\tvar user User\n\tuser.ID = id\n\terr := runUserValFuncs(&user, uv.idGreaterThan(0))\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn uv.UserDB.Delete(id)\n}", "func (uv *userValidator) Delete(id uint) error {\r\n\tvar user User\r\n\tuser.ID = id\r\n\terr := runUserValFns(&user, uv.idGreaterThan(0))\r\n\tif err != nil {\r\n\t\treturn 
err\r\n\t}\r\n\treturn uv.UserDB.Delete(id)\r\n}", "func DeleteUser(\n\tctx context.Context,\n\ttx *sql.Tx,\n\trequest *models.DeleteUserRequest) error {\n\tdeleteQuery := deleteUserQuery\n\tselectQuery := \"select count(uuid) from user where uuid = ?\"\n\tvar err error\n\tvar count int\n\tuuid := request.ID\n\tauth := common.GetAuthCTX(ctx)\n\tif auth.IsAdmin() {\n\t\trow := tx.QueryRowContext(ctx, selectQuery, uuid)\n\t\tif err != nil {\n\t\t\treturn errors.Wrap(err, \"not found\")\n\t\t}\n\t\trow.Scan(&count)\n\t\tif count == 0 {\n\t\t\treturn errors.New(\"Not found\")\n\t\t}\n\t\t_, err = tx.ExecContext(ctx, deleteQuery, uuid)\n\t} else {\n\t\tdeleteQuery += \" and owner = ?\"\n\t\tselectQuery += \" and owner = ?\"\n\t\trow := tx.QueryRowContext(ctx, selectQuery, uuid, auth.ProjectID())\n\t\tif err != nil {\n\t\t\treturn errors.Wrap(err, \"not found\")\n\t\t}\n\t\trow.Scan(&count)\n\t\tif count == 0 {\n\t\t\treturn errors.New(\"Not found\")\n\t\t}\n\t\t_, err = tx.ExecContext(ctx, deleteQuery, uuid, auth.ProjectID())\n\t}\n\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"delete failed\")\n\t}\n\n\terr = common.DeleteMetaData(tx, uuid)\n\tlog.WithFields(log.Fields{\n\t\t\"uuid\": uuid,\n\t}).Debug(\"deleted\")\n\treturn err\n}", "func DeleteHandler(w http.ResponseWriter, r *http.Request) {\n\t_, _, ok := r.BasicAuth()\n\tif !ok {\n\t\tw.Header().Set(\"WWW-Authenticate\", fmt.Sprintf(`Basic realm=\"%s\"`, BasicAuthRealm))\n\t\tw.WriteHeader(http.StatusUnauthorized)\n\t\tw.Write([]byte(http.StatusText(http.StatusUnauthorized) + \"\\n\"))\n\t\treturn\n\t}\n\tif !reqIsAdmin(r) {\n\t\thttp.Error(w, http.StatusText(http.StatusUnauthorized), http.StatusUnauthorized)\n\t\treturn\n\t}\n\tu := &User{\n\t\tUsername: strings.ToLower(r.FormValue(\"username\")),\n\t}\n\terr := u.Delete()\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusUnauthorized)\n\t\treturn\n\t}\n\tfmt.Fprintf(w, \"User Deleted\\n\")\n}", "func DeleteUser(c echo.Context) error {\n\tid, _ := 
strconv.Atoi(c.Param(\"id\"))\n\tdelete(users, id)\n\treturn c.NoContent(http.StatusNoContent)\n}", "func (uv *userValidator) Delete(id uint) error {\n\tvar user User\n\tuser.ID = id\n\terr := runUserValFns(&user, uv.idGreaterThan(0))\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn uv.UserDB.Delete(id)\n}", "func (uv *userValidator) Delete(id uint) error {\n\tvar user User\n\tuser.ID = id\n\terr := runUserValFns(&user, uv.idGreaterThan(0))\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn uv.UserDB.Delete(id)\n}", "func (uv *userValidator) Delete(id uint) error {\n\tvar user User\n\tuser.ID = id\n\terr := runUserValFns(&user, uv.idGreaterThan(0))\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn uv.UserDB.Delete(id)\n}", "func (s *Service) DeleteUser(c *tokay.Context) {\n\tID := uint64(c.ParamUint(\"id\"))\n\n\tfilter := obj{\"_id\": ID}\n\terr = db.UserCol.Remove(filter)\n\tif errorAlert(\"User was not deleted\", err, c) {\n\t\treturn\n\t}\n\n\tc.JSON(200, obj{\"ok\": \"true\"})\n}", "func DeleteUser(c *gin.Context) {\n\tvar json db.UserDeleteForm\n\tif err := c.ShouldBind(&json); err != nil {\n\t\tc.JSON(http.StatusBadRequest, gin.H{\n\t\t\t\"msg\": \"Form doens't bind.\",\n\t\t\t\"err\": err.Error(),\n\t\t})\n\t\treturn\n\t}\n\tsession := sessions.Default(c)\n\tuserID := session.Get(\"userID\")\n\tvar user db.Users\n\tif err := db.DB.Where(userID).\n\t\tFirst(&user).Error; gorm.IsRecordNotFoundError(err) {\n\t\t// User not found\n\t\tc.JSON(http.StatusOK, gin.H{\n\t\t\t\"msg\": \"User not found in database.\",\n\t\t\t\"err\": err,\n\t\t})\n\t\treturn\n\t}\n\tif checkPasswordHash(json.Password, user.Password) {\n\t\tsession.Clear()\n\t\tsession.Save()\n\t\t// Soft delete user\n\t\tdb.DB.Where(userID).Delete(&user)\n\t\tc.JSON(http.StatusOK, gin.H{\n\t\t\t\"msg\": user.Username,\n\t\t\t\"err\": \"\",\n\t\t})\n\t} else {\n\t\tc.JSON(http.StatusUnauthorized, gin.H{\n\t\t\t\"msg\": fmt.Sprintf(\"Check password hash failed for user %s\", 
user.Username),\n\t\t\t\"err\": user.Username,\n\t\t})\n\t}\n}", "func (env *Env) DeleteUser(c *gin.Context) {\n\n\t//Convert ID Parameter into int32\n\ttmp, err := strconv.Atoi(c.Param(\"id\"))\n\tif err != nil {\n\t\tLog.WithField(\"module\", \"handler\").WithError(err)\n\t\tc.AbortWithStatusJSON(http.StatusInternalServerError, errs.RQST001)\n\t\treturn\n\t}\n\tuserid := int32(tmp)\n\n\treqUserId, _ := c.Get(\"userid\")\n\n\t//Check if UserID\n\tvar exists int64\n\tresult := env.db.Model(mysql.User{}).Where(\"id = ?\", userid).Count(&exists)\n\tif result.Error != nil {\n\t\tLog.WithField(\"module\", \"sql\").WithError(result.Error)\n\t\tc.AbortWithStatusJSON(http.StatusInternalServerError, errs.DBSQ001)\n\t\treturn\n\t}\n\n\tif exists == 0 {\n\t\tLog.WithField(\"module\", \"handler\").Error(\"User not Found in Database\")\n\t\tc.AbortWithStatusJSON(http.StatusNotFound, errs.DBSQ006)\n\t\treturn\n\t}\n\n\tif userid != reqUserId {\n\t\tvar user mysql.User\n\n\t\tresult := env.db.Where(\"id = ?\", reqUserId).First(&user)\n\t\tif result.Error != nil {\n\t\t\tLog.WithField(\"module\", \"sql\").WithError(result.Error)\n\t\t\tc.AbortWithStatusJSON(http.StatusInternalServerError, errs.DBSQ001)\n\t\t\treturn\n\t\t}\n\n\t\tLog.Debug(user)\n\n\t\terr = env.db.Model(&user).Association(\"Permissions\").Find(&user.Permissions)\n\t\tif err != nil {\n\t\t\tLog.WithField(\"module\", \"sql\").WithError(err)\n\t\t\tc.AbortWithStatusJSON(http.StatusInternalServerError, errs.DBSQ001)\n\t\t\treturn\n\t\t}\n\n\t\tif !user.Permissions.Admin {\n\t\t\tLog.WithField(\"module\", \"handler\").Error(\"User not Authorized for this Action\")\n\t\t\tc.AbortWithStatusJSON(http.StatusUnauthorized, errs.AUTH009)\n\t\t\treturn\n\t\t}\n\t}\n\n\tresult = env.db.Delete(mysql.User{}, userid)\n\tif result.Error != nil {\n\t\tif errors.Is(result.Error, gorm.ErrRecordNotFound) {\n\t\t\tLog.WithField(\"module\", \"handler\").WithError(result.Error)\n\t\t\tc.AbortWithStatusJSON(http.StatusNotFound, 
errs.DBSQ006)\n\t\t\treturn\n\t\t} else {\n\t\t\tLog.WithField(\"module\", \"sql\").WithError(result.Error)\n\t\t\tc.AbortWithStatusJSON(http.StatusInternalServerError, errs.DBSQ001)\n\t\t\treturn\n\t\t}\n\t}\n\n}", "func DeleteUser(id string) error {\n\t_, err := db.Exec(\"DELETE FROM web_users WHERE ID = ?\", id)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}", "func DeleteUser(c echo.Context) error {\n\tid := c.FormValue(\"id\")\n\n\tconvID, err := strconv.Atoi(id)\n\tif err != nil {\n\t\treturn c.JSON(http.StatusInternalServerError, err.Error())\n\t}\n\n\tresult, err := models.DeleteUser(convID)\n\tif err != nil {\n\t\treturn c.JSON(http.StatusInternalServerError, err.Error())\n\t}\n\n\treturn c.JSON(http.StatusOK, result)\n}", "func DeleteUserHandler(w http.ResponseWriter, r *http.Request) {\n\tvars := mux.Vars(r)\n\n\tname := vars[\"name\"]\n\tselector := r.URL.Query().Get(\"selector\")\n\tnamespace := r.URL.Query().Get(\"namespace\")\n\tclientVersion := r.URL.Query().Get(\"version\")\n\n\tlog.Debugf(\"DeleteUserHandler parameters selector [%s] namespace [%s] version [%s] name [%s]\", selector, namespace, clientVersion, name)\n\n\tusername, err := apiserver.Authn(apiserver.DELETE_USER_PERM, w, r)\n\tif err != nil {\n\t\treturn\n\t}\n\n\tw.Header().Set(\"WWW-Authenticate\", `Basic realm=\"Restricted\"`)\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\tw.WriteHeader(http.StatusOK)\n\n\tresp := msgs.DeleteUserResponse{}\n\n\tvar ns string\n\tns, err = apiserver.GetNamespace(apiserver.Clientset, username, namespace)\n\tif err != nil {\n\t\tresp.Status.Code = msgs.Error\n\t\tresp.Status.Msg = err.Error()\n\t\tjson.NewEncoder(w).Encode(resp)\n\t\treturn\n\t}\n\n\tif clientVersion != msgs.PGO_VERSION {\n\t\tresp.Status.Code = msgs.Error\n\t\tresp.Status.Msg = apiserver.VERSION_MISMATCH_ERROR\n\t\tjson.NewEncoder(w).Encode(resp)\n\t\treturn\n\t}\n\n\tresp = DeleteUser(name, selector, ns)\n\tjson.NewEncoder(w).Encode(resp)\n\n}", "func (m 
*MgoUserManager) DeleteUser(id interface{}) error {\n\toid, err := getId(id)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn m.UserColl.RemoveId(oid)\n}", "func (u *User) Delete() *errorsutils.RestErr {\n\tstmt, err := usersdb.Client.Prepare(queryDeleteUser)\n\tif err != nil {\n\t\tlogger.Error(\"error when trying to prepare delete user statement\", err)\n\t\treturn errorsutils.NewInternalServerError(\"database error\", errors.New(\"database error\"))\n\t}\n\tdefer stmt.Close()\n\n\tif _, err = stmt.Exec(u.ID); err != nil {\n\t\tlogger.Error(\"error when trying to delete user\", err)\n\t\treturn errorsutils.NewInternalServerError(\"database error\", errors.New(\"database error\"))\n\t}\n\n\treturn nil\n}", "func (app *application) DeleteUser(w http.ResponseWriter, r *http.Request) {\n\tid := chi.URLParam(r, \"id\")\n\tuserID, _ := strconv.Atoi(id)\n\n\terr := app.DB.DeleteUser(userID)\n\tif err != nil {\n\t\tapp.badRequest(w, r, err)\n\t\treturn\n\t}\n\n\tvar resp struct {\n\t\tError bool `json:\"error\"`\n\t\tMessage string `json:\"message\"`\n\t}\n\n\tresp.Error = false\n\tapp.writeJSON(w, http.StatusOK, resp)\n}", "func DeleteUser(c *gin.Context) {\n\tuserID, err := strconv.ParseInt(c.Param(\"user_id\"), 10, 64)\n\tif err != nil {\n\t\tparamErr := errors.NewBadRequestError(\"user id should be a number\")\n\t\tc.JSON(paramErr.Status, paramErr)\n\t\treturn\n\t}\n\n\t//send the userID to the services\n\tresult, deleteErr := services.UsersService.DeleteUser(userID)\n\tif deleteErr != nil {\n\t\tc.JSON(deleteErr.Status, deleteErr)\n\t\treturn\n\t}\n\n\tc.JSON(http.StatusOK, result)\n}", "func DeleteHandler(db *sql.DB) http.HandlerFunc {\n\treturn func(w http.ResponseWriter, r *http.Request) {\n\t\tvars := mux.Vars(r)\n\t\tuser, err := delete(db, vars[\"id\"])\n\t\tif err != nil {\n\t\t\tfmt.Fprintf(w, \"%v\\n\", err.Error())\n\t\t\treturn\n\t\t}\n\n\t\tjson.NewEncoder(w).Encode(user)\n\t\treturn\n\t}\n}", "func UserDelete(w http.ResponseWriter, r *http.Request, 
ps httprouter.Params) {\n\n}", "func (user *User) Delete() *errors.RestErr {\n\t//prepare and execute the delete query\n\tstmt, err := usersdb.Client.Prepare(queryDeleteUser)\n\tif err != nil {\n\t\treturn errors.NewInternalServerError(err.Error())\n\t}\n\tdefer stmt.Close()\n\n\t//\n\tif _, err = stmt.Exec(user.ID); err != nil {\n\t\treturn errors.ParseError(err)\n\t}\n\n\treturn nil\n\n}", "func (userRepository UserRepository) Delete(userId uint64) error {\n\tstatement, err := userRepository.db.Prepare(\n\t\t\"delete from users where id = ?\",\n\t)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer statement.Close()\n\n\tif _, err = statement.Exec(userId); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}", "func (u *User) Delete(ctx context.Context, w http.ResponseWriter, r *http.Request, params map[string]string) error {\n\tctx, span := trace.StartSpan(ctx, \"handlers.User.Delete\")\n\tdefer span.End()\n\n\terr := user.Delete(ctx, u.db, params[\"id\"])\n\tif err != nil {\n\t\tswitch err {\n\t\tcase user.ErrInvalidID:\n\t\t\treturn web.NewRequestError(err, http.StatusBadRequest)\n\t\tcase user.ErrNotFound:\n\t\t\treturn web.NewRequestError(err, http.StatusNotFound)\n\t\tcase user.ErrForbidden:\n\t\t\treturn web.NewRequestError(err, http.StatusForbidden)\n\t\tdefault:\n\t\t\treturn errors.Wrapf(err, \"Id: %s\", params[\"id\"])\n\t\t}\n\t}\n\n\treturn web.Respond(ctx, w, nil, http.StatusNoContent)\n}", "func DeleteUser(id int) error {\n\tq := \"DELETE FROM users WHERE id=$1\"\n\t_, err := dbConn.Exec(q, id)\n\treturn err\n}", "func (uc UserController) DeleteUser(w http.ResponseWriter, req *http.Request, p httprouter.Params) {\n\tw.WriteHeader(http.StatusOK)\n\tfmt.Fprint(w, \"Write code to delete user\\n\")\n}", "func Duser(w http.ResponseWriter, r *http.Request) {\n\tid := r.PostFormValue(\"id\")\n\tidint, _ := strconv.Atoi(id)\n\tstmt := datastorage.GetDataRouter().GetStmt(\"delete_user\")\n\t_, err := stmt.Exec(idint)\n\tif err != nil 
{\n\t\tmessages.SetMessage(r, \"Σφάλμα κατά την διαγραφή του χρήστη\")\n\t\tlog.Println(err)\n\t\thttp.Redirect(w, r, \"/retrieveuser?id=\"+id, http.StatusMovedPermanently)\n\t\treturn\n\t}\n\thttp.Redirect(w, r, \"/listusers\", http.StatusMovedPermanently)\n}", "func DeleteHandler(w http.ResponseWriter, r *http.Request, serv *AppServer) {\n\tsession, err := r.Cookie(\"UserID\")\n\tif err != nil {\n\t\tfmt.Println(err)\n\t}\n\tdelID, err := strconv.Atoi(session.Value)\n\tif err != nil {\n\t\tfmt.Println(err)\n\t}\n\n\tserv.DeleteUser(delID)\n\n\tLogoutHandler(w, r, serv)\n}", "func (u *User) Delete(tx *sql.Tx) error {\n\tif u.ID == \"\" {\n\t\treturn errors.New(`user ID is not valid`)\n\t}\n\tlog.Printf(\"db.User.Delete %s\", u.ID)\n\n\tstmt := bytes.Buffer{}\n\tstmt.WriteString(`DELETE FROM `)\n\tstmt.WriteString(userTable)\n\tstmt.WriteString(` WHERE id = ?`)\n\tlog.Printf(\"SQL QUERY: %s: with values %s\", stmt.String(), u.ID)\n\n\t_, err := tx.Exec(stmt.String(), u.ID)\n\n\treturn err\n}", "func DeleteUser(db sqlx.Execer, id int64) error {\n\tres, err := db.Exec(\"delete from \\\"user\\\" where id = $1\", id)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"delete error\")\n\t}\n\tra, err := res.RowsAffected()\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"get rows affected error\")\n\t}\n\tif ra == 0 {\n\t\treturn ErrDoesNotExist\n\t}\n\n\tlog.WithFields(log.Fields{\n\t\t\"id\": id,\n\t}).Info(\"user deleted\")\n\treturn nil\n}", "func DeleteUser(c *gin.Context) {\n\tnID := c.Param(\"user_id\")\n\tdb := dbConn()\n\tstatement, _ := db.Prepare(\"CALL delete_user(?)\")\n\tstatement.Exec(nID)\n\tdefer db.Close()\n}", "func (ug *userDbHandle) Delete(id uint) error {\n\tuser := User{Model: gorm.Model{ID: id}}\n\treturn ug.db.Delete(&user).Error\n}", "func DeleteUser(w http.ResponseWriter, r *http.Request) {\n\tfLog := userMgmtLogger.WithField(\"func\", \"DeleteUser\").WithField(\"RequestID\", r.Context().Value(constants.RequestID)).WithField(\"path\", 
r.URL.Path).WithField(\"method\", r.Method)\n\tparams, err := helper.ParsePathParams(fmt.Sprintf(\"%s/management/user/{userRecId}\", apiPrefix), r.URL.Path)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tuser, err := UserRepo.GetUserByRecID(r.Context(), params[\"userRecId\"])\n\tif err != nil {\n\t\tfLog.Errorf(\"UserRepo.GetUserByRecID got %s\", err.Error())\n\t\thelper.WriteHTTPResponse(r.Context(), w, http.StatusInternalServerError, err.Error(), nil, nil)\n\t\treturn\n\t}\n\tif user == nil {\n\t\thelper.WriteHTTPResponse(r.Context(), w, http.StatusNotFound, fmt.Sprintf(\"User recid %s not found\", params[\"userRecId\"]), nil, nil)\n\t\treturn\n\t}\n\tUserRepo.DeleteUser(r.Context(), user)\n\tRevocationRepo.Revoke(r.Context(), user.Email)\n\thelper.WriteHTTPResponse(r.Context(), w, http.StatusOK, \"User deleted\", nil, nil)\n}", "func (u *UserServiceHandler) Delete(ctx context.Context, userID string) error {\n\n\turi := \"/v1/user/delete\"\n\n\tvalues := url.Values{\n\t\t\"USERID\": {userID},\n\t}\n\n\treq, err := u.client.NewRequest(ctx, http.MethodPost, uri, values)\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = u.client.DoWithContext(ctx, req, nil)\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}", "func (us *UserStorage) DeleteUser(id string) error {\n\tif !bson.IsObjectIdHex(id) {\n\t\treturn model.ErrorWrongDataFormat\n\t}\n\ts := us.db.Session(UsersCollection)\n\tdefer s.Close()\n\n\terr := s.C.RemoveId(bson.ObjectIdHex(id))\n\treturn err\n}", "func DeleteUser(id int32) error {\n\treturn dalums.DeleteUser(id)\n}", "func DeleteUser(w http.ResponseWriter, r *http.Request) {\n\tcookie, _ := cookies.Read(r)\n\tuserID, _ := strconv.ParseUint(cookie[\"id\"], 10, 64)\n\n\turl := fmt.Sprintf(\"%s/users/%d\", config.APIURL, userID)\n\n\tresponse, err := requests.RequestsWithAuthentication(r, http.MethodDelete, url, nil)\n\tif err != nil {\n\t\tresponses.JSON(w, http.StatusInternalServerError, responses.ErrorAPI{Err: 
err.Error()})\n\t\treturn\n\t}\n\tdefer response.Body.Close()\n\n\tif response.StatusCode >= 400 {\n\t\tresponses.TreatStatusCode(w, response)\n\t\treturn\n\t}\n\n\tresponses.JSON(w, response.StatusCode, nil)\n}", "func (ur *UserRepository) Delete(ctx context.Context, id uint) error {\n\tq := `\n\tDELETE FROM users WHERE id=$1;\n\t`\n\n\tstmt, err := ur.Data.DB.PrepareContext(ctx, q)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tdefer stmt.Close()\n\n\t_, err = stmt.ExecContext(ctx, id)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}", "func (ur *UserRepository) Delete(ctx context.Context, id uint) error {\n\tq := `\n\tDELETE FROM users WHERE id=$1;\n\t`\n\n\tstmt, err := ur.Data.DB.PrepareContext(ctx, q)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tdefer stmt.Close()\n\n\t_, err = stmt.ExecContext(ctx, id)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}", "func (chat *Chat) DeleteUser(id string) {\n\tchat.lock.Lock()\n\tdefer chat.lock.Unlock()\n\n\tusers := []*User{}\n\tfor _, chatUser := range chat.users {\n\t\tif chatUser.Id == id {\n\t\t\t//close ws\n\t\t\tchatUser.Ws.Close()\n\t\t\tchatUser.Dt = time.Since(chatUser.OnlineAt) / 1e9\n\n\t\t\t//进行数据跟踪\n\t\t\tgo httpPostForm(chatUser)\n\n\t\t\tcontinue\n\t\t}\n\t\tusers = append(users, chatUser)\n\t}\n\n\tchat.users = users\n}", "func (c *UserRepoImpl) Delete(id int) (*model.User, error) {\n\n\tuser := new(model.User)\n\n\tif err := c.db.Table(\"user\").First(&user, id).Error; err != nil {\n\t\treturn nil, errors.New(\"id is doesnt exists\")\n\t}\n\n\tif err := c.db.Table(\"user\").Where(\"user_id = ?\", id).Delete(&model.User{}).Error; err != nil {\n\t\treturn nil, errors.New(\"delete courier data: error\")\n\t}\n\n\treturn nil, nil\n}" ]
[ "0.7829194", "0.75990427", "0.758616", "0.7584279", "0.75630176", "0.7557841", "0.7556832", "0.75329816", "0.75098014", "0.7507609", "0.750512", "0.74947095", "0.7493544", "0.7471709", "0.7445412", "0.74386734", "0.7437152", "0.74366146", "0.7431234", "0.73927724", "0.7389141", "0.7386181", "0.7377011", "0.7373788", "0.7367931", "0.7358367", "0.73526007", "0.73381394", "0.73310083", "0.7319692", "0.73159283", "0.7310957", "0.7308265", "0.7305798", "0.7273584", "0.72679794", "0.7265321", "0.72636884", "0.7245354", "0.7238874", "0.72126997", "0.72069526", "0.7206773", "0.7199146", "0.7195916", "0.7187915", "0.71802515", "0.716842", "0.71662205", "0.71468097", "0.7141946", "0.71327543", "0.71180695", "0.70857936", "0.7078777", "0.7064752", "0.7054795", "0.7051538", "0.70468664", "0.70355415", "0.70355415", "0.7034293", "0.7031061", "0.7017051", "0.70161796", "0.70114845", "0.70114845", "0.70114845", "0.69995874", "0.6999347", "0.698853", "0.6981889", "0.69790256", "0.69607943", "0.69601154", "0.69526964", "0.6952403", "0.69521546", "0.6947989", "0.69454104", "0.6942407", "0.69422156", "0.6938683", "0.6933395", "0.6932374", "0.6925556", "0.6923764", "0.692273", "0.6922459", "0.6917455", "0.69152564", "0.6896339", "0.68757886", "0.68674433", "0.6863079", "0.6862909", "0.68577963", "0.68577963", "0.6857009", "0.6839651" ]
0.77397424
1
UnmarshalEasyJSON supports easyjson.Unmarshaler interface
UnmarshalEasyJSON поддерживает интерфейс easyjson.Unmarshaler
func (v *BlitzedItemResponse) UnmarshalEasyJSON(l *jlexer.Lexer) { easyjson6a975c40DecodeJsonBenchmark4(l, v) }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (v *Fruit) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjsonD2b7633eDecodeBackendInternalModels11(l, v)\n}", "func (v *Boo) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjson42239ddeDecodeMsgpJson(l, v)\n}", "func (v *Element) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjsonB83d7b77DecodeGoplaygroundMyjson2(l, v)\n}", "func (c *Context) UnmarshalEasyJSON(in *jlexer.Lexer) {\n\tContextSerialization.UnmarshalFromEasyJSONLexer(in, c)\n}", "func (v *Format) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjsonB27eec76DecodeGithubComTisonetOpenrtbEasyjson14(l, v)\n}", "func (v *DetectedFruit) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjsonD2b7633eDecodeBackendInternalModels12(l, v)\n}", "func (v *item) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjsonD2b7633eDecodeGithubComZhekabyGoGeneratorMongoRequestwrapperTests(l, v)\n}", "func (v *ItemCheckResponse) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjson6a975c40DecodeJsonBenchmark2(l, v)\n}", "func (v *Native) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjsonB27eec76DecodeGithubComTisonetOpenrtbEasyjson10(l, v)\n}", "func (v *FormDataMQ) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjsonB83d7b77DecodeGoplaygroundMyjson(l, v)\n}", "func (v *DocumentResponse) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjson6a975c40DecodeJsonBenchmark3(l, v)\n}", "func (v *Node) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjson6601e8cdDecodeGithubComSkydiveProjectSkydiveGraffitiApiTypes1(l, v)\n}", "func (v *flattenedField) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjson390b7126DecodeGithubComChancedPicker35(l, v)\n}", "func (v *ExtFilter) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjson795c59c6DecodeGrapeGuardRules11(l, v)\n}", "func (v *Deal) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjson8a221a72DecodeGithubComVungleVungoOpenrtb(l, v)\n}", "func (v *OrderCheckResponse) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjson6a975c40DecodeJsonBenchmark1(l, v)\n}", "func (v *Visit) UnmarshalEasyJSON(l *jlexer.Lexer) 
{\n\teasyjsonE564fc13DecodeGithubComLa0rgHighloadcupModel1(l, v)\n}", "func (v *Error) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjson42239ddeDecodeGithubComKhliengDispatchServer26(l, v)\n}", "func (v *GetUserResponse) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjson84c0690eDecodeMainHandlers1(l, v)\n}", "func (v *User) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjson9e1087fdDecodeHw3Bench(l, v)\n}", "func (v *IngredientArr) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjsonD2b7633eDecodeBackendInternalModels7(l, v)\n}", "func (v *VisitArray) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjsonE564fc13DecodeGithubComLa0rgHighloadcupModel(l, v)\n}", "func (v *Foo) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjsonAbe23ddeDecodeGithubComUberZanzibarExamplesExampleGatewayBuildGenCodeClientsCorgeCorge1(l, v)\n}", "func (v *Ingredient) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjsonD2b7633eDecodeBackendInternalModels8(l, v)\n}", "func (v *Musician) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjson62dc445bDecode20211NoskoolTeamInternalAppMusiciansModels2(l, v)\n}", "func (v *ThirdParty) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjsonB27eec76DecodeGithubComTisonetOpenrtbEasyjson2(l, v)\n}", "func (v *Data) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjsonB27eec76DecodeGithubComTisonetOpenrtbEasyjson17(l, v)\n}", "func (v *Deal) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjsonB27eec76DecodeGithubComTisonetOpenrtbEasyjson16(l, v)\n}", "func (v *Raw) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjson42239ddeDecodeGithubComKhliengDispatchServer10(l, v)\n}", "func (v *EasyResponse) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjson6ff3ac1dDecodeGithubComWenweihBitcoinRpcGolangProto1(l, v)\n}", "func (v *AdvFormData) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjsonB83d7b77DecodeGoplaygroundMyjson3(l, v)\n}", "func (v *Message) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjson42239ddeDecodeGithubComKhliengDispatchServer18(l, v)\n}", "func (v *Teacher) UnmarshalEasyJSON(l *jlexer.Lexer) 
{\n\teasyjsonF1627ba7DecodeGithubComDuchiporexiaGoutilsXmsgTests(l, v)\n}", "func (v *Invite) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjson42239ddeDecodeGithubComKhliengDispatchServer22(l, v)\n}", "func (v *CBPerson) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjsonE242b40eDecodeGithubComExampleSample2(l, v)\n}", "func (v *BidResponse) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjson326edDecodeGithubComMxmCherryOpenrtb(l, v)\n}", "func (v *BidRequest) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjsonB27eec76DecodeGithubComTisonetOpenrtbEasyjson21(l, v)\n}", "func (v *Impression) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjsonB27eec76DecodeGithubComTisonetOpenrtbEasyjson12(l, v)\n}", "func (v *Msg) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjsonD2b7633eDecodeBackendInternalModels6(l, v)\n}", "func (v *BidResponse) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjson326edDecodeGithubComApplifierGoOpenrtbOpenrtb2(l, v)\n}", "func (v *Info) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjsonC80ae7adDecodeGithubComDeiklovTechDbRomanovAndrGolangModels13(l, v)\n}", "func (v *MediumPayload) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjsonE242b40eDecodeGithubComExampleSample1(l, v)\n}", "func (v *Part) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjson42239ddeDecodeGithubComKhliengDispatchServer12(l, v)\n}", "func (v *ProductExtendedArr) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjsonD2b7633eDecodeBackendInternalModels4(l, v)\n}", "func (v *Whois) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjson42239ddeDecodeGithubComKhliengDispatchServer1(l, v)\n}", "func (v *App) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjsonB27eec76DecodeGithubComTisonetOpenrtbEasyjson25(l, v)\n}", "func (v *Content) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjsonB27eec76DecodeGithubComTisonetOpenrtbEasyjson18(l, v)\n}", "func (v *Responce) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjson6a975c40DecodeGithubComSerhio83DruidPkgStructs(l, v)\n}", "func (v *TransactionResponse) UnmarshalEasyJSON(l 
*jlexer.Lexer) {\n\teasyjsonE82c8e88DecodeGithubComKamaiuOandaGoModel5(l, v)\n}", "func (v *ProductExtended) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjsonD2b7633eDecodeBackendInternalModels5(l, v)\n}", "func (v *Error) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjsonC5a4559bDecodeGithubComChromedpCdproto2(l, v)\n}", "func (v *BidResponse) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjsonB27eec76DecodeGithubComTisonetOpenrtbEasyjson20(l, v)\n}", "func (v *HireManager) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjsonAf94a8adDecodeGithubComGoParkMailRu20192ComandusInternalModel(l, v)\n}", "func (v *PlantainerShadowMetadataSt) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjson5bd79fa1DecodeMevericcoreMcplantainer9(l, v)\n}", "func (v *User) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjsonD2b7633eDecodeBackendInternalModels(l, v)\n}", "func (v *RespStruct) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjsonD2b7633eDecodeDrhyuComIndexerModels1(l, v)\n}", "func (v *Item) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjsonD2b7633eDecodeDrhyuComIndexerModels2(l, v)\n}", "func (v *Annotation) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjson6a975c40DecodeGithubComSerhio83DruidPkgStructs3(l, v)\n}", "func (v *Fundamental) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjson3e8ab7adDecodeGithubComAlpacahqAlpacaTradeApiGoV3Alpaca14(l, v)\n}", "func (v *BasicUser) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjson6601e8cdDecodeJsongen4(l, v)\n}", "func (v *Features) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjson42239ddeDecodeGithubComKhliengDispatchServer25(l, v)\n}", "func (v *Edge) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjson6601e8cdDecodeGithubComSkydiveProjectSkydiveGraffitiApiTypes2(l, v)\n}", "func (v *ShadowModelSt) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjsonB7ed31d3DecodeMevericcoreMccommon5(l, v)\n}", "func (v *AdvForm) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjsonB83d7b77DecodeGoplaygroundMyjson4(l, v)\n}", "func (v *binaryField) UnmarshalEasyJSON(l *jlexer.Lexer) 
{\n\teasyjson390b7126DecodeGithubComChancedPicker46(l, v)\n}", "func (v *ShadowUpdateMsgSt) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjsonB7ed31d3DecodeMevericcoreMccommon1(l, v)\n}", "func (v *User) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjson393a2a40DecodeCodegen(l, v)\n}", "func (v *InfoUser) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjson6601e8cdDecodeJsongen3(l, v)\n}", "func (v *Message) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjsonC5a4559bDecodeGithubComChromedpCdproto1(l, v)\n}", "func (v *Pmp) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjsonB27eec76DecodeGithubComTisonetOpenrtbEasyjson9(l, v)\n}", "func (v *MOTD) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjson42239ddeDecodeGithubComKhliengDispatchServer19(l, v)\n}", "func (v *Attack) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjson6601e8cdDecodeGithubComGoParkMailRu2018242GameServerTypes4(l, v)\n}", "func (v *moreLikeThisQuery) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjson390b7126DecodeGithubComChancedPicker18(l, v)\n}", "func (v *ExportItem) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjsonB83d7b77DecodeGoplaygroundMyjson1(l, v)\n}", "func (v *EventLoadEventFired) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjsonC5a4559bDecodeGithubComChromedpCdprotoPage70(l, v)\n}", "func (v *managerListener) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjsonEd74d837DecodeGithubComKihamoBoggartComponentsBoggartInternalHandlers(l, v)\n}", "func (v *WSResponse) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjson42239ddeDecodeGithubComKhliengDispatchServer2(l, v)\n}", "func (v *UnloadCheckResponse) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjson6a975c40DecodeJsonBenchmark(l, v)\n}", "func (v *PbTestObject) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjson5fcf962eDecodeGithubComJsonIteratorGoBenchmarkWith10IntFields(l, v)\n}", "func (v *Student) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjsonF1627ba7DecodeGithubComDuchiporexiaGoutilsXmsgTests1(l, v)\n}", "func (v *Device) UnmarshalEasyJSON(l *jlexer.Lexer) 
{\n\teasyjsonB27eec76DecodeGithubComTisonetOpenrtbEasyjson15(l, v)\n}", "func (v *Messages) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjson42239ddeDecodeGithubComKhliengDispatchServer17(l, v)\n}", "func (v *User) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjson2bc03518DecodeLangTaskOnBench(l, v)\n}", "func (v *BaseTickerInfo) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjson25363b2dDecodeGithubComDarkfoxs96OpenApiV3SdkOkexGoSdkApi117(l, v)\n}", "func (v *Topic) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjson42239ddeDecodeGithubComKhliengDispatchServer5(l, v)\n}", "func (v *BaseLedgerInfo) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjson25363b2dDecodeGithubComDarkfoxs96OpenApiV3SdkOkexGoSdkApi127(l, v)\n}", "func (v *Banner) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjsonB27eec76DecodeGithubComTisonetOpenrtbEasyjson23(l, v)\n}", "func (v *APIError) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjson3e8ab7adDecodeGithubComAlpacahqAlpacaTradeApiGoV3Alpaca24(l, v)\n}", "func (v *Bid) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjsonB27eec76DecodeGithubComTisonetOpenrtbEasyjson22(l, v)\n}", "func (v *Post) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjson783c1624DecodeGithubComGobwasVk7(l, v)\n}", "func (v *BaseTradeInfo) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjson25363b2dDecodeGithubComDarkfoxs96OpenApiV3SdkOkexGoSdkApi116(l, v)\n}", "func (v *MusicianFullInformation) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjson62dc445bDecode20211NoskoolTeamInternalAppMusiciansModels1(l, v)\n}", "func (v *matchRule) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjson390b7126DecodeGithubComChancedPicker19(l, v)\n}", "func (v *managerHandlerDevice) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjsonEd74d837DecodeGithubComKihamoBoggartComponentsBoggartInternalHandlers1(l, v)\n}", "func (v *ResultReq) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjson25363b2dDecodeGithubComDarkfoxs96OpenApiV3SdkOkexGoSdkApi54(l, v)\n}", "func (v *invocationMessage) UnmarshalEasyJSON(l *jlexer.Lexer) 
{\n\teasyjson2802b09fDecodeGithubComPhilippseithSignalr1(l, v)\n}", "func (v *fuzzyRule) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjson390b7126DecodeGithubComChancedPicker34(l, v)\n}", "func (v *PlantainerShadowSt) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjson5bd79fa1DecodeMevericcoreMcplantainer8(l, v)\n}", "func (v *completionMessage) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjson2802b09fDecodeGithubComPhilippseithSignalr5(l, v)\n}", "func (v *Source) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjsonB27eec76DecodeGithubComTisonetOpenrtbEasyjson3(l, v)\n}" ]
[ "0.73405373", "0.72584677", "0.72040373", "0.71776104", "0.71510446", "0.7143438", "0.71413064", "0.71286225", "0.7112999", "0.7103849", "0.71005577", "0.7097653", "0.7085183", "0.70850646", "0.7081146", "0.7077145", "0.70403785", "0.70357895", "0.7030433", "0.7028725", "0.7021155", "0.70114094", "0.70109946", "0.70103574", "0.7002987", "0.69937176", "0.6981908", "0.6981736", "0.69811034", "0.6980795", "0.69711286", "0.6965327", "0.695678", "0.69543517", "0.6948873", "0.69404715", "0.69387776", "0.6935085", "0.6930436", "0.6922759", "0.6904652", "0.6894174", "0.68897486", "0.6889671", "0.6888647", "0.6887437", "0.6887124", "0.68862444", "0.68853265", "0.68804044", "0.6874087", "0.6870016", "0.6869092", "0.6868185", "0.6858964", "0.6846011", "0.68405616", "0.6836571", "0.6835831", "0.68291616", "0.6823791", "0.6822216", "0.6817067", "0.6815519", "0.68133044", "0.6812743", "0.6811037", "0.68107563", "0.6809271", "0.680744", "0.68065774", "0.68030846", "0.68029016", "0.67965585", "0.6794714", "0.678028", "0.67772484", "0.6772522", "0.67714006", "0.6769638", "0.67685604", "0.67657346", "0.6763771", "0.67634416", "0.6762939", "0.67570746", "0.6756749", "0.6754731", "0.6750861", "0.6749626", "0.6745531", "0.6744763", "0.6743289", "0.67418313", "0.6734197", "0.6732776", "0.67303044", "0.67287326", "0.67265445", "0.67261595" ]
0.73436594
0
People ... Create a router group to rs/crud/persons and relative routes
Люди ... Создайте группу роутеров для rs/crud/persons и относительных маршрутов
func People(engine *gin.Engine, midlewares ...gin.HandlerFunc) { personGroup := engine.Group("rs/crud/person") personGroup.GET("/:id", controllers.GetPerson) personGroup.GET("/", controllers.GetPagePerson) personGroup.PUT("/:id", controllers.PutPerson) personGroup.DELETE("/:id", controllers.DeletePerson) }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func MakePersonHandlers(r *mux.Router, n negroni.Negroni, service person.UseCase) {\n\tr.Handle(\"/person\", n.With(\n\t\tnegroni.Wrap(findAllPersons(service)),\n\t)).Methods(\"GET\", \"OPTIONS\").Name(\"findAllPersons\")\n\n\tr.Handle(\"/person/{key}\", n.With(\n\t\tnegroni.Wrap(findPersonByKey(service)),\n\t)).Methods(\"GET\", \"OPTIONS\").Name(\"findPersonByKey\")\n\n\tr.Handle(\"/person\", n.With(\n\t\tnegroni.Wrap(personAdd(service)),\n\t)).Methods(\"POST\", \"OPTIONS\").Name(\"personAdd\")\n\n\tr.Handle(\"/persons\", n.With(\n\t\tnegroni.Wrap(personMultiAdd(service)),\n\t)).Methods(\"POST\", \"OPTIONS\").Name(\"personMultiAdd\")\n\n\tr.Handle(\"/person/{key}\", n.With(\n\t\tnegroni.Wrap(deletePerson(service)),\n\t)).Methods(\"DELETE\", \"OPTIONS\").Name(\"deletePerson\")\n\n}", "func RegisterRoutesPersons(mux *mux.Router, person interfaces.PersonDao) {\n\thandler := handlers.NewPersonHandler(person)\n\tmux.HandleFunc(\"/persons\", handler.CreatePerson).Methods(http.MethodPost)\n\tmux.HandleFunc(\"/persons/{id}\", handler.GetOne).Methods(http.MethodGet)\n\tmux.HandleFunc(\"/persons/signin\", handler.SignIn).Methods(http.MethodPost)\n\tmux.Handle(\"/persons/update\", middlewares.Authenticate(http.HandlerFunc(handler.Update))).Methods(http.MethodPut)\n\tmux.Handle(\"/persons/new-professor\", middlewares.Authenticate(middlewares.PersonRole(http.HandlerFunc(handler.CreateProfessor), 0))).Methods(http.MethodPost)\n\tmux.HandleFunc(\"/persons/section/{id}/{startDate}/{endDate}\", handler.GetAllBySectionIDAndDateRange).Methods(http.MethodGet)\n}", "func main() {\n\trouter := mux.NewRouter()\n\trouter.HandleFunc(\"/people\", routes.GetPeople).Methods(\"GET\")\n\trouter.HandleFunc(\"/people/{id}\", routes.GetPerson).Methods(\"GET\")\n\trouter.HandleFunc(\"/people/{id}\", routes.CreatePerson).Methods(\"POST\")\n\trouter.HandleFunc(\"/people/{id}\", routes.DeletePerson).Methods(\"DELETE\")\n\tlog.Fatal(http.ListenAndServe(\":8080\", router))\n}", "func Router(route 
*gin.Engine) gin.IRoutes {\n\treturn route.Handle(\"GET\", common.Prefix+\"/person\", ControllerGetAll).\n\t\tHandle(\"GET\", common.Prefix+\"/person/:id\", ControllerGetByID).\n\t\tHandle(\"POST\", common.Prefix+\"/person\", ControllerCreate).\n\t\tHandle(\"PUT\", common.Prefix+\"/person/:id\", ControllerUpdate)\n}", "func (api *Api) setRouters() {\n\tapi.Post(\"/person\", api.handleRequest(handler.CreatePerson))\n\tapi.Patch(\"/person/{id}\", api.handleRequest(handler.UpdatePerson))\n\tapi.Put(\"/person/{id}\", api.handleRequest(handler.UpdatePerson))\n\tapi.Get(\"/person/{id}\", api.handleRequest(handler.GetPerson))\n\tapi.Get(\"/person\", api.handleRequest(handler.GetPersons))\n\tapi.Get(\"/person\", api.handleRequest(handler.GetPersons), \"page\", \"{page}\")\n}", "func HandlePerson(w http.ResponseWriter, r *http.Request) {\n\tkeys := r.URL.Query()[\"id\"]\n\tpersonID := keys[0]\n\n\ttmpl := template.Must(template.ParseFiles(\"views/detail-page.html\"))\n\tvar page model.DetailPageResponse\n\n\tvar wg sync.WaitGroup\n\twg.Add(1)\n\tperson, pErr := model.GetInitialPerson(lib.BaseURL+\"people/\"+personID, &wg)\n\tif pErr != nil {\n\t\tfmt.Println(pErr.Error())\n\t}\n\twg.Wait()\n\n\tfmt.Println(\"person\", person)\n\tpage.PageTitle = \"Person\"\n\tpage.MainCard.Title = \"Name: \" + person.Name\n\tpage.MainCard.Body1 = \"Born: \" + person.BirthYear\n\tpage.MainCard.Body2 = \"Gender: \" + person.Gender\n\tpage.MainCard.Body3 = \"Height: \" + person.Height + \" CM\"\n\tpage.MainCard.Body4 = \"Mass: \" + person.Mass + \" KG\"\n\tpage.MainCard.Body5 = \"Eyes: \" + person.EyeColor + \" eyes\"\n\n\thomeworld, hErr := person.GetHomeworld()\n\tif hErr != nil {\n\t\tfmt.Println(hErr.Error())\n\t}\n\tpage.MainCard.SubTitle = \"Homeworld: \" + homeworld.Name\n\tfmt.Println(\"Homeworld\", homeworld)\n\n\t// vehicles\n\tif len(person.Vehicles) > 0 {\n\t\tvehicleChannel := make(chan []model.Vehicle)\n\t\tgo person.GetVehicles(vehicleChannel)\n\t\tvehicles := 
<-vehicleChannel\n\t\tpage.Cards1 = make([]model.SubCard, 0)\n\t\tfor _, vehicle := range vehicles {\n\t\t\tpage.Cards1 = append(page.Cards1, model.GetVehicleCard(vehicle))\n\t\t}\n\t}\n\n\tif len(person.Species) > 0 {\n\t\tspeciesChannel := make(chan []model.Species)\n\t\tgo person.GetSpecies(speciesChannel)\n\t\tspecies := <-speciesChannel\n\t\tpage.Cards2Title = \"Species\"\n\t\tpage.Cards2 = make([]model.SubCard, 0)\n\t\tfor _, specie := range species {\n\t\t\tpage.Cards2 = append(page.Cards2, model.GetSpeciesCard(specie))\n\t\t}\n\t}\n\n\t// starships\n\tif len(person.Starships) > 0 {\n\t\tstarshipChannel := make(chan []model.Starship)\n\t\tgo person.GetStarships(starshipChannel)\n\t\tstarships := <-starshipChannel\n\t\tpage.Cards3Title = \"Starships\"\n\t\tpage.Cards3 = make([]model.SubCard, 0)\n\t\tfor _, ship := range starships {\n\t\t\tpage.Cards3 = append(page.Cards3, model.GetStarshipCard(ship))\n\t\t}\n\t}\n\n\ttmpl.Execute(w, page)\n}", "func RegisterPersons(party iris.Party) {\n\tcrs := cors.New(cors.Options{\n\t\tAllowedOrigins: []string{\"*\"}, // allows everything, use that to change the hosts.\n\t\tAllowCredentials: true,\n\t\tAllowedMethods: []string{iris.MethodGet, iris.MethodPost, iris.MethodPut, iris.MethodDelete},\n\t})\n\tpersons := party.Party(\"/persons\", crs).AllowMethods(iris.MethodOptions)\n\t{\n\t\tpersons.Get(\"\", GetPersons)\n\t\tpersons.Get(\"/{id:int}\", GetPersonByID)\n\t\tpersons.Post(\"\", AddPerson)\n\t\tpersons.Put(\"/{id:int}\", UpdatePerson)\n\t\tpersons.Delete(\"/{id:int}\", DeletePerson)\n\t}\n}", "func Routes(r *gin.Engine) {\n\n\tv1 := r.Group(\"/v1\")\n\t{\n\t\t//// rutas publicas\n\t\tv1.GET(\"/testv1\", users.ServiceTest)\n\t\tv1.POST(\"/register\", users.Register)\n\t\tv1.POST(\"/login\", auth.HandleLogin)\n\t}\n\t//// rutas privadas\n\tv1.Use(middlewares.AuthHandler(\"\"))\n\t{\n\t\t// Companies\n\t\tcompaniesGroup := v1.Group(\"companies\")\n\t\tcompaniesGroup.GET(\"/\", 
company.GetAll)\n\t\tcompaniesGroup.POST(\"/create\", company.CreateNewCompany)\n\t\tcompaniesGroup.PUT(\"/changeStatus\", company.ChangeStatusCompany)\n\t\tcompaniesGroup.DELETE(\"/deleteCompany/:id\", company.DeleteCompany)\n\n\t\tcompaniesGroup.GET(\"/myCompany\", company.MyCompany)\n\t\tcompaniesGroup.GET(\"/myWorkShop\", company.MyWorkShop)\n\n\t\t// // Mechanic\n\t\tworkshops := v1.Group(\"workshop\")\n\t\tworkshops.GET(\"/\", workshop.Get)\n\t\tworkshops.GET(\"/search/:workshopID\", workshop.GetByID)\n\t\tworkshops.GET(\"/all\", workshop.GetAll)\n\t\tworkshops.POST(\"/create\", workshop.Create)\n\n\t\t// Mechanic\n\t\tmechanicGroup := v1.Group(\"mechanic\")\n\t\tmechanicGroup.POST(\"/create\", mechanic.Create)\n\t\tmechanicGroup.GET(\"/\", mechanic.Get)\n\t\tmechanicGroup.GET(\"/myMechanics\", mechanic.MyMechanics)\n\n\t\t// routines\n\t\troutine := v1.Group(\"routines\")\n\t\troutine.GET(\"\", routines.Get)\n\t\troutine.GET(\"/byWorkshop\", routines.GetByWorkshop)\n\t\troutine.GET(\"/byWorkshopID/:workshopID\", routines.GetByWorkshopID)\n\n\t\troutine.POST(\"/addRoutineByWorkshop\", routines.AddRoutineByWorkshop)\n\t\troutine.GET(\"/getTreatingMechanic/:workshopID/:vehicleID\", routines.GetTreatingMechanic)\n\n\t\tappointments := v1.Group(\"appointments\")\n\t\tappointments.GET(\"/\", appointment.GetAppointments)\n\n\t\t// users\n\t\tuser := v1.Group(\"user\")\n\t\tuser.GET(\"/\", users.GetDataUser)\n\t\tuser.GET(\"/myWorkShop\", users.GetDataWorkShopData)\n\t\tuser.PUT(\"/reset\", users.ResetPassword)\n\n\t\t// brands\n\t\tbrand := v1.Group(\"brands\")\n\t\tbrand.GET(\"\", brands.GetAll)\n\n\t\t// brands\n\t\tvehicle := v1.Group(\"vehicles\")\n\t\tvehicle.GET(\"\", vehicles.GetAllVehicles)\n\t\tvehicle.POST(\"\", vehicles.Create)\n\n\t}\n}", "func (r *RestAPI[T]) Router(router *gin.RouterGroup) {\n\trouter.GET(\"/\", r.list)\n\trouter.POST(\"\", r.create)\n\trouter.PUT(\"/:id\", r.replace)\n\trouter.DELETE(\"/:id\", r.delete)\n}", "func Routes(DB *gorm.DB, 
group echo.Group) {\n\t/*\n\t\tPasamos al controlador de la entidad el grupo de las rutas y le inyectamos la configuracion de base de datos\n\n\t\tWe pass to the heandler of the entity the group of the routes and we inject the database configuration\n\t*/\n\n\tv1.NewUserController(group, DB)\n\tv1.NewCharacterController(group, DB)\n\tv1.NewAwardController(group, DB)\n\tv1.NewTaskController(group, DB)\n\n}", "func RouteToV1(r *router.MyRouter) {\n\t// User\n\tr.HandlerFunc(\"POST\", \"/members/\", \"CreateMemberUser\", handler.CreateMemberUser)\n\tr.HandlerFunc(\"GET\", \"/members/\", \"ListMemberUsers\", handler.ListMemberUsers)\n\tr.HandlerFunc(\"GET\", \"/members/:mid/\", \"GetMemberUser\", handler.GetMemberUser)\n\tr.HandlerFunc(\"DELETE\", \"/members/:mid/\", \"DeleteMemberUser\", handler.DeleteMemberUser)\n\n\t// Token\n\tr.HandlerFunc(\"POST\", \"/oauth2/tokens/\", \"IssueToken\", handler.IssueToken)\n\tr.HandlerFunc(\"GET\", \"/oauth2/tokens/:tk/\", \"ValidateToken\", handler.ValidateToken)\n\tr.HandlerFunc(\"DELETE\", \"/oauth2/tokens/:tk/\", \"RevolkToken\", handler.RevolkToken)\n\n\t// Project\n\tr.HandlerFunc(\"POST\", \"/projects/\", \"CreateProject\", handler.CreateProject)\n\tr.HandlerFunc(\"GET\", \"/projects/\", \"ListDomainProjects\", handler.ListDomainProjects)\n\tr.HandlerFunc(\"GET\", \"/self/projects/\", \"ListUserProjects\", handler.ListUserProjects)\n\tr.HandlerFunc(\"GET\", \"/projects/:pid/\", \"GetProject\", handler.GetProject)\n\tr.HandlerFunc(\"DELETE\", \"/projects/:pid/\", \"DeleteProject\", handler.DeleteProject)\n\tr.HandlerFunc(\"GET\", \"/projects/:pid/members/\", \"ListProjectUser\", handler.ListProjectUser)\n\tr.HandlerFunc(\"POST\", \"/projects/:pid/members/\", \"AddUsersToProject\", handler.AddUsersToProject)\n\tr.HandlerFunc(\"DELETE\", \"/projects/:pid/members/\", \"RemoveUsersFromProject\", handler.RemoveUsersFromProject)\n\n\t// Application\n\tr.HandlerFunc(\"POST\", \"/applications/\", \"CreateApplication\", 
handler.CreateApplication)\n\tr.HandlerFunc(\"GET\", \"/applications/\", \"ListUserApplications\", handler.ListUserApplications)\n\tr.HandlerFunc(\"GET\", \"/applications/:aid/\", \"GetApplication\", handler.GetApplication)\n\tr.HandlerFunc(\"DELETE\", \"/applications/:aid/\", \"DeleteApplication\", handler.DeleteApplication)\n\t// // r.HandlerFunc(\"PUT\", \"/v1/users/:uid/applications/:aid/\", handler.UpdateApplication)\n\n\t// Service\n\tr.HandlerFunc(\"POST\", \"/services/\", \"CreateService\", handler.CreateService)\n\tr.HandlerFunc(\"GET\", \"/services/\", \"ListServices\", handler.ListServices)\n\tr.HandlerFunc(\"GET\", \"/services/:sid/\", \"GetService\", handler.GetService)\n\tr.HandlerFunc(\"DELETE\", \"/services/:sid/\", \"DeleteService\", handler.DeleteService)\n\tr.HandlerFunc(\"POST\", \"/features/\", \"RegistryServiceFeatures\", handler.RegistryServiceFeatures)\n\tr.HandlerFunc(\"GET\", \"/services/:sid/features/\", \"ListServiceFeatures\", handler.ListServiceFeatures)\n\n\t// Role\n\tr.HandlerFunc(\"POST\", \"/roles/\", \"CreateRole\", handler.CreateRole)\n\tr.HandlerFunc(\"GET\", \"/roles/\", \"ListRoles\", handler.ListRoles)\n\tr.HandlerFunc(\"GET\", \"/roles/:ri/\", \"GetRole\", handler.GetRole)\n\tr.HandlerFunc(\"DELETE\", \"/roles/:ri/\", \"DeleteRole\", handler.DeleteRole)\n\tr.HandlerFunc(\"POST\", \"/roles/:ri/features/\", \"AddFeaturesToRole\", handler.AddFeaturesToRole)\n\tr.HandlerFunc(\"DELETE\", \"/roles/:ri/features/\", \"RemoveFeaturesFromRole\", handler.RemoveFeaturesFromRole)\n\n\t// r.HandlerFunc(\"POST\", \"/v1/domains/users/\", \"CreateDomainUser\", handler.CreateDomainUser)\n\t// r.HandlerFunc(\"GET\", \"/v1/users/:uid/domains/\", \"ListUserDomain\", handler.ListUserDomain)\n\t// r.HandlerFunc(\"PUT\", \"/v1/users/:uid/password/\", \"SetUserPassword\", handler.SetUserPassword)\n\t// r.HandlerFunc(\"DELETE\", \"/v1/unregistry/\", \"UnRegistry\", handler.UnRegistry)\n\t// r.HandlerFunc(\"POST\", \"/v1/users/:uid/projects/\", 
\"AddProjectsToUser\", handler.AddProjectsToUser)\n\t// r.HandlerFunc(\"DELETE\", \"/v1/users/:uid/projects/\", \"RemoveProjectsFromUser\", handler.RemoveProjectsFromUser)\n\t// r.HandlerFunc(\"POST\", \"/v1/users/:uid/bind/roles/:rn/\", \"BindRole\", handler.BindRole)\n\t// r.HandlerFunc(\"POST\", \"/v1/users/:uid/unbind/roles/:rn/\", \"UnBindRole\", handler.UnBindRole)\n\t// r.HandlerFunc(\"POST\", \"/v1/invitations/\", \"InvitationsUser\", handler.InvitationsUser)\n\t// r.HandlerFunc(\"DELETE\", \"/v1/invitations/:code/\", \"RevolkInvitation\", handler.RevolkInvitation)\n\t// r.HandlerFunc(\"GET\", \"/v1/invitations/\", \"ListInvitationsRecords\", handler.ListInvitationsRecords)\n\t// r.HandlerFunc(\"GET\", \"/v1/invitations/:code/\", \"GetInvitationsRecord\", handler.GetInvitationsRecord)\n\t// r.HandlerFunc(\"POST\", \"/v1/registry/\", \"RegistryUser\", handler.RegistryUser)\n\t// r.HandlerFunc(\"POST\", \"/v1/verifycode/\", \"IssueVerifyCode\", handler.IssueVerifyCode)\n\t// r.HandlerFunc(\"POST\", \"/v1/invitations/users/:uid/code/:code/\", \"AcceptInvitation\", handler.AcceptInvitation)\n\t// r.HandlerFunc(\"PUT\", \"/v1/users/:uid/\", handler.UpdateUser)\n\t// r.HandlerFunc(\"POST\", \"/v1/default/projects/:pid/\", \"SetUserDefaultProject\", handler.SetUserDefaultProject)\n\n\tr.AddV1Root()\n}", "func PersonsIndex(c *gin.Context) {\r\n\tvar lis []models.Person\r\n\r\n\tdb, _ := c.Get(\"db\")\r\n\tconn := db.(gorm.DB)\r\n\r\n\t// Migrate the schema\r\n\tconn.AutoMigrate(&models.Person{})\r\n\r\n\tconn.Find(&lis)\r\n\tc.JSON(http.StatusOK, gin.H{\r\n\t\t\"msg\": \"thank you\",\r\n\t\t\"r\": lis,\r\n\t})\r\n\r\n}", "func makeRestRoutes(router *gin.Engine, controller *dependency.Dependency) {\n\tapi := router.Group(\"api\")\n\t{\n\t\t// link forward , hitting this route will cause the link to be resolved and forwarded in the browser\n\t\tapi.GET(\"forward/links/:id\", controller.ForwardLink)\n\t\t// get them the json record\n\t\tapi.GET(\"links/:id\", 
controller.FetchLink)\n\t\t// generate a new link\n\t\tapi.POST(\"links\", controller.ShortenLink)\n\t}\n}", "func peliculaRouter(pHandler *ph.Pelicula) http.Handler {\n\tr := chi.NewRouter()\n\tr.Get(\"/\", pHandler.Fetch)\n\tr.Get(\"/{id:[0-9]+}\", pHandler.GetByID)\n\tr.Post(\"/\", pHandler.Create)\n\tr.Put(\"/{id:[0-9]+}\", pHandler.Update)\n\tr.Delete(\"/{id:[0-9]+}\", pHandler.Delete)\n\treturn r\n}", "func Routes(router *gin.Engine) {\n\tuserGroup := router.Group(\"/users\")\n\t{\n\t\tuserGroup.GET(\"/\", usersroutes.NewUserRoutes().Show)\n\t\tuserGroup.POST(\"/\", usersroutes.NewUserRoutes().Create)\n\t\tuserGroup.OPTIONS(\"/\", usersroutes.NewUserRoutes().Options)\n\t}\n}", "func UsersRoutes(router *gin.Engine, controller *controllers.UsersController) {\n\tuserRoutes := router.Group(\"/users\")\n\t{\n\t\tuserRoutes.POST(\"/\", controller.CreateUserController)\n\t}\n}", "func (projectController *ProjectController) Routes(base *gin.RouterGroup, authNormal *jwt.GinJWTMiddleware) *gin.RouterGroup {\n\n\t// Projects - Rutas\n\tprojectRouter := base.Group(\"/projects\") //, middleware.SetRoles(RolAdmin, RolUser), authNormal.MiddlewareFunc())\n\t{\n\t\tprojectRouter.GET(\"\", projectController.GetAll())\n\t\t// Al agregar asociar con usuario\n\t\tprojectRouter.POST(\"\", authNormal.MiddlewareFunc(), projectController.Create())\n\t\tprojectRouter.GET(\"/:id\", projectController.One())\n\t\t// Verificar en handler que el perro sea dueño de usuario\n\t\tprojectRouter.PUT(\"/:id\", authNormal.MiddlewareFunc(), projectController.Update())\n\t\t// Solo admin puede eliminar\n\t\tprojectRouter.DELETE(\"/:id\", middleware.SetRoles(RolAdmin), authNormal.MiddlewareFunc(), projectController.Delete())\n\t}\n\treturn projectRouter\n}", "func Routes(router *gin.RouterGroup) {\n\tr := &pipeRoutes{}\n\n\trouter.GET(\"/\", r.get)\n\trouter.GET(\"/:pipe-id\", r.get)\n\trouter.DELETE(\"/:pipe-id\", r.delete)\n\trouter.POST(\"/\", r.post)\n}", "func GetPersonEndpoint(w 
http.ResponseWriter, req *http.Request) {\n\tparams := mux.Vars(req)\n\tfor _, item := range people {\n\t\tif item.ID == params[\"id\"] {\n\t\t\tjson.NewEncoder(w).Encode(item)\n\t\t\treturn\n\t\t}\n\t}\n\tjson.NewEncoder(w).Encode(&Person{})\n}", "func registerRoutes() {\n\tuserRoutes()\n\troleRoutes()\n}", "func SetupRouter() *gin.Engine {\n\tr := gin.Default()\n\tgrp1 := r.Group(\"/user-api\")\n\t{\n\t\tgrp1.GET(\"user\", Controllers.GetUsers)\n\t\tgrp1.POST(\"user\", Controllers.CreateUser)\n\t\tgrp1.GET(\"user/:id\", Controllers.GetUserByID)\n\t\tgrp1.PUT(\"user/:id\", Controllers.UpdateUser)\n\t\tgrp1.DELETE(\"user/:id\", Controllers.DeleteUser)\n\t}\n\tgrp2 := r.Group(\"/category-api\")\n\t{\n\t\tgrp2.GET(\"categories\", Controllers.GetCategories)\n\t}\n\tgrp3 := r.Group(\"/food-api\")\n\t{\n\t\tgrp3.GET(\"foods/:id\", Controllers.GetFoods)\n\t}\n\n\treturn r\n}", "func (h Handler) Routes(e *echo.Group) {\n\te.GET(\"\", h.Find)\n\te.GET(\"/:gameID\", h.Retrieve)\n\te.POST(\"\", h.Create)\n\te.PATCH(\"/:gameID\", h.Apply)\n\n}", "func main() {\n\t// Init router\n\trouter := mux.NewRouter()\n\n\t// Hardcoded data - @todo: add database\n/*\treservations = append(reservations, Reservation{ID: \"1\", StartTime: \"438227\", EndTime: \"438227\", CarID: \"23\", GarageID: \"231\"})\n\treservations = append(reservations, Reservation{ID: \"2\", StartTime: \"438227\", EndTime: \"438227\", CarID: \"11\", GarageID: \"232\"})\n\tusers = append(users, User{ID: \"1\", Username: \"filik\", Email: \"yo@dot\", Password: \"1234\"})\n\tusers = append(users, User{ID: \"1\", Username: \"filik\", Email: \"yo@dot\", Password: \"1234\"})\n\tcars = append(cars, Car{ID: \"1\", UserID: \"23\", Model: \"BMW\"})\n\tcars = append(cars, Car{ID: \"2\", UserID: \"23\", Model: \"MERCEDES\"})\n\tgarages = append(garages, Garage{ID: \"1\", Name: \"1st\", MaxCars: \"5\"})\n\tgarages = append(garages, Garage{ID: \"2\", Name: \"2nd\", MaxCars: \"3\"})\n\thistory = append(history, History{ID: \"1\", 
ReservationID: \"2\"})\n*/\n\t// Reservation route handles & endpoints\n\trouter.HandleFunc(\"/reservations\", GetReservations).Methods(\"GET\")\n\trouter.HandleFunc(\"/reservations/\", GetReservations).Methods(\"GET\")\n\trouter.HandleFunc(\"/reservations/{id}\", GetReservation).Methods(\"GET\")\n\trouter.HandleFunc(\"/reservations/\", CreateReservation).Methods(\"POST\")\n\trouter.HandleFunc(\"/reservations\", CreateReservation).Methods(\"POST\")\n\t//router.HandleFunc(\"/reservations/{id}\", UpdateReservation).Methods(\"PUT\")\n\trouter.HandleFunc(\"/reservations/{id}\", DeleteReservation).Methods(\"DELETE\")\n\trouter.HandleFunc(\"/reservations-by-user/{id}\", GetReservationsByUser).Methods(\"GET\")\n\n\t// Users route handles & endpoints\n\trouter.HandleFunc(\"/users\", GetUsers).Methods(\"GET\")\n\trouter.HandleFunc(\"/users/\", GetUsers).Methods(\"GET\")\n\trouter.HandleFunc(\"/users/{id}\", GetUser).Methods(\"GET\")\n\trouter.HandleFunc(\"/users\", CreateUser).Methods(\"POST\")\n\trouter.HandleFunc(\"/users/\", CreateUser).Methods(\"POST\")\n\t//router.HandleFunc(\"/users/{id}\", UpdateUser).Methods(\"PUT\")\n\trouter.HandleFunc(\"/users/{id}\", DeleteUser).Methods(\"DELETE\")\n\n\t// Garages route handles & endpoints\n\trouter.HandleFunc(\"/garages\", GetGarages).Methods(\"GET\")\n\trouter.HandleFunc(\"/garages/\", GetGarages).Methods(\"GET\")\n\trouter.HandleFunc(\"/garages/{id}\", GetGarage).Methods(\"GET\")\n\trouter.HandleFunc(\"/garages/\", CreateGarage).Methods(\"POST\")\n\trouter.HandleFunc(\"/garages\", CreateGarage).Methods(\"POST\")\n\t//router.HandleFunc(\"/garages/{id}\", UpdateGarage).Methods(\"PUT\")\n\trouter.HandleFunc(\"/garages/{id}\", DeleteGarage).Methods(\"DELETE\")\n\n\t// Cars route handles & endpoints\n\trouter.HandleFunc(\"/cars\", GetCars).Methods(\"GET\")\n\trouter.HandleFunc(\"/cars/\", GetCars).Methods(\"GET\")\n\trouter.HandleFunc(\"/cars/{id}\", GetCar).Methods(\"GET\")\n\trouter.HandleFunc(\"/cars\", 
CreateCar).Methods(\"POST\")\n\trouter.HandleFunc(\"/cars\", CreateCar).Methods(\"POST\")\n\trouter.HandleFunc(\"/get-cars-by-user/{id}\", GetCarsByUser).Methods(\"GET\")\n\t//router.HandleFunc(\"/cars/{id}\", UpdateCar).Methods(\"PUT\")\n\trouter.HandleFunc(\"/cars/{id}\", DeleteCar).Methods(\"DELETE\")\n\n\t// History route handler & endpoint\n\trouter.HandleFunc(\"/history/\", GetHistory).Methods(\"GET\")\n\n\t// Start server\n\tlog.Fatal(http.ListenAndServe(\":8888\", router))\n}", "func Router(g *gin.RouterGroup) {\n\tcontroller := controllers.UserController{}\n\t{\n\t\tg.GET(\"/users\", controller.GetUsers)\n\t\tg.GET(\"/user/:id\", controller.GetUser)\n\t\tg.POST(\"/user\", controller.CreateUser)\n\t}\n}", "func (api *api) RegisterRouting(g *echo.Group) {\n\n\tgrp := g.Group(\"/v2/products\")\n\tgrp.GET(\"\", api.Service.List)\n\tgrp.GET(\"/:id\", api.Service.Get)\n\tgrp.POST(\"\", api.Service.Create)\n\tgrp.PUT(\"/:id\", api.Service.Update)\n\tgrp.DELETE(\"/:id\", api.Service.Delete)\n\n}", "func (p *Endpoint) Routes(router component.IRouter) {\n\trouter.Post(\"/v1/user\", p.Create)\n\trouter.Get(\"/v1/user/:user_id\", p.Show)\n\trouter.Get(\"/v1/userSelf\", p.ShowSelf)\n\trouter.Get(\"/v1/user\", p.Index)\n\trouter.Put(\"/v1/user/:user_id\", p.Update)\n\trouter.Delete(\"/v1/user/:user_id\", p.Destroy)\n\trouter.Delete(\"/v1/user\", p.DestroyAll)\n}", "func ApplyRoutes(r *gin.RouterGroup) {\n\tposts := r.Group(\"/users\")\n\t{\n\t\tposts.GET(\"/\", list)\n\t}\n}", "func Router(r *gin.Engine) {\n\t// db := common.LoadDatabase()\n\tr.Any(\"/api/v1/location/*path1\", deflt)\n\t// r.GET(\"/user/list\", func(c *gin.Context) {\n\t// \tc.String(http.StatusOK, \"wow\")\n\t// })\n}", "func (ctl *AuthorController) register() {\n\tAuthors := ctl.router.Group(\"/Authors\")\n\n\tAuthors.GET(\"\", ctl.ListAuthor)\n\n\t// CRUD\n\tAuthors.POST(\"\", ctl.CreateAuthor)\n\tAuthors.GET(\":id\", ctl.GetAuthor)\n\tAuthors.PUT(\":id\", ctl.UpdateAuthor)\n\tAuthors.DELETE(\":id\", 
ctl.DeleteAuthor)\n}", "func generateUserAPIRoutes(router *mux.Router) {\n\tusers := router.PathPrefix(\"/users\").Subrouter()\n\tusers.Use(helpers.LoggingMiddleware)\n\tusers.HandleFunc(\"\", user.GetAll).Methods(\"GET\") // GET Request to handle all data present in the Database\n\n\tsub := router.PathPrefix(\"/user\").Subrouter()\n\tsub.Use(helpers.LoggingMiddleware)\n\t\n\tsub.HandleFunc(\"\", user.GetUser).Methods(\"GET\")\n}", "func Router(db *connectordb.Database, prefix *mux.Router) *mux.Router {\n\tif prefix == nil {\n\t\tprefix = mux.NewRouter()\n\t}\n\n\t//Allow for the application to match /path and /path/ to the same place.\n\tprefix.StrictSlash(true)\n\n\tprefix.HandleFunc(\"/\", restcore.Authenticator(ListUsers, db)).Queries(\"q\", \"ls\")\n\n\t//User CRUD\n\tprefix.HandleFunc(\"/{user}\", restcore.Authenticator(ListDevices, db)).Methods(\"GET\").Queries(\"q\", \"ls\")\n\tprefix.HandleFunc(\"/{user}\", restcore.Authenticator(ListDevices, db)).Methods(\"GET\").Queries(\"q\", \"devices\")\n\tprefix.HandleFunc(\"/{user}\", restcore.Authenticator(ListUserStreams, db)).Methods(\"GET\").Queries(\"q\", \"streams\")\n\tprefix.HandleFunc(\"/{user}\", restcore.Authenticator(ReadUser, db)).Methods(\"GET\")\n\tprefix.HandleFunc(\"/{user}\", restcore.Authenticator(CreateUser, db)).Methods(\"POST\")\n\tprefix.HandleFunc(\"/{user}\", restcore.Authenticator(UpdateUser, db)).Methods(\"PUT\")\n\tprefix.HandleFunc(\"/{user}\", restcore.Authenticator(DeleteUser, db)).Methods(\"DELETE\")\n\n\t//Device CRUD\n\tprefix.HandleFunc(\"/{user}/{device}\", restcore.Authenticator(ListStreams, db)).Methods(\"GET\").Queries(\"q\", \"ls\")\n\tprefix.HandleFunc(\"/{user}/{device}\", restcore.Authenticator(ListStreams, db)).Methods(\"GET\").Queries(\"q\", \"streams\")\n\tprefix.HandleFunc(\"/{user}/{device}\", restcore.Authenticator(ReadDevice, db)).Methods(\"GET\")\n\tprefix.HandleFunc(\"/{user}/{device}\", restcore.Authenticator(CreateDevice, 
db)).Methods(\"POST\")\n\tprefix.HandleFunc(\"/{user}/{device}\", restcore.Authenticator(UpdateDevice, db)).Methods(\"PUT\")\n\tprefix.HandleFunc(\"/{user}/{device}\", restcore.Authenticator(DeleteDevice, db)).Methods(\"DELETE\")\n\n\t//Stream CRUD\n\tprefix.HandleFunc(\"/{user}/{device}/{stream}\", restcore.Authenticator(ReadStream, db)).Methods(\"GET\")\n\tprefix.HandleFunc(\"/{user}/{device}/{stream}\", restcore.Authenticator(CreateStream, db)).Methods(\"POST\")\n\tprefix.HandleFunc(\"/{user}/{device}/{stream}\", restcore.Authenticator(UpdateStream, db)).Methods(\"PUT\")\n\tprefix.HandleFunc(\"/{user}/{device}/{stream}\", restcore.Authenticator(DeleteStream, db)).Methods(\"DELETE\")\n\n\tprefix.HandleFunc(\"/{user}/{device}/{stream}/data\", restcore.Authenticator(StreamLength, db)).Methods(\"GET\").Queries(\"q\", \"length\")\n\tprefix.HandleFunc(\"/{user}/{device}/{stream}/data\", restcore.Authenticator(StreamTime2Index, db)).Methods(\"GET\").Queries(\"q\", \"time2index\")\n\tprefix.HandleFunc(\"/{user}/{device}/{stream}/data\", restcore.Authenticator(StreamRange, db)).Methods(\"GET\")\n\tprefix.HandleFunc(\"/{user}/{device}/{stream}/data\", restcore.Authenticator(WriteStream, db)).Methods(\"POST\") //Restamp off\n\tprefix.HandleFunc(\"/{user}/{device}/{stream}/data\", restcore.Authenticator(WriteStream, db)).Methods(\"PUT\") //Restamp on\n\n\treturn prefix\n}", "func Router(r *gin.Engine) error {\n\n\t// helth check\n\tr.GET(\"/helth\", func(c *gin.Context) {\n\t\t// apm.TraceSeg(c, \"/helth\")\n\n\t\tc.JSON(200, gin.H{\n\t\t\t\"message\": \"helth check ok\",\n\t\t})\n\t})\n\n\t// Simple group\n\tapi := r.Group(\"/todo\")\n\t{\n\t\t// Entity の操作\n\t\tapi.POST(\"/task\", todo.Add)\n\t\tapi.PATCH(\"/task\", todo.Update)\n\t\tapi.GET(\"/task\", todo.Select)\n\n\t\t// Task event\n\t\tapi.POST(\"/task/done/:id\", todo.Done)\n\t\tapi.DELETE(\"/task/delete\", todo.Delete)\n\t}\n\n\treturn nil\n}", "func AddPeople(w http.ResponseWriter, r *http.Request) {\n\tmyDb, err 
:= db.StartDB(\"mydb.db\")\n\tif err != nil {\n\t\tfmt.Printf(\"Fail in open database: %v\\n\", err)\n\t\treturn\n\t}\n\n\t// Verify token\n\ttoken := r.Header.Get(\"AuthToken\")\n\tif (!myDb.IsLogIn([]byte(token))) {\n\t\tfmt.Printf(\"Unauthorized: %v\\n\", err)\n\t\t// 401: Unauthorized\n\t\tw.WriteHeader(http.StatusUnauthorized)\n\t\treturn\n\t}\n\n\t// Add a people\n\tvars := mux.Vars(r)\n\tpeopleId, err := strconv.Atoi(vars[\"peopleId\"])\n\n\tbody, err := ioutil.ReadAll(r.Body)\n if err != nil {\n\t\tfmt.Printf(\"Read body error: %v\\n\", err)\n\t\tw.WriteHeader(http.StatusNotFound)\n\t}\n\t\n\tif err := myDb.AddObj(\"people\", []byte(strconv.Itoa(peopleId)),[]byte(body)); err != nil {\n\t\tfmt.Printf(\"Read body error: %v\\n\", err)\n\t\tw.WriteHeader(http.StatusNotFound)\n\t}\n\t\n\tw.Header().Set(\"Content-Type\", \"application/json; charset=UTF-8\")\n\tw.WriteHeader(http.StatusOK)\n}", "func PublicRoutes(a *fiber.App) {\n\t// Create routes group.\n\troute := a.Group(\"/api/v1\")\n\n\t// Routes for GET method:\n\troute.Get(\"/books\", controllers.GetBooks) // get list of all books\n\troute.Get(\"/book/:id\", controllers.GetBook) // get one book by ID\n\n\t// Routes for POST method:\n\troute.Post(\"/user/sign/up\", controllers.UserSignUp) // register a new user\n\troute.Post(\"/user/sign/in\", controllers.UserSignIn) // auth, return Access & Refresh tokens\n}", "func SetupRouter() *gin.Engine {\n\tr := gin.Default()\n\tgrp1 := r.Group(\"/todolist\")\n\t{\n\t\tgrp1.GET(\"task\", controllers.GetAllTasks)\n\t\tgrp1.POST(\"task\", controllers.CreateTask)\n\t\tgrp1.GET(\"task/:id\", controllers.GetTaskByID)\n\t\t//grp1.GET(\"task/:taskstatus\", controllers.GetTaskByTaskstatus)\n\t\tgrp1.PUT(\"task/:id\", controllers.UpdateTask)\n\t\tgrp1.DELETE(\"task/:id\", controllers.DeleteTask)\n\t}\n\treturn r\n}", "func initalizeRoutes() {\n\n\tv1 := app.Group(\"/v1\")\n\n\t// Auth controller routes\n\taccountRoutes := 
v1.Group(\"/account\")\n\taccountRoutes.POST(\"/register\", accountController.Register)\n\taccountRoutes.POST(\"/login\", accountController.Login)\n\taccountRoutes.POST(\"/refresh-token\", accountController.RefreshToken)\n\n\t// Post controller routes\n\tpostRoutes := v1.Group(\"/posts\").Use(middleware.Authorization())\n\tpostRoutes.GET(\"/\", postController.GetAll)\n\n}", "func (c *Controller) AddRoutes(router *mux.Router) *mux.Router {\n\tfmt.Println(&c.Repo.users)\n\tbasePath := \"/api\"\n\trouter.HandleFunc(basePath+\"/login\", c.login).Methods(\"POST\")\n\trouter.HandleFunc(basePath+\"/register\", c.register).Methods(\"POST\")\n\trouter.HandleFunc(basePath+\"/users\", c.createUser).Methods(\"POST\")\n\trouter.HandleFunc(basePath+\"/users\", c.listUsers).Methods(\"GET\")\n\trouter.HandleFunc(basePath+\"/users/{id}\", c.getUser).Methods(\"GET\")\n\trouter.HandleFunc(basePath+\"/users/{id}\", c.updateUser).Methods(\"PUT\")\n\trouter.HandleFunc(basePath+\"/users/{id}\", c.deleteUser).Methods(\"DELETE\")\n\treturn router\n}", "func endpointManagement(r common.Router) common.Router {\n\t// programatically set swagger info\n\tdocs.SwaggerInfo.Title = \"gin swagger test\"\n\tdocs.SwaggerInfo.Description = \"This is a sample server for Swagger.!!!!!!\"\n\tdocs.SwaggerInfo.Version = \"1.0\"\n\tdocs.SwaggerInfo.Host = \"localhost:9000\"\n\tdocs.SwaggerInfo.BasePath = \"/v1\"\n\n\tr.Version = r.Engine.Group(\"/v1\")\n\n\t//SECTION x endpoints by function\n\tuser.NewUserV1Router(r, \"/user\")\n\tauth.NewAuthV1Router(r, \"/auth\")\n\n\tr.Engine.GET(\"/swagger/*any\", ginSwagger.WrapHandler(swaggerFiles.Handler))\n\n\treturn r\n}", "func main() {\n router := mux.NewRouter()\n router.HandleFunc(\"/payments\", GetPayments).Methods(\"GET\")\n router.HandleFunc(\"/payment/{id}\", GetPayment).Methods(\"GET\")\n router.HandleFunc(\"/payment/create/{id}\", CreatePayment).Methods(\"POST\")\n router.HandleFunc(\"/payment/{id}\", DeletePayment).Methods(\"DELETE\")\n 
router.HandleFunc(\"/payment/edit/{id}\", ModifyPayment).Methods(\"PATCH\")\n log.Fatal(http.ListenAndServe(\":8000\", router))\n}", "func main() {\n\trouter := mux.NewRouter().StrictSlash(true)\n\tsub := router.PathPrefix(\"/api/v1\").Subrouter()\n\tsub.Methods(\"GET\").Path(\"/companies\").HandlerFunc(handler.GetCompanies)\n\tsub.Methods(\"POST\").Path(\"/companies\").HandlerFunc(handler.SaveCompany)\n\tsub.Methods(\"GET\").Path(\"/companies/{name}\").HandlerFunc(handler.GetCompany)\n\tsub.Methods(\"PUT\").Path(\"/companies/{name}\").HandlerFunc(handler.UpdateCompany)\n\tsub.Methods(\"DELETE\").Path(\"/companies/{name}\").HandlerFunc(handler.DeleteCompany)\n\n\tlog.Fatal(http.ListenAndServe(\":3000\", router))\n}", "func Router() chi.Router {\n\tr := chi.NewRouter()\n\n\tr.Get(\"/\", list)\n\tr.Get(\"/{user_id}/permissions\", userpermissions)\n\n\treturn r\n}", "func init() {\n\t// system.Router.HandleFunc(\"/app/get/list/{crud}\", HandleListGeneric)\n}", "func CreateRouter(handlerFunc http.HandlerFunc) *mux.Router {\n router := mux.NewRouter()\n\n config := dots_config.GetSystemConfig()\n prefixPath := config.ClientRestfulApiConfiguration.RestfulApiPath\n\n restfulHandlerFunc := createRestfulHandlerFunc(handlerFunc)\n\n // router.HandleFunc(\"/test\", restfulHandlerFunc).Methods(\"GET\")\n router.HandleFunc(prefixPath + MITIGATION_PATH + \"/cuid={cuid}\", restfulHandlerFunc).Methods(\"GET\")\n router.HandleFunc(prefixPath + MITIGATION_PATH + \"/cuid={cuid}/mid={mid}\", restfulHandlerFunc).Methods(\"GET\")\n router.HandleFunc(prefixPath + MITIGATION_PATH + \"/cuid={cuid}/mid={mid}\", restfulHandlerFunc).Methods(\"PUT\")\n router.HandleFunc(prefixPath + MITIGATION_PATH + \"/cuid={cuid}/mid={mid}\", restfulHandlerFunc).Methods(\"DELETE\")\n\n return router\n}", "func CreatePerson(w http.ResponseWriter, r *http.Request) {\n\tw.Header().Set(\"Content-Type\", \"application/json; charset=utf-8\")\n\tw.Header().Set(\"Access-Control-Allow-Origin\", \"*\")\n\tparams := 
mux.Vars(r)\n\tvar person Person\n\t_ = json.NewDecoder(r.Body).Decode(&person)\n\tperson.ID = params[\"id\"]\n\tpeople = append(people, person)\n\tjson.NewEncoder(w).Encode(people)\n}", "func HandleAddPerson(w http.ResponseWriter, r *http.Request) {\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\treqBody, err := ioutil.ReadAll(r.Body)\n\tif err != nil {\n\t\thttp.Error(w, fmt.Sprintf(\"Body Read Error : %v\", err), http.StatusInternalServerError)\n\t\treturn\n\t}\n\tvar person db.Person\n\terr = json.Unmarshal(reqBody, &person)\n\n\tif err != nil {\n\t\thttp.Error(w, fmt.Sprintf(\"Request Body parse error : %v\", err), http.StatusBadRequest)\n\t\treturn\n\t}\n\tfmt.Println(\"new person\", person)\n\t_, err = Db.Exec(\"INSERT INTO people(name,city,contactNo,photoUrl) values(?,?,?,?)\", person.Name, person.City, person.ContactNo, person.PhotoUrl)\n\tif err != nil {\n\t\thttp.Error(w, fmt.Sprintf(\"ERROR in creating person %s\", err.Error()), http.StatusInternalServerError)\n\t\treturn\n\t}\n\tvar newPerson db.Person\n\tres, err := Db.Query(\"SELECT * FROM RD.people order by id Desc limit 1\")\n\tif err != nil {\n\t\thttp.Error(w, fmt.Sprintf(\"ERROR in creating person %s\", err.Error()), http.StatusBadRequest)\n\t\treturn\n\t}\n\tdefer res.Close()\n\tfor res.Next() {\n\t\terr = res.Scan(&newPerson.Id, &newPerson.Name, &newPerson.City, &newPerson.ContactNo, &newPerson.PhotoUrl)\n\t\tif err != nil {\n\t\t\thttp.Error(w, fmt.Sprintf(\"ERROR in creating person %s\", err.Error()), http.StatusInternalServerError)\n\t\t\treturn\n\t\t}\n\t}\n\tjson.NewEncoder(w).Encode(newPerson)\n}", "func GetAllPerson(c *gin.Context) {\n\tperson, _ := models.LoadPeople()\n\tc.JSON(http.StatusOK, person)\n\n}", "func (a *App) Resource(controllerName string, nested ...*route) *route {\n\troute := a.newRoute(controllerName, nil)\n\troute.subroutes = nested\n\troute.buildPatterns(\"\")\n\treturn route\n}", "func Route(mux *http.ServeMux) 
{\n\n\tmux.HandleFunc(\"/studyplan/create\", createStudyPlan)\n\tmux.HandleFunc(\"/subject/create\", createSubject)\n\tmux.HandleFunc(\"/career/create\", createCareer)\n\tmux.HandleFunc(\"/teacher/create\", createTeacher)\n\tmux.HandleFunc(\"/student/create\", createStudent)\n\n\tmux.HandleFunc(\"/studyplan/getall\", getAllStudyPlan)\n\tmux.HandleFunc(\"/subject/getall\", getAllSubjects)\n\tmux.HandleFunc(\"/career/getall\", getAllCareers)\n\tmux.HandleFunc(\"/teacher/getall\", getAllTeachers)\n\tmux.HandleFunc(\"/student/getall\", getAllStudents)\n\n\tmux.HandleFunc(\"/studyplan\", getStudyPlanById)\n\tmux.HandleFunc(\"/subject\", getSubjectById)\n\tmux.HandleFunc(\"/career\", getCareerById)\n\tmux.HandleFunc(\"/teacher\", getTeacherById)\n\tmux.HandleFunc(\"/student\", getStudentById)\n\n\tmux.HandleFunc(\"/studyplan/delete\", deleteStudyPlan)\n\tmux.HandleFunc(\"/subject/delete\", deleteSubject)\n\tmux.HandleFunc(\"/career/delete\", deleteCareer)\n\tmux.HandleFunc(\"/teacher/delete\", deleteTeacher)\n\tmux.HandleFunc(\"/student/delete\", deleteStudent)\n\n\tmux.HandleFunc(\"/studyplan/update\", UpdateStudyPlan)\n\tmux.HandleFunc(\"/subject/update\", UpdateSubject)\n\tmux.HandleFunc(\"/career/update\", updateCareer)\n\tmux.HandleFunc(\"/teacher/update\", updateTeacher)\n\tmux.HandleFunc(\"/student/update\", updateStudent)\n\t/* mux.HandleFunc(\"/v1/persons/getall\", middleware.Log(h.getAll))\n\tmux.HandleFunc(\"/v1/persons/update\", middleware.Log(h.update))\n\tmux.HandleFunc(\"/v1/persons/delete\", middleware.Log(h.delete))\n\tmux.HandleFunc(\"/v1/persons/getbyid\", middleware.Log(h.getById)) */\n}", "func Routes(app *fiber.App, service Service) {\n\tapi := app.Group(\"/api/v1\")\n\n\tapi.Get(\"/health\", func(c *fiber.Ctx) error {\n\t\treturn c.Status(http.StatusOK).\n\t\t\tJSON(map[string]interface{}{\n\t\t\t\t\"health\": \"ok\",\n\t\t\t\t\"status\": http.StatusOK,\n\t\t\t})\n\t})\n\n\t// public endpoint\n\tapi.Get(\"/token/new\", 
GetNewAccessToken)\n\tapi.Get(\"/travels\", service.getTravels)\n\tapi.Get(\"/travels/:id\", service.getTravel)\n\n\t// private endpoint\n\tapi.Post(\"/travels\", JWTProtected(), service.createTravel)\n\tapi.Put(\"/travels/:id\", JWTProtected(), service.updateTravel)\n\tapi.Delete(\"/travels/:id\", JWTProtected(), service.deleteTravel)\n}", "func (s *Server) setupRoutes() {\n\ts.Router.Static(\"/app\", \"./public\")\n\trouter := s.ApiRouter\n\n\t// This handler will match /user/john but will not match neither /user/ or /user\n\trouter.GET(\"/apps\", func(c *gin.Context) {\n\t\tapps, err := getAllApps()\n\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\n\t\tc.JSON(200, apps)\n\t})\n\n\t// This handler will match /user/john but will not match neither /user/ or /user\n\trouter.GET(\"/apps/:id\", func(c *gin.Context) {\n\t\tid := c.Param(\"id\")\n\t\tidInt, err := strconv.Atoi(id)\n\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\n\t\tapp, err := getApp(uint(idInt))\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t\tc.JSON(200, app)\n\t})\n\n\t// This handler will match /user/john but will not match neither /user/ or /user\n\trouter.GET(\"/apps/:id/history\", func(c *gin.Context) {\n\t\tid := getId(c.Param(\"id\"))\n\n\t\thistories, err := getAppHistory(uint(id))\n\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\n\t\tc.JSON(200, histories)\n\t})\n\n\trouter.POST(\"/apps\", func(c *gin.Context) {\n\t\tvar app = domain.App{}\n\t\tif err := c.BindJSON(&app); err != nil {\n\t\t\tpanic(err)\n\t\t}\n\n\t\terr := insertApp(&app)\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t} else {\n\t\t\tregisterCheck(app)\n\t\t\tc.JSON(http.StatusOK, app)\n\t\t}\n\t})\n\n\trouter.PUT(\"/apps/:id\", func(c *gin.Context) {\n\t\tid := getId(c.Param(\"id\"))\n\n\t\tvar app domain.App\n\t\tif err := c.BindJSON(&app); err != nil {\n\t\t\tpanic(err)\n\t\t}\n\n\t\toldApp, _ := getApp(uint(id))\n\n\t\terr := updateApp(uint(id), app)\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t} else {\n\t\t\tif 
app.CheckStatus != oldApp.CheckStatus {\n\t\t\t\tlastApp, _ := getApp(uint(id))\n\t\t\t\tupdateCheck(lastApp)\n\t\t\t}\n\t\t\tc.JSON(http.StatusOK, app)\n\t\t}\n\t})\n\n\trouter.DELETE(\"/apps/:id\", func(c *gin.Context) {\n\t\tid := getId(c.Param(\"id\"))\n\n\t\terr := deleteApp(uint(id))\n\t\tif err != nil {\n\t\t\tc.JSON(http.StatusBadRequest, gin.H{\"error\": err.Error()})\n\t\t} else {\n\t\t\tc.JSON(http.StatusOK, gin.H{\"status\": \"ok\"})\n\t\t}\n\t})\n}", "func SetupRouter() *gin.Engine {\n\tr := gin.Default()\n\n\tusers := r.Group(\"/users\")\n\t{\n\t\tusers.GET(\"/\", controllers.GetUsers)\n\t\tusers.GET(\"/:id\", controllers.GetUser)\n\t\tusers.POST(\"/\", controllers.CreateUser)\n\t\tusers.PATCH(\"/:id\", controllers.UpdateUser)\n\t\tusers.DELETE(\"/:id\", controllers.DeleteUser)\n\t}\n\n\treturn r\n}", "func SetupRoutes(app *fiber.App) {\n\tapi := app.Group(\"api/v1/\")\n\tusers := app.Group(\"/auth/v1\")\n}", "func AccountRouters(e *gin.Engine, u usecase.UseCase) {\n\te.POST(\"/accounts\", CreateAccount(u))\n\te.GET(\"/accounts/:accountId\", GetAccount(u))\n}", "func SetEmployeeRoutes(router *mux.Router) *mux.Router {\n\tempRouter := mux.NewRouter()\n\tempRouter.HandleFunc(\"/api/v1/orgs/employees/add\", controllers.AddEmployee).Methods(\"POST\")\n\tempRouter.HandleFunc(\"/api/v1/orgs/employees/child/add\", controllers.AddChildEmployee).Methods(\"POST\")\n\tempRouter.HandleFunc(\"/api/v1/orgs/employees/\", controllers.GetEmployees).Methods(\"GET\")\n\tempRouter.HandleFunc(\"/api/v1/orgs/employees/org/{orgId}\", controllers.GetEmployeessByOrg).Methods(\"GET\")\n\tempRouter.HandleFunc(\"/api/v1/orgs/employees/employee/{empId}\", controllers.GetEmployeesByMgr).Methods(\"GET\")\n\tempRouter.HandleFunc(\"/api/v1/orgs/employees/departments/{deptId}\", controllers.GetEmployeesByDept).Methods(\"GET\")\n\tempRouter.HandleFunc(\"/api/v1/orgs/employees/branches/{branchId}\", 
controllers.GetEmployeesByBranchMethods(\"GET\")\n\tempRouter.HandleFunc(\"/api/v1/orgs/employees/{id}\", controllers.GetEmployee).Methods(\"GET\")\n\tempRouter.HandleFunc(\"/api/v1/orgs/employees/users/{userId}\", controllers.GetEmployeeByUser).Methods(\"GET\")\n\tempRouter.HandleFunc(\"/api/v1/orgs/employees/edit/{id}\", controllers.EditEmployeeByAdmin).Methods(\"PUT\")\n\tempRouter.HandleFunc(\"/api/v1/orgs/employees/user/edit/{id}\", controllers.EditEmployeeByUser).Methods(\"PUT\")\n\tempRouter.HandleFunc(\"/api/v1/orgs/employees/biodata/add\", controllers.AddBiodataByEmp).Methods(\"POST\")\n\tempRouter.HandleFunc(\"/api/v1/orgs/employees/pid/add\", controllers.AddPIDByBio).Methods(\"POST\")\n\tempRouter.HandleFunc(\"/api/v1/orgs/employees/health/detail/add\", controllers.AddHealthDetByBio).Methods(\"POST\")\n\tempRouter.HandleFunc(\"/api/v1/orgs/employees/work/experience/add\", controllers.AddWorkExpByBio).Methods(\"POST\")\n\tempRouter.HandleFunc(\"/api/v1/orgs/employees/education/add\", controllers.AddEduByBio).Methods(\"POST\")\n\tempRouter.HandleFunc(\"/api/v1/orgs/employees/biodatas/employees/{id}\", controllers.GetBiosByEmp).Methods(\"GET\")\n\tempRouter.HandleFunc(\"/api/v1/orgs/employees/biodatas/edit/{id}\", controllers.GetBio).Methods(\"GET\")\n\tempRouter.HandleFunc(\"/api/v1/orgs/employees/biodatas/employee/edit/{empId}\", controllers.GetBioByEmp).Methods(\"GET\")\n\tempRouter.HandleFunc(\"/api/v1/orgs/employees/biodatas/{id}\", controllers.EditBioByAdmin).Methods(\"PUT\")\n\tempRouter.HandleFunc(\"/api/v1/orgs/employees/biodatas/employee/{empId}\", controllers.EditBioByAdmin).Methods(\"PUT\")\n\tempRouter.HandleFunc(\"/api/v1/orgs/employees/pids/biodatas/{bioId}\", controllers.GetPIDsByBio).Methods(\"GET\")\n\tempRouter.HandleFunc(\"/api/v1/orgs/employees/pids/{id}\", controllers.GetPID).Methods(\"GET\")\n\tempRouter.HandleFunc(\"/api/v1/orgs/employees/pids/biodata/{bioId}\", 
controllers.GetPIDByBio).Methods(\"GET\")\n\tempRouter.HandleFunc(\"/api/v1/orgs/employees/pids/biodata/edit/{bioId}\", controllers.EditPIDByBio).Methods(\"PUT\")\n\tempRouter.HandleFunc(\"/api/v1/orgs/employees/pids/edit/{id}\", controllers.EditPIDByAdmin).Methods(\"PUT\")\n\tempRouter.HandleFunc(\"/api/v1/orgs/employees/health/details/biodatas/{bioId}\", controllers.GetHealthDetsByBio).Methods(\"GET\")\n\tempRouter.HandleFunc(\"/api/v1/orgs/employees/health/details/{id}\", controllers.GetHealthDetail).Methods(\"GET\")\n\tempRouter.HandleFunc(\"/api/v1/orgs/employees/health/details/biodata/{bioId}\", controllers.GetHealthDetailByBio).Methods(\"GET\")\n\tempRouter.HandleFunc(\"/api/v1/orgs/employees/health/details/biodata/edit/{bioId}\", controllers.EditHealthDetailByBio).Methods(\"PUT\")\n\tempRouter.HandleFunc(\"/api/v1/orgs/employees/health/details/edit/{id}\", controllers.EditHealthDetailByAdmin).Methods(\"PUT\")\n\tempRouter.HandleFunc(\"/api/v1/orgs/employees/work/experiences/biodatas/{bioId}\", controllers.GetWorkExpsByBio).Methods(\"GET\")\n\tempRouter.HandleFunc(\"/api/v1/orgs/employees/work/experiences/{id}\", controllers.GetWorkExperience).Methods(\"GET\")\n\tempRouter.HandleFunc(\"/api/v1/orgs/employees/work/experiences/biodata/{bioId}\", controllers.GetWorkExperienceByBio).Methods(\"GET\")\n\tempRouter.HandleFunc(\"/api/v1/orgs/employees/work/experiences/biodata/edit/{bioId}\", controllers.EditWorkExperieceByBio).Methods(\"PUT\")\n\tempRouter.HandleFunc(\"/api/v1/orgs/employees/work/experiences/edit/{id}\", controllers.EditWorkExperieceByAdmin).Methods(\"PUT\")\n\tempRouter.HandleFunc(\"/api/v1/orgs/employees/educations/biodatas/{bioId}\", controllers.GetEdusByBio).Methods(\"GET\")\n\tempRouter.HandleFunc(\"/api/v1/orgs/employees/educations/{id}\", controllers.GetEducation).Methods(\"GET\")\n\tempRouter.HandleFunc(\"/api/v1/orgs/employees/educations/biodata/{bioId}\", 
controllers.GetEduByBio).Methods(\"GET\")\n\tempRouter.HandleFunc(\"/api/v1/orgs/employees/educations/biodata/edit/{bioId}\", controllers.EditEduByBio).Methods(\"PUT\")\n\tempRouter.HandleFunc(\"/api/v1/orgs/employees/educations/{id}\", controllers.EditEduByAdmin).Methods(\"PUT\")\n\tempRouter.HandleFunc(\"/api/v1/orgs/employees/expense/claims/add\", controllers.AddExpenseClaimByEmp).Methods(\"POST\")\n\tempRouter.HandleFunc(\"/api/v1/orgs/employees/leave/allocations/add\", controllers.AddLeaveAllocationByEmp).Methods(\"POST\")\n\tempRouter.HandleFunc(\"/api/v1/orgs/employees/leave/applications/add\", controllers.AddLeaveApplicationByEmp).Methods(\"POST\")\n\tempRouter.HandleFunc(\"/api/v1/orgs/employees/leave/blocklist/add\", controllers.AddLeaveBlockListByOrg).Methods(\"POST\")\n\tempRouter.HandleFunc(\"/api/v1/orgs/employees/holiday/list/add\", controllers.AddHolidayListByEmp).Methods(\"POST\")\n\tempRouter.HandleFunc(\"/api/v1/orgs/employees/appraisals/add\", controllers.AddAppraisalByEmp).Methods(\"POST\")\n\tempRouter.HandleFunc(\"/api/v1/orgs/employees/exit/add\", controllers.AddExitByEmp).Methods(\"POST\")\n\tempRouter.HandleFunc(\"/api/v1/orgs/employees/expense/claims/org/{orgId}\", controllers.GetExpenseClaimsByOrg).Methods(\"GET\")\n\tempRouter.HandleFunc(\"/api/v1/orgs/employees/expense/claims/managers/{mgrId}\", controllers.GetExpenseClaimsByMgr).Methods(\"GET\")\n\tempRouter.HandleFunc(\"/api/v1/orgs/employees/expense/claims/employees/{empId}\", controllers.GetExpenseClaimsByEmp).Methods(\"GET\")\n\tempRouter.HandleFunc(\"/api/v1/orgs/employees/expense/claims/{id}\", controllers.GetExpenseClaim).Methods(\"GET\")\n\tempRouter.HandleFunc(\"/api/v1/orgs/employees/expense/claims/managers/edit/{mgrId}\", controllers.EditExpenseClaimByMgr).Methods(\"PUT\")\n\tempRouter.HandleFunc(\"/api/v1/orgs/employees/expense/claims/employees/edit/{empId}\", 
controllers.EditExpenseClaimByEmp).Methods(\"PUT\")\n\tempRouter.HandleFunc(\"/api/v1/orgs/employees/leave/applications/orgs/{orgId}\", controllers.GetLeaveApplicationsByOrg).Methods(\"GET\")\n\tempRouter.HandleFunc(\"/api/v1/orgs/employees/leave/applications/managers/{mgrId}\", controllers.GetLeaveApplicationsByMgr).Methods(\"GET\")\n\tempRouter.HandleFunc(\"/api/v1/orgs/employees/leave/applications/employees/{empId}\", controllers.GetLeaveApplicationsByEmp).Methods(\"GET\")\n\tempRouter.HandleFunc(\"/api/v1/orgs/employees/leave/applications/{id}\", controllers.GetLeaveApplication).Methods(\"GET\")\n\tempRouter.HandleFunc(\"/api/v1/orgs/employees/leave/applications/edit/{id}\", controllers.EditLeaveApplicationByAdmin).Methods(\"PUT\")\n\tempRouter.HandleFunc(\"/api/v1/orgs/employees/leave/blocklists/orgs/{orgId}\", controllers.GetLeaveBlockListsByOrg).Methods(\"GET\")\n\tempRouter.HandleFunc(\"/api/v1/orgs/employees/leave/blocklists/{id}\", controllers.GetLeaveBlockList).Methods(\"GET\")\n\tempRouter.HandleFunc(\"/api/v1/orgs/employees/leave/blocklists/{id}\", controllers.EditLeaveBlockListByAdmin).Methods(\"PUT\")\n\tempRouter.HandleFunc(\"/api/v1/orgs/employees/holiday/lists/orgs/{orgId}\", controllers.GetHolidayListsByOrg).Methods(\"GET\")\n\tempRouter.HandleFunc(\"/api/v1/orgs/employees/holiday/lists/{id}\", controllers.GetHolidayList).Methods(\"GET\")\n\tempRouter.HandleFunc(\"/api/v1/orgs/employees/holiday/lists/edit/{id}\", controllers.EditHolidayListByAdmin).Methods(\"PUT\")\n\tempRouter.HandleFunc(\"/api/v1/orgs/employees/appraisals/orgs/{orgId}\", controllers.GetAppraisalsByOrg).Methods(\"GET\")\n\tempRouter.HandleFunc(\"/api/v1/orgs/employees/appraisals/managers/{mgrId}\", controllers.GetAppraisalsByMgr).Methods(\"GET\")\n\tempRouter.HandleFunc(\"/api/v1/orgs/employees/holiday/lists/edit/{id}\", controllers.GetEditHolidayListByAdmin).Methods(\"PUT\")\n\tempRouter.HandleFunc(\"/api/v1/orgs/employees/appraisals/edit/{id}\", 
controllers.EditAppraisalByAdmin).Methods(\"PUT\")\n\tempRouter.HandleFunc(\"/api/v1/orgs/employees/exits/orgs/{orgId}\", controllers.GetExitsByOrg).Methods(\"GET\")\n\tempRouter.HandleFunc(\"/api/v1/orgs/employees/exits/managers/{mgrId}\", controllers.GetExitsByMgr).Methods(\"GET\")\n\tempRouter.HandleFunc(\"/api/v1/orgs/employees/exits/{id}\", controllers.GetExit).Methods(\"GET\")\n\tempRouter.HandleFunc(\"/api/v1/orgs/employees/exits/edit/{id}\", controllers.EditExitByAdmin).Methods(\"PUT\")\n\tempRouter.HandleFunc(\"/api/v1/orgs/employees/salary/components/add\", controllers.AddSalaryComponent).Methods(\"POST\")\n\tempRouter.HandleFunc(\"/api/v1/orgs/employees/salary/employees/add\", controllers.AddSalaryEmployees).Methods(\"POST\")\n\tempRouter.HandleFunc(\"/api/v1/orgs/employees/salary/structures/add\", controllers.AddSalaryStructure).Methods(\"GET\")\n\tempRouter.HandleFunc(\"/api/v1/orgs/employees/activity/types/add\", controllers.AddActivityType).Methods(\"POST\")\n\tempRouter.HandleFunc(\"/api/v1/orgs/employees/working/hour/add\", controllers.AddWorkingHour).Methods(\"POST\")\n\tempRouter.HandleFunc(\"/api/v1/orgs/employees/operation/add\", controllers.AddOperation).Methods(\"POST\")\n\tempRouter.HandleFunc(\"/api/v1/orgs/employees/workstation/add\", controllers.AddWorkstation).Methods(\"POST\")\n\tempRouter.HandleFunc(\"/api/v1/orgs/employees/timesheet/add\", controllers.AddTimesheet).Methods(\"POST\")\n\tempRouter.HandleFunc(\"/api/v1/orgs/employees/salaryslip/add\", controllers.AddSalarySlip).Methods(\"POST\")\n\tempRouter.HandleFunc(\"/api/v1/orgs/employees/activity/cost/add\", controllers.AddActivityCost).Methods(\"POST\")\n\tempRouter.HandleFunc(\"/api/v1/orgs/employees/salary/components/org/{orgId}\", controllers.GetSalaryComponentsByOrg).Methods(\"GET\")\n\tempRouter.HandleFunc(\"/api/v1/orgs/employees/salary/employees/org/{orgId}\", 
controllers.GetSalaryEmployeesByOrg).Methods(\"GET\")\n\tempRouter.HandleFunc(\"/api/v1/orgs/employees/salary/structures/org/{orgId}\", controllers.GetSalaryStructuresByOrg).Methods(\"GET\")\n\tempRouter.HandleFunc(\"/api/v1/orgs/employees/salary/components/{id}\", controllers.GetSalaryComponent).Methods(\"GET\")\n\tempRouter.HandleFunc(\"/api/v1/orgs/employees/salary/employees/{id}\", controllers.GetSalaryEmployee).Methods(\"GET\")\n\tempRouter.HandleFunc(\"/api/v1/orgs/employees/salary/structures/{id}\", controllers.GetSalaryStructure).Methods(\"GET\")\n\tempRouter.HandleFunc(\"/api/v1/orgs/employees/salary/components/edit/{id}\", controllers.EditSalaryComponentByAdmin).Methods(\"PUT\")\n\tempRouter.HandleFunc(\"/api/v1/orgs/employees/salary/employees/edit/{id}\", controllers.EditSalaryEmployeeByAdmin).Methods(\"PUT\")\n\tempRouter.HandleFunc(\"/api/v1/orgs/employees/salary/structures/edit/{id}\", controllers.EditSalaryStructureByAdmin).Methods(\"GET\")\n\tempRouter.HandleFunc(\"/api/v1/orgs/employees/salary/structures/accounts/{acctId}\", controllers.GetSalaryStructuresByAcct).Methods(\"GET\")\n\tempRouter.HandleFunc(\"/api/v1/orgs/employees/salary/structures/managers/{mgrId}\", controllers.GetSalaryStructuresByMgr).Methods(\"GET\")\n\tempRouter.HandleFunc(\"/api/v1/orgs/employees/activity/types/org/{orgId}\", controllers.GetActivityTypesByOrg).Methods(\"GET\")\n\tempRouter.HandleFunc(\"/api/v1/orgs/employees/activity/types/{id}\", controllers.GetActivityType).Methods(\"GET\")\n\tempRouter.HandleFunc(\"/api/v1/orgs/employees/activity/types/edit/{id}\", controllers.EditActivityTypeByAdmin).Methods(\"PUT\")\n\tempRouter.HandleFunc(\"/api/v1/orgs/employees/operations/org/{orgId}\", controllers.GetOperationsByOrg).Methods(\"GET\")\n\tempRouter.HandleFunc(\"/api/v1/orgs/employees/operations/{id}\", controllers.GetOperation).Methods(\"GET\")\n\tempRouter.HandleFunc(\"/api/v1/orgs/employees/operations/edit/{id}\", 
controllers.EditOperationByAdmin).Methods(\"PUT\")\n\tempRouter.HandleFunc(\"/api/v1/orgs/employees/working/hours/org/{orgId}\", controllers.GetWorkingHoursByOrg).Methods(\"GET\")\n\tempRouter.HandleFunc(\"/api/v1/orgs/employees/working/hours/{id}\", controllers.GetworkingHour).Methods(\"GET\")\n\tempRouter.HandleFunc(\"/api/v1/orgs/employees/working/hours/edit/{id}\", controllers.EditWorkingHourByAdmin).Methods(\"PUT\")\n\tempRouter.HandleFunc(\"/api/v1/orgs/employees/workstations/org/{orgId}\", controllers.GetWorkstationsByOrg).Methods(\"GET\")\n\tempRouter.HandleFunc(\"/api/v1/orgs/employees/workstations/{id}\", controllers.GetWorkstation).Methods(\"GET\")\n\tempRouter.HandleFunc(\"/api/v1/orgs/employees/workstations/edit/{id}\", controllers.EditWorkstationByAdmin).Methods(\"PUT\")\n\tempRouter.HandleFunc(\"/api/v1/orgs/employees/timesheets/org/{orgId}\", controllers.GettTimesheetsByOrg).Methods(\"GET\")\n\tempRouter.HandleFunc(\"/api/v1/orgs/employees/timesheets/employees/{empId}\", controllers.GettTimesheetsByEmp).Methods(\"GET\")\n\tempRouter.HandleFunc(\"/api/v1/orgs/employees/timesheets/managers/{mgrId}\", controllers.GettTimesheetsByMgr).Methods(\"GET\")\n\tempRouter.HandleFunc(\"/api/v1/orgs/employees/timesheets/{id}\", controllers.GetTimesheet).Methods(\"GET\")\n\tempRouter.HandleFunc(\"/api/v1/orgs/employees/timesheets/edit/{id}\", controllers.EditTimesheetByAdmin).Methods(\"PUT\")\n\tempRouter.HandleFunc(\"/api/v1/orgs/employees/salaryslips/org/{orgId}\", controllers.GetSalarySlipsByOrg).Methods(\"GET\")\n\tempRouter.HandleFunc(\"/api/v1/orgs/employees/salaryslips/managers/{mgrId}\", controllers.GetSalarySlipsByMgr).Methods(\"GET\")\n\tempRouter.HandleFunc(\"/api/v1/orgs/employees/salaryslips/employees/{empId}\", controllers.GetSalarySlipsByEmp).Methods(\"GET\")\n\tempRouter.HandleFunc(\"/api/v1/orgs/employees/salaryslips/{id}\", controllers.GetSalarySlip).Methods(\"GET\")\n\tempRouter.HandleFunc(\"/api/v1/orgs/employees/salaryslips/edit/{id}\", 
controllers.EditSalarySlipByAdmin).Methods(\"PUT\")\n\tempRouter.HandleFunc(\"/api/v1/orgs/employees/activity/costs/org/{orgId}\", controllers.GetActivityCostsByOrg).Methods(\"GET\")\n\tempRouter.HandleFunc(\"/api/v1/orgs/employees/activity/costs/type/{typeId}\", controllers.GetActivityCostsByType).Methods(\"GET\")\n\tempRouter.HandleFunc(\"/api/v1/orgs/employees/activity/costs/employees/{empId}\", controllers.GetActivityCostsByEmp).Methods(\"GET\")\n\tempRouter.HandleFunc(\"/api/v1/orgs/employees/activity/costs/{id}\", controllers.GetActivityCost).Methods(\"GET\")\n\tempRouter.HandleFunc(\"/api/v1/orgs/employees/activity/costs/edit/{id}\", controllers.EditActivityCostByAdmin).Methods(\"PUT\")\n\trouter.PathPrefix(\"/api/v1/admin/org/employees/\").Handler(negroni.New(\n\t\tnegroni.HandlerFunc(common.Authorize),\n\t\tnegroni.Wrap(empRouter),\n\t))\n\treturn router\n}", "func AppendUsersAPI(router *mux.Router) {\n\trouter.Handle(\"/\", server.AuthenticateWithUser(server.GetAllUsers)).Methods(\"GET\")\n\trouter.Handle(\"/\", server.AuthenticateWithUser(server.CreateUser)).Methods(\"POST\")\n\trouter.Handle(\"/{userId:[a-f\\\\d]{24}}\", server.AuthenticateWithUser(server.GetUserByID)).Methods(\"GET\")\n\trouter.Handle(\"/{userId:[a-f\\\\d]{24}}\", server.AuthenticateWithUser(server.UpdateUser)).Methods(\"POST\")\n\trouter.Handle(\"/{userId:[a-f\\\\d]{24}}\", server.AuthenticateWithUser(server.DeleteUser)).Methods(\"DELETE\")\n\trouter.Handle(\"/{userId:[a-f\\\\d]{24}}/termofservices\", server.AuthenticateWithUser(server.TermOfServicesAcceptedByUser)).Methods(\"POST\")\n\trouter.Handle(\"/register\", server.Wrap(server.RegisterUser)).Methods(\"POST\")\n\trouter.Handle(\"/technicians\", server.AuthenticateWithUser(server.GetAllTechnicians)).Methods(\"GET\")\n\trouter.Handle(\"/workers\", server.AuthenticateWithUser(server.GetAllWorkers)).Methods(\"GET\")\n\trouter.Handle(\"/cooperative/{cooperativeId:[a-f\\\\d]{24}}\", 
server.AuthenticateWithUser(server.UserMatchRoles(constants.RoleTechnician, constants.RoleAdmin, constants.RoleWorker), server.GetClientsByCooperativeID)).Methods(\"GET\")\n}", "func UsersRoute(route *mux.Route) *mux.Router {\n\tr := route.Subrouter()\n\treturn r\n}", "func (ctl *ManagerController) register() {\n\tmanagers := ctl.router.Group(\"/managers\")\n\n\tmanagers.GET(\"\", ctl.ListManager)\n\n\t// CRUD\n\tmanagers.POST(\"\", ctl.CreateManager)\n\tmanagers.GET(\":id\", ctl.GetManager)\n\tmanagers.PUT(\":id\", ctl.UpdateManager)\n\tmanagers.DELETE(\":id\", ctl.DeleteManager)\n}", "func RegisterRoutes(cliCtx context.CLIContext, r *mux.Router) {\n // this line is used by starport scaffolding # 1\n\t\tr.HandleFunc(\"/policingnetworkcosmos/judgement\", createJudgementHandler(cliCtx)).Methods(\"POST\")\n\t\tr.HandleFunc(\"/policingnetworkcosmos/judgement\", listJudgementHandler(cliCtx, \"policingnetworkcosmos\")).Methods(\"GET\")\n\t\tr.HandleFunc(\"/policingnetworkcosmos/judgement/{key}\", getJudgementHandler(cliCtx, \"policingnetworkcosmos\")).Methods(\"GET\")\n\t\tr.HandleFunc(\"/policingnetworkcosmos/judgement\", setJudgementHandler(cliCtx)).Methods(\"PUT\")\n\t\tr.HandleFunc(\"/policingnetworkcosmos/judgement\", deleteJudgementHandler(cliCtx)).Methods(\"DELETE\")\n\n\t\t\n\t\tr.HandleFunc(\"/policingnetworkcosmos/chargesheet\", createChargesheetHandler(cliCtx)).Methods(\"POST\")\n\t\tr.HandleFunc(\"/policingnetworkcosmos/chargesheet\", listChargesheetHandler(cliCtx, \"policingnetworkcosmos\")).Methods(\"GET\")\n\t\tr.HandleFunc(\"/policingnetworkcosmos/chargesheet/{key}\", getChargesheetHandler(cliCtx, \"policingnetworkcosmos\")).Methods(\"GET\")\n\t\tr.HandleFunc(\"/policingnetworkcosmos/chargesheet\", setChargesheetHandler(cliCtx)).Methods(\"PUT\")\n\t\tr.HandleFunc(\"/policingnetworkcosmos/chargesheet\", deleteChargesheetHandler(cliCtx)).Methods(\"DELETE\")\n\n\t\t\n\t\tr.HandleFunc(\"/policingnetworkcosmos/evidence\", 
createEvidenceHandler(cliCtx)).Methods(\"POST\")\n\t\tr.HandleFunc(\"/policingnetworkcosmos/evidence\", listEvidenceHandler(cliCtx, \"policingnetworkcosmos\")).Methods(\"GET\")\n\t\tr.HandleFunc(\"/policingnetworkcosmos/evidence/{key}\", getEvidenceHandler(cliCtx, \"policingnetworkcosmos\")).Methods(\"GET\")\n\t\tr.HandleFunc(\"/policingnetworkcosmos/evidence\", setEvidenceHandler(cliCtx)).Methods(\"PUT\")\n\t\tr.HandleFunc(\"/policingnetworkcosmos/evidence\", deleteEvidenceHandler(cliCtx)).Methods(\"DELETE\")\n\n\t\t\n\t\tr.HandleFunc(\"/policingnetworkcosmos/investigation\", createInvestigationHandler(cliCtx)).Methods(\"POST\")\n\t\tr.HandleFunc(\"/policingnetworkcosmos/investigation\", listInvestigationHandler(cliCtx, \"policingnetworkcosmos\")).Methods(\"GET\")\n\t\tr.HandleFunc(\"/policingnetworkcosmos/investigation/{key}\", getInvestigationHandler(cliCtx, \"policingnetworkcosmos\")).Methods(\"GET\")\n\t\tr.HandleFunc(\"/policingnetworkcosmos/investigation\", setInvestigationHandler(cliCtx)).Methods(\"PUT\")\n\t\tr.HandleFunc(\"/policingnetworkcosmos/investigation\", deleteInvestigationHandler(cliCtx)).Methods(\"DELETE\")\n\n\t\t\n\t\tr.HandleFunc(\"/policingnetworkcosmos/fir\", createFirHandler(cliCtx)).Methods(\"POST\")\n\t\tr.HandleFunc(\"/policingnetworkcosmos/fir\", listFirHandler(cliCtx, \"policingnetworkcosmos\")).Methods(\"GET\")\n\t\tr.HandleFunc(\"/policingnetworkcosmos/fir/{key}\", getFirHandler(cliCtx, \"policingnetworkcosmos\")).Methods(\"GET\")\n\t\tr.HandleFunc(\"/policingnetworkcosmos/fir\", setFirHandler(cliCtx)).Methods(\"PUT\")\n\t\tr.HandleFunc(\"/policingnetworkcosmos/fir\", deleteFirHandler(cliCtx)).Methods(\"DELETE\")\n\n\t\t\n\t\tr.HandleFunc(\"/policingnetworkcosmos/profile\", createProfileHandler(cliCtx)).Methods(\"POST\")\n\t\tr.HandleFunc(\"/policingnetworkcosmos/profile\", listProfileHandler(cliCtx, \"policingnetworkcosmos\")).Methods(\"GET\")\n\t\tr.HandleFunc(\"/policingnetworkcosmos/profile/{key}\", getProfileHandler(cliCtx, 
\"policingnetworkcosmos\")).Methods(\"GET\")\n\t\tr.HandleFunc(\"/policingnetworkcosmos/profile\", setProfileHandler(cliCtx)).Methods(\"PUT\")\n\t\tr.HandleFunc(\"/policingnetworkcosmos/profile\", deleteProfileHandler(cliCtx)).Methods(\"DELETE\")\n\n\t\t\n}", "func SetupRouter() *gin.Engine {\n\tr := gin.Default()\n\trouter := r.Group(\"/\")\n\t{\n\t\trouter.GET(\"customers\", Controllers.GetCustomers)\n\t\trouter.POST(\"customers\", Controllers.AddCustomerDetails)\n\t\trouter.GET(\"customers/:id\", Controllers.GetCustomerByID)\n\n\t\trouter.GET(\"customers/orders/:id\", Controllers.GetOrdersByCustomerID)\n\t\trouter.PUT(\"customers/:id\", Controllers.UpdateCustomerDetails)\n\t\trouter.DELETE(\"customers/:id\", Controllers.DeleteCustomerDetails)\n\n\t\trouter.GET(\"products\", Controllers.GetProducts)\n\n\t\trouter.POST(\"products\", Controllers.AddProductDetails)\n\t\trouter.GET(\"products/:id\", Controllers.GetProductByID)\n\t\trouter.GET(\"products/orders/:id\", Controllers.GetOrdersByProductID)\n\t\t// update it to patch\n\t\trouter.PUT(\"products/:id\", Controllers.UpdateProductDetails)\n\t\trouter.DELETE(\"products/:id\", Controllers.DeleteProductDetails)\n\n\t\trouter.GET(\"orders\", Controllers.GetOrders)\n\n\t\t//fetch the customer and product\n\t\trouter.POST(\"orders\", Controllers.PlaceOrderDetails)\n\t\trouter.GET(\"orders/:id\", Controllers.GetOrderByID)\n\t\trouter.PUT(\"order/:id\", Controllers.UpdateOrderDetails)\n\t\trouter.DELETE(\"order/:id\", Controllers.DeleteOrderDetails)\n\t}\n\n\treturn r\n}", "func ApplyRoutes(r *gin.RouterGroup) {\n\tpassengerlocation := r.Group(\"/passengerlocation\")\n\t{\n\t\tpassengerlocation.POST(\"/\", create)\n\t\tpassengerlocation.GET(\"/\", list)\n\t\tpassengerlocation.GET(\"/:id\", read)\n\t\tpassengerlocation.DELETE(\"/:id\", remove)\n\t\tpassengerlocation.PATCH(\"/:id\", update)\n\t}\n}", "func NewPeopleAPI(db types.Store) *PeopleAPI {\n\tpeopleAPI := &PeopleAPI{db: db}\n\n\trouter := 
mux.NewRouter()\n\trouter.HandleFunc(\"/people\", peopleAPI.get).Methods(\"GET\")\n\tpeopleAPI.Router = router\n\n\treturn peopleAPI\n}", "func setupRouter() *gin.Engine {\n\tc := gin.Default()\n\tclient := c.Group(\"/apx\")\n\t{\n\t\tclient.GET(\"/user\", controllers.GetAllUser)\n\t\tclient.GET(\"/user/:id\", controllers.GetUserByID)\n\t\tclient.DELETE(\"/user/:id\", controllers.DeleteUserByID)\n\t\tclient.POST(\"/user\", controllers.CreateUser)\n\t\tclient.PUT(\"/user/:id\", controllers.UpdateUserByID)\n\t}\n\treturn c\n}", "func GetPeopleEndpoint(w http.ResponseWriter, r *http.Request) {\n\tw.Header().Set(\"content-type\", \"application/json\")\n\tvar people []Person\n\tcollection := client.Database(\"villagepeople\").Collection(\"people\")\n\tctx, _ := context.WithTimeout(context.Background(), 10*time.Second)\n\tcursor, err := collection.Find(ctx, bson.M{})\n\n\tif err != nil {\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\tw.Write([]byte(`{ \"message\": \"` + err.Error() + `\" }`))\n\t\treturn\n\t}\n\n\tdefer cursor.Close(ctx)\n\tfor cursor.Next(ctx) {\n\t\tvar person Person\n\t\tcursor.Decode(&person)\n\t\tpeople = append(people, person)\n\t}\n\tif err := cursor.Err(); err != nil {\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\tw.Write([]byte(`{ \"message\": \"` + err.Error() + `\" }`))\n\t\treturn\n\t}\n\tjson.NewEncoder(w).Encode(people)\n}", "func registerUserRoutes(api iris.Party) {\n\tapi.Post(\"/users\", service.UserRegister).Name = \"UserRegister\"\n\n\tuserRoutes := api.Party(\"/users\")\n\tuserRoutes.Use(middlewares.CheckLoginStatus)\n\n\tuserRoutes.Patch(\"/password\", service.ChangePassword).Name = \"ChangePassword\"\n\t// User Operation url\n\t// userRoutes.Delete(\"/{userId:int min(1)}\", handler)\n}", "func GetPersonRelations(w http.ResponseWriter, r *http.Request) {\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\tid := r.URL.Query().Get(\"id\")\n\tif id == \"\" {\n\t\thttp.Error(w, \"id parameter is not found\", 
http.StatusBadRequest)\n\t\treturn\n\t}\n\tvar relations []db.Relation\n\trows, err := Db.Query(\"SELECT * FROM relations where p1=?\", id)\n\tif err != nil {\n\t\thttp.Error(w, fmt.Sprintf(\"Error in fetching all Persons %s\", err.Error()), http.StatusInternalServerError)\n\t\treturn\n\t}\n\tfor rows.Next() {\n\t\tvar r db.Relation\n\t\terr := rows.Scan(&r.P1, &r.P2, &r.Name)\n\t\tif err != nil {\n\t\t\thttp.Error(w, fmt.Sprintf(\"Error in fetching all Persons %s\", err.Error()), http.StatusInternalServerError)\n\t\t\treturn\n\t\t}\n\t\trelations = append(relations, r)\n\t}\n\tw.WriteHeader(http.StatusOK)\n\tjson.NewEncoder(w).Encode(relations)\n}", "func SetupRouter() *gin.Engine {\n\tr := gin.Default()\n\tgrp1 := r.Group(\"/user-api\")\n\t{\n\t\tgrp1.GET(\"products\", Controllers.GetUsers)\n\t\tgrp1.POST(\"product\", Controllers.CreateProd)\n\t\tgrp1.GET(\"product/:id\", Controllers.GetProdByID)\n\t\tgrp1.PATCH(\"product/:id\", Controllers.UpdateProd)\n\t\tgrp1.DELETE(\"product/:id\", Controllers.DeleteUser)\n\t\tgrp1.POST(\"order\", Controllers.CreateOrder)\n\t}\n\treturn r\n}", "func SetRouters(m *macaron.Macaron) {\n\tm.Get(\"/\", handler.Home)\n\tm.Group(\"/login\", func() {\n\t\tm.Get(\"\", handler.Login)\n\t\tm.Post(\"\", binding.Bind(handler.SignIn{}), handler.LoginPost)\n\t})\n\tm.Group(\"/category\", func() {\n\t\tm.Get(\"\", handler.GetCategories)\n\t})\n\tm.Group(\"/topic\", func() {\n\t\tm.Get(\"\", handler.GetTopics)\n\t\tm.Get(\"/add\", handler.TopicAdd)\n\t\tm.Post(\"\", binding.Bind(handler.TopicAddForm{}), handler.TopicPost)\n\n\t\tm.Get(\"/:topicId\", handler.TopicView)\n\t\tm.Get(\"/modify\", handler.ModifyTopic)\n\t\t// m.Put(\"\", binding.Bind(handler.TopicModifyForm{}), handler.TopicPut)\n\t\t// m.Delete(\"/:topicId\", handler.DeleteTopic)\n\t})\n\t// m.Group(\"/v1\", func() {\n\t// \tm.Get(\"/\", handler.IndexV1Handler)\n\t// \tm.Group(\"/topic\", func() {\n\t// \t\tm.Get(\"\", handler.GetTopics)\n\t// \t\tm.Post(\"\", 
handler.PostTopic)\n\n\t// \t\tm.Get(\"/:topic\", handler.GetTopic)\n\t// \t\tm.Put(\"/:topic\", handler.PutTopic)\n\t// \t\tm.Delete(\"/:topic\", handler.DeleteTopic)\n\t// \t})\n\t// \tm.Group(\"/categories\", func() {\n\t// \t\tm.Get(\"\", handler.GetCategories)\n\t// \t\tm.Post(\"\", handler.PostCategory)\n\n\t// \t\tm.Get(\"/:category\", handler.GetCategory)\n\t// \t\tm.Put(\"/:category\", handler.PutCategory)\n\t// \t\tm.Delete(\"/:category\", handler.DeleteCategory)\n\t// \t})\n\t// })\n}", "func InitRouter(b *Bloodlines) {\n\tb.router = gin.Default()\n\tb.router.Use(handlers.GetCors())\n\n\tcontent := b.router.Group(\"/api/content\")\n\t{\n\t\tcontent.Use(b.content.GetJWT())\n\t\tcontent.Use(b.content.Time())\n\t\tcontent.POST(\"\", b.content.New)\n\t\tcontent.GET(\"\", b.content.ViewAll)\n\t\tcontent.GET(\"/:contentId\", b.content.View)\n\t\tcontent.PUT(\"/:contentId\", b.content.Update)\n\t\tcontent.DELETE(\"/:contentId\", b.content.Deactivate)\n\t}\n\n\treceipt := b.router.Group(\"/api/receipt\")\n\t{\n\t\treceipt.Use(b.receipt.GetJWT())\n\t\treceipt.Use(b.receipt.Time())\n\t\treceipt.GET(\"\", b.receipt.ViewAll)\n\t\treceipt.POST(\"/send\", b.receipt.Send)\n\t\treceipt.GET(\"/:receiptId\", b.receipt.View)\n\t}\n\n\tjob := b.router.Group(\"/api/job\")\n\t{\n\t\tjob.Use(b.job.GetJWT())\n\t\tjob.Use(b.job.Time())\n\t\tjob.GET(\"\", b.job.ViewAll)\n\t\tjob.POST(\"\", b.job.New)\n\t\tjob.GET(\"/:jobId\", b.job.View)\n\t\tjob.PUT(\"/:jobId\", b.job.Update)\n\t\tjob.DELETE(\"/:jobId\", b.job.Stop)\n\t}\n\n\ttrigger := b.router.Group(\"/api/trigger\")\n\t{\n\t\ttrigger.Use(b.trigger.GetJWT())\n\t\ttrigger.Use(b.trigger.Time())\n\t\ttrigger.POST(\"\", b.trigger.New)\n\t\ttrigger.GET(\"\", b.trigger.ViewAll)\n\t\ttrigger.GET(\"/:key\", b.trigger.View)\n\t\ttrigger.PUT(\"/:key\", b.trigger.Update)\n\t\ttrigger.DELETE(\"/:key\", b.trigger.Remove)\n\t\ttrigger.POST(\"/:key/activate\", b.trigger.Activate)\n\t}\n\n\tpref := 
b.router.Group(\"/api/preference\")\n\t{\n\t\tpref.Use(b.preference.Time())\n\t\tpref.Use(b.preference.GetJWT())\n\t\tpref.POST(\"\", b.preference.New)\n\t\tpref.GET(\"/:userId\", b.preference.View)\n\t\tpref.PATCH(\"/:userId\", b.preference.Update)\n\t\tpref.DELETE(\"/:userId\", b.preference.Deactivate)\n\t}\n\n\tfor _, w := range b.workers {\n\t\tw.Consume()\n\t}\n}", "func LoadRoute() *gin.Engine {\n\tr := gin.Default()\n\n\t// create controller\n\tvar (\n\t\tsqliteRepo = db.NewSQLiteRepository(\"./dbsocmed.db\")\n\t\trelationController = relation.NewController(sqliteRepo)\n\t)\n\n\tr.Use(Cors())\n\tv1 := r.Group(\"api/v1\")\n\t{\n\t\tv1.POST(\"/friendconnection\", relationController.PostFriendConnection)\n\t\tv1.POST(\"/friendlist\", relationController.PostFriendList)\n\t\tv1.POST(\"/friendcommonlist\", relationController.PostFriendCommonList)\n\t\tv1.POST(\"/friendsubscribe\", relationController.PostFriendSubscribe)\n\t\tv1.POST(\"/friendblock\", relationController.PostFriendBlock)\n\t\tv1.POST(\"/friendupdates\", relationController.PostFriendUpdates)\n\t}\n\n\treturn r\n}", "func Router(e *echo.Echo) {\n\th := NewHandler()\n\tg := e.Group(\"/admin\")\n\t{\n\t\tg.GET(\"/\", h.IndexPage)\n\t\tg.GET(\"/list\", h.ListPage)\n\t\tg.GET(\"/customer/list\", h.CustomerListPage)\n\t\tg.GET(\"/customer/chat/:customerID\", h.CustomerChatPage)\n\t\t//g.GET(\"/products/:productId\", h.ProductPage)\n\t}\n}", "func (ctl *PatientrecordController) register() {\n\tpatientrecords := ctl.router.Group(\"/patientrecords\")\n \n\tpatientrecords.GET(\"\", ctl.ListPatientrecord)\n \n\t// CRUD\n\tpatientrecords.POST(\"\", ctl.CreatePatientrecord)\n\tpatientrecords.GET(\":id\", ctl.GetPatientrecord)\n\tpatientrecords.PUT(\":id\", ctl.UpdatePatientrecord)\n\tpatientrecords.DELETE(\":id\", ctl.DeletePatientrecord)\n }", "func (r *ManagerResource) Routes() http.Handler {\n\trouter := chi.NewRouter()\n\t// router.Use(render.SetContentType(render.ContentTypeJSON))\n\n\tcors := 
cors.New(cors.Options{\n\t\t// AllowedOrigins: []string{\"https://foo.com\"}, // Use this to allow specific origin hosts\n\t\tAllowedOrigins: []string{\"*\"},\n\t\t// AllowOriginFunc: func(r *http.Request, origin string) bool { return true },\n\t\tAllowedMethods: []string{\"GET\", \"POST\", \"PUT\", \"DELETE\", \"OPTIONS\"},\n\t\tAllowedHeaders: []string{\"Accept\", \"Authorization\", \"Content-Type\", \"X-CSRF-Token\"},\n\t\tExposedHeaders: []string{\"Link\"},\n\t\tAllowCredentials: true,\n\t\tMaxAge: 300, // Maximum value not ignored by any of major browsers\n\t})\n\n\trouter.Use(cors.Handler)\n\n\tprivKey, err := crypto.HexToECDSA(r.config.Keys.Admin)\n\tif err != nil {\n\t\tlog.WithError(err).Error(\"Failed to load key\")\n\t}\n\n\tec, err := ethereum.CreateEthClient(r.config.Infura.URL + r.config.Infura.Key)\n\tif err != nil {\n\t\tlog.WithError(err).Error(\"Failed to initialize ethereum client\")\n\t}\n\n\tauth := bind.NewKeyedTransactor(privKey)\n\n\tvaultManagerContract, err := vaultmanager.NewManager(ec, r.config.Contracts.VaultManagerAddress, auth)\n\tif err != nil {\n\t\tlog.WithError(err).Error(\"Failed to create vault manager\")\n\t}\n\n\trouter.Get(\"/get-all-vaults\", r.getAllAddresses(vaultManagerContract, ec))\n\trouter.Post(\"/add-vault\", r.addNewVault(vaultManagerContract))\n\n\tlog.WithFields(log.Fields{\"Contract\": \"Vault Manager\", \"Address\": r.config.Contracts.VaultManagerAddress}).Info(\"Created manager abstraction\")\n\n\treturn router\n}", "func Route(router *gin.Engine) {\n\tnpc := router.Group(\"/npcs\")\n\t{\n\t\tnpc.DELETE(\"/:id\", delete)\n\t\tnpc.GET(\"/\", getMany)\n\t\tnpc.GET(\"/:id\", getOne)\n\t\tnpc.POST(\"/\", create)\n\t\tnpc.POST(\"/generate\", generate)\n\t}\n}", "func DeletePerson(w http.ResponseWriter, req *http.Request) {\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\tparams := mux.Vars(req)\n\tid, _ := strconv.Atoi(params[\"id\"])\n\tfor index, person := range models.People {\n\t\tif person.ID == id 
{\n\t\t\tmodels.People = append(models.People[:index], models.People[index+1:]...)\n\t\t}\n\t}\n}", "func GetPerson(w http.ResponseWriter, r *http.Request) {\n\tparams := mux.Vars(r)\n\tfor _, item := range people {\n\t\tif item.ID == params[\"id\"] {\n\t\t\tjson.NewEncoder(w).Encode(item)\n\t\t\treturn\n\t\t}\n\t}\n\tw.Header().Set(\"Content-Type\", \"application/json; charset=UTF-8\")\n\tjson.NewEncoder(w).Encode(&model.Person{})\n\t//r = mux.Vars(r)\n}", "func (a *App) initializeRoutes() {\n\t// Root\n\ta.Router.HandleFunc(\"/\", authn(a.getRoot)).Methods(\"GET\")\n\t// AuthZ and AuthN\n\ta.Router.HandleFunc(\"/login\", a.getLogin).Methods(\"GET\")\n\ta.Router.HandleFunc(\"/login\", a.processLogin).Methods(\"POST\")\n\ta.Router.HandleFunc(\"/logout\", a.processLogout).Methods(\"GET\")\n\t// Images and stuff\n\ta.Router.PathPrefix(\"/resources/\").Handler(http.StripPrefix(\"/resources/\", http.FileServer(http.Dir(\"./resources/\"))))\n\t// Contacts\n\ta.Router.HandleFunc(\"/contacts\", authn(a.getContacts)).Methods(\"GET\")\n\ta.Router.HandleFunc(\"/contact/{id:[0-9]+}\", authn(a.editContact)).Methods(\"GET\")\n\ta.Router.HandleFunc(\"/contact/create\", authn(a.createContact)).Methods(\"GET\")\n\ta.Router.HandleFunc(\"/contact/save\", authn(a.saveContact)).Methods(\"POST\")\n\ta.Router.HandleFunc(\"/contact/delete/{id:[0-9]+}\", authn(a.deleteContact)).Methods(\"GET\")\n\t// Customers\n\ta.Router.HandleFunc(\"/customers\", authn(a.getCustomers)).Methods(\"GET\")\n\ta.Router.HandleFunc(\"/customer/{id:[0-9]+}\", authn(a.editCustomer)).Methods(\"GET\")\n\ta.Router.HandleFunc(\"/customer/create\", authn(a.createCustomer)).Methods(\"GET\")\n\ta.Router.HandleFunc(\"/customer/save\", authn(a.saveCustomer)).Methods(\"POST\")\n\ta.Router.HandleFunc(\"/customer/delete/{id:[0-9]+}\", a.deleteCustomer).Methods(\"GET\")\n\t// Projects\n\ta.Router.HandleFunc(\"/projects\", authn(a.getProjects)).Methods(\"GET\")\n\ta.Router.HandleFunc(\"/project/{id:[0-9]+}\", 
authn(a.editProject)).Methods(\"GET\")\n\ta.Router.HandleFunc(\"/project/create\", authn(a.createProject)).Methods(\"GET\")\n\ta.Router.HandleFunc(\"/project/save\", authn(a.saveProject)).Methods(\"POST\")\n\ta.Router.HandleFunc(\"/project/delete/{id:[0-9]+}\", authn(a.deleteProject)).Methods(\"GET\")\n\t// Dashboard\n\ta.Router.HandleFunc(\"/dashboard\", authn(a.getDashboard)).Methods(\"GET\")\n\ta.Router.HandleFunc(\"/dashboard/notifications\", authn(a.getDashboardNotifications)).Methods(\"GET\")\n\ta.Router.HandleFunc(\"/dashboard/numberofprojects\", authn(a.getDashboardNumberOfProjects)).Methods(\"GET\")\n\ta.Router.HandleFunc(\"/dashboard/numberofhappy\", authn(a.getDashboardHappyCustomer)).Methods(\"GET\")\n\ta.Router.HandleFunc(\"/dashboard/completedtask\", authn(a.getDashboardCompletedTask)).Methods(\"GET\")\n\ta.Router.HandleFunc(\"/dashboard/resources\", authn(a.getDashboardResources)).Methods(\"GET\")\n\ta.Router.HandleFunc(\"/dashboard/tasks\", authn(a.getDashboardProjectTasksForUser)).Methods(\"GET\")\n\t// System Notification\n\ta.Router.HandleFunc(\"/notifications\", authn(a.getSystemNotifications)).Methods(\"GET\")\n\ta.Router.HandleFunc(\"/notification/{id:[0-9]+}\", authn(a.editSystemNotification)).Methods(\"GET\")\n\ta.Router.HandleFunc(\"/notification/create\", authn(a.createSystemNotification)).Methods(\"GET\")\n\ta.Router.HandleFunc(\"/notification/save\", authn(a.saveSystemNotification)).Methods(\"POST\")\n\ta.Router.HandleFunc(\"/notification/delete/{id:[0-9]+}\", authn(a.deleteSystemNotification)).Methods(\"GET\")\n\t// Internal Resources\n\ta.Router.HandleFunc(\"/resources\", authn(a.getResources)).Methods(\"GET\")\n\ta.Router.HandleFunc(\"/resource/{id:[0-9]+}\", authn(a.editResource)).Methods(\"GET\")\n\ta.Router.HandleFunc(\"/resource/create\", authn(a.createResource)).Methods(\"GET\")\n\ta.Router.HandleFunc(\"/resource/save\", authn(a.saveResource)).Methods(\"POST\")\n\ta.Router.HandleFunc(\"/resource/delete/{id:[0-9]+}\", 
authn(a.deleteResource)).Methods(\"GET\")\n\t// Project Task\n\ta.Router.HandleFunc(\"/tasks\", authn(a.getProjectTasks)).Methods(\"GET\")\n\ta.Router.HandleFunc(\"/task/{id:[0-9]+}\", authn(a.editProjectTask)).Methods(\"GET\")\n\ta.Router.HandleFunc(\"/task/create\", authn(a.createProjectTask)).Methods(\"GET\")\n\ta.Router.HandleFunc(\"/task/save\", authn(a.saveProjectTask)).Methods(\"POST\")\n\ta.Router.HandleFunc(\"/task/delete/{id:[0-9]+}\", authn(a.deleteProjectTask)).Methods(\"GET\")\n\ta.Router.HandleFunc(\"/task/attachment\", authn(a.getAttachment)).Methods(\"GET\")\n\ta.Router.HandleFunc(\"/mytask/{id:[0-9]+}\", authn(a.getUserTasks)).Methods(\"GET\")\n\t// Settings\n\ta.Router.HandleFunc(\"/settings\", authn(a.getSettings)).Methods(\"GET\")\n\t// System Backup\n\ta.Router.HandleFunc(\"/backup\", authn(a.getBackup)).Methods(\"GET\")\n\ta.Router.HandleFunc(\"/backup/start\", authn(a.startBackup)).Methods(\"POST\")\n\t// Application Users\n\ta.Router.HandleFunc(\"/users\", authn(a.getUsers)).Methods(\"GET\")\n\ta.Router.HandleFunc(\"/user/create\", authn(a.createUser)).Methods(\"GET\")\n\ta.Router.HandleFunc(\"/user/save\", authn(a.saveUser)).Methods(\"POST\")\n\ta.Router.HandleFunc(\"/user/{id:[0-9]+}\", authn(a.editUser)).Methods(\"GET\")\n\ta.Router.HandleFunc(\"/user/delete/{id:[0-9]+}\", authn(a.deleteUser)).Methods(\"GET\")\n\t// Static Files\n\ta.Router.PathPrefix(\"/public/\").Handler(http.StripPrefix(\"/public/\", http.FileServer(rice.MustFindBox(\"public\").HTTPBox())))\n}", "func (s *Server) setupRoutes(devMode bool) {\n\ts.Router.NoRoute(NoRoute)\n\tv1 := s.Router.Group(\"/api/v1\")\n\tv1.GET(\"/healthcheck\", s.Healthcheck)\n\n\tv1.GET(\"/articles\", s.GetArticles)\n\tv1.POST(\"/articles\", s.AddArticle)\n\tv1.POST(\"/articles:batch\", s.AddArticles)\n\n\t// Profiler\n\t// URL: https://<IP>:<PORT>/debug/pprof/\n\tif devMode {\n\t\ts.Logger.Info(\"activating pprof (devmode on)\", log.Field(\"type\", 
\"debug\"))\n\t\tpprof.Register(s.Router)\n\t}\n}", "func SetupRoutes(r chi.Router, s *Service) {\n\tr.Route(\"/api/pet\", func(r chi.Router) {\n\t\tr.Post(\"/\", s.PostPet)\n\t\tr.Route(\"/{id}\", func(r chi.Router) {\n\t\t\tr.Use(urlParamContextSaverMiddleware(\"id\", idKey))\n\t\t\tr.Get(\"/\", s.GetPet)\n\t\t\tr.Put(\"/\", s.PutPet)\n\t\t\tr.Delete(\"/\", s.DeletePet)\n\t\t})\n\t})\n}", "func initializeRoutes(port string) {\n\t/*\n\t\tAll the urls will be mentioned and configured.\n\t*/\n\t/*\n\t\turl : /test\n\t*/\n\tr.GET(\"/test\", showHomePage)\n\t/*\n\t\turl : /\n\t*/\n\tr.GET(\"/\", showHomePage)\n\t/*\n\t\tDefining group route for users\n\t*/\n\tuserRoutes := r.Group(\"/user\")\n\t{\n\t\t/*\n\t\t\turl : /user/\n\t\t*/\n\t\tuserRoutes.GET(\"/\", showHomePage)\n\t\t/*\n\t\t\turl : /user/login (method is get)\n\t\t*/\n\t\tuserRoutes.GET(\"/login\", showLoginPage)\n\t\t/*\n\t\t\turl : /user/login (method is post)\n\t\t*/\n\t\tuserRoutes.POST(\"/login\", performLogin)\n\t\t/*\n\t\t\turl : /user/jsonresponse\n\t\t*/\n\t\tuserRoutes.GET(\"/jsonresponse\", jsonresponse)\n\t}\n\tfmt.Println(\"-------Starting server-------------\")\n}", "func UserCabV1Routes(router *gin.Engine) {\n\n loginRouter := router.Group(\"/v1/new\")\n {\n //for new user login\n loginRouter.POST(\"login\", newUserLogin)\n }\n\n\tuserRouter := router.Group(\"/v1/user\")\n\t{\n userRouter.Use(ValidateUserRequestAndFetchUser())\n\n //user can book a cab\n userRouter.POST(\"/book\", bookCab)\n\n //user's all past rides\n userRouter.GET(\"/rides\", getUserRides)\n }\n\n}", "func mapUrls() {\n\t// Ping Test\n\trouter.GET(\"/gin/ping\", ping.Ping)\n\n\t// User Routes\n\trouter.GET(\"/gin/user/:user_id\", users.Get)\n\trouter.GET(\"/gin/internal/users/search\", users.Search)\n\trouter.POST(\"/gin/user/new\", users.Create)\n\trouter.PUT(\"/gin/user/:user_id\", users.Update)\n\trouter.PATCH(\"/gin/user/:user_id\", users.Update)\n\trouter.DELETE(\"/gin/user/:user_id\", users.Delete)\n}", "func 
SetupRouter() *mux.Router {\n\trouter := mux.NewRouter()\n\n\tcontentManager, err := services.NewContentManagerService()\n\tif err != nil {\n\t\tlog.Fatalf(\"Creating the ContentManagerService Failed %s\", err)\n\t}\n\n\trouter.HandleFunc(\"/articles\", contentManager.CreateAnArticle).Methods(\"POST\")\n\trouter.HandleFunc(\"/articles/{uuid}\", contentManager.RetrieveAnArticle).Methods(\"GET\")\n\trouter.HandleFunc(\"/articles/{uuid}\", contentManager.DeleteAnArticle).Methods(\"DELETE\")\n\trouter.HandleFunc(\"/articles/{uuid}\", contentManager.UpdateAnArticle).Methods(\"PUT\")\n\n\trouter.HandleFunc(\"/articles/search-by-topic/{topic}\", contentManager.SearchArticlesByTopic).Methods(\"GET\")\n\trouter.HandleFunc(\"/articles/search-by-status/{status}\", contentManager.SearchArticlesByStatus).Methods(\"GET\")\n\trouter.HandleFunc(\"/articles\", contentManager.ListAllArticles).Methods(\"GET\")\n\n\trouter.HandleFunc(\"/articles/tags\", contentManager.CreateATag).Methods(\"POST\")\n\trouter.HandleFunc(\"/articles/tags/{uuid}\", contentManager.RenameATag).Methods(\"PUT\")\n\trouter.HandleFunc(\"/articles/tags/{uuid}\", contentManager.RetrieveATag).Methods(\"GET\")\n\trouter.HandleFunc(\"/articles/all/tags\", contentManager.ListAllTags).Methods(\"GET\")\n\n\t/* attach a tag to an article, remove a tag from an article\n\trouter.HandleFunc(\"/articles/{article-uuid}/tags/{tag-uuid}\", contentManager.AttachTagToArticle).Methods(\"POST\")\n\trouter.HandleFunc(\"/articles/{article-uuid}/tags/{tag-uuid}\", contentManager.RemoveTagFromArticle).Methods(\"DELETE\")\n\t*/\n\n\treturn router\n}", "func Router() chi.Router {\n\tr := chi.NewRouter()\n\n\tentity := \"claims\"\n\n\tr.With(util.CheckKetoPolicy(entity, \"get\")).Get(\"/\", list)\n\tr.With(util.CheckKetoPolicy(entity, \"create\")).Post(\"/\", create)\n\n\tr.Route(\"/{claim_id}\", func(r chi.Router) {\n\t\tr.With(util.CheckKetoPolicy(entity, \"get\")).Get(\"/\", details)\n\t\tr.With(util.CheckKetoPolicy(entity, 
\"update\")).Put(\"/\", update)\n\t\tr.With(util.CheckKetoPolicy(entity, \"delete\")).Delete(\"/\", delete)\n\t})\n\n\treturn r\n\n}", "func configureRoutes() *mux.Router {\n\t// establish router\n\trouter := mux.NewRouter()\n\n\t// add routes\n\t// Basic service endpoints\n\trouter.HandleFunc(\"/\", home).Methods(\"GET\", \"OPTIONS\", \"POST\", \"PUT\", \"DELETE\")\n\trouter.HandleFunc(\"/ping\", ping).Methods(\"GET\", \"OPTIONS\")\n\trouter.HandleFunc(\"/register\", register).Methods(\"POST\", \"OPTIONS\")\n\trouter.HandleFunc(\"/auth\", auth).Methods(\"GET\", \"OPTIONS\")\n\n\t// Basic image creation endpoint\n\trouter.HandleFunc(\"/image\", addImage).Methods(\"POST\", \"OPTIONS\")\n\n\t// Image data endpoints\n\trouter.HandleFunc(\"/image/{uid:[0-9]+}/{fileId}\", getImage).Methods(\"GET\", \"OPTIONS\")\n\trouter.HandleFunc(\"/image/{uid:[0-9]+}/{fileId}\", delImage).Methods(\"DELETE\", \"OPTIONS\")\n\trouter.HandleFunc(\"/image/{uid:[0-9]+}/{fileId}\", updateImage).Methods(\"PUT\", \"OPTIONS\")\n\n\t// Image meta query methods\n\trouter.HandleFunc(\"/image/meta?\", imageMetaRequest).Queries(\n\t\t\"page\", \"{page:[0-9]+}\",\n\t\t\"id\", \"{id:[0-9]+}\",\n\t\t\"uid\", \"{uid:[0-9]+}\",\n\t\t\"title\", \"{title}\",\n\t\t\"encoding\", \"{encoding}\",\n\t\t\"shareable\", \"{shareable)\").Methods(\"GET\")\n\trouter.HandleFunc(\"/image/meta\", imageMetaRequest).Methods(\"GET\", \"OPTIONS\")\n\n\treturn router\n}", "func HandlePeople(w http.ResponseWriter, r *http.Request) {\n\tdb := PGSQLConnect()\n\n\trows := db.Query(FetchUsers, []interface{}{})\n\n\tvar response Response\n\tvar users []User\n\n\tfor rows.Next() {\n\t\tvar user User\n\n\t\terr := rows.Scan(&user.ID, &user.Name, &user.Mail, &user.Phone,\n\t\t\t&user.Birth,\n\t\t\t&user.Type)\n\n\t\tswitch err {\n\t\tcase sql.ErrNoRows:\n\t\t\tresponse.Status = StatusError\n\n\t\t\terrorMessage := fmt.Sprintf(\"Database request error, \"+\n\t\t\t\t\"notify the developer about %v.\", err.Error())\n\n\t\t\te := 
Error{\n\t\t\t\tCode: ErrorDatabaseResponse,\n\t\t\t\tDescription: errorMessage,\n\t\t\t}\n\n\t\t\tresponse.Content = e\n\t\t\tbreak\n\t\tcase nil:\n\t\t\tresponse.Status = StatusOk\n\n\t\t\tusers = append(users, user)\n\n\t\t\tresponse.Content = users\n\t\t}\n\t}\n\tmessage, err := json.Marshal(response)\n\n\tif err != nil {\n\t\tReport500(&w,\n\t\t\tfmt.Sprintf(\"[!] Error encoding data to json. Reason %v\", err))\n\t}\n\n\tw.Write(message)\n}", "func SvcPeople(w http.ResponseWriter, r *http.Request, d *ServiceData) {\n\tconst funcname = \"SvcPeople\"\n\tvar err error\n\tlib.Console(\"Entered %s\\n\", funcname)\n\t// lib.Console(\"Request: %s: BID = %d, ID = %d\\n\", d.wsSearchReq.Cmd, d.BID, d.ID)\n\n\tswitch d.wsSearchReq.Cmd {\n\tcase \"get\":\n\t\tif d.ID, err = SvcExtractIDFromURI(r.RequestURI, \"ID\", 3, w); err != nil {\n\t\t\tSvcErrorReturn(w, err, funcname)\n\t\t\treturn\n\t\t}\n\t\tgetPerson(w, r, d)\n\t\tbreak\n\tcase \"getlist\":\n\t\tgetPersonList(w, r, d)\n\t\tbreak\n\n\t// case \"save\":\n\t// \tsavePerson(w, r, d)\n\t// \tbreak\n\t// case \"delete\":\n\t// \tdeletePerson(w, r, d)\n\t// \tbreak\n\n\tdefault:\n\t\terr = fmt.Errorf(\"Unhandled command: %s\", d.wsSearchReq.Cmd)\n\t\tSvcErrorReturn(w, err, funcname)\n\t\treturn\n\t}\n}", "func assignRoutes(router *mux.Router) *mux.Router {\n\tvar logger log.Logger\n\t{\n\t\tlogger = log.NewLogfmtLogger(os.Stderr)\n\t\tlogger = log.NewSyncLogger(logger)\n\t\tlogger = level.NewFilter(logger, level.AllowDebug())\n\t\tlogger = log.With(logger,\n\t\t\t\"svc:\", \"pilot-management\",\n\t\t\t\"ts:\", log.DefaultTimestampUTC,\n\t\t\t\"caller:\", log.DefaultCaller,\n\t\t)\n\t}\n\n\tlevel.Info(logger).Log(\"msg\", \"service started\")\n\tdefer level.Info(logger).Log(\"msg\", \"service ended\")\n\n\tservice := impl.MakeServiceImpl(logger)\n\n\toptions := []httpTransport.ServerOption{\n\t\thttpTransport.ServerErrorEncoder(EncodeErrorResponse),\n\t\thttpTransport.ServerErrorLogger(logger),\n\t}\n\n\tstatusHandler 
:= httpTransport.NewServer(\n\t\tMakeStatusEndpoint(service),\n\t\tDecodeStatusRequest,\n\t\tEncodeResponse,\n\t)\n\n\tlistPilotsHandler := httpTransport.NewServer(\n\t\tMakeListPilotsEndpoint(service),\n\t\tDecodeListPilotsRequest,\n\t\tEncodeResponse,\n\t\toptions...,\n\t)\n\n\tgetPilotHandler := httpTransport.NewServer(\n\t\tMakeGetPilotEndpoint(service),\n\t\tDecodeGetPilotRequest,\n\t\tEncodeResponse,\n\t\toptions...,\n\t)\n\n\tCreatePilotHandler := httpTransport.NewServer(\n\t\tMakeCreatePilotEndpoint(service),\n\t\tDecodeCreatePilotRequest,\n\t\tEncodeResponse,\n\t\toptions...,\n\t)\n\n\tUpdatePilotHandler := httpTransport.NewServer(\n\t\tMakeUpdatePilotEndpoint(service),\n\t\tDecodeUpdatePilotRequest,\n\t\tEncodeResponse,\n\t\toptions...,\n\t)\n\n\tDeletePilotHandler := httpTransport.NewServer(\n\t\tMakeDeletePilotEndpoint(service),\n\t\tDecodeDeletePilotRequest,\n\t\tEncodeResponse,\n\t\toptions...,\n\t)\n\n\tChangePilotStatusHandler := httpTransport.NewServer(\n\t\tMakeChangePilotStatusEndpoint(service),\n\t\tDecodeChangePilotStatusRequest,\n\t\tEncodeResponse,\n\t\toptions...,\n\t)\n\n\trouter.Handle(\"/supply/pilots/status\", statusHandler).Methods(\"GET\")\n\trouter.Handle(\"/supply/pilots\", listPilotsHandler).Methods(\"GET\")\n\trouter.Handle(\"/supply/pilots/{id}\", getPilotHandler).Methods(\"GET\")\n\trouter.Handle(\"/supply/pilots\", CreatePilotHandler).Methods(\"POST\")\n\trouter.Handle(\"/supply/pilots/{id}\", UpdatePilotHandler).Methods(\"PATCH\")\n\trouter.Handle(\"/supply/pilots/{id}\", DeletePilotHandler).Methods(\"DELETE\")\n\trouter.Handle(\"/supply/pilots/{id}/{status}\", ChangePilotStatusHandler).Methods(\"PATCH\")\n\treturn router\n}", "func createPerson(w http.ResponseWriter, r *http.Request, ps httprouter.Params) {\n\tnewID := ps.ByName(\"id\")\n\tvar newPerson person\n\t_ = json.NewDecoder(r.Body).Decode(&newPerson)\n\tnewPerson.ID = string(newID)\n\tpeople = append(people, newPerson)\n\tjson.NewEncoder(w).Encode(people)\n}", "func 
RegisterStaffEndpoints(router *gin.RouterGroup) {\n\tstaffRouter := router.Group(\"\")\n\t// Annonymous Routes\n\tstaffRouter.POST(\"/login\", staffLogin)\n\n\t// Admin Routes\n\tstaffRouter.Use(middlewares.LoginRequire())\n\t{\n\t\tstaffRouter.GET(\"\", getStaffList)\n\t\tstaffRouter.GET(\"/admins\", getAdminList)\n\t\tstaffRouter.GET(\"/check/:uname/station/:sID\", checkIfUsernameExist)\n\t\tstaffRouter.GET(\"/roles\", getStaffRoleList)\n\t\tstaffRouter.GET(\"/multistation/station/:sID\", getUserByStation)\n\t\tstaffRouter.GET(\"/multistation/room/:rID\", getUserByRoom)\n\t}\n\n\tstaffRouter.Use(middlewares.SuperAdminRequired())\n\t{\n\t\tstaffRouter.GET(\"/info/:staffID\", getStaffInfomation)\n\t\tstaffRouter.POST(\"/create\", createStaff)\n\t\tstaffRouter.POST(\"/upload/image\", uploadImage)\n\t\tstaffRouter.POST(\"/reorder\", reorderStaff)\n\t\tstaffRouter.POST(\"/edit/:staffID\", editStaff)\n\t}\n}", "func initRoutes() {\r\n\trouter.Use(setUserStatus())\r\n\r\n\trouter.GET(\"/contact\", showContactForm)\r\n\trouter.POST(\"/contact\", contactPost)\r\n\trouter.GET(\"/admin\", ensureLoggedIn(), func(c *gin.Context) {\r\n\t\tc.Redirect(307, \"/admin/job_openings\")\r\n\t})\r\n\trouter.GET(\"/test\", func(c *gin.Context) {\r\n\t\tc.HTML(200, \"test.html\", nil)\r\n\t})\r\n\r\n\t// Admin Handler\r\n\tadminRoutes := router.Group(\"/admin\")\r\n\t{\r\n\t\t// Login-Logut\r\n\t\tadminRoutes.GET(\"/login\", ensureNotLoggedIn(), showLoginPage)\r\n\t\tadminRoutes.GET(\"/logout\", ensureLoggedIn(), logout)\r\n\r\n\t\t// JOB-Details\r\n\t\tadminRoutes.POST(\"/job_openings\", ensureNotLoggedIn(), performLogin)\r\n\t\tadminRoutes.GET(\"/job_openings\", ensureLoggedIn(), showIndexPage)\r\n\r\n\t\tadminRoutes.GET(\"/add_new_job\", ensureLoggedIn(), showNewJobPage)\r\n\t\tadminRoutes.POST(\"/add_new_job\", ensureLoggedIn(), addNewJob)\r\n\t\tadminRoutes.GET(\"/edit\", ensureLoggedIn(), showEditPage)\r\n\t\tadminRoutes.POST(\"/edit\", ensureLoggedIn(), 
editPage)\r\n\t\tadminRoutes.GET(\"/delete/:id\", ensureLoggedIn(), deleteJobList)\r\n\r\n\t\t// Blog-Details\r\n\t\tadminRoutes.GET(\"/blogs\", ensureLoggedIn(), showBlogs)\r\n\t\tadminRoutes.GET(\"/add_blog\", ensureLoggedIn(), showAddBlogPage)\r\n\t\tadminRoutes.POST(\"/add_blog\", ensureLoggedIn(), AddBlogPage)\r\n\t\tadminRoutes.GET(\"/editBlog\", ensureLoggedIn(), showEditBlogPage)\r\n\t\tadminRoutes.POST(\"/editBlog\", ensureLoggedIn(), editBlog)\r\n\t\tadminRoutes.GET(\"/blogs/delete/:id\", ensureLoggedIn(), deleteBlog)\r\n\r\n\t\t// Category\r\n\t\tadminRoutes.GET(\"/categories\", ensureLoggedIn(), showCategories)\r\n\t\tadminRoutes.POST(\"/categories\", ensureLoggedIn(), addCategory)\r\n\t\tadminRoutes.POST(\"/categorieEdit/:id\", ensureLoggedIn(), editCategory)\r\n\t\tadminRoutes.GET(\"/categories/delete/:id\", ensureLoggedIn(), deleteCategory)\r\n\r\n\t\t// Tag\r\n\t\tadminRoutes.GET(\"/tags\", ensureLoggedIn(), showTags)\r\n\t\tadminRoutes.POST(\"/tags\", ensureLoggedIn(), addTag)\r\n\t\tadminRoutes.POST(\"/tags/edit/:id\", ensureLoggedIn(), editTag)\r\n\t\tadminRoutes.GET(\"/tags/delete/:id\", ensureLoggedIn(), deleteTag)\r\n\t}\r\n}", "func (ctl *RentController) register() {\n\trents := ctl.router.Group(\"/rents\")\n\n\trents.GET(\"\", ctl.ListRent)\n\n\t// CRUD\n\trents.POST(\"\", ctl.CreateRent)\n\trents.GET(\":id\", ctl.GetRent)\n\trents.PUT(\":id\", ctl.UpdateRent)\n\trents.DELETE(\":id\", ctl.DeleteRent)\n}", "func (ctl *ResearchController) register() {\n\tresearches := ctl.router.Group(\"/researches\")\n\tresearches.GET(\"\", ctl.ListResearch)\n\n searchresearchs := ctl.router.Group(\"/searchresearchs\")\n\tsearchresearchs.GET(\"\",ctl.GetSearchResearch)\n\n\t// CRUD\n\tresearches.POST(\"\", ctl.CreateResearch)\n\tresearches.GET(\":id\", ctl.GetResearch)\n\tresearches.PUT(\":id\", ctl.UpdateResearch)\n\tresearches.DELETE(\":id\", ctl.DeleteResearch)\n}", "func createRoutes(pHandler pingInterface.Handler, cHandler customerDomain.Handler) {\n\tv1 
:= router.Group(baseRouteV1)\n\t{\n\t\tv1.GET(\"ping\", pHandler.Ping)\n\t\tv1.POST(\"customers\", cHandler.Create)\n\t}\n}", "func MapRouteBySchema(server *Server, dataStore db.DB, s *schema.Schema) {\n\tif s.IsAbstract() {\n\t\treturn\n\t}\n\troute := server.martini\n\n\tsingleURL := s.GetSingleURL()\n\tpluralURL := s.GetPluralURL()\n\tsingleURLWithParents := s.GetSingleURLWithParents()\n\tpluralURLWithParents := s.GetPluralURLWithParents()\n\n\t//load extension environments\n\tenvironmentManager := extension.GetManager()\n\tif _, ok := environmentManager.GetEnvironment(s.ID); !ok {\n\t\tenv, err := server.NewEnvironmentForPath(s.ID, pluralURL)\n\t\tif err != nil {\n\t\t\tlog.Fatal(fmt.Sprintf(\"[%s] %v\", pluralURL, err))\n\t\t}\n\t\tenvironmentManager.RegisterEnvironment(s.ID, env)\n\t}\n\n\tlog.Debug(\"[Plural Path] %s\", pluralURL)\n\tlog.Debug(\"[Singular Path] %s\", singleURL)\n\tlog.Debug(\"[Plural Path With Parents] %s\", pluralURLWithParents)\n\tlog.Debug(\"[Singular Path With Parents] %s\", singleURLWithParents)\n\n\t//setup list route\n\tgetPluralFunc := func(w http.ResponseWriter, r *http.Request, p martini.Params, identityService middleware.IdentityService, context middleware.Context) {\n\t\taddJSONContentTypeHeader(w)\n\t\tfillInContext(context, dataStore, r, w, s, p, server.sync, identityService, nil)\n\t\tif err := resources.GetMultipleResources(context, dataStore, s, r.URL.Query()); err != nil {\n\t\t\thandleError(w, err)\n\t\t\treturn\n\t\t}\n\t\tw.Header().Add(\"X-Total-Count\", fmt.Sprint(context[\"total\"]))\n\t\troutes.ServeJson(w, context[\"response\"])\n\t}\n\troute.Get(pluralURL, middleware.Authorization(schema.ActionRead), getPluralFunc)\n\troute.Get(pluralURLWithParents, middleware.Authorization(schema.ActionRead), func(w http.ResponseWriter, r *http.Request, p martini.Params, identityService middleware.IdentityService, context middleware.Context) {\n\t\taddParamToQuery(r, schema.FormatParentID(s.Parent), 
p[s.Parent])\n\t\tgetPluralFunc(w, r, p, identityService, context)\n\t})\n\n\t//setup show route\n\tgetSingleFunc := func(w http.ResponseWriter, r *http.Request, p martini.Params, identityService middleware.IdentityService, context middleware.Context) {\n\t\taddJSONContentTypeHeader(w)\n\t\tfillInContext(context, dataStore, r, w, s, p, server.sync, identityService, nil)\n\t\tid := p[\"id\"]\n\t\tif err := resources.GetSingleResource(context, dataStore, s, id); err != nil {\n\t\t\thandleError(w, err)\n\t\t\treturn\n\t\t}\n\t\troutes.ServeJson(w, context[\"response\"])\n\t}\n\troute.Get(singleURL, middleware.Authorization(schema.ActionRead), getSingleFunc)\n\troute.Get(singleURLWithParents, middleware.Authorization(schema.ActionRead), func(w http.ResponseWriter, r *http.Request, p martini.Params, identityService middleware.IdentityService, context middleware.Context) {\n\t\taddParamToQuery(r, schema.FormatParentID(s.Parent), p[s.Parent])\n\t\tgetSingleFunc(w, r, p, identityService, context)\n\t})\n\n\t//setup delete route\n\tdeleteSingleFunc := func(w http.ResponseWriter, r *http.Request, p martini.Params, identityService middleware.IdentityService, context middleware.Context) {\n\t\taddJSONContentTypeHeader(w)\n\t\tfillInContext(context, dataStore, r, w, s, p, server.sync, identityService, nil)\n\t\tid := p[\"id\"]\n\t\tif err := resources.DeleteResource(context, dataStore, s, id); err != nil {\n\t\t\thandleError(w, err)\n\t\t\treturn\n\t\t}\n\t\tw.WriteHeader(http.StatusNoContent)\n\t}\n\troute.Delete(singleURL, middleware.Authorization(schema.ActionDelete), deleteSingleFunc)\n\troute.Delete(singleURLWithParents, middleware.Authorization(schema.ActionDelete), func(w http.ResponseWriter, r *http.Request, p martini.Params, identityService middleware.IdentityService, context middleware.Context) {\n\t\taddParamToQuery(r, schema.FormatParentID(s.Parent), p[s.Parent])\n\t\tdeleteSingleFunc(w, r, p, identityService, context)\n\t})\n\n\t//setup create 
route\n\tpostPluralFunc := func(w http.ResponseWriter, r *http.Request, p martini.Params, identityService middleware.IdentityService, context middleware.Context) {\n\t\taddJSONContentTypeHeader(w)\n\t\tdataMap, err := middleware.ReadJSON(r)\n\t\tif err != nil {\n\t\t\thandleError(w, resources.NewResourceError(err, fmt.Sprintf(\"Failed to parse data: %s\", err), resources.WrongData))\n\t\t\treturn\n\t\t}\n\t\tdataMap = removeResourceWrapper(s, dataMap)\n\t\tfillInContext(context, dataStore, r, w, s, p, server.sync, identityService, dataMap)\n\t\tif s.Parent != \"\" {\n\t\t\tif _, ok := dataMap[s.ParentID()]; !ok {\n\t\t\t\tqueryParams := r.URL.Query()\n\t\t\t\tparentIDParam := queryParams.Get(s.ParentID())\n\t\t\t\tif parentIDParam != \"\" {\n\t\t\t\t\tdataMap[s.ParentID()] = parentIDParam\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\tif err := resources.CreateResource(context, dataStore, s, dataMap); err != nil {\n\t\t\thandleError(w, err)\n\t\t\treturn\n\t\t}\n\t\tw.WriteHeader(http.StatusCreated)\n\t\troutes.ServeJson(w, context[\"response\"])\n\t}\n\troute.Post(pluralURL, middleware.Authorization(schema.ActionCreate), postPluralFunc)\n\troute.Post(pluralURLWithParents, middleware.Authorization(schema.ActionCreate),\n\t\tfunc(w http.ResponseWriter, r *http.Request, p martini.Params, identityService middleware.IdentityService, context middleware.Context) {\n\t\t\taddParamToQuery(r, schema.FormatParentID(s.Parent), p[s.Parent])\n\t\t\tpostPluralFunc(w, r, p, identityService, context)\n\t\t})\n\n\t//setup create or update route\n\tputSingleFunc := func(w http.ResponseWriter, r *http.Request, p martini.Params, identityService middleware.IdentityService, context middleware.Context) {\n\t\taddJSONContentTypeHeader(w)\n\t\tid := p[\"id\"]\n\t\tdataMap, err := middleware.ReadJSON(r)\n\t\tif err != nil {\n\t\t\thandleError(w, resources.NewResourceError(err, fmt.Sprintf(\"Failed to parse data: %s\", err), resources.WrongData))\n\t\t\treturn\n\t\t}\n\t\tdataMap = removeResourceWrapper(s, 
dataMap)\n\t\tfillInContext(context, dataStore, r, w, s, p, server.sync, identityService, dataMap)\n\t\tif isCreated, err := resources.CreateOrUpdateResource(\n\t\t\tcontext, dataStore, s, id, dataMap); err != nil {\n\t\t\thandleError(w, err)\n\t\t\treturn\n\t\t} else if isCreated {\n\t\t\tw.WriteHeader(http.StatusCreated)\n\t\t}\n\t\troutes.ServeJson(w, context[\"response\"])\n\t}\n\troute.Put(singleURL, middleware.Authorization(schema.ActionUpdate), putSingleFunc)\n\troute.Put(singleURLWithParents, middleware.Authorization(schema.ActionUpdate),\n\t\tfunc(w http.ResponseWriter, r *http.Request, p martini.Params, identityService middleware.IdentityService, context middleware.Context) {\n\t\t\taddParamToQuery(r, schema.FormatParentID(s.Parent), p[s.Parent])\n\t\t\tputSingleFunc(w, r, p, identityService, context)\n\t\t})\n\n\t//setup update route\n\tpatchSingleFunc := func(w http.ResponseWriter, r *http.Request, p martini.Params, identityService middleware.IdentityService, context middleware.Context) {\n\t\taddJSONContentTypeHeader(w)\n\t\tid := p[\"id\"]\n\t\tdataMap, err := middleware.ReadJSON(r)\n\t\tif err != nil {\n\t\t\thandleError(w, resources.NewResourceError(err, fmt.Sprintf(\"Failed to parse data: %s\", err), resources.WrongData))\n\t\t\treturn\n\t\t}\n\t\tdataMap = removeResourceWrapper(s, dataMap)\n\t\tfillInContext(context, dataStore, r, w, s, p, server.sync, identityService, dataMap)\n\t\tif err := resources.UpdateResource(\n\t\t\tcontext, dataStore, s, id, dataMap); err != nil {\n\t\t\thandleError(w, err)\n\t\t\treturn\n\t\t}\n\t\troutes.ServeJson(w, context[\"response\"])\n\t}\n\troute.Patch(singleURL, middleware.Authorization(schema.ActionUpdate), patchSingleFunc)\n\troute.Patch(singleURLWithParents, middleware.Authorization(schema.ActionUpdate),\n\t\tfunc(w http.ResponseWriter, r *http.Request, p martini.Params, identityService middleware.IdentityService, context middleware.Context) {\n\t\t\taddParamToQuery(r, schema.FormatParentID(s.Parent), 
p[s.Parent])\n\t\t\tpatchSingleFunc(w, r, p, identityService, context)\n\t\t})\n\n\t//Custom action support\n\tfor _, actionExt := range s.Actions {\n\t\taction := actionExt\n\t\tActionFunc := func(w http.ResponseWriter, r *http.Request, p martini.Params,\n\t\t\tidentityService middleware.IdentityService, auth schema.Authorization, context middleware.Context) {\n\t\t\taddJSONContentTypeHeader(w)\n\t\t\tid := p[\"id\"]\n\t\t\tinput := make(map[string]interface{})\n\t\t\tif action.InputSchema != nil && action.Protocol == \"\" {\n\t\t\t\tvar err error\n\t\t\t\tinput, err = middleware.ReadJSON(r)\n\t\t\t\tif err != nil {\n\t\t\t\t\thandleError(w, resources.NewResourceError(err, fmt.Sprintf(\"Failed to parse data: %s\", err), resources.WrongData))\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t}\n\t\t\tfillInContext(context, dataStore, r, w, s, p, server.sync, identityService, input)\n\n\t\t\tif err := resources.ActionResource(context, dataStore, s, action, id, input); err != nil {\n\t\t\t\thandleError(w, err)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tif response, ok := context[\"response\"]; ok {\n\t\t\t\troutes.ServeJson(w, response)\n\t\t\t}\n\t\t}\n\t\troute.AddRoute(action.Method, s.GetActionURL(action.Path), middleware.Authorization(action.ID), ActionFunc)\n\t\tif s.ParentSchema != nil {\n\t\t\troute.AddRoute(action.Method, s.GetActionURLWithParents(action.Path), middleware.Authorization(action.ID), ActionFunc)\n\t\t}\n\t}\n}", "func mapRoutes() {\n\t//http.HandleFunc(\"/user\", controllers.GetUser)\n}", "func ApplyRoutes(r *gin.RouterGroup) {\n\tclients := r.Group(\"/clients\")\n\t{\n\t\tclients.POST(\"/\", middlewares.Authorized, create)\n\t\tclients.GET(\"/\", middlewares.Authorized, list)\n\t}\n}", "func Index(w http.ResponseWriter, r *http.Request) {\r\n\t\r\n\tdb := Database.Dbconn()\r\n\tselDB, err := db.Query(\"SELECT * FROM employee.dbo.employee\")\r\n\tif err != nil {\r\n\t\tpanic(err.Error())\r\n\t}\r\n\r\n\tper := persona{}\r\n\tres := []persona{}\r\n\r\n\tfor 
selDB.Next() {\r\n\t\tvar id string\r\n\t\tvar name string\r\n\t\tvar location string\r\n\r\n\t\terr = selDB.Scan(&id, &name, &location)\r\n\t\tif err != nil {\r\n\t\t\tpanic(err.Error())\r\n\t\t}\r\n\t\tper.Id = id\r\n\t\tper.Name = name\r\n\t\tper.Location = location\r\n\r\n\t\tres = append(res, per)\r\n\t}\r\n\ttmpl.ExecuteTemplate(w, \"Index\", res)\r\n\tdefer db.Close()\r\n}", "func main() {\n\trouter := gin.Default()\n\n\t// This handler will match /user/john but will not match /user/ or /user\n\trouter.GET(\"/user/:name\", func(c *gin.Context) {\n\t\tname := c.Param(\"name\")\n\t\tc.String(http.StatusOK, \"Hello %s\", name)\n\t})\n\n\t// However, this one will match /user/john/ and also /user/john/send\n\t// If no other routers match /user/john, it will redirect to /user/john/\n\trouter.GET(\"/user/:name/*action\", func(c *gin.Context) {\n\t\tname := c.Param(\"name\")\n\t\taction := c.Param(\"action\")\n\t\tmessage := name + \" is \" + action\n\t\tc.String(http.StatusOK, message)\n\t})\n\n\t// Query string parameters are parsed using the existing underlying request object.\n\t// The request responds to a url matching: /welcome?firstname=Jane&lastname=Doe\n\trouter.GET(\"/hola\", func(c *gin.Context) {\n\t\tnombre := c.DefaultQuery(\"nombre\", \"Guest\")\n\t\tapellido := c.Query(\"apellido\") // shortcut for c.Request.URL.Query().Get(\"lastname\")\n\n\t\tc.String(http.StatusOK, \"Hello %s %s\", nombre, apellido)\n\t})\n\trouter.Run(\":8080\")\n}", "func (ctl *EquipmentrentalController) register() {\n\tEqrentals := ctl.router.Group(\"/equipmentrentals\")\n\tEqrentalss := ctl.router.Group(\"/equipmentrentalbymembers\")\n\tEqrentalss.GET(\"\", ctl.GetEquipmentrentalbyMember)\n\tEqrentals.GET(\"\", ctl.ListEquipmentrental)\n\n\t// CRUD\n\tEqrentals.POST(\"\", ctl.CreateEquipmentrental)\n\tEqrentals.GET(\":id\", ctl.GetEquipmentrental)\n}", "func BookRoutes(rg *gin.RouterGroup, db *repo.LibraryDatabase) {\n\tbooksRouter := 
rg.Group(\"/books\")\n\n\tbooksRouter.POST(\"/\", addBookHandler(db))\n\tbooksRouter.GET(\"/on-hold\", getBooksOnHoldHandler(db))\n}" ]
[ "0.67869747", "0.67589456", "0.6640335", "0.6442103", "0.58659965", "0.56205046", "0.55636376", "0.5562881", "0.5536978", "0.5526658", "0.5520917", "0.54956365", "0.54703736", "0.54550976", "0.5444999", "0.54362804", "0.5415051", "0.53973025", "0.5367491", "0.53587633", "0.5330962", "0.53066117", "0.5304349", "0.5298595", "0.5286063", "0.5281689", "0.52792364", "0.5273156", "0.5271949", "0.5270754", "0.526977", "0.5261317", "0.5259778", "0.5250543", "0.52504647", "0.52363575", "0.5222265", "0.5218962", "0.520191", "0.51981306", "0.5197458", "0.5196607", "0.51920646", "0.51907635", "0.51806426", "0.5178173", "0.5165723", "0.5160037", "0.5155291", "0.5153615", "0.51532704", "0.51492506", "0.51275706", "0.512239", "0.51201403", "0.5116595", "0.51094747", "0.51052123", "0.510178", "0.510103", "0.50999933", "0.5098664", "0.50962466", "0.5087349", "0.5079172", "0.5077427", "0.507645", "0.5065957", "0.5065576", "0.5058524", "0.5055165", "0.5050777", "0.5042191", "0.50417763", "0.5039951", "0.5038658", "0.5038352", "0.50370294", "0.50333416", "0.50330365", "0.5024405", "0.5016505", "0.5014372", "0.5010624", "0.5003089", "0.49858627", "0.49835917", "0.49821696", "0.49781796", "0.49725932", "0.49694163", "0.4967652", "0.49596676", "0.49544808", "0.49539414", "0.4951012", "0.49462226", "0.49458176", "0.49430287", "0.49420232" ]
0.74560905
0
Validate validates this io k8s api core v1 scoped resource selector requirement
Проверяет этот io k8s api core v1 scoped resource selector требование
func (m *IoK8sAPICoreV1ScopedResourceSelectorRequirement) Validate(formats strfmt.Registry) error { var res []error if err := m.validateOperator(formats); err != nil { res = append(res, err) } if err := m.validateScopeName(formats); err != nil { res = append(res, err) } if len(res) > 0 { return errors.CompositeValidationError(res...) } return nil }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (pr *ResourceDescription) Validate() error {\n\tif len(pr.Kinds) == 0 {\n\t\treturn errors.New(\"The Kind is not specified\")\n\t}\n\n\tif pr.Selector != nil {\n\t\tselector, err := metav1.LabelSelectorAsSelector(pr.Selector)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\trequirements, _ := selector.Requirements()\n\t\tif len(requirements) == 0 {\n\t\t\treturn errors.New(\"The requirements are not specified in selector\")\n\t\t}\n\t}\n\n\treturn nil\n}", "func (m *JsonToMetadata_Selector) Validate() error {\n\treturn m.validate(false)\n}", "func (s *Selector) Validate(ver, path string, ignoreStatus, ignoreSpec bool) []error {\n\treturn nil\n}", "func (t *OpenconfigSystem_System_Logging_Console_Selectors_Selector) Validate(opts ...ygot.ValidationOption) error {\n\tif err := ytypes.Validate(SchemaTree[\"OpenconfigSystem_System_Logging_Console_Selectors_Selector\"], t, opts...); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}", "func (i LabelSelectorInput) Validate() error {\n\treturn validation.ValidateStruct(&i,\n\t\tvalidation.Field(&i.Key, validation.In(\"global_subaccount_id\")),\n\t)\n}", "func (s *OpenconfigOfficeAp_System_Logging_Console_Selectors_Selector) Validate() error {\n\tif err := ytypes.Validate(SchemaTree[\"OpenconfigOfficeAp_System_Logging_Console_Selectors_Selector\"], s); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}", "func (t *OpenconfigSystem_System_Logging_RemoteServers_RemoteServer_Selectors_Selector) Validate(opts ...ygot.ValidationOption) error {\n\tif err := ytypes.Validate(SchemaTree[\"OpenconfigSystem_System_Logging_RemoteServers_RemoteServer_Selectors_Selector\"], t, opts...); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}", "func (m *IoK8sAPICoreV1TopologySelectorLabelRequirement) Validate(formats strfmt.Registry) error {\n\tvar res []error\n\n\tif err := m.validateKey(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateValues(formats); err != nil {\n\t\tres = append(res, 
err)\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}", "func validateEndpointRequestResource(req *request.Request, resource arn.Resource) error {\n\tresReq := s3shared.ResourceRequest{Request: req, Resource: resource}\n\n\tif len(resReq.Request.ClientInfo.PartitionID) != 0 && resReq.IsCrossPartition() {\n\t\treturn s3shared.NewClientPartitionMismatchError(resource,\n\t\t\treq.ClientInfo.PartitionID, aws.StringValue(req.Config.Region), nil)\n\t}\n\n\tif !resReq.AllowCrossRegion() && resReq.IsCrossRegion() {\n\t\treturn s3shared.NewClientRegionMismatchError(resource,\n\t\t\treq.ClientInfo.PartitionID, aws.StringValue(req.Config.Region), nil)\n\t}\n\n\t// Accelerate not supported\n\tif aws.BoolValue(req.Config.S3UseAccelerate) {\n\t\treturn s3shared.NewClientConfiguredForAccelerateError(resource,\n\t\t\treq.ClientInfo.PartitionID, aws.StringValue(req.Config.Region), nil)\n\t}\n\treturn nil\n}", "func (m LabelSelectorOperator) Validate(formats strfmt.Registry) error {\n\treturn nil\n}", "func (m *IoK8sAPICoreV1ResourceQuotaSpec) Validate(formats strfmt.Registry) error {\n\tvar res []error\n\n\tif err := m.validateHard(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateScopeSelector(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}", "func (m *IoK8sApimachineryPkgApisMetaV1LabelSelectorRequirement) Validate(formats strfmt.Registry) error {\n\tvar res []error\n\n\tif err := m.validateKey(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateOperator(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}", "func (opts resourceOptions) validate() error {\n\t// Check that the required flags did not get a flag as their value.\n\t// We can safely look for a '-' as the 
first char as none of the fields accepts it.\n\t// NOTE: We must do this for all the required flags first or we may output the wrong\n\t// error as flags may seem to be missing because Cobra assigned them to another flag.\n\tif strings.HasPrefix(opts.Group, \"-\") {\n\t\treturn fmt.Errorf(groupPresent)\n\t}\n\tif strings.HasPrefix(opts.Version, \"-\") {\n\t\treturn fmt.Errorf(versionPresent)\n\t}\n\tif strings.HasPrefix(opts.Kind, \"-\") {\n\t\treturn fmt.Errorf(kindPresent)\n\t}\n\n\t// We do not check here if the GVK values are empty because that would\n\t// make them mandatory and some plugins may want to set default values.\n\t// Instead, this is checked by resource.GVK.Validate()\n\n\treturn nil\n}", "func ValidateControllerRegistrationSpec(spec *core.ControllerRegistrationSpec, fldPath *field.Path) field.ErrorList {\n\tallErrs := field.ErrorList{}\n\n\tvar (\n\t\tresourcesPath = fldPath.Child(\"resources\")\n\t\tdeploymentPath = fldPath.Child(\"deployment\")\n\n\t\tresources = make(map[string]string, len(spec.Resources))\n\t\tcontrolsResourcesPrimarily = false\n\t)\n\n\tfor i, resource := range spec.Resources {\n\t\tidxPath := resourcesPath.Index(i)\n\n\t\tif len(resource.Kind) == 0 {\n\t\t\tallErrs = append(allErrs, field.Required(idxPath.Child(\"kind\"), \"field is required\"))\n\t\t}\n\n\t\tif !extensionsv1alpha1.ExtensionKinds.Has(resource.Kind) {\n\t\t\tallErrs = append(allErrs, field.NotSupported(idxPath.Child(\"kind\"), resource.Kind, extensionsv1alpha1.ExtensionKinds.UnsortedList()))\n\t\t}\n\n\t\tif len(resource.Type) == 0 {\n\t\t\tallErrs = append(allErrs, field.Required(idxPath.Child(\"type\"), \"field is required\"))\n\t\t}\n\t\tif t, ok := resources[resource.Kind]; ok && t == resource.Type {\n\t\t\tallErrs = append(allErrs, field.Duplicate(idxPath, common.ExtensionID(resource.Kind, resource.Type)))\n\t\t}\n\t\tif resource.Kind != extensionsv1alpha1.ExtensionResource {\n\t\t\tif resource.GloballyEnabled != nil {\n\t\t\t\tallErrs = append(allErrs, 
field.Forbidden(idxPath.Child(\"globallyEnabled\"), fmt.Sprintf(\"field must not be set when kind != %s\", extensionsv1alpha1.ExtensionResource)))\n\t\t\t}\n\t\t\tif resource.ReconcileTimeout != nil {\n\t\t\t\tallErrs = append(allErrs, field.Forbidden(idxPath.Child(\"reconcileTimeout\"), fmt.Sprintf(\"field must not be set when kind != %s\", extensionsv1alpha1.ExtensionResource)))\n\t\t\t}\n\t\t}\n\n\t\tresources[resource.Kind] = resource.Type\n\t\tif resource.Primary == nil || *resource.Primary {\n\t\t\tcontrolsResourcesPrimarily = true\n\t\t}\n\t}\n\n\tif spec.Deployment != nil {\n\t\tif policy := spec.Deployment.Policy; policy != nil && !availablePolicies.Has(string(*policy)) {\n\t\t\tallErrs = append(allErrs, field.NotSupported(deploymentPath.Child(\"policy\"), *policy, availablePolicies.List()))\n\t\t}\n\n\t\tif spec.Deployment.SeedSelector != nil {\n\t\t\tif controlsResourcesPrimarily {\n\t\t\t\tallErrs = append(allErrs, field.Forbidden(deploymentPath.Child(\"seedSelector\"), \"specifying a seed selector is not allowed when controlling resources primarily\"))\n\t\t\t}\n\n\t\t\tallErrs = append(allErrs, metav1validation.ValidateLabelSelector(spec.Deployment.SeedSelector, deploymentPath.Child(\"seedSelector\"))...)\n\t\t}\n\t}\n\n\treturn allErrs\n}", "func (t *OpenconfigSystem_System_Logging_Console_Selectors_Selector_Config) Validate(opts ...ygot.ValidationOption) error {\n\tif err := ytypes.Validate(SchemaTree[\"OpenconfigSystem_System_Logging_Console_Selectors_Selector_Config\"], t, opts...); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}", "func (s *OpenconfigOfficeAp_System_Logging_RemoteServers_RemoteServer_Selectors_Selector) Validate() error {\n\tif err := ytypes.Validate(SchemaTree[\"OpenconfigOfficeAp_System_Logging_RemoteServers_RemoteServer_Selectors_Selector\"], s); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}", "func (t *OpenconfigSystem_System_Logging_Console_Selectors_Selector_State) Validate(opts ...ygot.ValidationOption) error 
{\n\tif err := ytypes.Validate(SchemaTree[\"OpenconfigSystem_System_Logging_Console_Selectors_Selector_State\"], t, opts...); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}", "func (r *ScaledObjectReconciler) checkTargetResourceIsScalable(logger logr.Logger, scaledObject *kedav1alpha1.ScaledObject) (kedav1alpha1.GroupVersionKindResource, error) {\n\tgvkr, err := kedautil.ParseGVKR(r.restMapper, scaledObject.Spec.ScaleTargetRef.APIVersion, scaledObject.Spec.ScaleTargetRef.Kind)\n\tif err != nil {\n\t\tlogger.Error(err, \"Failed to parse Group, Version, Kind, Resource\", \"apiVersion\", scaledObject.Spec.ScaleTargetRef.APIVersion, \"kind\", scaledObject.Spec.ScaleTargetRef.Kind)\n\t\treturn gvkr, err\n\t}\n\tgvkString := gvkr.GVKString()\n\tlogger.V(1).Info(\"Parsed Group, Version, Kind, Resource\", \"GVK\", gvkString, \"Resource\", gvkr.Resource)\n\n\t// let's try to detect /scale subresource\n\tscale, errScale := (*r.scaleClient).Scales(scaledObject.Namespace).Get(context.TODO(), gvkr.GroupResource(), scaledObject.Spec.ScaleTargetRef.Name, metav1.GetOptions{})\n\tif errScale != nil {\n\t\t// not able to get /scale subresource -> let's check if the resource even exist in the cluster\n\t\tunstruct := &unstructured.Unstructured{}\n\t\tunstruct.SetGroupVersionKind(gvkr.GroupVersionKind())\n\t\tif err := r.Client.Get(context.TODO(), client.ObjectKey{Namespace: scaledObject.Namespace, Name: scaledObject.Spec.ScaleTargetRef.Name}, unstruct); err != nil {\n\t\t\t// resource doesn't exist\n\t\t\tlogger.Error(err, \"Target resource doesn't exist\", \"resource\", gvkString, \"name\", scaledObject.Spec.ScaleTargetRef.Name)\n\t\t\treturn gvkr, err\n\t\t}\n\t\t// resource exist but doesn't expose /scale subresource\n\t\tlogger.Error(errScale, \"Target resource doesn't expose /scale subresource\", \"resource\", gvkString, \"name\", scaledObject.Spec.ScaleTargetRef.Name)\n\t\treturn gvkr, errScale\n\t}\n\n\t// if it is not already present in ScaledObject Status:\n\t// - store 
discovered GVK and GVKR\n\t// - store original scaleTarget's replica count (before scaling with KEDA)\n\tif scaledObject.Status.ScaleTargetKind != gvkString || scaledObject.Status.OriginalReplicaCount == nil {\n\t\tstatus := scaledObject.Status.DeepCopy()\n\t\tif scaledObject.Status.ScaleTargetKind != gvkString {\n\t\t\tstatus.ScaleTargetKind = gvkString\n\t\t\tstatus.ScaleTargetGVKR = &gvkr\n\t\t}\n\t\tif scaledObject.Status.OriginalReplicaCount == nil {\n\t\t\tstatus.OriginalReplicaCount = &scale.Spec.Replicas\n\t\t}\n\n\t\tif err := kedacontrollerutil.UpdateScaledObjectStatus(r.Client, logger, scaledObject, status); err != nil {\n\t\t\treturn gvkr, err\n\t\t}\n\t\tlogger.Info(\"Detected resource targeted for scaling\", \"resource\", gvkString, \"name\", scaledObject.Spec.ScaleTargetRef.Name)\n\t}\n\n\treturn gvkr, nil\n}", "func validateResource(request resource.Quantity, limit resource.Quantity, resourceName corev1.ResourceName) []error {\n\tvalidationErrors := make([]error, 0)\n\tif !limit.IsZero() && request.Cmp(limit) > 0 {\n\t\tvalidationErrors = append(validationErrors, errors.Errorf(\"Request must be less than or equal to %s limit\", resourceName))\n\t}\n\tif request.Cmp(resource.Quantity{}) < 0 {\n\t\tvalidationErrors = append(validationErrors, errors.Errorf(\"Resource %s request value must be non negative\", resourceName))\n\t}\n\tif limit.Cmp(resource.Quantity{}) < 0 {\n\t\tvalidationErrors = append(validationErrors, errors.Errorf(\"Resource %s limit value must be non negative\", resourceName))\n\t}\n\n\treturn validationErrors\n}", "func (r *ResourceSpec) Valid() bool {\n\tif r.Name == \"\" {\n\t\tfmt.Println(\"no resource spec label\")\n\t\treturn false\n\t}\n\n\tfor _, c := range r.Credentials {\n\t\tif !c.Valid() {\n\t\t\treturn false\n\t\t}\n\t}\n\n\treturn true\n}", "func (t *OpenconfigSystem_System_Logging_RemoteServers_RemoteServer_Selectors_Selector_Config) Validate(opts ...ygot.ValidationOption) error {\n\tif err := 
ytypes.Validate(SchemaTree[\"OpenconfigSystem_System_Logging_RemoteServers_RemoteServer_Selectors_Selector_Config\"], t, opts...); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}", "func resourceVolterraK8SPodSecurityPolicy() *schema.Resource {\n\treturn &schema.Resource{\n\t\tCreate: resourceVolterraK8SPodSecurityPolicyCreate,\n\t\tRead: resourceVolterraK8SPodSecurityPolicyRead,\n\t\tUpdate: resourceVolterraK8SPodSecurityPolicyUpdate,\n\t\tDelete: resourceVolterraK8SPodSecurityPolicyDelete,\n\n\t\tSchema: map[string]*schema.Schema{\n\n\t\t\t\"annotations\": {\n\t\t\t\tType: schema.TypeMap,\n\t\t\t\tOptional: true,\n\t\t\t},\n\n\t\t\t\"description\": {\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tOptional: true,\n\t\t\t},\n\n\t\t\t\"disable\": {\n\t\t\t\tType: schema.TypeBool,\n\t\t\t\tOptional: true,\n\t\t\t},\n\n\t\t\t\"labels\": {\n\t\t\t\tType: schema.TypeMap,\n\t\t\t\tOptional: true,\n\t\t\t},\n\n\t\t\t\"name\": {\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tRequired: true,\n\t\t\t\tForceNew: true,\n\t\t\t},\n\n\t\t\t\"namespace\": {\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tRequired: true,\n\t\t\t\tForceNew: true,\n\t\t\t},\n\n\t\t\t\"psp_spec\": {\n\n\t\t\t\tType: schema.TypeSet,\n\t\t\t\tOptional: true,\n\t\t\t\tElem: &schema.Resource{\n\t\t\t\t\tSchema: map[string]*schema.Schema{\n\n\t\t\t\t\t\t\"allow_privilege_escalation\": {\n\t\t\t\t\t\t\tType: schema.TypeBool,\n\t\t\t\t\t\t\tOptional: true,\n\t\t\t\t\t\t},\n\n\t\t\t\t\t\t\"allowed_capabilities\": {\n\n\t\t\t\t\t\t\tType: schema.TypeSet,\n\t\t\t\t\t\t\tOptional: true,\n\t\t\t\t\t\t\tElem: &schema.Resource{\n\t\t\t\t\t\t\t\tSchema: map[string]*schema.Schema{\n\n\t\t\t\t\t\t\t\t\t\"capabilities\": {\n\n\t\t\t\t\t\t\t\t\t\tType: schema.TypeList,\n\n\t\t\t\t\t\t\t\t\t\tRequired: true,\n\t\t\t\t\t\t\t\t\t\tElem: &schema.Schema{\n\t\t\t\t\t\t\t\t\t\t\tType: 
schema.TypeString,\n\t\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\n\t\t\t\t\t\t\"no_allowed_capabilities\": {\n\n\t\t\t\t\t\t\tType: schema.TypeBool,\n\t\t\t\t\t\t\tOptional: true,\n\t\t\t\t\t\t},\n\n\t\t\t\t\t\t\"allowed_csi_drivers\": {\n\n\t\t\t\t\t\t\tType: schema.TypeList,\n\n\t\t\t\t\t\t\tOptional: true,\n\t\t\t\t\t\t\tElem: &schema.Schema{\n\t\t\t\t\t\t\t\tType: schema.TypeString,\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\n\t\t\t\t\t\t\"allowed_flex_volumes\": {\n\n\t\t\t\t\t\t\tType: schema.TypeList,\n\n\t\t\t\t\t\t\tOptional: true,\n\t\t\t\t\t\t\tElem: &schema.Schema{\n\t\t\t\t\t\t\t\tType: schema.TypeString,\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\n\t\t\t\t\t\t\"allowed_host_paths\": {\n\n\t\t\t\t\t\t\tType: schema.TypeList,\n\t\t\t\t\t\t\tOptional: true,\n\t\t\t\t\t\t\tElem: &schema.Resource{\n\t\t\t\t\t\t\t\tSchema: map[string]*schema.Schema{\n\n\t\t\t\t\t\t\t\t\t\"path_prefix\": {\n\t\t\t\t\t\t\t\t\t\tType: schema.TypeString,\n\t\t\t\t\t\t\t\t\t\tOptional: true,\n\t\t\t\t\t\t\t\t\t},\n\n\t\t\t\t\t\t\t\t\t\"read_only\": {\n\t\t\t\t\t\t\t\t\t\tType: schema.TypeBool,\n\t\t\t\t\t\t\t\t\t\tOptional: true,\n\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\n\t\t\t\t\t\t\"allowed_proc_mounts\": {\n\n\t\t\t\t\t\t\tType: schema.TypeList,\n\n\t\t\t\t\t\t\tOptional: true,\n\t\t\t\t\t\t\tElem: &schema.Schema{\n\t\t\t\t\t\t\t\tType: schema.TypeString,\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\n\t\t\t\t\t\t\"allowed_unsafe_sysctls\": {\n\n\t\t\t\t\t\t\tType: schema.TypeList,\n\n\t\t\t\t\t\t\tOptional: true,\n\t\t\t\t\t\t\tElem: &schema.Schema{\n\t\t\t\t\t\t\t\tType: schema.TypeString,\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\n\t\t\t\t\t\t\"default_allow_privilege_escalation\": {\n\t\t\t\t\t\t\tType: schema.TypeBool,\n\t\t\t\t\t\t\tOptional: true,\n\t\t\t\t\t\t},\n\n\t\t\t\t\t\t\"default_capabilities\": {\n\n\t\t\t\t\t\t\tType: schema.TypeSet,\n\t\t\t\t\t\t\tOptional: true,\n\t\t\t\t\t\t\tElem: 
&schema.Resource{\n\t\t\t\t\t\t\t\tSchema: map[string]*schema.Schema{\n\n\t\t\t\t\t\t\t\t\t\"capabilities\": {\n\n\t\t\t\t\t\t\t\t\t\tType: schema.TypeList,\n\n\t\t\t\t\t\t\t\t\t\tRequired: true,\n\t\t\t\t\t\t\t\t\t\tElem: &schema.Schema{\n\t\t\t\t\t\t\t\t\t\t\tType: schema.TypeString,\n\t\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\n\t\t\t\t\t\t\"no_default_capabilities\": {\n\n\t\t\t\t\t\t\tType: schema.TypeBool,\n\t\t\t\t\t\t\tOptional: true,\n\t\t\t\t\t\t},\n\n\t\t\t\t\t\t\"drop_capabilities\": {\n\n\t\t\t\t\t\t\tType: schema.TypeSet,\n\t\t\t\t\t\t\tOptional: true,\n\t\t\t\t\t\t\tElem: &schema.Resource{\n\t\t\t\t\t\t\t\tSchema: map[string]*schema.Schema{\n\n\t\t\t\t\t\t\t\t\t\"capabilities\": {\n\n\t\t\t\t\t\t\t\t\t\tType: schema.TypeList,\n\n\t\t\t\t\t\t\t\t\t\tRequired: true,\n\t\t\t\t\t\t\t\t\t\tElem: &schema.Schema{\n\t\t\t\t\t\t\t\t\t\t\tType: schema.TypeString,\n\t\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\n\t\t\t\t\t\t\"no_drop_capabilities\": {\n\n\t\t\t\t\t\t\tType: schema.TypeBool,\n\t\t\t\t\t\t\tOptional: true,\n\t\t\t\t\t\t},\n\n\t\t\t\t\t\t\"forbidden_sysctls\": {\n\n\t\t\t\t\t\t\tType: schema.TypeList,\n\n\t\t\t\t\t\t\tOptional: true,\n\t\t\t\t\t\t\tElem: &schema.Schema{\n\t\t\t\t\t\t\t\tType: schema.TypeString,\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\n\t\t\t\t\t\t\"fs_group_strategy_options\": {\n\n\t\t\t\t\t\t\tType: schema.TypeSet,\n\t\t\t\t\t\t\tOptional: true,\n\t\t\t\t\t\t\tElem: &schema.Resource{\n\t\t\t\t\t\t\t\tSchema: map[string]*schema.Schema{\n\n\t\t\t\t\t\t\t\t\t\"id_ranges\": {\n\n\t\t\t\t\t\t\t\t\t\tType: schema.TypeList,\n\t\t\t\t\t\t\t\t\t\tOptional: true,\n\t\t\t\t\t\t\t\t\t\tElem: &schema.Resource{\n\t\t\t\t\t\t\t\t\t\t\tSchema: map[string]*schema.Schema{\n\n\t\t\t\t\t\t\t\t\t\t\t\t\"max_id\": {\n\t\t\t\t\t\t\t\t\t\t\t\t\tType: schema.TypeInt,\n\t\t\t\t\t\t\t\t\t\t\t\t\tOptional: 
true,\n\t\t\t\t\t\t\t\t\t\t\t\t},\n\n\t\t\t\t\t\t\t\t\t\t\t\t\"min_id\": {\n\t\t\t\t\t\t\t\t\t\t\t\t\tType: schema.TypeInt,\n\t\t\t\t\t\t\t\t\t\t\t\t\tOptional: true,\n\t\t\t\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t},\n\n\t\t\t\t\t\t\t\t\t\"rule\": {\n\t\t\t\t\t\t\t\t\t\tType: schema.TypeString,\n\t\t\t\t\t\t\t\t\t\tOptional: true,\n\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\n\t\t\t\t\t\t\"no_fs_groups\": {\n\n\t\t\t\t\t\t\tType: schema.TypeBool,\n\t\t\t\t\t\t\tOptional: true,\n\t\t\t\t\t\t},\n\n\t\t\t\t\t\t\"no_run_as_group\": {\n\n\t\t\t\t\t\t\tType: schema.TypeBool,\n\t\t\t\t\t\t\tOptional: true,\n\t\t\t\t\t\t},\n\n\t\t\t\t\t\t\"run_as_group\": {\n\n\t\t\t\t\t\t\tType: schema.TypeSet,\n\t\t\t\t\t\t\tOptional: true,\n\t\t\t\t\t\t\tElem: &schema.Resource{\n\t\t\t\t\t\t\t\tSchema: map[string]*schema.Schema{\n\n\t\t\t\t\t\t\t\t\t\"id_ranges\": {\n\n\t\t\t\t\t\t\t\t\t\tType: schema.TypeList,\n\t\t\t\t\t\t\t\t\t\tOptional: true,\n\t\t\t\t\t\t\t\t\t\tElem: &schema.Resource{\n\t\t\t\t\t\t\t\t\t\t\tSchema: map[string]*schema.Schema{\n\n\t\t\t\t\t\t\t\t\t\t\t\t\"max_id\": {\n\t\t\t\t\t\t\t\t\t\t\t\t\tType: schema.TypeInt,\n\t\t\t\t\t\t\t\t\t\t\t\t\tOptional: true,\n\t\t\t\t\t\t\t\t\t\t\t\t},\n\n\t\t\t\t\t\t\t\t\t\t\t\t\"min_id\": {\n\t\t\t\t\t\t\t\t\t\t\t\t\tType: schema.TypeInt,\n\t\t\t\t\t\t\t\t\t\t\t\t\tOptional: true,\n\t\t\t\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t},\n\n\t\t\t\t\t\t\t\t\t\"rule\": {\n\t\t\t\t\t\t\t\t\t\tType: schema.TypeString,\n\t\t\t\t\t\t\t\t\t\tOptional: true,\n\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\n\t\t\t\t\t\t\"host_ipc\": {\n\t\t\t\t\t\t\tType: schema.TypeBool,\n\t\t\t\t\t\t\tOptional: true,\n\t\t\t\t\t\t},\n\n\t\t\t\t\t\t\"host_network\": {\n\t\t\t\t\t\t\tType: schema.TypeBool,\n\t\t\t\t\t\t\tOptional: true,\n\t\t\t\t\t\t},\n\n\t\t\t\t\t\t\"host_pid\": {\n\t\t\t\t\t\t\tType: 
schema.TypeBool,\n\t\t\t\t\t\t\tOptional: true,\n\t\t\t\t\t\t},\n\n\t\t\t\t\t\t\"host_port_ranges\": {\n\t\t\t\t\t\t\tType: schema.TypeString,\n\t\t\t\t\t\t\tOptional: true,\n\t\t\t\t\t\t},\n\n\t\t\t\t\t\t\"privileged\": {\n\t\t\t\t\t\t\tType: schema.TypeBool,\n\t\t\t\t\t\t\tOptional: true,\n\t\t\t\t\t\t},\n\n\t\t\t\t\t\t\"read_only_root_filesystem\": {\n\t\t\t\t\t\t\tType: schema.TypeBool,\n\t\t\t\t\t\t\tOptional: true,\n\t\t\t\t\t\t},\n\n\t\t\t\t\t\t\"no_runtime_class\": {\n\n\t\t\t\t\t\t\tType: schema.TypeBool,\n\t\t\t\t\t\t\tOptional: true,\n\t\t\t\t\t\t},\n\n\t\t\t\t\t\t\"runtime_class\": {\n\n\t\t\t\t\t\t\tType: schema.TypeSet,\n\t\t\t\t\t\t\tOptional: true,\n\t\t\t\t\t\t\tElem: &schema.Resource{\n\t\t\t\t\t\t\t\tSchema: map[string]*schema.Schema{\n\n\t\t\t\t\t\t\t\t\t\"allowed_runtime_class_names\": {\n\n\t\t\t\t\t\t\t\t\t\tType: schema.TypeList,\n\n\t\t\t\t\t\t\t\t\t\tRequired: true,\n\t\t\t\t\t\t\t\t\t\tElem: &schema.Schema{\n\t\t\t\t\t\t\t\t\t\t\tType: schema.TypeString,\n\t\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t},\n\n\t\t\t\t\t\t\t\t\t\"default_runtime_class_name\": {\n\t\t\t\t\t\t\t\t\t\tType: schema.TypeString,\n\t\t\t\t\t\t\t\t\t\tOptional: true,\n\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\n\t\t\t\t\t\t\"no_se_linux_options\": {\n\n\t\t\t\t\t\t\tType: schema.TypeBool,\n\t\t\t\t\t\t\tOptional: true,\n\t\t\t\t\t\t},\n\n\t\t\t\t\t\t\"se_linux_options\": {\n\n\t\t\t\t\t\t\tType: schema.TypeSet,\n\t\t\t\t\t\t\tOptional: true,\n\t\t\t\t\t\t\tElem: &schema.Resource{\n\t\t\t\t\t\t\t\tSchema: map[string]*schema.Schema{\n\n\t\t\t\t\t\t\t\t\t\"level\": {\n\t\t\t\t\t\t\t\t\t\tType: schema.TypeString,\n\t\t\t\t\t\t\t\t\t\tOptional: true,\n\t\t\t\t\t\t\t\t\t},\n\n\t\t\t\t\t\t\t\t\t\"role\": {\n\t\t\t\t\t\t\t\t\t\tType: schema.TypeString,\n\t\t\t\t\t\t\t\t\t\tOptional: true,\n\t\t\t\t\t\t\t\t\t},\n\n\t\t\t\t\t\t\t\t\t\"rule\": {\n\t\t\t\t\t\t\t\t\t\tType: schema.TypeString,\n\t\t\t\t\t\t\t\t\t\tOptional: 
true,\n\t\t\t\t\t\t\t\t\t},\n\n\t\t\t\t\t\t\t\t\t\"type\": {\n\t\t\t\t\t\t\t\t\t\tType: schema.TypeString,\n\t\t\t\t\t\t\t\t\t\tOptional: true,\n\t\t\t\t\t\t\t\t\t},\n\n\t\t\t\t\t\t\t\t\t\"user\": {\n\t\t\t\t\t\t\t\t\t\tType: schema.TypeString,\n\t\t\t\t\t\t\t\t\t\tOptional: true,\n\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\n\t\t\t\t\t\t\"no_supplemental_groups\": {\n\n\t\t\t\t\t\t\tType: schema.TypeBool,\n\t\t\t\t\t\t\tOptional: true,\n\t\t\t\t\t\t},\n\n\t\t\t\t\t\t\"supplemental_groups\": {\n\n\t\t\t\t\t\t\tType: schema.TypeSet,\n\t\t\t\t\t\t\tOptional: true,\n\t\t\t\t\t\t\tElem: &schema.Resource{\n\t\t\t\t\t\t\t\tSchema: map[string]*schema.Schema{\n\n\t\t\t\t\t\t\t\t\t\"id_ranges\": {\n\n\t\t\t\t\t\t\t\t\t\tType: schema.TypeList,\n\t\t\t\t\t\t\t\t\t\tOptional: true,\n\t\t\t\t\t\t\t\t\t\tElem: &schema.Resource{\n\t\t\t\t\t\t\t\t\t\t\tSchema: map[string]*schema.Schema{\n\n\t\t\t\t\t\t\t\t\t\t\t\t\"max_id\": {\n\t\t\t\t\t\t\t\t\t\t\t\t\tType: schema.TypeInt,\n\t\t\t\t\t\t\t\t\t\t\t\t\tOptional: true,\n\t\t\t\t\t\t\t\t\t\t\t\t},\n\n\t\t\t\t\t\t\t\t\t\t\t\t\"min_id\": {\n\t\t\t\t\t\t\t\t\t\t\t\t\tType: schema.TypeInt,\n\t\t\t\t\t\t\t\t\t\t\t\t\tOptional: true,\n\t\t\t\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t},\n\n\t\t\t\t\t\t\t\t\t\"rule\": {\n\t\t\t\t\t\t\t\t\t\tType: schema.TypeString,\n\t\t\t\t\t\t\t\t\t\tOptional: true,\n\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\n\t\t\t\t\t\t\"no_run_as_user\": {\n\n\t\t\t\t\t\t\tType: schema.TypeBool,\n\t\t\t\t\t\t\tOptional: true,\n\t\t\t\t\t\t},\n\n\t\t\t\t\t\t\"run_as_user\": {\n\n\t\t\t\t\t\t\tType: schema.TypeSet,\n\t\t\t\t\t\t\tOptional: true,\n\t\t\t\t\t\t\tElem: &schema.Resource{\n\t\t\t\t\t\t\t\tSchema: map[string]*schema.Schema{\n\n\t\t\t\t\t\t\t\t\t\"id_ranges\": {\n\n\t\t\t\t\t\t\t\t\t\tType: schema.TypeList,\n\t\t\t\t\t\t\t\t\t\tOptional: true,\n\t\t\t\t\t\t\t\t\t\tElem: 
&schema.Resource{\n\t\t\t\t\t\t\t\t\t\t\tSchema: map[string]*schema.Schema{\n\n\t\t\t\t\t\t\t\t\t\t\t\t\"max_id\": {\n\t\t\t\t\t\t\t\t\t\t\t\t\tType: schema.TypeInt,\n\t\t\t\t\t\t\t\t\t\t\t\t\tOptional: true,\n\t\t\t\t\t\t\t\t\t\t\t\t},\n\n\t\t\t\t\t\t\t\t\t\t\t\t\"min_id\": {\n\t\t\t\t\t\t\t\t\t\t\t\t\tType: schema.TypeInt,\n\t\t\t\t\t\t\t\t\t\t\t\t\tOptional: true,\n\t\t\t\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t},\n\n\t\t\t\t\t\t\t\t\t\"rule\": {\n\t\t\t\t\t\t\t\t\t\tType: schema.TypeString,\n\t\t\t\t\t\t\t\t\t\tOptional: true,\n\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\n\t\t\t\t\t\t\"volumes\": {\n\n\t\t\t\t\t\t\tType: schema.TypeList,\n\n\t\t\t\t\t\t\tOptional: true,\n\t\t\t\t\t\t\tElem: &schema.Schema{\n\t\t\t\t\t\t\t\tType: schema.TypeString,\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\n\t\t\t\"yaml\": {\n\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tOptional: true,\n\t\t\t},\n\t\t},\n\t}\n}", "func (m *ResourceControlUpdateRequest) Validate(formats strfmt.Registry) error {\n\treturn nil\n}", "func ValidResource(api *kit.API, lookupOrgByResourceID func(context.Context, influxdb.ID) (influxdb.ID, error)) kit.Middleware {\n\treturn func(next http.Handler) http.Handler {\n\t\tfn := func(w http.ResponseWriter, r *http.Request) {\n\t\t\tstatusW := kit.NewStatusResponseWriter(w)\n\t\t\tid, err := influxdb.IDFromString(chi.URLParam(r, \"id\"))\n\t\t\tif err != nil {\n\t\t\t\tapi.Err(w, ErrCorruptID(err))\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tctx := r.Context()\n\n\t\t\torgID, err := lookupOrgByResourceID(ctx, *id)\n\t\t\tif err != nil {\n\t\t\t\tapi.Err(w, err)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tnext.ServeHTTP(statusW, r.WithContext(context.WithValue(ctx, ctxOrgKey, orgID)))\n\t\t}\n\t\treturn http.HandlerFunc(fn)\n\t}\n}", "func (t *OpenconfigSystem_System_Logging_Console_Selectors) Validate(opts ...ygot.ValidationOption) error {\n\tif err := 
ytypes.Validate(SchemaTree[\"OpenconfigSystem_System_Logging_Console_Selectors\"], t, opts...); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}", "func (r *Resource) Valid() bool {\n\tif r.Name == \"\" {\n\t\tfmt.Println(\"no resource spec label\")\n\t\treturn false\n\t}\n\n\tfor _, c := range r.Credentials {\n\t\tif !c.Valid() {\n\t\t\treturn false\n\t\t}\n\t}\n\n\treturn true\n}", "func (s *OpenconfigOfficeAp_System_Logging_Console_Selectors) Validate() error {\n\tif err := ytypes.Validate(SchemaTree[\"OpenconfigOfficeAp_System_Logging_Console_Selectors\"], s); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}", "func validateNodeSelector(version int, s string) {\n\t_, err := selector.Parse(s)\n\tif err != nil {\n\t\tlog.Errorf(\"Invalid node selector '%s' for version %d: %s\", s, version, err)\n\t\tutils.Terminate()\n\t}\n}", "func (s *OpenconfigOfficeAp_System_Logging_Console_Selectors_Selector_State) Validate() error {\n\tif err := ytypes.Validate(SchemaTree[\"OpenconfigOfficeAp_System_Logging_Console_Selectors_Selector_State\"], s); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}", "func (d *sequenceValidator) ValidateResource(resource *datautils.Resource) *models.ErrorResponseErrors {\n\n\tif !resource.IsCollection() && !resource.IsObject() {\n\t\t// Nothing to do here, this is not a Collection or Object\n\t\treturn nil\n\t}\n\n\t// Load the member resource\n\tstructuralMd := resource.Structural()\n\tif !structuralMd.HasKey(\"hasMemberOrders\") {\n\t\t// Nothing to do here, there are no orders to validate\n\t\treturn nil\n\t}\n\tif !structuralMd.HasKey(\"hasMember\") {\n\t\t// Must have hasMember if they have hasMemberOrders\n\t\treturn d.buildErrors(\"Resource has 'hasMemberOrders' but does not have 'hasMember'\", \"structural.hasMember\")\n\t}\n\tmemberOrders := structuralMd.GetA(\"hasMemberOrders\")\n\tmemberIds := structuralMd.GetA(\"hasMember\").GetS()\n\n\tfor _, order := range memberOrders.GetObj() {\n\t\t// check that order.members is a subset 
of memberIds\n\t\tmembersOfOrder := order.GetA(\"members\").GetS()\n\t\tif !subset(membersOfOrder, memberIds) {\n\t\t\treturn d.buildErrors(\"'hasMemberOrders.members' must be a subset of 'hasMember'\", \"structural.hasMemberOrders.member\")\n\t\t}\n\t}\n\n\treturn nil\n}", "func (nco *NamespaceCreateOptions) Validate(ctx context.Context) error {\n\treturn dfutil.ValidateK8sResourceName(\"namespace name\", nco.namespaceName)\n}", "func validateController(ctx context.Context, c clientset.Interface, containerImage string, replicas int, containername string, testname string, validator validatorFn, ns string) {\n\tcontainerImage = trimDockerRegistry(containerImage)\n\tgetPodsTemplate := \"--template={{range.items}}{{.metadata.name}} {{end}}\"\n\n\tgetContainerStateTemplate := fmt.Sprintf(`--template={{if (exists . \"status\" \"containerStatuses\")}}{{range .status.containerStatuses}}{{if (and (eq .name \"%s\") (exists . \"state\" \"running\"))}}true{{end}}{{end}}{{end}}`, containername)\n\n\tgetImageTemplate := fmt.Sprintf(`--template={{if (exists . 
\"spec\" \"containers\")}}{{range .spec.containers}}{{if eq .name \"%s\"}}{{.image}}{{end}}{{end}}{{end}}`, containername)\n\n\tginkgo.By(fmt.Sprintf(\"waiting for all containers in %s pods to come up.\", testname)) //testname should be selector\nwaitLoop:\n\tfor start := time.Now(); time.Since(start) < framework.PodStartTimeout && ctx.Err() == nil; time.Sleep(5 * time.Second) {\n\t\tgetPodsOutput := e2ekubectl.RunKubectlOrDie(ns, \"get\", \"pods\", \"-o\", \"template\", getPodsTemplate, \"-l\", testname)\n\t\tpods := strings.Fields(getPodsOutput)\n\t\tif numPods := len(pods); numPods != replicas {\n\t\t\tginkgo.By(fmt.Sprintf(\"Replicas for %s: expected=%d actual=%d\", testname, replicas, numPods))\n\t\t\tcontinue\n\t\t}\n\t\tvar runningPods []string\n\t\tfor _, podID := range pods {\n\t\t\trunning := e2ekubectl.RunKubectlOrDie(ns, \"get\", \"pods\", podID, \"-o\", \"template\", getContainerStateTemplate)\n\t\t\tif running != \"true\" {\n\t\t\t\tframework.Logf(\"%s is created but not running\", podID)\n\t\t\t\tcontinue waitLoop\n\t\t\t}\n\n\t\t\tcurrentImage := e2ekubectl.RunKubectlOrDie(ns, \"get\", \"pods\", podID, \"-o\", \"template\", getImageTemplate)\n\t\t\tcurrentImage = trimDockerRegistry(currentImage)\n\t\t\tif currentImage != containerImage {\n\t\t\t\tframework.Logf(\"%s is created but running wrong image; expected: %s, actual: %s\", podID, containerImage, currentImage)\n\t\t\t\tcontinue waitLoop\n\t\t\t}\n\n\t\t\t// Call the generic validator function here.\n\t\t\t// This might validate for example, that (1) getting a url works and (2) url is serving correct content.\n\t\t\tif err := validator(ctx, c, podID); err != nil {\n\t\t\t\tframework.Logf(\"%s is running right image but validator function failed: %v\", podID, err)\n\t\t\t\tcontinue waitLoop\n\t\t\t}\n\n\t\t\tframework.Logf(\"%s is verified up and running\", podID)\n\t\t\trunningPods = append(runningPods, podID)\n\t\t}\n\t\t// If we reach here, then all our checks passed.\n\t\tif len(runningPods) 
== replicas {\n\t\t\treturn\n\t\t}\n\t}\n\t// Reaching here means that one of more checks failed multiple times. Assuming its not a race condition, something is broken.\n\tframework.Failf(\"Timed out after %v seconds waiting for %s pods to reach valid state\", framework.PodStartTimeout.Seconds(), testname)\n}", "func (t *OpenconfigSystem_System_Logging_RemoteServers_RemoteServer_Selectors) Validate(opts ...ygot.ValidationOption) error {\n\tif err := ytypes.Validate(SchemaTree[\"OpenconfigSystem_System_Logging_RemoteServers_RemoteServer_Selectors\"], t, opts...); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}", "func (o *ServiceCreateOptions) Validate() (err error) {\n\t// if we are in interactive mode, all values are already valid\n\tif o.interactive {\n\t\treturn nil\n\t}\n\n\t// we want to find an Operator only if something's passed to the crd flag on CLI\n\tif experimental.IsExperimentalModeEnabled() {\n\t\t// if the user wants to create service from a file, we check for\n\t\t// existence of file and validate if the requested operator and CR\n\t\t// exist on the cluster\n\t\tif o.fromFile != \"\" {\n\t\t\tif _, err := os.Stat(o.fromFile); err != nil {\n\t\t\t\treturn errors.Wrap(err, \"unable to find specified file\")\n\t\t\t}\n\n\t\t\t// Parse the file to find Operator and CR info\n\t\t\tfileContents, err := ioutil.ReadFile(o.fromFile)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\t// var jsonCR map[string]interface{}\n\t\t\terr = yaml.Unmarshal(fileContents, &o.CustomResourceDefinition)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\t// Check if the operator and the CR exist on cluster\n\t\t\to.CustomResource = o.CustomResourceDefinition[\"kind\"].(string)\n\t\t\tcsvs, err := o.KClient.GetClusterServiceVersionList()\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\tcsv, err := doesCRExist(o.CustomResource, csvs)\n\t\t\tif err != nil {\n\t\t\t\treturn fmt.Errorf(\"Could not find specified service/custom resource: 
%s\\nPlease check the \\\"kind\\\" field in the yaml (it's case-sensitive)\", o.CustomResource)\n\t\t\t}\n\n\t\t\t// all is well, let's populate the fields required for creating operator backed service\n\t\t\to.group, o.version = groupVersionALMExample(o.CustomResourceDefinition)\n\t\t\to.resource = resourceFromCSV(csv, o.CustomResource)\n\t\t\to.ServiceName, err = serviceNameFromCRD(o.CustomResourceDefinition, o.ServiceName)\n\t\t\treturn err\n\t\t}\n\t\tif o.CustomResource != \"\" {\n\t\t\t// make sure that CSV of the specified ServiceType exists\n\t\t\tcsv, err := o.KClient.GetClusterServiceVersion(o.ServiceType)\n\t\t\tif err != nil {\n\t\t\t\t// error only occurs when OperatorHub is not installed.\n\t\t\t\t// k8s does't have it installed by default but OCP does\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\tvar almExamples []map[string]interface{}\n\t\t\tval, ok := csv.Annotations[\"alm-examples\"]\n\t\t\tif ok {\n\t\t\t\terr = json.Unmarshal([]byte(val), &almExamples)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn errors.Wrap(err, \"unable to unmarshal alm-examples\")\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\t// There's no alm examples in the CSV's definition\n\t\t\t\treturn fmt.Errorf(\"Could not find alm-examples in operator's definition.\\nPlease provide a file containing yaml specification to start the %s service from %s operator\", o.CustomResource, o.ServiceName)\n\t\t\t}\n\n\t\t\talmExample, err := getAlmExample(almExamples, o.CustomResource, o.ServiceType)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\to.CustomResourceDefinition = almExample\n\t\t\to.group, o.version = groupVersionALMExample(almExample)\n\t\t\to.resource = resourceFromCSV(csv, o.CustomResource)\n\t\t\to.ServiceName, err = serviceNameFromCRD(o.CustomResourceDefinition, o.ServiceName)\n\t\t\treturn err\n\t\t} else {\n\t\t\t// prevent user from executing `odo service create <operator-name>`\n\t\t\t// because the correct way is to execute `odo service\n\t\t\t// <operator-name> --crd 
<crd-name>`\n\t\t\tcsvs, err := o.KClient.GetClusterServiceVersionList()\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\tfor _, csv := range csvs.Items {\n\t\t\t\tif csv.Name == o.ServiceType {\n\t\t\t\t\t// this is satisfied if user has specified operator but not\n\t\t\t\t\t// a CRD name\n\t\t\t\t\treturn errors.New(\"Please specify service name along with the operator name\")\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\t// make sure the service type exists\n\tclassPtr, err := o.Client.GetClusterServiceClass(o.ServiceType)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"unable to create service because Service Catalog is not enabled in your cluster\")\n\t}\n\tif classPtr == nil {\n\t\treturn fmt.Errorf(\"service %v doesn't exist\\nRun 'odo catalog list services' to see a list of supported services.\\n\", o.ServiceType)\n\t}\n\n\t// check plan\n\tplans, err := o.Client.GetMatchingPlans(*classPtr)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif len(o.Plan) == 0 {\n\t\t// when the plan has not been supplied, if there is only one available plan, we select it\n\t\tif len(plans) == 1 {\n\t\t\tfor k := range plans {\n\t\t\t\to.Plan = k\n\t\t\t}\n\t\t\tklog.V(4).Infof(\"Plan %s was automatically selected since it's the only one available for service %s\", o.Plan, o.ServiceType)\n\t\t} else {\n\t\t\treturn fmt.Errorf(\"no plan was supplied for service %v.\\nPlease select one of: %v\\n\", o.ServiceType, strings.Join(ui.GetServicePlanNames(plans), \",\"))\n\t\t}\n\t} else {\n\t\t// when the plan has been supplied, we need to make sure it exists\n\t\tif _, ok := plans[o.Plan]; !ok {\n\t\t\treturn fmt.Errorf(\"plan %s is invalid for service %v.\\nPlease select one of: %v\\n\", o.Plan, o.ServiceType, strings.Join(ui.GetServicePlanNames(plans), \",\"))\n\t\t}\n\t}\n\t//validate service name\n\treturn o.validateServiceName(o.ServiceName)\n}", "func (t *OpenconfigSystem_System_Logging_RemoteServers_RemoteServer_Selectors_Selector_State) Validate(opts ...ygot.ValidationOption) 
error {\n\tif err := ytypes.Validate(SchemaTree[\"OpenconfigSystem_System_Logging_RemoteServers_RemoteServer_Selectors_Selector_State\"], t, opts...); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}", "func (r *Reconciler) validateNSTemplateTier(tierName string) error {\n\tif tierName == \"\" {\n\t\treturn fmt.Errorf(\"tierName cannot be blank\")\n\t}\n\t// check if requested tier exists\n\ttier := &toolchainv1alpha1.NSTemplateTier{}\n\tif err := r.Client.Get(context.TODO(), types.NamespacedName{\n\t\tNamespace: r.Namespace,\n\t\tName: tierName,\n\t}, tier); err != nil {\n\t\tif errors.IsNotFound(err) {\n\t\t\treturn err\n\t\t}\n\t\t// Error reading the object - requeue the request.\n\t\treturn errs.Wrap(err, \"unable to get the current NSTemplateTier\")\n\t}\n\treturn nil\n}", "func ValidateNodeSelectorRequirement(rq core.NodeSelectorRequirement, fldPath *field.Path) field.ErrorList {\n\tallErrs := field.ErrorList{}\n\tswitch rq.Operator {\n\tcase core.NodeSelectorOpIn, core.NodeSelectorOpNotIn:\n\t\tif len(rq.Values) == 0 {\n\t\t\tallErrs = append(allErrs, field.Required(fldPath.Child(\"values\"), \"must be specified when `operator` is 'In' or 'NotIn'\"))\n\t\t}\n\tcase core.NodeSelectorOpExists, core.NodeSelectorOpDoesNotExist:\n\t\tif len(rq.Values) > 0 {\n\t\t\tallErrs = append(allErrs, field.Forbidden(fldPath.Child(\"values\"), \"may not be specified when `operator` is 'Exists' or 'DoesNotExist'\"))\n\t\t}\n\n\tcase core.NodeSelectorOpGt, core.NodeSelectorOpLt:\n\t\tif len(rq.Values) != 1 {\n\t\t\tallErrs = append(allErrs, field.Required(fldPath.Child(\"values\"), \"must be specified single value when `operator` is 'Lt' or 'Gt'\"))\n\t\t}\n\tdefault:\n\t\tallErrs = append(allErrs, field.Invalid(fldPath.Child(\"operator\"), rq.Operator, \"not a valid selector operator\"))\n\t}\n\n\tallErrs = append(allErrs, unversionedvalidation.ValidateLabelName(rq.Key, fldPath.Child(\"key\"))...)\n\n\treturn allErrs\n}", "func (sel *CFNSelector) Resources(msg, finalMsg, 
help, body string) ([]template.CFNResource, error) {\n\ttpl := struct {\n\t\tResources map[string]struct {\n\t\t\tType string `yaml:\"Type\"`\n\t\t} `yaml:\"Resources\"`\n\t}{}\n\tif err := yaml.Unmarshal([]byte(body), &tpl); err != nil {\n\t\treturn nil, fmt.Errorf(\"unmarshal CloudFormation template: %v\", err)\n\t}\n\n\t// Prompt for a selection.\n\tvar options []prompt.Option\n\tfor name, resource := range tpl.Resources {\n\t\tif resource.Type == \"AWS::Lambda::Function\" || strings.HasPrefix(resource.Type, \"Custom::\") {\n\t\t\tcontinue\n\t\t}\n\t\toptions = append(options, prompt.Option{\n\t\t\tValue: name,\n\t\t\tHint: resource.Type,\n\t\t})\n\t}\n\tsort.Slice(options, func(i, j int) bool { // Sort options by resource type, if they're the same resource type then sort by logicalID.\n\t\tif options[i].Hint == options[j].Hint {\n\t\t\treturn options[i].Value < options[j].Value\n\t\t}\n\t\treturn options[i].Hint < options[j].Hint\n\t})\n\tlogicalIDs, err := sel.prompt.MultiSelectOptions(msg, help, options, prompt.WithFinalMessage(finalMsg))\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"select CloudFormation resources: %v\", err)\n\t}\n\n\t// Transform to template.CFNResource\n\tout := make([]template.CFNResource, len(logicalIDs))\n\tfor i, logicalID := range logicalIDs {\n\t\tout[i] = template.CFNResource{\n\t\t\tType: template.CFNType(tpl.Resources[logicalID].Type),\n\t\t\tLogicalID: logicalID,\n\t\t}\n\t}\n\treturn out, nil\n}", "func (s *OpenconfigOfficeAp_System_Logging_Console_Selectors_Selector_Config) Validate() error {\n\tif err := ytypes.Validate(SchemaTree[\"OpenconfigOfficeAp_System_Logging_Console_Selectors_Selector_Config\"], s); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}", "func ResourceOwnerOf(obj runtime.Object) Func {\n\treturn func(ownerObj runtime.Object) bool {\n\t\treturn metav1.IsControlledBy(obj.(metav1.Object), ownerObj.(metav1.Object))\n\t}\n}", "func (s *Synk) Init() error {\n\tvTrue := true\n\tcrd := 
&apiextensions.CustomResourceDefinition{\n\t\tTypeMeta: metav1.TypeMeta{\n\t\t\tAPIVersion: \"apiextensions.k8s.io/v1\",\n\t\t\tKind: \"CustomResourceDefinition\",\n\t\t},\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tName: \"resourcesets.apps.cloudrobotics.com\",\n\t\t},\n\t\tSpec: apiextensions.CustomResourceDefinitionSpec{\n\t\t\tGroup: \"apps.cloudrobotics.com\",\n\t\t\tNames: apiextensions.CustomResourceDefinitionNames{\n\t\t\t\tKind: \"ResourceSet\",\n\t\t\t\tPlural: \"resourcesets\",\n\t\t\t\tSingular: \"resourceset\",\n\t\t\t},\n\t\t\tScope: apiextensions.ClusterScoped,\n\t\t\tVersions: []apiextensions.CustomResourceDefinitionVersion{{\n\t\t\t\tName: \"v1alpha1\",\n\t\t\t\tServed: true,\n\t\t\t\tStorage: true,\n\t\t\t\t// TODO(ensonic): replace with the actual schema\n\t\t\t\tSchema: &apiextensions.CustomResourceValidation{\n\t\t\t\t\tOpenAPIV3Schema: &apiextensions.JSONSchemaProps{\n\t\t\t\t\t\tType: \"object\",\n\t\t\t\t\t\tProperties: map[string]apiextensions.JSONSchemaProps{\n\t\t\t\t\t\t\t\"spec\": {\n\t\t\t\t\t\t\t\tType: \"object\",\n\t\t\t\t\t\t\t\tXPreserveUnknownFields: &vTrue,\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\"status\": {\n\t\t\t\t\t\t\t\tType: \"object\",\n\t\t\t\t\t\t\t\tXPreserveUnknownFields: &vTrue,\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t}},\n\t\t},\n\t}\n\tvar u unstructured.Unstructured\n\tif err := convert(crd, &u); err != nil {\n\t\treturn err\n\t}\n\tif _, err := s.applyOne(context.Background(), &u, nil); err != nil {\n\t\treturn errors.Wrap(err, \"create ResourceSet CRD\")\n\t}\n\n\terr := backoff.Retry(\n\t\tfunc() error {\n\t\t\ts.discovery.Invalidate()\n\t\t\tok, err := s.crdAvailable(&u)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tif !ok {\n\t\t\t\treturn errors.New(\"crd not available\")\n\t\t\t}\n\t\t\treturn nil\n\t\t},\n\t\tbackoff.WithMaxRetries(backoff.NewConstantBackOff(2*time.Second), 60),\n\t)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"wait for ResourceSet 
CRD\")\n\t}\n\n\treturn nil\n}", "func ValidateResource(r RextResourceDef) (hasError bool) {\n\tif len(r.GetType()) == 0 {\n\t\thasError = true\n\t\tlog.Errorln(\"type is required in metric config\")\n\t}\n\tif len(r.GetResourcePATH(\"\")) == 0 {\n\t\thasError = true\n\t\tlog.Errorln(\"resource path is required in metric config\")\n\t}\n\tif r.GetDecoder() == nil {\n\t\thasError = true\n\t\tlog.Errorln(\"decoder is required in metric config\")\n\t} else if r.GetDecoder().Validate() {\n\t\thasError = true\n\t}\n\tif r.GetAuth(nil) != nil {\n\t\tif r.GetAuth(nil).Validate() {\n\t\t\thasError = true\n\t\t}\n\t}\n\tfor _, mtrDef := range r.GetMetricDefs() {\n\t\tif mtrDef.Validate() {\n\t\t\thasError = true\n\t\t}\n\t}\n\treturn hasError\n}", "func (e EndpointResource) Validate() error {\n\treturn validator.New().Struct(e)\n}", "func IsResourceQuotaScopeValidForResource(scope corev1.ResourceQuotaScope, resource string) bool {\n\tswitch scope {\n\tcase corev1.ResourceQuotaScopeTerminating, corev1.ResourceQuotaScopeNotTerminating, corev1.ResourceQuotaScopeNotBestEffort, corev1.ResourceQuotaScopePriorityClass:\n\t\treturn podObjectCountQuotaResources.Has(resource) || podComputeQuotaResources.Has(resource)\n\tcase corev1.ResourceQuotaScopeBestEffort:\n\t\treturn podObjectCountQuotaResources.Has(resource)\n\tdefault:\n\t\treturn true\n\t}\n}", "func isResourceReady(dynamicClient dynamic.Interface, obj *MetaResource) (bool, error) {\n\t// get the resource's name, namespace and gvr\n\tname := obj.Name\n\tnamespace := obj.Namespace\n\tgvk := obj.GroupVersionKind()\n\tgvr, _ := meta.UnsafeGuessKindToResource(gvk)\n\t// use the helper functions to convert the resource to a KResource duck\n\ttif := &duck.TypedInformerFactory{Client: dynamicClient, Type: &duckv1alpha1.KResource{}}\n\t_, lister, err := tif.Get(gvr)\n\tif err != nil {\n\t\t// Return error to stop the polling.\n\t\treturn false, err\n\t}\n\tuntyped, err := lister.ByNamespace(namespace).Get(name)\n\tif 
k8serrors.IsNotFound(err) {\n\t\t// Return false as we are not done yet.\n\t\t// We swallow the error to keep on polling.\n\t\t// It should only happen if we wait for the auto-created resources, like default Broker.\n\t\treturn false, nil\n\t} else if err != nil {\n\t\t// Return error to stop the polling.\n\t\treturn false, err\n\t}\n\tkr := untyped.(*duckv1alpha1.KResource)\n\treturn kr.Status.GetCondition(duckv1alpha1.ConditionReady).IsTrue(), nil\n}", "func validateResourceList(resourceList core.ResourceList, upperBound core.ResourceList, fldPath *field.Path) field.ErrorList {\n\tallErrs := field.ErrorList{}\n\tfor resourceName, quantity := range resourceList {\n\t\tresPath := fldPath.Key(string(resourceName))\n\t\t// Validate resource name.\n\t\tallErrs = append(allErrs, validateResourceName(&resourceName, resPath)...)\n\t\t// Validate resource quantity.\n\t\tallErrs = append(allErrs, corevalidation.ValidateResourceQuantityValue(string(resourceName), quantity, resPath)...)\n\t\tif upperBound != nil {\n\t\t\t// Check that request <= limit.\n\t\t\tupperBoundQuantity, exists := upperBound[resourceName]\n\t\t\tif exists && quantity.Cmp(upperBoundQuantity) > 0 {\n\t\t\t\tallErrs = append(allErrs, field.Invalid(fldPath, quantity.String(),\n\t\t\t\t\t\"must be less than or equal to the upper bound\"))\n\t\t\t}\n\t\t}\n\t}\n\treturn allErrs\n}", "func CheckResource(nsId string, resourceType string, resourceId string) (bool, error) {\n\n\t// Check parameters' emptiness\n\tif nsId == \"\" {\n\t\terr := fmt.Errorf(\"CheckResource failed; nsId given is null.\")\n\t\treturn false, err\n\t} else if resourceType == \"\" {\n\t\terr := fmt.Errorf(\"CheckResource failed; resourceType given is null.\")\n\t\treturn false, err\n\t} else if resourceId == \"\" {\n\t\terr := fmt.Errorf(\"CheckResource failed; resourceId given is null.\")\n\t\treturn false, err\n\t}\n\n\t// Check resourceType's validity\n\tif resourceType == common.StrImage ||\n\t\tresourceType == common.StrSSHKey 
||\n\t\tresourceType == common.StrSpec ||\n\t\tresourceType == common.StrVNet ||\n\t\tresourceType == common.StrSecurityGroup {\n\t\t//resourceType == \"subnet\" ||\n\t\t//resourceType == \"publicIp\" ||\n\t\t//resourceType == \"vNic\" {\n\t\t// continue\n\t} else {\n\t\terr := fmt.Errorf(\"invalid resource type\")\n\t\treturn false, err\n\t}\n\n\terr := common.CheckString(nsId)\n\tif err != nil {\n\t\tcommon.CBLog.Error(err)\n\t\treturn false, err\n\t}\n\n\terr = common.CheckString(resourceId)\n\tif err != nil {\n\t\tcommon.CBLog.Error(err)\n\t\treturn false, err\n\t}\n\n\tfmt.Println(\"[Check resource] \" + resourceType + \", \" + resourceId)\n\n\tkey := common.GenResourceKey(nsId, resourceType, resourceId)\n\t//fmt.Println(key)\n\n\tkeyValue, err := common.CBStore.Get(key)\n\tif err != nil {\n\t\tcommon.CBLog.Error(err)\n\t\treturn false, err\n\t}\n\tif keyValue != nil {\n\t\treturn true, nil\n\t}\n\treturn false, nil\n\n}", "func matchesSelector(req *runtimehooksv1.GeneratePatchesRequestItem, templateVariables map[string]apiextensionsv1.JSON, selector clusterv1.PatchSelector) bool {\n\tgvk := req.Object.Object.GetObjectKind().GroupVersionKind()\n\n\t// Check if the apiVersion and kind are matching.\n\tif gvk.GroupVersion().String() != selector.APIVersion {\n\t\treturn false\n\t}\n\tif gvk.Kind != selector.Kind {\n\t\treturn false\n\t}\n\n\t// Check if the request is for an InfrastructureCluster.\n\tif selector.MatchResources.InfrastructureCluster {\n\t\t// Cluster.spec.infrastructureRef holds the InfrastructureCluster.\n\t\tif req.HolderReference.Kind == \"Cluster\" && req.HolderReference.FieldPath == \"spec.infrastructureRef\" {\n\t\t\treturn true\n\t\t}\n\t}\n\n\t// Check if the request is for a ControlPlane or the InfrastructureMachineTemplate of a ControlPlane.\n\tif selector.MatchResources.ControlPlane {\n\t\t// Cluster.spec.controlPlaneRef holds the ControlPlane.\n\t\tif req.HolderReference.Kind == \"Cluster\" && req.HolderReference.FieldPath == 
\"spec.controlPlaneRef\" {\n\t\t\treturn true\n\t\t}\n\t\t// *.spec.machineTemplate.infrastructureRef holds the InfrastructureMachineTemplate of a ControlPlane.\n\t\t// Note: this field path is only used in this context.\n\t\tif req.HolderReference.FieldPath == strings.Join(contract.ControlPlane().MachineTemplate().InfrastructureRef().Path(), \".\") {\n\t\t\treturn true\n\t\t}\n\t}\n\n\t// Check if the request is for a BootstrapConfigTemplate or an InfrastructureMachineTemplate\n\t// of one of the configured MachineDeploymentClasses.\n\tif selector.MatchResources.MachineDeploymentClass != nil {\n\t\t// MachineDeployment.spec.template.spec.bootstrap.configRef or\n\t\t// MachineDeployment.spec.template.spec.infrastructureRef holds the BootstrapConfigTemplate or\n\t\t// InfrastructureMachineTemplate.\n\t\tif req.HolderReference.Kind == \"MachineDeployment\" &&\n\t\t\t(req.HolderReference.FieldPath == \"spec.template.spec.bootstrap.configRef\" ||\n\t\t\t\treq.HolderReference.FieldPath == \"spec.template.spec.infrastructureRef\") {\n\t\t\t// Read the builtin.machineDeployment.class variable.\n\t\t\ttemplateMDClassJSON, err := patchvariables.GetVariableValue(templateVariables, \"builtin.machineDeployment.class\")\n\n\t\t\t// If the builtin variable could be read.\n\t\t\tif err == nil {\n\t\t\t\t// If templateMDClass matches one of the configured MachineDeploymentClasses.\n\t\t\t\tfor _, mdClass := range selector.MatchResources.MachineDeploymentClass.Names {\n\t\t\t\t\t// We have to quote mdClass as templateMDClassJSON is a JSON string (e.g. 
\"default-worker\").\n\t\t\t\t\tif mdClass == \"*\" || string(templateMDClassJSON.Raw) == strconv.Quote(mdClass) {\n\t\t\t\t\t\treturn true\n\t\t\t\t\t}\n\t\t\t\t\tunquoted, _ := strconv.Unquote(string(templateMDClassJSON.Raw))\n\t\t\t\t\tif strings.HasPrefix(mdClass, \"*\") && strings.HasSuffix(unquoted, strings.TrimPrefix(mdClass, \"*\")) {\n\t\t\t\t\t\treturn true\n\t\t\t\t\t}\n\t\t\t\t\tif strings.HasSuffix(mdClass, \"*\") && strings.HasPrefix(unquoted, strings.TrimSuffix(mdClass, \"*\")) {\n\t\t\t\t\t\treturn true\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\t// Check if the request is for a BootstrapConfigTemplate or an InfrastructureMachinePoolTemplate\n\t// of one of the configured MachinePoolClasses.\n\tif selector.MatchResources.MachinePoolClass != nil {\n\t\tif req.HolderReference.Kind == \"MachinePool\" &&\n\t\t\t(req.HolderReference.FieldPath == \"spec.template.spec.bootstrap.configRef\" ||\n\t\t\t\treq.HolderReference.FieldPath == \"spec.template.spec.infrastructureRef\") {\n\t\t\t// Read the builtin.machinePool.class variable.\n\t\t\ttemplateMPClassJSON, err := patchvariables.GetVariableValue(templateVariables, \"builtin.machinePool.class\")\n\n\t\t\t// If the builtin variable could be read.\n\t\t\tif err == nil {\n\t\t\t\t// If templateMPClass matches one of the configured MachinePoolClasses.\n\t\t\t\tfor _, mpClass := range selector.MatchResources.MachinePoolClass.Names {\n\t\t\t\t\t// We have to quote mpClass as templateMPClassJSON is a JSON string (e.g. 
\"default-worker\").\n\t\t\t\t\tif mpClass == \"*\" || string(templateMPClassJSON.Raw) == strconv.Quote(mpClass) {\n\t\t\t\t\t\treturn true\n\t\t\t\t\t}\n\t\t\t\t\tunquoted, _ := strconv.Unquote(string(templateMPClassJSON.Raw))\n\t\t\t\t\tif strings.HasPrefix(mpClass, \"*\") && strings.HasSuffix(unquoted, strings.TrimPrefix(mpClass, \"*\")) {\n\t\t\t\t\t\treturn true\n\t\t\t\t\t}\n\t\t\t\t\tif strings.HasSuffix(mpClass, \"*\") && strings.HasPrefix(unquoted, strings.TrimSuffix(mpClass, \"*\")) {\n\t\t\t\t\t\treturn true\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\treturn false\n}", "func (s *RedisSpec) ObserveSelectors(rsrc interface{}) []ResourceSelector {\n\tr := rsrc.(*AirflowCluster)\n\tselector := selectorLabels(r, ValueAirflowComponentRedis)\n\trsrcSelectos := []ResourceSelector{\n\t\t{&resources.StatefulSet{}, selector},\n\t\t{&resources.Service{}, selector},\n\t\t{&resources.Secret{}, selector},\n\t\t{&resources.PodDisruptionBudget{}, selector},\n\t}\n\t//if s.VolumeClaimTemplate != nil {\n\t//\trsrcSelectos = append(rsrcSelectos, ResourceSelector{s.VolumeClaimTemplate, nil})\n\t//}\n\treturn rsrcSelectos\n}", "func validateFromResourceType(resourceType string) (string, error) {\n\tname, err := k8s.CanonicalResourceNameFromFriendlyName(resourceType)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tif name == k8s.Authority {\n\t\treturn \"\", errors.New(\"cannot query traffic --from an authority\")\n\t}\n\treturn name, nil\n}", "func ResourceOwnedBy(owner runtime.Object) Func {\n\treturn func(obj runtime.Object) bool {\n\t\treturn metav1.IsControlledBy(obj.(metav1.Object), owner.(metav1.Object))\n\t}\n}", "func TestValidateResource(t *testing.T) {\n\trunPolicyPackIntegrationTest(t, \"validate_resource\", NodeJS, nil, []policyTestScenario{\n\t\t// Test scenario 1: no resources.\n\t\t{\n\t\t\tWantErrors: nil,\n\t\t},\n\t\t// Test scenario 2: no violations.\n\t\t{\n\t\t\tWantErrors: nil,\n\t\t},\n\t\t// Test scenario 3: violates the first 
policy.\n\t\t{\n\t\t\tWantErrors: []string{\n\t\t\t\t\"[mandatory] validate-resource-test-policy v0.0.1 dynamic-no-state-with-value-1 (pulumi-nodejs:dynamic:Resource: a)\",\n\t\t\t\t\"Prohibits setting state to 1 on dynamic resources.\",\n\t\t\t\t\"'state' must not have the value 1.\",\n\t\t\t},\n\t\t},\n\t\t// Test scenario 4: violates the second policy.\n\t\t{\n\t\t\tWantErrors: []string{\n\t\t\t\t\"[mandatory] validate-resource-test-policy v0.0.1 dynamic-no-state-with-value-2 (pulumi-nodejs:dynamic:Resource: b)\",\n\t\t\t\t\"Prohibits setting state to 2 on dynamic resources.\",\n\t\t\t\t\"'state' must not have the value 2.\",\n\t\t\t},\n\t\t},\n\t\t// Test scenario 5: violates the first validation function of the third policy.\n\t\t{\n\t\t\tWantErrors: []string{\n\t\t\t\t\"[mandatory] validate-resource-test-policy v0.0.1 dynamic-no-state-with-value-3-or-4 (pulumi-nodejs:dynamic:Resource: c)\",\n\t\t\t\t\"Prohibits setting state to 3 or 4 on dynamic resources.\",\n\t\t\t\t\"'state' must not have the value 3.\",\n\t\t\t},\n\t\t},\n\t\t// Test scenario 6: violates the second validation function of the third policy.\n\t\t{\n\t\t\tWantErrors: []string{\n\t\t\t\t\"[mandatory] validate-resource-test-policy v0.0.1 dynamic-no-state-with-value-3-or-4 (pulumi-nodejs:dynamic:Resource: d)\",\n\t\t\t\t\"Prohibits setting state to 3 or 4 on dynamic resources.\",\n\t\t\t\t\"'state' must not have the value 4.\",\n\t\t\t},\n\t\t},\n\t\t// Test scenario 7: violates the fourth policy.\n\t\t{\n\t\t\tWantErrors: []string{\n\t\t\t\t\"[mandatory] validate-resource-test-policy v0.0.1 randomuuid-no-keepers (random:index/randomUuid:RandomUuid: r1)\",\n\t\t\t\t\"Prohibits creating a RandomUuid without any 'keepers'.\",\n\t\t\t\t\"RandomUuid must not have an empty 'keepers'.\",\n\t\t\t},\n\t\t},\n\t\t// Test scenario 8: no violations.\n\t\t{\n\t\t\tWantErrors: nil,\n\t\t},\n\t\t// Test scenario 9: violates the fifth policy.\n\t\t{\n\t\t\tWantErrors: []string{\n\t\t\t\t\"[mandatory] 
validate-resource-test-policy v0.0.1 dynamic-no-state-with-value-5 (pulumi-nodejs:dynamic:Resource: e)\",\n\t\t\t\t\"Prohibits setting state to 5 on dynamic resources.\",\n\t\t\t\t\"'state' must not have the value 5.\",\n\t\t\t},\n\t\t},\n\t\t// Test scenario 10: no violations.\n\t\t{\n\t\t\tWantErrors: nil,\n\t\t},\n\t\t// Test scenario 11: no violations.\n\t\t// Test the ability to send large gRPC messages (>4mb).\n\t\t// Issue: https://github.com/pulumi/pulumi/issues/4155\n\t\t{\n\t\t\tWantErrors: nil,\n\t\t},\n\t})\n}", "func (s *selection) validate() error {\n\tif s.hasAntiAffinityLabel() && s.hasPreferAntiAffinityLabel() {\n\t\treturn errors.New(\"invalid selection: both antiAffinityLabel and preferAntiAffinityLabel policies can not be together\")\n\t}\n\treturn nil\n}", "func checkResource(config interface{}, resource *unstructured.Unstructured) (bool, error) {\n\n\t// we are checking if config is a subset of resource with default pattern\n\tpath, err := validateResourceWithPattern(resource.Object, config)\n\tif err != nil {\n\t\tglog.V(4).Infof(\"config not a subset of resource. 
failed at path %s: %v\", path, err)\n\t\treturn false, err\n\t}\n\treturn true, nil\n}", "func (o *KubeOptions) Validate() error {\n\n\treturn nil\n}", "func (o *KubeOptions) Validate() error {\n\n\treturn nil\n}", "func (sel *AccessSelector) Validate() error {\n\tif err := validation.Validate(&sel.ViewSelector); err != nil {\n\t\treturn errors.Wrap(err, \"validate ViewSelector\")\n\t}\n\t{\n\t\tfields := &sel.FieldSelector\n\t\tif err := validation.ValidateStruct(\n\t\t\tfields,\n\t\t\tvalidation.Field(&fields.Time, validation.Required),\n\t\t\tvalidation.Field(&fields.CodeRecordID, validation.Required),\n\t\t); err != nil {\n\t\t\treturn errors.Wrap(err, \"validate FieldSelector\")\n\t\t}\n\t}\n\treturn nil\n}", "func (t BatchJobReplicateResourceType) Validate() error {\n\tswitch t {\n\tcase BatchJobReplicateResourceMinIO:\n\tdefault:\n\t\treturn errInvalidArgument\n\t}\n\treturn nil\n}", "func (m LabelSelectorOperator) ContextValidate(ctx context.Context, formats strfmt.Registry) error {\n\treturn nil\n}", "func (s *OpenconfigOfficeAp_System_Logging_RemoteServers_RemoteServer_Selectors) Validate() error {\n\tif err := ytypes.Validate(SchemaTree[\"OpenconfigOfficeAp_System_Logging_RemoteServers_RemoteServer_Selectors\"], s); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}", "func CheckRequest(quotas []corev1.ResourceQuota, a admission.Attributes, evaluator quota.Evaluator,\n\tlimited []resourcequotaapi.LimitedResource) ([]corev1.ResourceQuota, error) {\n\tif !evaluator.Handles(a) {\n\t\treturn quotas, nil\n\t}\n\n\t// if we have limited resources enabled for this resource, always calculate usage\n\tinputObject := a.GetObject()\n\n\t// Check if object matches AdmissionConfiguration matchScopes\n\tlimitedScopes, err := getMatchedLimitedScopes(evaluator, inputObject, limited)\n\tif err != nil {\n\t\treturn quotas, nil\n\t}\n\n\t// determine the set of resource names that must exist in a covering quota\n\tlimitedResourceNames := 
[]corev1.ResourceName{}\n\tlimitedResources := filterLimitedResourcesByGroupResource(limited, a.GetResource().GroupResource())\n\tif len(limitedResources) > 0 {\n\t\tdeltaUsage, err := evaluator.Usage(inputObject)\n\t\tif err != nil {\n\t\t\treturn quotas, err\n\t\t}\n\t\tlimitedResourceNames = limitedByDefault(deltaUsage, limitedResources)\n\t}\n\tlimitedResourceNamesSet := quota.ToSet(limitedResourceNames)\n\n\t// find the set of quotas that are pertinent to this request\n\t// reject if we match the quota, but usage is not calculated yet\n\t// reject if the input object does not satisfy quota constraints\n\t// if there are no pertinent quotas, we can just return\n\tinterestingQuotaIndexes := []int{}\n\t// track the cumulative set of resources that were required across all quotas\n\t// this is needed to know if we have satisfied any constraints where consumption\n\t// was limited by default.\n\trestrictedResourcesSet := sets.String{}\n\trestrictedScopes := []corev1.ScopedResourceSelectorRequirement{}\n\tfor i := range quotas {\n\t\tresourceQuota := quotas[i]\n\t\tscopeSelectors := getScopeSelectorsFromQuota(resourceQuota)\n\t\tlocalRestrictedScopes, err := evaluator.MatchingScopes(inputObject, scopeSelectors)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"error matching scopes of quota %s, err: %v\", resourceQuota.Name, err)\n\t\t}\n\t\trestrictedScopes = append(restrictedScopes, localRestrictedScopes...)\n\n\t\tmatch, err := evaluator.Matches(&resourceQuota, inputObject)\n\t\tif err != nil {\n\t\t\tklog.ErrorS(err, \"Error occurred while matching resource quota against input object\",\n\t\t\t\t\"resourceQuota\", resourceQuota)\n\t\t\treturn quotas, err\n\t\t}\n\t\tif !match {\n\t\t\tcontinue\n\t\t}\n\n\t\thardResources := quota.ResourceNames(resourceQuota.Status.Hard)\n\t\trestrictedResources := evaluator.MatchingResources(hardResources)\n\t\tif err := evaluator.Constraints(restrictedResources, inputObject); err != nil {\n\t\t\treturn nil, 
admission.NewForbidden(a, fmt.Errorf(\"failed quota: %s: %v\", resourceQuota.Name, err))\n\t\t}\n\t\tif !hasUsageStats(&resourceQuota, restrictedResources) {\n\t\t\treturn nil, admission.NewForbidden(a, fmt.Errorf(\"status unknown for quota: %s, resources: %s\", resourceQuota.Name, prettyPrintResourceNames(restrictedResources)))\n\t\t}\n\t\tinterestingQuotaIndexes = append(interestingQuotaIndexes, i)\n\t\tlocalRestrictedResourcesSet := quota.ToSet(restrictedResources)\n\t\trestrictedResourcesSet.Insert(localRestrictedResourcesSet.List()...)\n\t}\n\n\t// Usage of some resources cannot be counted in isolation. For example, when\n\t// the resource represents a number of unique references to external\n\t// resource. In such a case an evaluator needs to process other objects in\n\t// the same namespace which needs to be known.\n\tnamespace := a.GetNamespace()\n\tif accessor, err := meta.Accessor(inputObject); namespace != \"\" && err == nil {\n\t\tif accessor.GetNamespace() == \"\" {\n\t\t\taccessor.SetNamespace(namespace)\n\t\t}\n\t}\n\t// there is at least one quota that definitely matches our object\n\t// as a result, we need to measure the usage of this object for quota\n\t// on updates, we need to subtract the previous measured usage\n\t// if usage shows no change, just return since it has no impact on quota\n\tdeltaUsage, err := evaluator.Usage(inputObject)\n\tif err != nil {\n\t\treturn quotas, err\n\t}\n\n\t// ensure that usage for input object is never negative (this would mean a resource made a negative resource requirement)\n\tif negativeUsage := quota.IsNegative(deltaUsage); len(negativeUsage) > 0 {\n\t\treturn nil, admission.NewForbidden(a, fmt.Errorf(\"quota usage is negative for resource(s): %s\", prettyPrintResourceNames(negativeUsage)))\n\t}\n\n\tif admission.Update == a.GetOperation() {\n\t\tprevItem := a.GetOldObject()\n\t\tif prevItem == nil {\n\t\t\treturn nil, admission.NewForbidden(a, fmt.Errorf(\"unable to get previous usage since prior version 
of object was not found\"))\n\t\t}\n\n\t\t// if we can definitively determine that this is not a case of \"create on update\",\n\t\t// then charge based on the delta. Otherwise, bill the maximum\n\t\tmetadata, err := meta.Accessor(prevItem)\n\t\tif err == nil && len(metadata.GetResourceVersion()) > 0 {\n\t\t\tprevUsage, innerErr := evaluator.Usage(prevItem)\n\t\t\tif innerErr != nil {\n\t\t\t\treturn quotas, innerErr\n\t\t\t}\n\t\t\tif feature.DefaultFeatureGate.Enabled(features.InPlacePodVerticalScaling) {\n\t\t\t\t// allow negative usage for pods as pod resources can increase or decrease\n\t\t\t\tif a.GetResource().GroupResource() == corev1.Resource(\"pods\") {\n\t\t\t\t\tdeltaUsage = quota.Subtract(deltaUsage, prevUsage)\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tdeltaUsage = quota.SubtractWithNonNegativeResult(deltaUsage, prevUsage)\n\t\t\t}\n\t\t}\n\t}\n\n\t// ignore items in deltaUsage with zero usage\n\tdeltaUsage = quota.RemoveZeros(deltaUsage)\n\t// if there is no remaining non-zero usage, short-circuit and return\n\tif len(deltaUsage) == 0 {\n\t\treturn quotas, nil\n\t}\n\n\t// verify that for every resource that had limited by default consumption\n\t// enabled that there was a corresponding quota that covered its use.\n\t// if not, we reject the request.\n\thasNoCoveringQuota := limitedResourceNamesSet.Difference(restrictedResourcesSet)\n\tif len(hasNoCoveringQuota) > 0 {\n\t\treturn quotas, admission.NewForbidden(a, fmt.Errorf(\"insufficient quota to consume: %v\", strings.Join(hasNoCoveringQuota.List(), \",\")))\n\t}\n\n\t// verify that for every scope that had limited access enabled\n\t// that there was a corresponding quota that covered it.\n\t// if not, we reject the request.\n\tscopesHasNoCoveringQuota, err := evaluator.UncoveredQuotaScopes(limitedScopes, restrictedScopes)\n\tif err != nil {\n\t\treturn quotas, err\n\t}\n\tif len(scopesHasNoCoveringQuota) > 0 {\n\t\treturn quotas, fmt.Errorf(\"insufficient quota to match these scopes: %v\", 
scopesHasNoCoveringQuota)\n\t}\n\n\tif len(interestingQuotaIndexes) == 0 {\n\t\treturn quotas, nil\n\t}\n\n\toutQuotas, err := copyQuotas(quotas)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tfor _, index := range interestingQuotaIndexes {\n\t\tresourceQuota := outQuotas[index]\n\n\t\thardResources := quota.ResourceNames(resourceQuota.Status.Hard)\n\t\trequestedUsage := quota.Mask(deltaUsage, hardResources)\n\t\tnewUsage := quota.Add(resourceQuota.Status.Used, requestedUsage)\n\t\tmaskedNewUsage := quota.Mask(newUsage, quota.ResourceNames(requestedUsage))\n\n\t\tif allowed, exceeded := quota.LessThanOrEqual(maskedNewUsage, resourceQuota.Status.Hard); !allowed {\n\t\t\tfailedRequestedUsage := quota.Mask(requestedUsage, exceeded)\n\t\t\tfailedUsed := quota.Mask(resourceQuota.Status.Used, exceeded)\n\t\t\tfailedHard := quota.Mask(resourceQuota.Status.Hard, exceeded)\n\t\t\treturn nil, admission.NewForbidden(a,\n\t\t\t\tfmt.Errorf(\"exceeded quota: %s, requested: %s, used: %s, limited: %s\",\n\t\t\t\t\tresourceQuota.Name,\n\t\t\t\t\tprettyPrint(failedRequestedUsage),\n\t\t\t\t\tprettyPrint(failedUsed),\n\t\t\t\t\tprettyPrint(failedHard)))\n\t\t}\n\n\t\t// update to the new usage number\n\t\toutQuotas[index].Status.Used = newUsage\n\t}\n\n\treturn outQuotas, nil\n}", "func (rs *PodAutoscalerSpec) Validate(ctx context.Context) *apis.FieldError {\n\tif equality.Semantic.DeepEqual(rs, &PodAutoscalerSpec{}) {\n\t\treturn apis.ErrMissingField(apis.CurrentField)\n\t}\n\terrs := serving.ValidateNamespacedObjectReference(&rs.ScaleTargetRef).ViaField(\"scaleTargetRef\")\n\terrs = errs.Also(rs.ContainerConcurrency.Validate(ctx).\n\t\tViaField(\"containerConcurrency\"))\n\treturn errs.Also(validateSKSFields(ctx, rs))\n}", "func ValidatePodSpecNodeSelector(nodeSelector map[string]string, fldPath *field.Path) field.ErrorList {\n\treturn unversionedvalidation.ValidateLabels(nodeSelector, fldPath)\n}", "func ValidateSplunkCustomResource(cr *v1alpha1.SplunkEnterprise) error {\n\t// 
cluster sanity checks\n\tif cr.Spec.Topology.SearchHeads > 0 && cr.Spec.Topology.Indexers <= 0 {\n\t\treturn errors.New(\"You must specify how many indexers the cluster should have\")\n\t}\n\tif cr.Spec.Topology.SearchHeads <= 0 && cr.Spec.Topology.Indexers > 0 {\n\t\treturn errors.New(\"You must specify how many search heads the cluster should have\")\n\t}\n\tif cr.Spec.Topology.Indexers > 0 && cr.Spec.Topology.SearchHeads > 0 && cr.Spec.LicenseURL == \"\" {\n\t\treturn errors.New(\"You must provide a license to create a cluster\")\n\t}\n\n\t// default to using a single standalone instance\n\tif cr.Spec.Topology.SearchHeads <= 0 && cr.Spec.Topology.Indexers <= 0 {\n\t\tif cr.Spec.Topology.Standalones <= 0 {\n\t\t\tcr.Spec.Topology.Standalones = 1\n\t\t}\n\t}\n\n\t// default to a single spark worker\n\tif cr.Spec.EnableDFS && cr.Spec.Topology.SparkWorkers <= 0 {\n\t\tcr.Spec.Topology.SparkWorkers = 1\n\t}\n\n\t// ImagePullPolicy\n\tif cr.Spec.ImagePullPolicy == \"\" {\n\t\tcr.Spec.ImagePullPolicy = os.Getenv(\"IMAGE_PULL_POLICY\")\n\t}\n\tswitch cr.Spec.ImagePullPolicy {\n\tcase \"\":\n\t\tcr.Spec.ImagePullPolicy = \"IfNotPresent\"\n\t\tbreak\n\tcase \"Always\":\n\t\tbreak\n\tcase \"IfNotPresent\":\n\t\tbreak\n\tdefault:\n\t\treturn fmt.Errorf(\"ImagePullPolicy must be one of \\\"Always\\\" or \\\"IfNotPresent\\\"; value=\\\"%s\\\"\",\n\t\t\tcr.Spec.ImagePullPolicy)\n\t}\n\n\t// SchedulerName\n\tif cr.Spec.SchedulerName == \"\" {\n\t\tcr.Spec.SchedulerName = \"default-scheduler\"\n\t}\n\n\treturn nil\n}", "func IsResourceNamespaced(kind string) bool {\n\tswitch kind {\n\tcase 
\"Namespace\",\n\t\t\"Node\",\n\t\t\"PersistentVolume\",\n\t\t\"PodSecurityPolicy\",\n\t\t\"CertificateSigningRequest\",\n\t\t\"ClusterRoleBinding\",\n\t\t\"ClusterRole\",\n\t\t\"VolumeAttachment\",\n\t\t\"StorageClass\",\n\t\t\"CSIDriver\",\n\t\t\"CSINode\",\n\t\t\"ValidatingWebhookConfiguration\",\n\t\t\"MutatingWebhookConfiguration\",\n\t\t\"CustomResourceDefinition\",\n\t\t\"PriorityClass\",\n\t\t\"RuntimeClass\":\n\t\treturn false\n\tdefault:\n\t\treturn true\n\t}\n}", "func (r deleteReq) Validate() error {\n\tif len(r.ServiceAccountID) == 0 {\n\t\treturn fmt.Errorf(\"the service account ID cannot be empty\")\n\t}\n\treturn nil\n}", "func resourceVolterraK8SPodSecurityPolicyCreate(d *schema.ResourceData, meta interface{}) error {\n\tclient := meta.(*APIClient)\n\n\tcreateMeta := &ves_io_schema.ObjectCreateMetaType{}\n\tcreateSpec := &ves_io_schema_k8s_pod_security_policy.CreateSpecType{}\n\tcreateReq := &ves_io_schema_k8s_pod_security_policy.CreateRequest{\n\t\tMetadata: createMeta,\n\t\tSpec: createSpec,\n\t}\n\n\tif v, ok := d.GetOk(\"annotations\"); ok && !isIntfNil(v) {\n\n\t\tms := map[string]string{}\n\n\t\tfor k, v := range v.(map[string]interface{}) {\n\t\t\tval := v.(string)\n\t\t\tms[k] = val\n\t\t}\n\t\tcreateMeta.Annotations = ms\n\t}\n\n\tif v, ok := d.GetOk(\"description\"); ok && !isIntfNil(v) {\n\t\tcreateMeta.Description =\n\t\t\tv.(string)\n\t}\n\n\tif v, ok := d.GetOk(\"disable\"); ok && !isIntfNil(v) {\n\t\tcreateMeta.Disable =\n\t\t\tv.(bool)\n\t}\n\n\tif v, ok := d.GetOk(\"labels\"); ok && !isIntfNil(v) {\n\n\t\tms := map[string]string{}\n\n\t\tfor k, v := range v.(map[string]interface{}) {\n\t\t\tval := v.(string)\n\t\t\tms[k] = val\n\t\t}\n\t\tcreateMeta.Labels = ms\n\t}\n\n\tif v, ok := d.GetOk(\"name\"); ok && !isIntfNil(v) {\n\t\tcreateMeta.Name =\n\t\t\tv.(string)\n\t}\n\n\tif v, ok := d.GetOk(\"namespace\"); ok && !isIntfNil(v) {\n\t\tcreateMeta.Namespace 
=\n\t\t\tv.(string)\n\t}\n\n\t//config_method_choice\n\n\tconfigMethodChoiceTypeFound := false\n\n\tif v, ok := d.GetOk(\"psp_spec\"); ok && !configMethodChoiceTypeFound {\n\n\t\tconfigMethodChoiceTypeFound = true\n\t\tconfigMethodChoiceInt := &ves_io_schema_k8s_pod_security_policy.CreateSpecType_PspSpec{}\n\t\tconfigMethodChoiceInt.PspSpec = &ves_io_schema_k8s_pod_security_policy.PodSecurityPolicySpecType{}\n\t\tcreateSpec.ConfigMethodChoice = configMethodChoiceInt\n\n\t\tsl := v.(*schema.Set).List()\n\t\tfor _, set := range sl {\n\t\t\tcs := set.(map[string]interface{})\n\n\t\t\tif v, ok := cs[\"allow_privilege_escalation\"]; ok && !isIntfNil(v) {\n\n\t\t\t\tconfigMethodChoiceInt.PspSpec.AllowPrivilegeEscalation = v.(bool)\n\n\t\t\t}\n\n\t\t\tallowedCapabilitiesChoiceTypeFound := false\n\n\t\t\tif v, ok := cs[\"allowed_capabilities\"]; ok && !isIntfNil(v) && !allowedCapabilitiesChoiceTypeFound {\n\n\t\t\t\tallowedCapabilitiesChoiceTypeFound = true\n\t\t\t\tallowedCapabilitiesChoiceInt := &ves_io_schema_k8s_pod_security_policy.PodSecurityPolicySpecType_AllowedCapabilities{}\n\t\t\t\tallowedCapabilitiesChoiceInt.AllowedCapabilities = &ves_io_schema_k8s_pod_security_policy.CapabilityListType{}\n\t\t\t\tconfigMethodChoiceInt.PspSpec.AllowedCapabilitiesChoice = allowedCapabilitiesChoiceInt\n\n\t\t\t\tsl := v.(*schema.Set).List()\n\t\t\t\tfor _, set := range sl {\n\t\t\t\t\tcs := set.(map[string]interface{})\n\n\t\t\t\t\tif v, ok := cs[\"capabilities\"]; ok && !isIntfNil(v) {\n\n\t\t\t\t\t\tls := make([]string, len(v.([]interface{})))\n\t\t\t\t\t\tfor i, v := range v.([]interface{}) {\n\t\t\t\t\t\t\tls[i] = v.(string)\n\t\t\t\t\t\t}\n\t\t\t\t\t\tallowedCapabilitiesChoiceInt.AllowedCapabilities.Capabilities = ls\n\n\t\t\t\t\t}\n\n\t\t\t\t}\n\n\t\t\t}\n\n\t\t\tif v, ok := cs[\"no_allowed_capabilities\"]; ok && !isIntfNil(v) && !allowedCapabilitiesChoiceTypeFound {\n\n\t\t\t\tallowedCapabilitiesChoiceTypeFound = true\n\n\t\t\t\tif v.(bool) 
{\n\t\t\t\t\tallowedCapabilitiesChoiceInt := &ves_io_schema_k8s_pod_security_policy.PodSecurityPolicySpecType_NoAllowedCapabilities{}\n\t\t\t\t\tallowedCapabilitiesChoiceInt.NoAllowedCapabilities = &ves_io_schema.Empty{}\n\t\t\t\t\tconfigMethodChoiceInt.PspSpec.AllowedCapabilitiesChoice = allowedCapabilitiesChoiceInt\n\t\t\t\t}\n\n\t\t\t}\n\n\t\t\tif v, ok := cs[\"allowed_csi_drivers\"]; ok && !isIntfNil(v) {\n\n\t\t\t\tls := make([]string, len(v.([]interface{})))\n\t\t\t\tfor i, v := range v.([]interface{}) {\n\t\t\t\t\tls[i] = v.(string)\n\t\t\t\t}\n\t\t\t\tconfigMethodChoiceInt.PspSpec.AllowedCsiDrivers = ls\n\n\t\t\t}\n\n\t\t\tif v, ok := cs[\"allowed_flex_volumes\"]; ok && !isIntfNil(v) {\n\n\t\t\t\tls := make([]string, len(v.([]interface{})))\n\t\t\t\tfor i, v := range v.([]interface{}) {\n\t\t\t\t\tls[i] = v.(string)\n\t\t\t\t}\n\t\t\t\tconfigMethodChoiceInt.PspSpec.AllowedFlexVolumes = ls\n\n\t\t\t}\n\n\t\t\tif v, ok := cs[\"allowed_host_paths\"]; ok && !isIntfNil(v) {\n\n\t\t\t\tsl := v.([]interface{})\n\t\t\t\tallowedHostPaths := make([]*ves_io_schema_k8s_pod_security_policy.HostPathType, len(sl))\n\t\t\t\tconfigMethodChoiceInt.PspSpec.AllowedHostPaths = allowedHostPaths\n\t\t\t\tfor i, set := range sl {\n\t\t\t\t\tallowedHostPaths[i] = &ves_io_schema_k8s_pod_security_policy.HostPathType{}\n\t\t\t\t\tallowedHostPathsMapStrToI := set.(map[string]interface{})\n\n\t\t\t\t\tif w, ok := allowedHostPathsMapStrToI[\"path_prefix\"]; ok && !isIntfNil(w) {\n\t\t\t\t\t\tallowedHostPaths[i].PathPrefix = w.(string)\n\t\t\t\t\t}\n\n\t\t\t\t\tif w, ok := allowedHostPathsMapStrToI[\"read_only\"]; ok && !isIntfNil(w) {\n\t\t\t\t\t\tallowedHostPaths[i].ReadOnly = w.(bool)\n\t\t\t\t\t}\n\n\t\t\t\t}\n\n\t\t\t}\n\n\t\t\tif v, ok := cs[\"allowed_proc_mounts\"]; ok && !isIntfNil(v) {\n\n\t\t\t\tls := make([]string, len(v.([]interface{})))\n\t\t\t\tfor i, v := range v.([]interface{}) {\n\t\t\t\t\tls[i] = 
v.(string)\n\t\t\t\t}\n\t\t\t\tconfigMethodChoiceInt.PspSpec.AllowedProcMounts = ls\n\n\t\t\t}\n\n\t\t\tif v, ok := cs[\"allowed_unsafe_sysctls\"]; ok && !isIntfNil(v) {\n\n\t\t\t\tls := make([]string, len(v.([]interface{})))\n\t\t\t\tfor i, v := range v.([]interface{}) {\n\t\t\t\t\tls[i] = v.(string)\n\t\t\t\t}\n\t\t\t\tconfigMethodChoiceInt.PspSpec.AllowedUnsafeSysctls = ls\n\n\t\t\t}\n\n\t\t\tif v, ok := cs[\"default_allow_privilege_escalation\"]; ok && !isIntfNil(v) {\n\n\t\t\t\tconfigMethodChoiceInt.PspSpec.DefaultAllowPrivilegeEscalation = v.(bool)\n\n\t\t\t}\n\n\t\t\tdefaultCapabilitiesChoiceTypeFound := false\n\n\t\t\tif v, ok := cs[\"default_capabilities\"]; ok && !isIntfNil(v) && !defaultCapabilitiesChoiceTypeFound {\n\n\t\t\t\tdefaultCapabilitiesChoiceTypeFound = true\n\t\t\t\tdefaultCapabilitiesChoiceInt := &ves_io_schema_k8s_pod_security_policy.PodSecurityPolicySpecType_DefaultCapabilities{}\n\t\t\t\tdefaultCapabilitiesChoiceInt.DefaultCapabilities = &ves_io_schema_k8s_pod_security_policy.CapabilityListType{}\n\t\t\t\tconfigMethodChoiceInt.PspSpec.DefaultCapabilitiesChoice = defaultCapabilitiesChoiceInt\n\n\t\t\t\tsl := v.(*schema.Set).List()\n\t\t\t\tfor _, set := range sl {\n\t\t\t\t\tcs := set.(map[string]interface{})\n\n\t\t\t\t\tif v, ok := cs[\"capabilities\"]; ok && !isIntfNil(v) {\n\n\t\t\t\t\t\tls := make([]string, len(v.([]interface{})))\n\t\t\t\t\t\tfor i, v := range v.([]interface{}) {\n\t\t\t\t\t\t\tls[i] = v.(string)\n\t\t\t\t\t\t}\n\t\t\t\t\t\tdefaultCapabilitiesChoiceInt.DefaultCapabilities.Capabilities = ls\n\n\t\t\t\t\t}\n\n\t\t\t\t}\n\n\t\t\t}\n\n\t\t\tif v, ok := cs[\"no_default_capabilities\"]; ok && !isIntfNil(v) && !defaultCapabilitiesChoiceTypeFound {\n\n\t\t\t\tdefaultCapabilitiesChoiceTypeFound = true\n\n\t\t\t\tif v.(bool) {\n\t\t\t\t\tdefaultCapabilitiesChoiceInt := &ves_io_schema_k8s_pod_security_policy.PodSecurityPolicySpecType_NoDefaultCapabilities{}\n\t\t\t\t\tdefaultCapabilitiesChoiceInt.NoDefaultCapabilities = 
&ves_io_schema.Empty{}\n\t\t\t\t\tconfigMethodChoiceInt.PspSpec.DefaultCapabilitiesChoice = defaultCapabilitiesChoiceInt\n\t\t\t\t}\n\n\t\t\t}\n\n\t\t\tdropCapabilitiesChoiceTypeFound := false\n\n\t\t\tif v, ok := cs[\"drop_capabilities\"]; ok && !isIntfNil(v) && !dropCapabilitiesChoiceTypeFound {\n\n\t\t\t\tdropCapabilitiesChoiceTypeFound = true\n\t\t\t\tdropCapabilitiesChoiceInt := &ves_io_schema_k8s_pod_security_policy.PodSecurityPolicySpecType_DropCapabilities{}\n\t\t\t\tdropCapabilitiesChoiceInt.DropCapabilities = &ves_io_schema_k8s_pod_security_policy.CapabilityListType{}\n\t\t\t\tconfigMethodChoiceInt.PspSpec.DropCapabilitiesChoice = dropCapabilitiesChoiceInt\n\n\t\t\t\tsl := v.(*schema.Set).List()\n\t\t\t\tfor _, set := range sl {\n\t\t\t\t\tcs := set.(map[string]interface{})\n\n\t\t\t\t\tif v, ok := cs[\"capabilities\"]; ok && !isIntfNil(v) {\n\n\t\t\t\t\t\tls := make([]string, len(v.([]interface{})))\n\t\t\t\t\t\tfor i, v := range v.([]interface{}) {\n\t\t\t\t\t\t\tls[i] = v.(string)\n\t\t\t\t\t\t}\n\t\t\t\t\t\tdropCapabilitiesChoiceInt.DropCapabilities.Capabilities = ls\n\n\t\t\t\t\t}\n\n\t\t\t\t}\n\n\t\t\t}\n\n\t\t\tif v, ok := cs[\"no_drop_capabilities\"]; ok && !isIntfNil(v) && !dropCapabilitiesChoiceTypeFound {\n\n\t\t\t\tdropCapabilitiesChoiceTypeFound = true\n\n\t\t\t\tif v.(bool) {\n\t\t\t\t\tdropCapabilitiesChoiceInt := &ves_io_schema_k8s_pod_security_policy.PodSecurityPolicySpecType_NoDropCapabilities{}\n\t\t\t\t\tdropCapabilitiesChoiceInt.NoDropCapabilities = &ves_io_schema.Empty{}\n\t\t\t\t\tconfigMethodChoiceInt.PspSpec.DropCapabilitiesChoice = dropCapabilitiesChoiceInt\n\t\t\t\t}\n\n\t\t\t}\n\n\t\t\tif v, ok := cs[\"forbidden_sysctls\"]; ok && !isIntfNil(v) {\n\n\t\t\t\tls := make([]string, len(v.([]interface{})))\n\t\t\t\tfor i, v := range v.([]interface{}) {\n\t\t\t\t\tls[i] = v.(string)\n\t\t\t\t}\n\t\t\t\tconfigMethodChoiceInt.PspSpec.ForbiddenSysctls = ls\n\n\t\t\t}\n\n\t\t\tfsGroupChoiceTypeFound := false\n\n\t\t\tif v, ok := 
cs[\"fs_group_strategy_options\"]; ok && !isIntfNil(v) && !fsGroupChoiceTypeFound {\n\n\t\t\t\tfsGroupChoiceTypeFound = true\n\t\t\t\tfsGroupChoiceInt := &ves_io_schema_k8s_pod_security_policy.PodSecurityPolicySpecType_FsGroupStrategyOptions{}\n\t\t\t\tfsGroupChoiceInt.FsGroupStrategyOptions = &ves_io_schema_k8s_pod_security_policy.IDStrategyOptionsType{}\n\t\t\t\tconfigMethodChoiceInt.PspSpec.FsGroupChoice = fsGroupChoiceInt\n\n\t\t\t\tsl := v.(*schema.Set).List()\n\t\t\t\tfor _, set := range sl {\n\t\t\t\t\tcs := set.(map[string]interface{})\n\n\t\t\t\t\tif v, ok := cs[\"id_ranges\"]; ok && !isIntfNil(v) {\n\n\t\t\t\t\t\tsl := v.([]interface{})\n\t\t\t\t\t\tidRanges := make([]*ves_io_schema_k8s_pod_security_policy.IDRangeType, len(sl))\n\t\t\t\t\t\tfsGroupChoiceInt.FsGroupStrategyOptions.IdRanges = idRanges\n\t\t\t\t\t\tfor i, set := range sl {\n\t\t\t\t\t\t\tidRanges[i] = &ves_io_schema_k8s_pod_security_policy.IDRangeType{}\n\t\t\t\t\t\t\tidRangesMapStrToI := set.(map[string]interface{})\n\n\t\t\t\t\t\t\tif w, ok := idRangesMapStrToI[\"max_id\"]; ok && !isIntfNil(w) {\n\t\t\t\t\t\t\t\tidRanges[i].MaxId = uint32(w.(int))\n\t\t\t\t\t\t\t}\n\n\t\t\t\t\t\t\tif w, ok := idRangesMapStrToI[\"min_id\"]; ok && !isIntfNil(w) {\n\t\t\t\t\t\t\t\tidRanges[i].MinId = uint32(w.(int))\n\t\t\t\t\t\t\t}\n\n\t\t\t\t\t\t}\n\n\t\t\t\t\t}\n\n\t\t\t\t\tif v, ok := cs[\"rule\"]; ok && !isIntfNil(v) {\n\n\t\t\t\t\t\tfsGroupChoiceInt.FsGroupStrategyOptions.Rule = v.(string)\n\n\t\t\t\t\t}\n\n\t\t\t\t}\n\n\t\t\t}\n\n\t\t\tif v, ok := cs[\"no_fs_groups\"]; ok && !isIntfNil(v) && !fsGroupChoiceTypeFound {\n\n\t\t\t\tfsGroupChoiceTypeFound = true\n\n\t\t\t\tif v.(bool) {\n\t\t\t\t\tfsGroupChoiceInt := &ves_io_schema_k8s_pod_security_policy.PodSecurityPolicySpecType_NoFsGroups{}\n\t\t\t\t\tfsGroupChoiceInt.NoFsGroups = &ves_io_schema.Empty{}\n\t\t\t\t\tconfigMethodChoiceInt.PspSpec.FsGroupChoice = fsGroupChoiceInt\n\t\t\t\t}\n\n\t\t\t}\n\n\t\t\tgroupChoiceTypeFound := false\n\n\t\t\tif v, ok 
:= cs[\"no_run_as_group\"]; ok && !isIntfNil(v) && !groupChoiceTypeFound {\n\n\t\t\t\tgroupChoiceTypeFound = true\n\n\t\t\t\tif v.(bool) {\n\t\t\t\t\tgroupChoiceInt := &ves_io_schema_k8s_pod_security_policy.PodSecurityPolicySpecType_NoRunAsGroup{}\n\t\t\t\t\tgroupChoiceInt.NoRunAsGroup = &ves_io_schema.Empty{}\n\t\t\t\t\tconfigMethodChoiceInt.PspSpec.GroupChoice = groupChoiceInt\n\t\t\t\t}\n\n\t\t\t}\n\n\t\t\tif v, ok := cs[\"run_as_group\"]; ok && !isIntfNil(v) && !groupChoiceTypeFound {\n\n\t\t\t\tgroupChoiceTypeFound = true\n\t\t\t\tgroupChoiceInt := &ves_io_schema_k8s_pod_security_policy.PodSecurityPolicySpecType_RunAsGroup{}\n\t\t\t\tgroupChoiceInt.RunAsGroup = &ves_io_schema_k8s_pod_security_policy.IDStrategyOptionsType{}\n\t\t\t\tconfigMethodChoiceInt.PspSpec.GroupChoice = groupChoiceInt\n\n\t\t\t\tsl := v.(*schema.Set).List()\n\t\t\t\tfor _, set := range sl {\n\t\t\t\t\tcs := set.(map[string]interface{})\n\n\t\t\t\t\tif v, ok := cs[\"id_ranges\"]; ok && !isIntfNil(v) {\n\n\t\t\t\t\t\tsl := v.([]interface{})\n\t\t\t\t\t\tidRanges := make([]*ves_io_schema_k8s_pod_security_policy.IDRangeType, len(sl))\n\t\t\t\t\t\tgroupChoiceInt.RunAsGroup.IdRanges = idRanges\n\t\t\t\t\t\tfor i, set := range sl {\n\t\t\t\t\t\t\tidRanges[i] = &ves_io_schema_k8s_pod_security_policy.IDRangeType{}\n\t\t\t\t\t\t\tidRangesMapStrToI := set.(map[string]interface{})\n\n\t\t\t\t\t\t\tif w, ok := idRangesMapStrToI[\"max_id\"]; ok && !isIntfNil(w) {\n\t\t\t\t\t\t\t\tidRanges[i].MaxId = uint32(w.(int))\n\t\t\t\t\t\t\t}\n\n\t\t\t\t\t\t\tif w, ok := idRangesMapStrToI[\"min_id\"]; ok && !isIntfNil(w) {\n\t\t\t\t\t\t\t\tidRanges[i].MinId = uint32(w.(int))\n\t\t\t\t\t\t\t}\n\n\t\t\t\t\t\t}\n\n\t\t\t\t\t}\n\n\t\t\t\t\tif v, ok := cs[\"rule\"]; ok && !isIntfNil(v) {\n\n\t\t\t\t\t\tgroupChoiceInt.RunAsGroup.Rule = v.(string)\n\n\t\t\t\t\t}\n\n\t\t\t\t}\n\n\t\t\t}\n\n\t\t\tif v, ok := cs[\"host_ipc\"]; ok && !isIntfNil(v) {\n\n\t\t\t\tconfigMethodChoiceInt.PspSpec.HostIpc = 
v.(bool)\n\n\t\t\t}\n\n\t\t\tif v, ok := cs[\"host_network\"]; ok && !isIntfNil(v) {\n\n\t\t\t\tconfigMethodChoiceInt.PspSpec.HostNetwork = v.(bool)\n\n\t\t\t}\n\n\t\t\tif v, ok := cs[\"host_pid\"]; ok && !isIntfNil(v) {\n\n\t\t\t\tconfigMethodChoiceInt.PspSpec.HostPid = v.(bool)\n\n\t\t\t}\n\n\t\t\tif v, ok := cs[\"host_port_ranges\"]; ok && !isIntfNil(v) {\n\n\t\t\t\tconfigMethodChoiceInt.PspSpec.HostPortRanges = v.(string)\n\n\t\t\t}\n\n\t\t\tif v, ok := cs[\"privileged\"]; ok && !isIntfNil(v) {\n\n\t\t\t\tconfigMethodChoiceInt.PspSpec.Privileged = v.(bool)\n\n\t\t\t}\n\n\t\t\tif v, ok := cs[\"read_only_root_filesystem\"]; ok && !isIntfNil(v) {\n\n\t\t\t\tconfigMethodChoiceInt.PspSpec.ReadOnlyRootFilesystem = v.(bool)\n\n\t\t\t}\n\n\t\t\truntimeClassChoiceTypeFound := false\n\n\t\t\tif v, ok := cs[\"no_runtime_class\"]; ok && !isIntfNil(v) && !runtimeClassChoiceTypeFound {\n\n\t\t\t\truntimeClassChoiceTypeFound = true\n\n\t\t\t\tif v.(bool) {\n\t\t\t\t\truntimeClassChoiceInt := &ves_io_schema_k8s_pod_security_policy.PodSecurityPolicySpecType_NoRuntimeClass{}\n\t\t\t\t\truntimeClassChoiceInt.NoRuntimeClass = &ves_io_schema.Empty{}\n\t\t\t\t\tconfigMethodChoiceInt.PspSpec.RuntimeClassChoice = runtimeClassChoiceInt\n\t\t\t\t}\n\n\t\t\t}\n\n\t\t\tif v, ok := cs[\"runtime_class\"]; ok && !isIntfNil(v) && !runtimeClassChoiceTypeFound {\n\n\t\t\t\truntimeClassChoiceTypeFound = true\n\t\t\t\truntimeClassChoiceInt := &ves_io_schema_k8s_pod_security_policy.PodSecurityPolicySpecType_RuntimeClass{}\n\t\t\t\truntimeClassChoiceInt.RuntimeClass = &ves_io_schema_k8s_pod_security_policy.RuntimeClassStrategyOptions{}\n\t\t\t\tconfigMethodChoiceInt.PspSpec.RuntimeClassChoice = runtimeClassChoiceInt\n\n\t\t\t\tsl := v.(*schema.Set).List()\n\t\t\t\tfor _, set := range sl {\n\t\t\t\t\tcs := set.(map[string]interface{})\n\n\t\t\t\t\tif v, ok := cs[\"allowed_runtime_class_names\"]; ok && !isIntfNil(v) {\n\n\t\t\t\t\t\tls := make([]string, len(v.([]interface{})))\n\t\t\t\t\t\tfor i, v 
:= range v.([]interface{}) {\n\t\t\t\t\t\t\tls[i] = v.(string)\n\t\t\t\t\t\t}\n\t\t\t\t\t\truntimeClassChoiceInt.RuntimeClass.AllowedRuntimeClassNames = ls\n\n\t\t\t\t\t}\n\n\t\t\t\t\tif v, ok := cs[\"default_runtime_class_name\"]; ok && !isIntfNil(v) {\n\n\t\t\t\t\t\truntimeClassChoiceInt.RuntimeClass.DefaultRuntimeClassName = v.(string)\n\n\t\t\t\t\t}\n\n\t\t\t\t}\n\n\t\t\t}\n\n\t\t\tseLinuxChoiceTypeFound := false\n\n\t\t\tif v, ok := cs[\"no_se_linux_options\"]; ok && !isIntfNil(v) && !seLinuxChoiceTypeFound {\n\n\t\t\t\tseLinuxChoiceTypeFound = true\n\n\t\t\t\tif v.(bool) {\n\t\t\t\t\tseLinuxChoiceInt := &ves_io_schema_k8s_pod_security_policy.PodSecurityPolicySpecType_NoSeLinuxOptions{}\n\t\t\t\t\tseLinuxChoiceInt.NoSeLinuxOptions = &ves_io_schema.Empty{}\n\t\t\t\t\tconfigMethodChoiceInt.PspSpec.SeLinuxChoice = seLinuxChoiceInt\n\t\t\t\t}\n\n\t\t\t}\n\n\t\t\tif v, ok := cs[\"se_linux_options\"]; ok && !isIntfNil(v) && !seLinuxChoiceTypeFound {\n\n\t\t\t\tseLinuxChoiceTypeFound = true\n\t\t\t\tseLinuxChoiceInt := &ves_io_schema_k8s_pod_security_policy.PodSecurityPolicySpecType_SeLinuxOptions{}\n\t\t\t\tseLinuxChoiceInt.SeLinuxOptions = &ves_io_schema_k8s_pod_security_policy.SELinuxStrategyOptions{}\n\t\t\t\tconfigMethodChoiceInt.PspSpec.SeLinuxChoice = seLinuxChoiceInt\n\n\t\t\t\tsl := v.(*schema.Set).List()\n\t\t\t\tfor _, set := range sl {\n\t\t\t\t\tcs := set.(map[string]interface{})\n\n\t\t\t\t\tif v, ok := cs[\"level\"]; ok && !isIntfNil(v) {\n\n\t\t\t\t\t\tseLinuxChoiceInt.SeLinuxOptions.Level = v.(string)\n\n\t\t\t\t\t}\n\n\t\t\t\t\tif v, ok := cs[\"role\"]; ok && !isIntfNil(v) {\n\n\t\t\t\t\t\tseLinuxChoiceInt.SeLinuxOptions.Role = v.(string)\n\n\t\t\t\t\t}\n\n\t\t\t\t\tif v, ok := cs[\"rule\"]; ok && !isIntfNil(v) {\n\n\t\t\t\t\t\tseLinuxChoiceInt.SeLinuxOptions.Rule = v.(string)\n\n\t\t\t\t\t}\n\n\t\t\t\t\tif v, ok := cs[\"type\"]; ok && !isIntfNil(v) {\n\n\t\t\t\t\t\tseLinuxChoiceInt.SeLinuxOptions.Type = v.(string)\n\n\t\t\t\t\t}\n\n\t\t\t\t\tif v, 
ok := cs[\"user\"]; ok && !isIntfNil(v) {\n\n\t\t\t\t\t\tseLinuxChoiceInt.SeLinuxOptions.User = v.(string)\n\n\t\t\t\t\t}\n\n\t\t\t\t}\n\n\t\t\t}\n\n\t\t\tsupplementalGroupChoiceTypeFound := false\n\n\t\t\tif v, ok := cs[\"no_supplemental_groups\"]; ok && !isIntfNil(v) && !supplementalGroupChoiceTypeFound {\n\n\t\t\t\tsupplementalGroupChoiceTypeFound = true\n\n\t\t\t\tif v.(bool) {\n\t\t\t\t\tsupplementalGroupChoiceInt := &ves_io_schema_k8s_pod_security_policy.PodSecurityPolicySpecType_NoSupplementalGroups{}\n\t\t\t\t\tsupplementalGroupChoiceInt.NoSupplementalGroups = &ves_io_schema.Empty{}\n\t\t\t\t\tconfigMethodChoiceInt.PspSpec.SupplementalGroupChoice = supplementalGroupChoiceInt\n\t\t\t\t}\n\n\t\t\t}\n\n\t\t\tif v, ok := cs[\"supplemental_groups\"]; ok && !isIntfNil(v) && !supplementalGroupChoiceTypeFound {\n\n\t\t\t\tsupplementalGroupChoiceTypeFound = true\n\t\t\t\tsupplementalGroupChoiceInt := &ves_io_schema_k8s_pod_security_policy.PodSecurityPolicySpecType_SupplementalGroups{}\n\t\t\t\tsupplementalGroupChoiceInt.SupplementalGroups = &ves_io_schema_k8s_pod_security_policy.IDStrategyOptionsType{}\n\t\t\t\tconfigMethodChoiceInt.PspSpec.SupplementalGroupChoice = supplementalGroupChoiceInt\n\n\t\t\t\tsl := v.(*schema.Set).List()\n\t\t\t\tfor _, set := range sl {\n\t\t\t\t\tcs := set.(map[string]interface{})\n\n\t\t\t\t\tif v, ok := cs[\"id_ranges\"]; ok && !isIntfNil(v) {\n\n\t\t\t\t\t\tsl := v.([]interface{})\n\t\t\t\t\t\tidRanges := make([]*ves_io_schema_k8s_pod_security_policy.IDRangeType, len(sl))\n\t\t\t\t\t\tsupplementalGroupChoiceInt.SupplementalGroups.IdRanges = idRanges\n\t\t\t\t\t\tfor i, set := range sl {\n\t\t\t\t\t\t\tidRanges[i] = &ves_io_schema_k8s_pod_security_policy.IDRangeType{}\n\t\t\t\t\t\t\tidRangesMapStrToI := set.(map[string]interface{})\n\n\t\t\t\t\t\t\tif w, ok := idRangesMapStrToI[\"max_id\"]; ok && !isIntfNil(w) {\n\t\t\t\t\t\t\t\tidRanges[i].MaxId = uint32(w.(int))\n\t\t\t\t\t\t\t}\n\n\t\t\t\t\t\t\tif w, ok := 
idRangesMapStrToI[\"min_id\"]; ok && !isIntfNil(w) {\n\t\t\t\t\t\t\t\tidRanges[i].MinId = uint32(w.(int))\n\t\t\t\t\t\t\t}\n\n\t\t\t\t\t\t}\n\n\t\t\t\t\t}\n\n\t\t\t\t\tif v, ok := cs[\"rule\"]; ok && !isIntfNil(v) {\n\n\t\t\t\t\t\tsupplementalGroupChoiceInt.SupplementalGroups.Rule = v.(string)\n\n\t\t\t\t\t}\n\n\t\t\t\t}\n\n\t\t\t}\n\n\t\t\tuserChoiceTypeFound := false\n\n\t\t\tif v, ok := cs[\"no_run_as_user\"]; ok && !isIntfNil(v) && !userChoiceTypeFound {\n\n\t\t\t\tuserChoiceTypeFound = true\n\n\t\t\t\tif v.(bool) {\n\t\t\t\t\tuserChoiceInt := &ves_io_schema_k8s_pod_security_policy.PodSecurityPolicySpecType_NoRunAsUser{}\n\t\t\t\t\tuserChoiceInt.NoRunAsUser = &ves_io_schema.Empty{}\n\t\t\t\t\tconfigMethodChoiceInt.PspSpec.UserChoice = userChoiceInt\n\t\t\t\t}\n\n\t\t\t}\n\n\t\t\tif v, ok := cs[\"run_as_user\"]; ok && !isIntfNil(v) && !userChoiceTypeFound {\n\n\t\t\t\tuserChoiceTypeFound = true\n\t\t\t\tuserChoiceInt := &ves_io_schema_k8s_pod_security_policy.PodSecurityPolicySpecType_RunAsUser{}\n\t\t\t\tuserChoiceInt.RunAsUser = &ves_io_schema_k8s_pod_security_policy.IDStrategyOptionsType{}\n\t\t\t\tconfigMethodChoiceInt.PspSpec.UserChoice = userChoiceInt\n\n\t\t\t\tsl := v.(*schema.Set).List()\n\t\t\t\tfor _, set := range sl {\n\t\t\t\t\tcs := set.(map[string]interface{})\n\n\t\t\t\t\tif v, ok := cs[\"id_ranges\"]; ok && !isIntfNil(v) {\n\n\t\t\t\t\t\tsl := v.([]interface{})\n\t\t\t\t\t\tidRanges := make([]*ves_io_schema_k8s_pod_security_policy.IDRangeType, len(sl))\n\t\t\t\t\t\tuserChoiceInt.RunAsUser.IdRanges = idRanges\n\t\t\t\t\t\tfor i, set := range sl {\n\t\t\t\t\t\t\tidRanges[i] = &ves_io_schema_k8s_pod_security_policy.IDRangeType{}\n\t\t\t\t\t\t\tidRangesMapStrToI := set.(map[string]interface{})\n\n\t\t\t\t\t\t\tif w, ok := idRangesMapStrToI[\"max_id\"]; ok && !isIntfNil(w) {\n\t\t\t\t\t\t\t\tidRanges[i].MaxId = uint32(w.(int))\n\t\t\t\t\t\t\t}\n\n\t\t\t\t\t\t\tif w, ok := idRangesMapStrToI[\"min_id\"]; ok && !isIntfNil(w) 
{\n\t\t\t\t\t\t\t\tidRanges[i].MinId = uint32(w.(int))\n\t\t\t\t\t\t\t}\n\n\t\t\t\t\t\t}\n\n\t\t\t\t\t}\n\n\t\t\t\t\tif v, ok := cs[\"rule\"]; ok && !isIntfNil(v) {\n\n\t\t\t\t\t\tuserChoiceInt.RunAsUser.Rule = v.(string)\n\n\t\t\t\t\t}\n\n\t\t\t\t}\n\n\t\t\t}\n\n\t\t\tif v, ok := cs[\"volumes\"]; ok && !isIntfNil(v) {\n\n\t\t\t\tls := make([]string, len(v.([]interface{})))\n\t\t\t\tfor i, v := range v.([]interface{}) {\n\t\t\t\t\tls[i] = v.(string)\n\t\t\t\t}\n\t\t\t\tconfigMethodChoiceInt.PspSpec.Volumes = ls\n\n\t\t\t}\n\n\t\t}\n\n\t}\n\n\tif v, ok := d.GetOk(\"yaml\"); ok && !configMethodChoiceTypeFound {\n\n\t\tconfigMethodChoiceTypeFound = true\n\t\tconfigMethodChoiceInt := &ves_io_schema_k8s_pod_security_policy.CreateSpecType_Yaml{}\n\n\t\tcreateSpec.ConfigMethodChoice = configMethodChoiceInt\n\n\t\tconfigMethodChoiceInt.Yaml = v.(string)\n\n\t}\n\n\tlog.Printf(\"[DEBUG] Creating Volterra K8SPodSecurityPolicy object with struct: %+v\", createReq)\n\n\tcreateK8SPodSecurityPolicyResp, err := client.CreateObject(context.Background(), ves_io_schema_k8s_pod_security_policy.ObjectType, createReq)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"error creating K8SPodSecurityPolicy: %s\", err)\n\t}\n\td.SetId(createK8SPodSecurityPolicyResp.GetObjSystemMetadata().GetUid())\n\n\treturn resourceVolterraK8SPodSecurityPolicyRead(d, meta)\n}", "func (p *plugin) Validate(req json.RawMessage) error {\n\tlog.Debugln(\"validate\", string(req))\n\n\tparsed := SpecPropertiesFormat{}\n\terr := json.Unmarshal([]byte(req), &parsed)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif parsed.Type == \"\" {\n\t\treturn fmt.Errorf(\"no-resource-type:%s\", string(req))\n\t}\n\n\tif len(parsed.Value) == 0 {\n\t\treturn fmt.Errorf(\"no-value:%s\", string(req))\n\t}\n\treturn nil\n}", "func validateK8STrigger(trigger *v1alpha1.StandardK8STrigger) error {\n\tif trigger == nil {\n\t\treturn errors.New(\"k8s trigger for can't be nil\")\n\t}\n\tif trigger.Source == nil {\n\t\treturn errors.New(\"k8s 
trigger for does not contain an absolute action\")\n\t}\n\tif trigger.GroupVersionResource.Size() == 0 {\n\t\treturn errors.New(\"must provide group, version and resource for the resource\")\n\t}\n\tswitch trigger.Operation {\n\tcase \"\", v1alpha1.Create, v1alpha1.Patch, v1alpha1.Update:\n\tdefault:\n\t\treturn errors.Errorf(\"unknown operation type %s\", string(trigger.Operation))\n\t}\n\tif trigger.Parameters != nil {\n\t\tfor i, parameter := range trigger.Parameters {\n\t\t\tif err := validateTriggerParameter(&parameter); err != nil {\n\t\t\t\treturn errors.Errorf(\"resource parameter index: %d. err: %+v\", i, err)\n\t\t\t}\n\t\t}\n\t}\n\treturn nil\n}", "func (v *validator) Validate(ctx context.Context, seed *kubermaticv1.Seed, op admissionv1.Operation) error {\n\t// We need locking to make the validation concurrency-safe\n\t// TODO: this is acceptable as request rate is low, but is it required?\n\tv.lock.Lock()\n\tdefer v.lock.Unlock()\n\n\tseeds := kubermaticv1.SeedList{}\n\terr := v.client.List(ctx, &seeds)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to get seeds: %v\", err)\n\t}\n\tseedsMap := map[string]*kubermaticv1.Seed{}\n\tfor i, s := range seeds.Items {\n\t\tseedsMap[s.Name] = &seeds.Items[i]\n\t}\n\tif op == admissionv1.Delete {\n\t\t// when a namespace is deleted, a DELETE call for all seeds in the namespace\n\t\t// is issued; this request has no .Request.Name set, so this check will make\n\t\t// sure that we exit cleanly and allow deleting namespaces without seeds\n\t\tif _, exists := seedsMap[seed.Name]; !exists && op == admissionv1.Delete {\n\t\t\treturn nil\n\t\t}\n\t\t// in case of delete request the seed is empty\n\t\tseed = seedsMap[seed.Name]\n\t}\n\n\tclient, err := v.seedClientGetter(seed)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to get client for seed %q: %v\", seed.Name, err)\n\t}\n\n\treturn v.validate(ctx, seed, client, seedsMap, op == admissionv1.Delete)\n}", "func (s 
*OpenconfigOfficeAp_System_Logging_RemoteServers_RemoteServer_Selectors_Selector_Config) Validate() error {\n\tif err := ytypes.Validate(SchemaTree[\"OpenconfigOfficeAp_System_Logging_RemoteServers_RemoteServer_Selectors_Selector_Config\"], s); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}", "func (m *CheckRightOnResourcesV4Request) Validate(formats strfmt.Registry) error {\n\tvar res []error\n\n\tif err := m.validateResourceCrns(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateRight(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}", "func (t *OpenconfigInterfaces_Interfaces_Interface_Subinterfaces_Subinterface_Ipv4) Validate(opts ...ygot.ValidationOption) error {\n\tif err := ytypes.Validate(SchemaTree[\"OpenconfigInterfaces_Interfaces_Interface_Subinterfaces_Subinterface_Ipv4\"], t, opts...); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}", "func (w *WhoCan) validate(action Action) error {\n\tif action.NonResourceURL != \"\" && action.SubResource != \"\" {\n\t\treturn fmt.Errorf(\"--subresource cannot be used with NONRESOURCEURL\")\n\t}\n\n\terr := w.namespaceValidator.Validate(action.Namespace)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"validating namespace: %v\", err)\n\t}\n\n\treturn nil\n}", "func validateK3sControlPlaneTemplateResourceSpec(s K3sControlPlaneTemplateResourceSpec, pathPrefix *field.Path) field.ErrorList {\n\treturn validateRolloutStrategy(s.RolloutStrategy, nil, pathPrefix.Child(\"rolloutStrategy\"))\n}", "func CfnRegistryPolicy_IsCfnResource(construct constructs.IConstruct) *bool {\n\t_init_.Initialize()\n\n\tvar returns *bool\n\n\t_jsii_.StaticInvoke(\n\t\t\"monocdk.aws_ecr.CfnRegistryPolicy\",\n\t\t\"isCfnResource\",\n\t\t[]interface{}{construct},\n\t\t&returns,\n\t)\n\n\treturn returns\n}", "func (o *deleteSvcOpts) Validate() error {\n\treturn nil\n}", "func (s 
*OpenconfigOfficeAp_System_Logging_RemoteServers_RemoteServer_Selectors_Selector_State) Validate() error {\n\tif err := ytypes.Validate(SchemaTree[\"OpenconfigOfficeAp_System_Logging_RemoteServers_RemoteServer_Selectors_Selector_State\"], s); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}", "func (sc selectCriterion) Validate() error {\n\tif sc.name == \"\" {\n\t\treturn fmt.Errorf(\"name is empty\")\n\t}\n\tif i := strings.IndexAny(sc.name, InvalidCriterionRunes); i == 0 {\n\t\treturn fmt.Errorf(\"name starts with invalid character '%v'\", sc.name[i])\n\t}\n\tif sc.name == SelectAll {\n\t\tif sc.tagged {\n\t\t\treturn fmt.Errorf(\"cannot use '.' with special name 'v'\", SelectAll)\n\t\t}\n\t\tif sc.negated {\n\t\t\treturn fmt.Errorf(\"cannot use '!' with special name 'v'\", SelectAll)\n\t\t}\n\t}\n\treturn nil\n}", "func validateK8sTriggerPolicy(policy *v1alpha1.K8SResourcePolicy) error {\n\tif policy == nil {\n\t\treturn nil\n\t}\n\tif policy.Labels == nil {\n\t\treturn errors.New(\"resource labels are not specified\")\n\t}\n\tif &policy.Backoff == nil {\n\t\treturn errors.New(\"backoff is not specified\")\n\t}\n\treturn nil\n}", "func (s *OpenconfigInterfaces_Interfaces_Interface_Subinterfaces_Subinterface_Ipv4) Validate(opts ...ygot.ValidationOption) error {\n\tif err := ytypes.Validate(SchemaTree[\"OpenconfigInterfaces_Interfaces_Interface_Subinterfaces_Subinterface_Ipv4\"], s, opts...); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}", "func (t *Qos_Qos_Qos_ConnectivityService) Validate(opts ...ygot.ValidationOption) error {\n\tif err := ytypes.Validate(SchemaTree[\"Qos_Qos_Qos_ConnectivityService\"], t, opts...); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}", "func TestDiscoveryResourceGate(t *testing.T) {\n\tresources := map[string][]metav1.APIResource{\n\t\t\"allLegacy\": {\n\t\t\t{Name: \"clusterpolicies\", Kind: \"ClusterPolicies\"},\n\t\t\t{Name: \"clusterpolicybindings\", Kind: \"ClusterPolicyBindings\"},\n\t\t\t{Name: \"policies\", Kind: 
\"Policies\"},\n\t\t\t{Name: \"policybindings\", Kind: \"PolicyBindings\"},\n\t\t\t{Name: \"foo\", Kind: \"Foo\"},\n\t\t},\n\t\t\"partialLegacy\": {\n\t\t\t{Name: \"clusterpolicies\", Kind: \"ClusterPolicies\"},\n\t\t\t{Name: \"clusterpolicybindings\", Kind: \"ClusterPolicyBindings\"},\n\t\t\t{Name: \"foo\", Kind: \"Foo\"},\n\t\t},\n\t\t\"noLegacy\": {\n\t\t\t{Name: \"foo\", Kind: \"Foo\"},\n\t\t\t{Name: \"bar\", Kind: \"Bar\"},\n\t\t},\n\t}\n\n\tlegacyTests := map[string]struct {\n\t\texistingResources *metav1.APIResourceList\n\t\texpectErrStr string\n\t}{\n\t\t\"scheme-legacy-all-supported\": {\n\t\t\texistingResources: &metav1.APIResourceList{\n\t\t\t\tGroupVersion: authorization.LegacySchemeGroupVersion.String(),\n\t\t\t\tAPIResources: resources[\"allLegacy\"],\n\t\t\t},\n\t\t\texpectErrStr: \"\",\n\t\t},\n\t\t\"scheme-legacy-some-supported\": {\n\t\t\texistingResources: &metav1.APIResourceList{\n\t\t\t\tGroupVersion: authorization.LegacySchemeGroupVersion.String(),\n\t\t\t\tAPIResources: resources[\"partialLegacy\"],\n\t\t\t},\n\t\t\texpectErrStr: \"the server does not support legacy policy resources\",\n\t\t},\n\t\t\"scheme-legacy-none-supported\": {\n\t\t\texistingResources: &metav1.APIResourceList{\n\t\t\t\tGroupVersion: authorization.LegacySchemeGroupVersion.String(),\n\t\t\t\tAPIResources: resources[\"noLegacy\"],\n\t\t\t},\n\t\t\texpectErrStr: \"the server does not support legacy policy resources\",\n\t\t},\n\t\t\"scheme-all-supported\": {\n\t\t\texistingResources: &metav1.APIResourceList{\n\t\t\t\tGroupVersion: authorization.SchemeGroupVersion.String(),\n\t\t\t\tAPIResources: resources[\"allLegacy\"],\n\t\t\t},\n\t\t\texpectErrStr: \"\",\n\t\t},\n\t\t\"scheme-some-supported\": {\n\t\t\texistingResources: &metav1.APIResourceList{\n\t\t\t\tGroupVersion: authorization.SchemeGroupVersion.String(),\n\t\t\t\tAPIResources: resources[\"partialLegacy\"],\n\t\t\t},\n\t\t\texpectErrStr: \"the server does not support legacy policy 
resources\",\n\t\t},\n\t\t\"scheme-none-supported\": {\n\t\t\texistingResources: &metav1.APIResourceList{\n\t\t\t\tGroupVersion: authorization.SchemeGroupVersion.String(),\n\t\t\t\tAPIResources: resources[\"noLegacy\"],\n\t\t\t},\n\t\t\texpectErrStr: \"the server does not support legacy policy resources\",\n\t\t},\n\t}\n\n\tdiscoveryTests := map[string]struct {\n\t\texistingResources *metav1.APIResourceList\n\t\tinputGVR []schema.GroupVersionResource\n\t\texpectedGVR []schema.GroupVersionResource\n\t\texpectedAll bool\n\t}{\n\t\t\"discovery-subset\": {\n\t\t\texistingResources: &metav1.APIResourceList{\n\t\t\t\tGroupVersion: \"v1\",\n\t\t\t\tAPIResources: resources[\"noLegacy\"],\n\t\t\t},\n\t\t\tinputGVR: []schema.GroupVersionResource{\n\t\t\t\t{\n\t\t\t\t\tGroup: \"\",\n\t\t\t\t\tVersion: \"v1\",\n\t\t\t\t\tResource: \"foo\",\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tGroup: \"\",\n\t\t\t\t\tVersion: \"v1\",\n\t\t\t\t\tResource: \"bar\",\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tGroup: \"\",\n\t\t\t\t\tVersion: \"v1\",\n\t\t\t\t\tResource: \"noexist\",\n\t\t\t\t},\n\t\t\t},\n\t\t\texpectedGVR: []schema.GroupVersionResource{\n\t\t\t\t{\n\t\t\t\t\tGroup: \"\",\n\t\t\t\t\tVersion: \"v1\",\n\t\t\t\t\tResource: \"foo\",\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tGroup: \"\",\n\t\t\t\t\tVersion: \"v1\",\n\t\t\t\t\tResource: \"bar\",\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t\"discovery-none\": {\n\t\t\texistingResources: &metav1.APIResourceList{\n\t\t\t\tGroupVersion: \"v1\",\n\t\t\t\tAPIResources: resources[\"noLegacy\"],\n\t\t\t},\n\t\t\tinputGVR: []schema.GroupVersionResource{\n\t\t\t\t{\n\t\t\t\t\tGroup: \"\",\n\t\t\t\t\tVersion: \"v1\",\n\t\t\t\t\tResource: \"noexist\",\n\t\t\t\t},\n\t\t\t},\n\t\t\texpectedGVR: []schema.GroupVersionResource{},\n\t\t},\n\t\t\"discovery-all\": {\n\t\t\texistingResources: &metav1.APIResourceList{\n\t\t\t\tGroupVersion: \"v1\",\n\t\t\t\tAPIResources: resources[\"noLegacy\"],\n\t\t\t},\n\t\t\tinputGVR: []schema.GroupVersionResource{\n\t\t\t\t{\n\t\t\t\t\tGroup: 
\"\",\n\t\t\t\t\tVersion: \"v1\",\n\t\t\t\t\tResource: \"foo\",\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tGroup: \"\",\n\t\t\t\t\tVersion: \"v1\",\n\t\t\t\t\tResource: \"bar\",\n\t\t\t\t},\n\t\t\t},\n\t\t\texpectedGVR: []schema.GroupVersionResource{\n\t\t\t\t{\n\t\t\t\t\tGroup: \"\",\n\t\t\t\t\tVersion: \"v1\",\n\t\t\t\t\tResource: \"foo\",\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tGroup: \"\",\n\t\t\t\t\tVersion: \"v1\",\n\t\t\t\t\tResource: \"bar\",\n\t\t\t\t},\n\t\t\t},\n\t\t\texpectedAll: true,\n\t\t},\n\t}\n\n\tfor tcName, tc := range discoveryTests {\n\t\tfunc() {\n\t\t\tserver := testServer(t, tc.existingResources)\n\t\t\tdefer server.Close()\n\t\t\tclient := discovery.NewDiscoveryClientForConfigOrDie(&restclient.Config{Host: server.URL})\n\n\t\t\tgot, all, err := DiscoverGroupVersionResources(client, tc.inputGVR...)\n\t\t\tif err != nil {\n\t\t\t\tt.Fatalf(\"myerr %s\", err.Error())\n\t\t\t}\n\t\t\tif !reflect.DeepEqual(got, tc.expectedGVR) {\n\t\t\t\tt.Fatalf(\"%s got %v, expected %v\", tcName, got, tc.expectedGVR)\n\t\t\t}\n\t\t\tif tc.expectedAll && !all {\n\t\t\t\tt.Fatalf(\"%s expected all\", tcName)\n\t\t\t}\n\t\t}()\n\t}\n\n\tfor tcName, tc := range legacyTests {\n\t\tfunc() {\n\t\t\tserver := testServer(t, tc.existingResources)\n\t\t\tdefer server.Close()\n\t\t\tclient := discovery.NewDiscoveryClientForConfigOrDie(&restclient.Config{Host: server.URL})\n\n\t\t\terr := LegacyPolicyResourceGate(client)\n\t\t\tif err != nil {\n\t\t\t\tif len(tc.expectErrStr) == 0 {\n\t\t\t\t\tt.Fatalf(\"%s unexpected err %s\\n\", tcName, err.Error())\n\t\t\t\t}\n\t\t\t\tif tc.expectErrStr != err.Error() {\n\t\t\t\t\tt.Fatalf(\"%s expected err %s, got %s\", tcName, tc.expectErrStr, err.Error())\n\t\t\t\t}\n\t\t\t}\n\t\t\tif err == nil && len(tc.expectErrStr) != 0 {\n\t\t\t\tt.Fatalf(\"%s expected err %s, got none\\n\", tcName, tc.expectErrStr)\n\t\t\t}\n\t\t}()\n\t}\n}", "func TbCustomImageReqStructLevelValidation(sl validator.StructLevel) {\n\n\tu := 
sl.Current().Interface().(TbCustomImageReq)\n\n\terr := common.CheckString(u.Name)\n\tif err != nil {\n\t\t// ReportError(field interface{}, fieldName, structFieldName, tag, param string)\n\t\tsl.ReportError(u.Name, \"name\", \"Name\", err.Error(), \"\")\n\t}\n}", "func (setting *MongodbDatabaseCollectionThroughputSetting) validateResourceReferences() (admission.Warnings, error) {\n\trefs, err := reflecthelpers.FindResourceReferences(&setting.Spec)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn genruntime.ValidateResourceReferences(refs)\n}", "func resourceVolterraNetworkPolicyRule() *schema.Resource {\n\treturn &schema.Resource{\n\t\tCreate: resourceVolterraNetworkPolicyRuleCreate,\n\t\tRead: resourceVolterraNetworkPolicyRuleRead,\n\t\tUpdate: resourceVolterraNetworkPolicyRuleUpdate,\n\t\tDelete: resourceVolterraNetworkPolicyRuleDelete,\n\n\t\tSchema: map[string]*schema.Schema{\n\n\t\t\t\"annotations\": {\n\t\t\t\tType: schema.TypeMap,\n\t\t\t\tOptional: true,\n\t\t\t},\n\n\t\t\t\"description\": {\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tOptional: true,\n\t\t\t},\n\n\t\t\t\"disable\": {\n\t\t\t\tType: schema.TypeBool,\n\t\t\t\tOptional: true,\n\t\t\t},\n\n\t\t\t\"labels\": {\n\t\t\t\tType: schema.TypeMap,\n\t\t\t\tOptional: true,\n\t\t\t},\n\n\t\t\t\"name\": {\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tRequired: true,\n\t\t\t\tForceNew: true,\n\t\t\t},\n\n\t\t\t\"namespace\": {\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tRequired: true,\n\t\t\t\tForceNew: true,\n\t\t\t},\n\n\t\t\t\"action\": {\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tOptional: true,\n\t\t\t},\n\n\t\t\t\"advanced_action\": {\n\n\t\t\t\tType: schema.TypeSet,\n\t\t\t\tOptional: true,\n\t\t\t\tElem: &schema.Resource{\n\t\t\t\t\tSchema: map[string]*schema.Schema{\n\n\t\t\t\t\t\t\"action\": {\n\t\t\t\t\t\t\tType: schema.TypeString,\n\t\t\t\t\t\t\tOptional: true,\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\n\t\t\t\"label_matcher\": {\n\n\t\t\t\tType: 
schema.TypeSet,\n\t\t\t\tOptional: true,\n\t\t\t\tElem: &schema.Resource{\n\t\t\t\t\tSchema: map[string]*schema.Schema{\n\n\t\t\t\t\t\t\"keys\": {\n\n\t\t\t\t\t\t\tType: schema.TypeList,\n\n\t\t\t\t\t\t\tOptional: true,\n\t\t\t\t\t\t\tElem: &schema.Schema{\n\t\t\t\t\t\t\t\tType: schema.TypeString,\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\n\t\t\t\"ports\": {\n\n\t\t\t\tType: schema.TypeList,\n\n\t\t\t\tOptional: true,\n\t\t\t\tElem: &schema.Schema{\n\t\t\t\t\tType: schema.TypeString,\n\t\t\t\t},\n\t\t\t},\n\n\t\t\t\"protocol\": {\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tOptional: true,\n\t\t\t},\n\n\t\t\t\"ip_prefix_set\": {\n\n\t\t\t\tType: schema.TypeSet,\n\t\t\t\tOptional: true,\n\t\t\t\tElem: &schema.Resource{\n\t\t\t\t\tSchema: map[string]*schema.Schema{\n\n\t\t\t\t\t\t\"ref\": {\n\n\t\t\t\t\t\t\tType: schema.TypeList,\n\t\t\t\t\t\t\tOptional: true,\n\t\t\t\t\t\t\tElem: &schema.Resource{\n\t\t\t\t\t\t\t\tSchema: map[string]*schema.Schema{\n\n\t\t\t\t\t\t\t\t\t\"kind\": {\n\t\t\t\t\t\t\t\t\t\tType: schema.TypeString,\n\t\t\t\t\t\t\t\t\t\tComputed: true,\n\t\t\t\t\t\t\t\t\t},\n\n\t\t\t\t\t\t\t\t\t\"name\": {\n\t\t\t\t\t\t\t\t\t\tType: schema.TypeString,\n\t\t\t\t\t\t\t\t\t\tOptional: true,\n\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t\"namespace\": {\n\t\t\t\t\t\t\t\t\t\tType: schema.TypeString,\n\t\t\t\t\t\t\t\t\t\tOptional: true,\n\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t\"tenant\": {\n\t\t\t\t\t\t\t\t\t\tType: schema.TypeString,\n\t\t\t\t\t\t\t\t\t\tOptional: true,\n\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\n\t\t\t\"prefix\": {\n\n\t\t\t\tType: schema.TypeSet,\n\t\t\t\tOptional: true,\n\t\t\t\tElem: &schema.Resource{\n\t\t\t\t\tSchema: map[string]*schema.Schema{\n\n\t\t\t\t\t\t\"ipv6_prefix\": {\n\n\t\t\t\t\t\t\tType: schema.TypeList,\n\n\t\t\t\t\t\t\tOptional: true,\n\t\t\t\t\t\t\tElem: &schema.Schema{\n\t\t\t\t\t\t\t\tType: 
schema.TypeString,\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\n\t\t\t\t\t\t\"prefix\": {\n\n\t\t\t\t\t\t\tType: schema.TypeList,\n\n\t\t\t\t\t\t\tOptional: true,\n\t\t\t\t\t\t\tElem: &schema.Schema{\n\t\t\t\t\t\t\t\tType: schema.TypeString,\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\n\t\t\t\"prefix_selector\": {\n\n\t\t\t\tType: schema.TypeSet,\n\t\t\t\tOptional: true,\n\t\t\t\tElem: &schema.Resource{\n\t\t\t\t\tSchema: map[string]*schema.Schema{\n\n\t\t\t\t\t\t\"expressions\": {\n\n\t\t\t\t\t\t\tType: schema.TypeList,\n\n\t\t\t\t\t\t\tRequired: true,\n\t\t\t\t\t\t\tElem: &schema.Schema{\n\t\t\t\t\t\t\t\tType: schema.TypeString,\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n}", "func (o TopImagesOptions) Validate(cmd *cobra.Command) error {\n\treturn nil\n}", "func (m *SearchResources) Validate(formats strfmt.Registry) error {\n\tvar res []error\n\n\tif err := m.validateResource(formats); err != nil {\n\t\t// prop\n\t\tres = append(res, err)\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}", "func NamespaceForResourceIsReady(resource common.ComponentResource) (bool, error) {\n\t// create a stub namespace resource to pass to the NamespaceIsReady method\n\tnamespace := &Resource{\n\t\tReconciler: resource.GetReconciler(),\n\t}\n\n\t// insert the inherited fields\n\tnamespace.Name = resource.GetNamespace()\n\tnamespace.Group = \"\"\n\tnamespace.Version = \"v1\"\n\tnamespace.Kind = NamespaceKind\n\n\treturn NamespaceIsReady(namespace)\n}", "func (t Topic) Validate() error {\n\treturn validateResource(string(t), \"topics\")\n}", "func (opts Options) validate() error {\n\t// AzureDNSClient is only not nil for the tests.\n\tif opts.AzureAuthentication == nil && opts.AzureDNSClient == nil {\n\t\treturn errAzureAuthenticationNil\n\t}\n\n\tif opts.KubeClient == nil {\n\t\treturn errKubeClientNil\n\t}\n\n\tif len(opts.DomainNameRoot) <= 0 {\n\t\treturn 
errDomainNameRootEmpty\n\t}\n\n\tif len(opts.ResourceGroupName) <= 0 {\n\t\treturn errResourceGroupNameEmpty\n\t}\n\n\tif len(opts.ResourceName) <= 0 {\n\t\treturn errResourceNameEmpty\n\t}\n\n\tif len(opts.Region) <= 0 {\n\t\treturn errRegionEmpty\n\t}\n\n\treturn nil\n}", "func (r *Resource) Valid() bool {\n\tif r.Spec == nil {\n\t\tfmt.Println(\"no resource spec\")\n\t\treturn false\n\t}\n\n\treturn r.Spec.Valid()\n}", "func validateSelectRead(media []*medium) (a cmdArgs, err error) {\n\tif len(media) > 1 {\n\t\terr = tooManyInputs\n\t\treturn\n\t}\n\n\tif len(media) == 0 {\n\t\ta.in, err = processEtcdArgs(defaultEtcdUrls, defaultEtcdPrefix, \"\")\n\t\treturn\n\t}\n\n\tswitch media[0].typ {\n\tcase inlineIds, patchPrepend, patchPrependFile, patchAppend, patchAppendFile:\n\t\terr = invalidInputType\n\t\treturn\n\t}\n\n\ta.in = media[0]\n\treturn\n}", "func ValidateControllerRegistrationSpecUpdate(new, old *core.ControllerRegistrationSpec, deletionTimestampSet bool, fldPath *field.Path) field.ErrorList {\n\tallErrs := field.ErrorList{}\n\n\tif deletionTimestampSet && !apiequality.Semantic.DeepEqual(new, old) {\n\t\tallErrs = append(allErrs, apivalidation.ValidateImmutableField(new, old, fldPath)...)\n\t\treturn allErrs\n\t}\n\n\tkindTypeToPrimary := make(map[string]*bool, len(old.Resources))\n\tfor _, resource := range old.Resources {\n\t\tkindTypeToPrimary[resource.Kind+resource.Type] = resource.Primary\n\t}\n\tfor i, resource := range new.Resources {\n\t\tif primary, ok := kindTypeToPrimary[resource.Kind+resource.Type]; ok {\n\t\t\tallErrs = append(allErrs, apivalidation.ValidateImmutableField(resource.Primary, primary, fldPath.Child(\"resources\").Index(i).Child(\"primary\"))...)\n\t\t}\n\t}\n\n\treturn allErrs\n}", "func NewResourceCondition(ref *common.ResourceRef) ResourceConditionInterface {\n\tkvg := ref.SprintKindVersionGroup()\n\trc, found := supportedResourceConditions[kvg]\n\tif found == false {\n\t\trc = nil\n\t}\n\treturn rc\n}", "func 
TestUpdate_Unit_EmptyLabelSelector(t *testing.T) {\n\n\tvar dr Router\n\tdr = &MockedResources{}\n\n\tvar vs Router\n\tvs = &MockedResources{}\n\n\tshift := router.Shift{}\n\n\tvar op Operator\n\top = &Istiops{\n\t\tDrRouter: dr,\n\t\tVsRouter: vs,\n\t}\n\n\terr := op.Update(shift)\n\tassert.EqualError(t, err, \"label-selector must exists in need to find resources\")\n}", "func (m *CreateNamespaceQuotaReq) Validate() error {\n\tif m == nil {\n\t\treturn nil\n\t}\n\n\tif l := utf8.RuneCountInString(m.GetNamespace()); l < 2 || l > 100 {\n\t\treturn CreateNamespaceQuotaReqValidationError{\n\t\t\tfield: \"Namespace\",\n\t\t\treason: \"value length must be between 2 and 100 runes, inclusive\",\n\t\t}\n\t}\n\n\tif utf8.RuneCountInString(m.GetFederationClusterID()) > 100 {\n\t\treturn CreateNamespaceQuotaReqValidationError{\n\t\t\tfield: \"FederationClusterID\",\n\t\t\treason: \"value length must be at most 100 runes\",\n\t\t}\n\t}\n\n\tif utf8.RuneCountInString(m.GetClusterID()) > 100 {\n\t\treturn CreateNamespaceQuotaReqValidationError{\n\t\t\tfield: \"ClusterID\",\n\t\t\treason: \"value length must be at most 100 runes\",\n\t\t}\n\t}\n\n\tif utf8.RuneCountInString(m.GetRegion()) > 100 {\n\t\treturn CreateNamespaceQuotaReqValidationError{\n\t\t\tfield: \"Region\",\n\t\t\treason: \"value length must be at most 100 runes\",\n\t\t}\n\t}\n\n\t// no validation rules for ResourceQuota\n\n\treturn nil\n}", "func (t *OpenconfigQos_Qos_Classifiers_Classifier_Terms_Term_Conditions) Validate(opts ...ygot.ValidationOption) error {\n\tif err := ytypes.Validate(SchemaTree[\"OpenconfigQos_Qos_Classifiers_Classifier_Terms_Term_Conditions\"], t, opts...); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}", "func (client *Client) CheckCloudResourceAuthorizedWithOptions(request *CheckCloudResourceAuthorizedRequest, runtime *util.RuntimeOptions) (_result *CheckCloudResourceAuthorizedResponse, _err error) {\n\t_err = util.ValidateModel(request)\n\tif _err != nil {\n\t\treturn _result, 
_err\n\t}\n\tquery := map[string]interface{}{}\n\tif !tea.BoolValue(util.IsUnset(request.InstanceId)) {\n\t\tquery[\"InstanceId\"] = request.InstanceId\n\t}\n\n\tif !tea.BoolValue(util.IsUnset(request.OwnerAccount)) {\n\t\tquery[\"OwnerAccount\"] = request.OwnerAccount\n\t}\n\n\tif !tea.BoolValue(util.IsUnset(request.OwnerId)) {\n\t\tquery[\"OwnerId\"] = request.OwnerId\n\t}\n\n\tif !tea.BoolValue(util.IsUnset(request.ResourceOwnerAccount)) {\n\t\tquery[\"ResourceOwnerAccount\"] = request.ResourceOwnerAccount\n\t}\n\n\tif !tea.BoolValue(util.IsUnset(request.ResourceOwnerId)) {\n\t\tquery[\"ResourceOwnerId\"] = request.ResourceOwnerId\n\t}\n\n\tif !tea.BoolValue(util.IsUnset(request.RoleArn)) {\n\t\tquery[\"RoleArn\"] = request.RoleArn\n\t}\n\n\tif !tea.BoolValue(util.IsUnset(request.SecurityToken)) {\n\t\tquery[\"SecurityToken\"] = request.SecurityToken\n\t}\n\n\treq := &openapi.OpenApiRequest{\n\t\tQuery: openapiutil.Query(query),\n\t}\n\tparams := &openapi.Params{\n\t\tAction: tea.String(\"CheckCloudResourceAuthorized\"),\n\t\tVersion: tea.String(\"2015-01-01\"),\n\t\tProtocol: tea.String(\"HTTPS\"),\n\t\tPathname: tea.String(\"/\"),\n\t\tMethod: tea.String(\"POST\"),\n\t\tAuthType: tea.String(\"AK\"),\n\t\tStyle: tea.String(\"RPC\"),\n\t\tReqBodyType: tea.String(\"formData\"),\n\t\tBodyType: tea.String(\"json\"),\n\t}\n\t_result = &CheckCloudResourceAuthorizedResponse{}\n\t_body, _err := client.CallApi(params, req, runtime)\n\tif _err != nil {\n\t\treturn _result, _err\n\t}\n\t_err = tea.Convert(_body, &_result)\n\treturn _result, _err\n}", "func (s *PostgresSpec) ObserveSelectors(rsrc interface{}) []ResourceSelector {\n\tr := rsrc.(*AirflowBase)\n\tif s.Operator {\n\t\treturn nil\n\t}\n\tselector := selectorLabels(r, ValueAirflowComponentPostgres)\n\tsecretSelector := selectorLabels(r, ValueAirflowComponentPostgres)\n\trsrcSelectos := []ResourceSelector{\n\t\t{&resources.StatefulSet{}, selector},\n\t\t{&resources.Service{}, selector},\n\t\t{&resources.Secret{}, 
secretSelector},\n\t\t{&resources.PodDisruptionBudget{}, selector},\n\t}\n\t//if s.VolumeClaimTemplate != nil {\n\t//\trsrcSelectos = append(rsrcSelectos, ResourceSelector{s.VolumeClaimTemplate, nil})\n\t//}\n\treturn rsrcSelectos\n}" ]
[ "0.59584147", "0.5700171", "0.5696888", "0.56482863", "0.55733466", "0.5490589", "0.5441203", "0.5355841", "0.5320605", "0.5286241", "0.528479", "0.51627755", "0.5150385", "0.514723", "0.5142086", "0.51359487", "0.50891954", "0.50874746", "0.5085249", "0.49886724", "0.49864337", "0.49799454", "0.49588868", "0.4947418", "0.48951918", "0.489457", "0.48921522", "0.48917222", "0.4879877", "0.48758942", "0.48561984", "0.4837104", "0.48315364", "0.48307315", "0.48282707", "0.48198515", "0.48125488", "0.47837245", "0.47707272", "0.47650874", "0.47620618", "0.47580746", "0.4736916", "0.47324926", "0.47316223", "0.47189975", "0.47154793", "0.47030818", "0.4686685", "0.46838415", "0.46750504", "0.4670461", "0.4654456", "0.46542817", "0.46418408", "0.46418408", "0.46404904", "0.46302986", "0.46260333", "0.46227896", "0.4620711", "0.46157253", "0.45900446", "0.4580738", "0.457716", "0.45765427", "0.4571648", "0.45658255", "0.4564042", "0.45610094", "0.45604318", "0.4557306", "0.4555927", "0.45549107", "0.45511654", "0.45501846", "0.4548094", "0.45387068", "0.45358163", "0.45349956", "0.4531337", "0.45298812", "0.4529672", "0.45296577", "0.4528455", "0.45202333", "0.45105314", "0.44924912", "0.44765055", "0.4474568", "0.44696963", "0.44657195", "0.4465683", "0.44607705", "0.44569254", "0.44544676", "0.44533348", "0.4447023", "0.4443836", "0.44431514" ]
0.71744615
0
TestConcurrentBuildControllers tests the transition of a build from new to pending. Ensures that only a single New > Pending transition happens and that only a single pod is created during a set period of time.
TestConcurrentBuildControllers проверяет переход сборки из состояния new в pending. Обеспечивает, что происходит только один переход New > Pending и что создается только один pod в течение определенного периода времени.
func TestConcurrentBuildControllers(t *testing.T) { defer testutil.DumpEtcdOnFailure(t) // Start a master with multiple BuildControllers osClient, kClient := setupBuildControllerTest(controllerCount{BuildControllers: 5}, t) build.RunBuildControllerTest(t, osClient, kClient) }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func TestConcurrentBuildPodControllers(t *testing.T) {\n\tdefer testutil.DumpEtcdOnFailure(t)\n\t// Start a master with multiple BuildPodControllers\n\tosClient, kClient := setupBuildControllerTest(controllerCount{BuildPodControllers: 5}, t)\n\tbuild.RunBuildPodControllerTest(t, osClient, kClient)\n}", "func TestConcurrentBuildControllers(t *testing.T) {\n\t// Start a master with multiple BuildControllers\n\tbuildClient, _, kClient, fn := setupBuildControllerTest(controllerCount{BuildControllers: 5}, t)\n\tdefer fn()\n\tbuild.RunBuildControllerTest(t, buildClient, kClient)\n}", "func TestConcurrentBuildControllersPodSync(t *testing.T) {\n\t// Start a master with multiple BuildControllers\n\tbuildClient, _, kClient, fn := setupBuildControllerTest(controllerCount{BuildControllers: 5}, t)\n\tdefer fn()\n\tbuild.RunBuildControllerPodSyncTest(t, buildClient, kClient)\n}", "func TestTriggerController(t *testing.T) {\n\tconfig, stopFn := framework.RunControlPlane(t)\n\tdefer stopFn()\n\n\tctx, cancel := context.WithTimeout(context.TODO(), time.Second*20)\n\tdefer cancel()\n\n\tfakeClock := &fakeclock.FakeClock{}\n\t// Build, instantiate and run the trigger controller.\n\tkubeClient, factory, cmCl, cmFactory := framework.NewClients(t, config)\n\n\tnamespace := \"testns\"\n\n\t// Create Namespace\n\tns := &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: namespace}}\n\t_, err := kubeClient.CoreV1().Namespaces().Create(context.TODO(), ns, metav1.CreateOptions{})\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tctrl, queue, mustSync := trigger.NewController(logf.Log, cmCl, factory, cmFactory, framework.NewEventRecorder(t), fakeClock, policies.NewTriggerPolicyChain(fakeClock))\n\tc := controllerpkg.NewController(\n\t\tcontext.Background(),\n\t\t\"trigger_test\",\n\t\tmetrics.New(logf.Log),\n\t\tctrl.ProcessItem,\n\t\tmustSync,\n\t\tnil,\n\t\tqueue,\n\t)\n\tstopController := framework.StartInformersAndController(t, factory, cmFactory, c)\n\tdefer stopController()\n\n\t// 
Create a Certificate resource and wait for it to have the 'Issuing' condition.\n\tcert, err := cmCl.CertmanagerV1().Certificates(namespace).Create(ctx, &cmapi.Certificate{\n\t\tObjectMeta: metav1.ObjectMeta{Name: \"testcrt\", Namespace: \"testns\"},\n\t\tSpec: cmapi.CertificateSpec{\n\t\t\tSecretName: \"example\",\n\t\t\tCommonName: \"example.com\",\n\t\t\tIssuerRef: cmmeta.ObjectReference{Name: \"testissuer\"}, // doesn't need to exist\n\t\t},\n\t}, metav1.CreateOptions{})\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\terr = wait.Poll(time.Millisecond*100, time.Second*5, func() (done bool, err error) {\n\t\tc, err := cmCl.CertmanagerV1().Certificates(cert.Namespace).Get(ctx, cert.Name, metav1.GetOptions{})\n\t\tif err != nil {\n\t\t\tt.Logf(\"Failed to fetch Certificate resource, retrying: %v\", err)\n\t\t\treturn false, nil\n\t\t}\n\t\tif !apiutil.CertificateHasCondition(c, cmapi.CertificateCondition{\n\t\t\tType: cmapi.CertificateConditionIssuing,\n\t\t\tStatus: cmmeta.ConditionTrue,\n\t\t}) {\n\t\t\tt.Logf(\"Certificate does not have expected condition, got=%#v\", apiutil.GetCertificateCondition(c, cmapi.CertificateConditionIssuing))\n\t\t\treturn false, nil\n\t\t}\n\t\treturn true, nil\n\t})\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n}", "func validateController(ctx context.Context, c clientset.Interface, containerImage string, replicas int, containername string, testname string, validator validatorFn, ns string) {\n\tcontainerImage = trimDockerRegistry(containerImage)\n\tgetPodsTemplate := \"--template={{range.items}}{{.metadata.name}} {{end}}\"\n\n\tgetContainerStateTemplate := fmt.Sprintf(`--template={{if (exists . \"status\" \"containerStatuses\")}}{{range .status.containerStatuses}}{{if (and (eq .name \"%s\") (exists . \"state\" \"running\"))}}true{{end}}{{end}}{{end}}`, containername)\n\n\tgetImageTemplate := fmt.Sprintf(`--template={{if (exists . 
\"spec\" \"containers\")}}{{range .spec.containers}}{{if eq .name \"%s\"}}{{.image}}{{end}}{{end}}{{end}}`, containername)\n\n\tginkgo.By(fmt.Sprintf(\"waiting for all containers in %s pods to come up.\", testname)) //testname should be selector\nwaitLoop:\n\tfor start := time.Now(); time.Since(start) < framework.PodStartTimeout && ctx.Err() == nil; time.Sleep(5 * time.Second) {\n\t\tgetPodsOutput := e2ekubectl.RunKubectlOrDie(ns, \"get\", \"pods\", \"-o\", \"template\", getPodsTemplate, \"-l\", testname)\n\t\tpods := strings.Fields(getPodsOutput)\n\t\tif numPods := len(pods); numPods != replicas {\n\t\t\tginkgo.By(fmt.Sprintf(\"Replicas for %s: expected=%d actual=%d\", testname, replicas, numPods))\n\t\t\tcontinue\n\t\t}\n\t\tvar runningPods []string\n\t\tfor _, podID := range pods {\n\t\t\trunning := e2ekubectl.RunKubectlOrDie(ns, \"get\", \"pods\", podID, \"-o\", \"template\", getContainerStateTemplate)\n\t\t\tif running != \"true\" {\n\t\t\t\tframework.Logf(\"%s is created but not running\", podID)\n\t\t\t\tcontinue waitLoop\n\t\t\t}\n\n\t\t\tcurrentImage := e2ekubectl.RunKubectlOrDie(ns, \"get\", \"pods\", podID, \"-o\", \"template\", getImageTemplate)\n\t\t\tcurrentImage = trimDockerRegistry(currentImage)\n\t\t\tif currentImage != containerImage {\n\t\t\t\tframework.Logf(\"%s is created but running wrong image; expected: %s, actual: %s\", podID, containerImage, currentImage)\n\t\t\t\tcontinue waitLoop\n\t\t\t}\n\n\t\t\t// Call the generic validator function here.\n\t\t\t// This might validate for example, that (1) getting a url works and (2) url is serving correct content.\n\t\t\tif err := validator(ctx, c, podID); err != nil {\n\t\t\t\tframework.Logf(\"%s is running right image but validator function failed: %v\", podID, err)\n\t\t\t\tcontinue waitLoop\n\t\t\t}\n\n\t\t\tframework.Logf(\"%s is verified up and running\", podID)\n\t\t\trunningPods = append(runningPods, podID)\n\t\t}\n\t\t// If we reach here, then all our checks passed.\n\t\tif len(runningPods) 
== replicas {\n\t\t\treturn\n\t\t}\n\t}\n\t// Reaching here means that one of more checks failed multiple times. Assuming its not a race condition, something is broken.\n\tframework.Failf(\"Timed out after %v seconds waiting for %s pods to reach valid state\", framework.PodStartTimeout.Seconds(), testname)\n}", "func TestBuildControllerNoBuildManifestsFirst(t *testing.T) {\n\tf := newTestFixture(t)\n\tdefer f.TearDown()\n\n\tmanifests := make([]model.Manifest, 10)\n\tfor i := 0; i < 10; i++ {\n\t\tsync := model.Sync{LocalPath: f.Path(), ContainerPath: \"/go\"}\n\t\tmanifests[i] = f.newManifest(fmt.Sprintf(\"built%d\", i+1), []model.Sync{sync})\n\t}\n\n\tfor _, i := range []int{3, 7, 8} {\n\t\tmanifests[i] = assembleK8sManifest(\n\t\t\tmodel.Manifest{\n\t\t\t\tName: model.ManifestName(fmt.Sprintf(\"unbuilt%d\", i+1))},\n\t\t\tmodel.K8sTarget{YAML: \"fake-yaml\"})\n\t}\n\tf.Start(manifests, true)\n\n\tvar observedBuildOrder []string\n\tfor i := 0; i < len(manifests); i++ {\n\t\tcall := f.nextCall()\n\t\tobservedBuildOrder = append(observedBuildOrder, call.k8s().Name.String())\n\t}\n\n\t// throwing a bunch of elements at it to increase confidence we maintain order between built and unbuilt\n\t// this might miss bugs since we might just get these elements back in the right order via luck\n\texpectedBuildOrder := []string{\n\t\t\"unbuilt4\",\n\t\t\"unbuilt8\",\n\t\t\"unbuilt9\",\n\t\t\"built1\",\n\t\t\"built2\",\n\t\t\"built3\",\n\t\t\"built5\",\n\t\t\"built6\",\n\t\t\"built7\",\n\t\t\"built10\",\n\t}\n\tassert.Equal(t, expectedBuildOrder, observedBuildOrder)\n}", "func TestTriggerController(t *testing.T) {\n\tctx, cancel := context.WithTimeout(context.Background(), time.Second*40)\n\tdefer cancel()\n\n\tconfig, stopFn := framework.RunControlPlane(t, ctx)\n\tdefer stopFn()\n\n\tfakeClock := &fakeclock.FakeClock{}\n\t// Build, instantiate and run the trigger controller.\n\tkubeClient, factory, cmCl, cmFactory := framework.NewClients(t, config)\n\n\tnamespace := 
\"testns\"\n\n\t// Create Namespace\n\tns := &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: namespace}}\n\t_, err := kubeClient.CoreV1().Namespaces().Create(ctx, ns, metav1.CreateOptions{})\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tshouldReissue := policies.NewTriggerPolicyChain(fakeClock).Evaluate\n\tctrl, queue, mustSync := trigger.NewController(logf.Log, cmCl, factory, cmFactory, framework.NewEventRecorder(t), fakeClock, shouldReissue)\n\tc := controllerpkg.NewController(\n\t\tctx,\n\t\t\"trigger_test\",\n\t\tmetrics.New(logf.Log, clock.RealClock{}),\n\t\tctrl.ProcessItem,\n\t\tmustSync,\n\t\tnil,\n\t\tqueue,\n\t)\n\tstopController := framework.StartInformersAndController(t, factory, cmFactory, c)\n\tdefer stopController()\n\n\t// Create a Certificate resource and wait for it to have the 'Issuing' condition.\n\tcert, err := cmCl.CertmanagerV1().Certificates(namespace).Create(ctx, &cmapi.Certificate{\n\t\tObjectMeta: metav1.ObjectMeta{Name: \"testcrt\", Namespace: \"testns\"},\n\t\tSpec: cmapi.CertificateSpec{\n\t\t\tSecretName: \"example\",\n\t\t\tCommonName: \"example.com\",\n\t\t\tIssuerRef: cmmeta.ObjectReference{Name: \"testissuer\"}, // doesn't need to exist\n\t\t},\n\t}, metav1.CreateOptions{})\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\terr = wait.PollImmediateUntil(time.Millisecond*100, func() (done bool, err error) {\n\t\tc, err := cmCl.CertmanagerV1().Certificates(cert.Namespace).Get(ctx, cert.Name, metav1.GetOptions{})\n\t\tif err != nil {\n\t\t\tt.Logf(\"Failed to fetch Certificate resource, retrying: %v\", err)\n\t\t\treturn false, nil\n\t\t}\n\t\tif !apiutil.CertificateHasCondition(c, cmapi.CertificateCondition{\n\t\t\tType: cmapi.CertificateConditionIssuing,\n\t\t\tStatus: cmmeta.ConditionTrue,\n\t\t}) {\n\t\t\tt.Logf(\"Certificate does not have expected condition, got=%#v\", apiutil.GetCertificateCondition(c, cmapi.CertificateConditionIssuing))\n\t\t\treturn false, nil\n\t\t}\n\t\treturn true, nil\n\t}, ctx.Done())\n\tif err != nil 
{\n\t\tt.Fatal(err)\n\t}\n}", "func TestNewController(t *testing.T) {\n\tmessagingClientSet, err := clientset.NewForConfig(&rest.Config{})\n\tif err != nil {\n\t\tt.Fail()\n\t}\n\n\tmessagingInformerFactory := informers.NewSharedInformerFactory(messagingClientSet, 0)\n\tnatssChannelInformer := messagingInformerFactory.Messaging().V1alpha1().NatssChannels()\n\n\tc := NewController(reconciler.Options{\n\t\tKubeClientSet: fakekubeclientset.NewSimpleClientset(),\n\t\tDynamicClientSet: nil,\n\t\tNatssClientSet: nil,\n\t\tRecorder: nil,\n\t\tStatsReporter: nil,\n\t\tConfigMapWatcher: nil,\n\t\tLogger: logtesting.TestLogger(t),\n\t\tResyncPeriod: 0,\n\t\tStopChannel: nil,\n\t}, dispatchertesting.NewDispatcherDoNothing(), natssChannelInformer)\n\tif c == nil {\n\t\tt.Errorf(\"unable to create dispatcher controller\")\n\t}\n}", "func (m *MockPodControllerFactory) Build(mgr mc_manager.AsyncManager, clusterName string) (controller1.PodController, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"Build\", mgr, clusterName)\n\tret0, _ := ret[0].(controller1.PodController)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func TestController(t *testing.T) {\n\tctx, _ := rtesting.SetupFakeContext(t)\n\tctx, cancel := context.WithTimeout(ctx, 30*time.Second)\n\tdefer cancel()\n\n\t// Create reconcilers, start controller.\n\tresults := test.NewResultsClient(t)\n\n\ttrctrl := taskrun.NewController(ctx, results)\n\tprctrl := pipelinerun.NewController(ctx, results)\n\tgo controller.StartAll(ctx, trctrl, prctrl)\n\n\t// Start informers - this notifies the controller of new events.\n\tgo taskruninformer.Get(ctx).Informer().Run(ctx.Done())\n\tgo pipelineruninformer.Get(ctx).Informer().Run(ctx.Done())\n\n\tpipeline := fakepipelineclient.Get(ctx)\n\tt.Run(\"taskrun\", func(t *testing.T) {\n\t\ttr := &v1beta1.TaskRun{\n\t\t\tTypeMeta: metav1.TypeMeta{\n\t\t\t\tAPIVersion: \"tekton.dev/v1beta1\",\n\t\t\t\tKind: \"TaskRun\",\n\t\t\t},\n\t\t\tObjectMeta: 
metav1.ObjectMeta{\n\t\t\t\tName: \"taskrun\",\n\t\t\t\tNamespace: \"ns\",\n\t\t\t\tAnnotations: map[string]string{\n\t\t\t\t\t\"demo\": \"demo\",\n\t\t\t\t\t// This TaskRun belongs to a PipelineRun, so the record should\n\t\t\t\t\t// be associated with the PipelineRun result.\n\t\t\t\t\t\"tekton.dev/pipelineRun\": \"pr\",\n\t\t\t\t},\n\t\t\t\tOwnerReferences: []metav1.OwnerReference{{\n\t\t\t\t\tAPIVersion: \"tekton.dev/v1beta1\",\n\t\t\t\t\tKind: \"PipelineRun\",\n\t\t\t\t\tUID: \"pr-id\",\n\t\t\t\t}},\n\t\t\t\tUID: \"tr-id\",\n\t\t\t},\n\t\t}\n\n\t\t// The following is a hack to make the fake clients play nice with\n\t\t// each other. While the controller uses the typed informer that uses\n\t\t// the fake pipeline client to receive events, the controller uses the\n\t\t// fake dynamic client to fetch and update objects during reconcile.\n\t\t// These fake clients store objects independently, so we create the\n\t\t// object in each client to make sure the data is populated in both\n\t\t// places.\n\t\tif _, err := pipeline.TektonV1beta1().TaskRuns(tr.GetNamespace()).Create(tr); err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t\tdata, err := runtime.DefaultUnstructuredConverter.ToUnstructured(tr)\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"ToUnstructured: %v\", err)\n\t\t}\n\t\t_, err = dynamicinject.Get(ctx).Resource(apis.KindToResource(tr.GroupVersionKind())).Namespace(tr.GetNamespace()).Create(&unstructured.Unstructured{Object: data}, metav1.CreateOptions{})\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"Create: %v\", err)\n\t\t}\n\n\t\twait(ctx, t, tr, \"ns/results/pr-id\")\n\t})\n\n\tt.Run(\"pipelinerun\", func(t *testing.T) {\n\t\tpr := &v1beta1.PipelineRun{\n\t\t\tTypeMeta: metav1.TypeMeta{\n\t\t\t\tAPIVersion: \"tekton.dev/v1beta1\",\n\t\t\t\tKind: \"PipelineRun\",\n\t\t\t},\n\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\tName: \"pr\",\n\t\t\t\tNamespace: \"ns\",\n\t\t\t\tAnnotations: map[string]string{\"demo\": \"demo\"},\n\t\t\t\tUID: \"pr-id\",\n\t\t\t},\n\t\t}\n\n\t\t// 
Same create hack as taskrun (see above).\n\t\tif _, err := pipeline.TektonV1beta1().PipelineRuns(pr.GetNamespace()).Create(pr); err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t\tdata, err := runtime.DefaultUnstructuredConverter.ToUnstructured(pr)\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"ToUnstructured: %v\", err)\n\t\t}\n\t\t_, err = dynamicinject.Get(ctx).Resource(apis.KindToResource(pr.GroupVersionKind())).Namespace(pr.GetNamespace()).Create(&unstructured.Unstructured{Object: data}, metav1.CreateOptions{})\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"Create: %v\", err)\n\t\t}\n\n\t\twait(ctx, t, pr, \"ns/results/pr-id\")\n\t})\n}", "func TestControllerInitPrepare_Parallel(t *testing.T) {\n\t_ = testlib.IntegrationEnv(t)\n\n\tt.Run(\"with parent context that is never canceled\", func(t *testing.T) {\n\t\tt.Parallel()\n\n\t\t// the nil params should never be used in this case\n\t\tbuildControllers := controllerinit.Prepare(nil, nil, buildBrokenInformer(t))\n\n\t\tstart := time.Now()\n\t\trunControllers, err := buildControllers(context.Background()) // we expect this to not block forever even with a context.Background()\n\t\tdelta := time.Since(start)\n\n\t\trequire.EqualError(t, err,\n\t\t\t\"failed to sync informers of k8s.io/client-go/informers.sharedInformerFactory: \"+\n\t\t\t\t\"[k8s.io/api/core/v1.Namespace k8s.io/api/core/v1.Node]\")\n\t\trequire.Nil(t, runControllers)\n\n\t\trequire.InDelta(t, time.Minute, delta, float64(30*time.Second))\n\t})\n\n\tt.Run(\"with parent context that is canceled early\", func(t *testing.T) {\n\t\tt.Parallel()\n\n\t\t// the nil params should never be used in this case\n\t\tbuildControllers := controllerinit.Prepare(nil, nil, buildBrokenInformer(t))\n\n\t\t// we expect this to exit sooner because the parent context is shorter\n\t\tctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)\n\t\tt.Cleanup(cancel)\n\n\t\tstart := time.Now()\n\t\trunControllers, err := buildControllers(ctx)\n\t\tdelta := 
time.Since(start)\n\n\t\trequire.EqualError(t, err,\n\t\t\t\"failed to sync informers of k8s.io/client-go/informers.sharedInformerFactory: \"+\n\t\t\t\t\"[k8s.io/api/core/v1.Namespace k8s.io/api/core/v1.Node]\")\n\t\trequire.Nil(t, runControllers)\n\n\t\trequire.InDelta(t, 10*time.Second, delta, float64(15*time.Second))\n\t})\n}", "func (b *Botanist) WaitForControllersToBeActive(ctx context.Context) error {\n\ttype controllerInfo struct {\n\t\tname string\n\t\tlabels map[string]string\n\t}\n\n\ttype checkOutput struct {\n\t\tcontrollerName string\n\t\tready bool\n\t\terr error\n\t}\n\n\tvar (\n\t\tcontrollers = []controllerInfo{}\n\t\tpollInterval = 5 * time.Second\n\t)\n\n\t// Check whether the kube-controller-manager deployment exists\n\tif err := b.K8sSeedClient.Client().Get(ctx, kutil.Key(b.Shoot.SeedNamespace, v1beta1constants.DeploymentNameKubeControllerManager), &appsv1.Deployment{}); err == nil {\n\t\tcontrollers = append(controllers, controllerInfo{\n\t\t\tname: v1beta1constants.DeploymentNameKubeControllerManager,\n\t\t\tlabels: map[string]string{\n\t\t\t\t\"app\": \"kubernetes\",\n\t\t\t\t\"role\": \"controller-manager\",\n\t\t\t},\n\t\t})\n\t} else if client.IgnoreNotFound(err) != nil {\n\t\treturn err\n\t}\n\n\treturn retry.UntilTimeout(context.TODO(), pollInterval, 90*time.Second, func(ctx context.Context) (done bool, err error) {\n\t\tvar (\n\t\t\twg sync.WaitGroup\n\t\t\tout = make(chan *checkOutput)\n\t\t)\n\n\t\tfor _, controller := range controllers {\n\t\t\twg.Add(1)\n\n\t\t\tgo func(controller controllerInfo) {\n\t\t\t\tdefer wg.Done()\n\n\t\t\t\tpodList := &corev1.PodList{}\n\t\t\t\terr := b.K8sSeedClient.Client().List(ctx, podList,\n\t\t\t\t\tclient.InNamespace(b.Shoot.SeedNamespace),\n\t\t\t\t\tclient.MatchingLabels(controller.labels))\n\t\t\t\tif err != nil {\n\t\t\t\t\tout <- &checkOutput{controllerName: controller.name, err: err}\n\t\t\t\t\treturn\n\t\t\t\t}\n\n\t\t\t\t// Check that only one replica of the controller exists.\n\t\t\t\tif 
len(podList.Items) != 1 {\n\t\t\t\t\tb.Logger.Infof(\"Waiting for %s to have exactly one replica\", controller.name)\n\t\t\t\t\tout <- &checkOutput{controllerName: controller.name}\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\t// Check that the existing replica is not in getting deleted.\n\t\t\t\tif podList.Items[0].DeletionTimestamp != nil {\n\t\t\t\t\tb.Logger.Infof(\"Waiting for a new replica of %s\", controller.name)\n\t\t\t\t\tout <- &checkOutput{controllerName: controller.name}\n\t\t\t\t\treturn\n\t\t\t\t}\n\n\t\t\t\t// Check if the controller is active by reading its leader election record.\n\t\t\t\tleaderElectionRecord, err := common.ReadLeaderElectionRecord(b.K8sShootClient, resourcelock.EndpointsResourceLock, metav1.NamespaceSystem, controller.name)\n\t\t\t\tif err != nil {\n\t\t\t\t\tout <- &checkOutput{controllerName: controller.name, err: err}\n\t\t\t\t\treturn\n\t\t\t\t}\n\n\t\t\t\tif delta := metav1.Now().UTC().Sub(leaderElectionRecord.RenewTime.Time.UTC()); delta <= pollInterval-time.Second {\n\t\t\t\t\tout <- &checkOutput{controllerName: controller.name, ready: true}\n\t\t\t\t\treturn\n\t\t\t\t}\n\n\t\t\t\tb.Logger.Infof(\"Waiting for %s to be active\", controller.name)\n\t\t\t\tout <- &checkOutput{controllerName: controller.name}\n\t\t\t}(controller)\n\t\t}\n\n\t\tgo func() {\n\t\t\twg.Wait()\n\t\t\tclose(out)\n\t\t}()\n\n\t\tfor result := range out {\n\t\t\tif result.err != nil {\n\t\t\t\treturn retry.SevereError(fmt.Errorf(\"could not check whether controller %s is active: %+v\", result.controllerName, result.err))\n\t\t\t}\n\t\t\tif !result.ready {\n\t\t\t\treturn retry.MinorError(fmt.Errorf(\"controller %s is not active\", result.controllerName))\n\t\t\t}\n\t\t}\n\n\t\treturn retry.Ok()\n\t})\n}", "func TestController(t *testing.T) {\n\tfakeKubeClient, catalogClient, fakeBrokerCatalog, _, _, testController, _, stopCh := newTestController(t)\n\tdefer close(stopCh)\n\n\tt.Log(fakeKubeClient, catalogClient, fakeBrokerCatalog, testController, 
stopCh)\n\n\tfakeBrokerCatalog.RetCatalog = &brokerapi.Catalog{\n\t\tServices: []*brokerapi.Service{\n\t\t\t{\n\t\t\t\tName: \"test-service\",\n\t\t\t\tID: \"12345\",\n\t\t\t\tDescription: \"a test service\",\n\t\t\t\tPlans: []brokerapi.ServicePlan{\n\t\t\t\t\t{\n\t\t\t\t\t\tName: \"test-plan\",\n\t\t\t\t\t\tFree: true,\n\t\t\t\t\t\tID: \"34567\",\n\t\t\t\t\t\tDescription: \"a test plan\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\tname := \"test-name\"\n\tbroker := &v1alpha1.Broker{\n\t\tObjectMeta: v1.ObjectMeta{Name: name},\n\t\tSpec: v1alpha1.BrokerSpec{\n\t\t\tURL: \"https://example.com\",\n\t\t},\n\t}\n\tbrokerClient := catalogClient.Servicecatalog().Brokers()\n\n\tbrokerServer, err := brokerClient.Create(broker)\n\tif nil != err {\n\t\tt.Fatalf(\"error creating the broker %q (%q)\", broker, err)\n\t}\n\n\tif err := wait.PollImmediate(500*time.Millisecond, wait.ForeverTestTimeout,\n\t\tfunc() (bool, error) {\n\t\t\tbrokerServer, err = brokerClient.Get(name)\n\t\t\tif nil != err {\n\t\t\t\treturn false,\n\t\t\t\t\tfmt.Errorf(\"error getting broker %s (%s)\",\n\t\t\t\t\t\tname, err)\n\t\t\t} else if len(brokerServer.Status.Conditions) > 0 {\n\t\t\t\tt.Log(brokerServer)\n\t\t\t\treturn true, nil\n\t\t\t} else {\n\t\t\t\treturn false, nil\n\t\t\t}\n\t\t},\n\t); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t// check\n\tserviceClassClient := catalogClient.Servicecatalog().ServiceClasses()\n\t_, err = serviceClassClient.Get(\"test-service\")\n\tif nil != err {\n\t\tt.Fatal(\"could not find the test service\", err)\n\t}\n\n\t// cleanup our broker\n\terr = brokerClient.Delete(name, &v1.DeleteOptions{})\n\tif nil != err {\n\t\tt.Fatalf(\"broker should be deleted (%s)\", err)\n\t}\n\n\t// uncomment if/when deleting a broker deletes the associated service\n\t// if class, err := serviceClassClient.Get(\"test-service\"); nil == err {\n\t// \tt.Fatal(\"found the test service that should have been deleted\", err, class)\n\t// }\n}", "func TestConcurrent(t *testing.T) 
{\n\tt.Parallel()\n\tctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)\n\tt.Cleanup(cancel)\n\n\tconfig := Config{MaxConcurrency: 4}\n\tcountdown := NewCountdown(config.MaxConcurrency)\n\tprocess := NewMockEventsProcess(ctx, t, config, func(ctx context.Context, event types.Event) error {\n\t\tdefer countdown.Decrement()\n\t\ttime.Sleep(time.Second)\n\t\treturn trace.Wrap(ctx.Err())\n\t})\n\n\ttimeBefore := time.Now()\n\tfor i := 0; i < config.MaxConcurrency; i++ {\n\t\tresource, err := types.NewAccessRequest(fmt.Sprintf(\"REQ-%v\", i+1), \"foo\", \"admin\")\n\t\trequire.NoError(t, err)\n\t\tprocess.Events.Fire(types.Event{Type: types.OpPut, Resource: resource})\n\t}\n\trequire.NoError(t, countdown.Wait(ctx))\n\n\ttimeAfter := time.Now()\n\tassert.InDelta(t, time.Second, timeAfter.Sub(timeBefore), float64(500*time.Millisecond))\n}", "func TestControllerHandleEvents(t *testing.T) {\n\ttests := []struct {\n\t\tname string\n\t\taddServices []*corev1.Service\n\t\tupdateServices []string\n\t\tdelServices []string\n\t\texpAddedServices []string\n\t\texpDeletedServices []string\n\t}{\n\t\t{\n\t\t\tname: \"If a controller is watching services it should react to the service change events.\",\n\t\t\taddServices: []*corev1.Service{\n\t\t\t\t{\n\t\t\t\t\tObjectMeta: metav1.ObjectMeta{Name: \"svc1\"},\n\t\t\t\t\tSpec: corev1.ServiceSpec{\n\t\t\t\t\t\tType: \"ClusterIP\",\n\t\t\t\t\t\tPorts: []corev1.ServicePort{\n\t\t\t\t\t\t\tcorev1.ServicePort{Name: \"port1\", Port: 8080},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tObjectMeta: metav1.ObjectMeta{Name: \"svc2\"},\n\t\t\t\t\tSpec: corev1.ServiceSpec{\n\t\t\t\t\t\tType: \"ClusterIP\",\n\t\t\t\t\t\tPorts: []corev1.ServicePort{\n\t\t\t\t\t\t\tcorev1.ServicePort{Name: \"port1\", Port: 8080},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tupdateServices: []string{\"svc1\"},\n\t\t\tdelServices: []string{\"svc1\", \"svc2\"},\n\t\t\texpAddedServices: []string{\"svc1\", \"svc2\", 
\"svc1\"},\n\t\t\texpDeletedServices: []string{\"svc1\", \"svc2\"},\n\t\t},\n\t}\n\n\tfor _, test := range tests {\n\t\tt.Run(test.name, func(t *testing.T) {\n\t\t\trequire := require.New(t)\n\t\t\tassert := assert.New(t)\n\t\t\tresync := 30 * time.Second\n\t\t\tstopC := make(chan struct{})\n\t\t\tvar gotAddedServices []string\n\t\t\tvar gotDeletedServices []string\n\n\t\t\t// Create the kubernetes client.\n\t\t\tk8scli, _, _, err := cli.GetK8sClients(\"\")\n\n\t\t\trequire.NoError(err, \"kubernetes client is required\")\n\n\t\t\t// Prepare the environment on the cluster.\n\t\t\tprep := prepare.New(k8scli, t)\n\t\t\tprep.SetUp()\n\t\t\tdefer prep.TearDown()\n\n\t\t\t// Create the reitrever.\n\t\t\trt := &retrieve.Resource{\n\t\t\t\tListerWatcher: cache.NewListWatchFromClient(k8scli.CoreV1().RESTClient(), \"services\", prep.Namespace().Name, fields.Everything()),\n\t\t\t\tObject: &corev1.Service{},\n\t\t\t}\n\n\t\t\t// Call times are the number of times the handler should be called before sending the termination signal.\n\t\t\tstopCallTimes := len(test.addServices) + len(test.updateServices) + len(test.delServices)\n\t\t\tcalledTimes := 0\n\t\t\tvar mx sync.Mutex\n\n\t\t\t// Create the handler.\n\t\t\thl := &handler.HandlerFunc{\n\t\t\t\tAddFunc: func(_ context.Context, obj runtime.Object) error {\n\t\t\t\t\tmx.Lock()\n\t\t\t\t\tcalledTimes++\n\t\t\t\t\tmx.Unlock()\n\n\t\t\t\t\tsvc := obj.(*corev1.Service)\n\t\t\t\t\tgotAddedServices = append(gotAddedServices, svc.Name)\n\t\t\t\t\tif calledTimes >= stopCallTimes {\n\t\t\t\t\t\tclose(stopC)\n\t\t\t\t\t}\n\t\t\t\t\treturn nil\n\t\t\t\t},\n\t\t\t\tDeleteFunc: func(_ context.Context, id string) error {\n\t\t\t\t\tmx.Lock()\n\t\t\t\t\tcalledTimes++\n\t\t\t\t\tmx.Unlock()\n\n\t\t\t\t\t// Ignore namespace.\n\t\t\t\t\tid = strings.Split(id, \"/\")[1]\n\t\t\t\t\tgotDeletedServices = append(gotDeletedServices, id)\n\t\t\t\t\tif calledTimes >= stopCallTimes {\n\t\t\t\t\t\tclose(stopC)\n\t\t\t\t\t}\n\t\t\t\t\treturn 
nil\n\t\t\t\t},\n\t\t\t}\n\n\t\t\t// Create a Pod controller.\n\t\t\tctrl := controller.NewSequential(resync, hl, rt, nil, log.Dummy)\n\t\t\trequire.NotNil(ctrl, \"controller is required\")\n\t\t\tgo ctrl.Run(stopC)\n\n\t\t\t// Create the required services.\n\t\t\tfor _, svc := range test.addServices {\n\t\t\t\t_, err := k8scli.CoreV1().Services(prep.Namespace().Name).Create(svc)\n\t\t\t\tassert.NoError(err)\n\t\t\t\ttime.Sleep(1 * time.Second)\n\t\t\t}\n\n\t\t\tfor _, svc := range test.updateServices {\n\t\t\t\torigSvc, err := k8scli.CoreV1().Services(prep.Namespace().Name).Get(svc, metav1.GetOptions{})\n\t\t\t\tif assert.NoError(err) {\n\t\t\t\t\t// Change something\n\t\t\t\t\torigSvc.Spec.Ports = append(origSvc.Spec.Ports, corev1.ServicePort{Name: \"updateport\", Port: 9876})\n\t\t\t\t\t_, err := k8scli.CoreV1().Services(prep.Namespace().Name).Update(origSvc)\n\t\t\t\t\tassert.NoError(err)\n\t\t\t\t\ttime.Sleep(1 * time.Second)\n\t\t\t\t}\n\t\t\t}\n\n\t\t\t// Delete the required services.\n\t\t\tfor _, svc := range test.delServices {\n\t\t\t\terr := k8scli.CoreV1().Services(prep.Namespace().Name).Delete(svc, &metav1.DeleteOptions{})\n\t\t\t\tassert.NoError(err)\n\t\t\t\ttime.Sleep(1 * time.Second)\n\t\t\t}\n\n\t\t\t// Wait until we have finished.\n\t\t\tselect {\n\t\t\t// Timeout.\n\t\t\tcase <-time.After(20 * time.Second):\n\t\t\t// Finished.\n\t\t\tcase <-stopC:\n\t\t\t}\n\n\t\t\t// Check.\n\t\t\tassert.Equal(test.expAddedServices, gotAddedServices)\n\t\t\tassert.Equal(test.expDeletedServices, gotDeletedServices)\n\t\t})\n\t}\n}", "func TestBatch(t *testing.T) {\n\tpre := config.Presubmit{\n\t\tName: \"pr-some-job\",\n\t\tAgent: \"jenkins\",\n\t\tContext: \"Some Job Context\",\n\t}\n\tfc := &fkc{\n\t\tprowjobs: []kube.ProwJob{pjutil.NewProwJob(pjutil.BatchSpec(pre, kube.Refs{\n\t\t\tOrg: \"o\",\n\t\t\tRepo: \"r\",\n\t\t\tBaseRef: \"master\",\n\t\t\tBaseSHA: \"123\",\n\t\t\tPulls: []kube.Pull{\n\t\t\t\t{\n\t\t\t\t\tNumber: 1,\n\t\t\t\t\tSHA: 
\"abc\",\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tNumber: 2,\n\t\t\t\t\tSHA: \"qwe\",\n\t\t\t\t},\n\t\t\t},\n\t\t}))},\n\t}\n\tjc := &fjc{}\n\tc := Controller{\n\t\tkc: fc,\n\t\tjc: jc,\n\t\tca: newFakeConfigAgent(t),\n\t\tpendingJobs: make(map[string]int),\n\t\tlock: sync.RWMutex{},\n\t}\n\n\tif err := c.Sync(); err != nil {\n\t\tt.Fatalf(\"Error on first sync: %v\", err)\n\t}\n\tif fc.prowjobs[0].Status.State != kube.PendingState {\n\t\tt.Fatalf(\"Wrong state: %v\", fc.prowjobs[0].Status.State)\n\t}\n\tif !fc.prowjobs[0].Status.JenkinsEnqueued {\n\t\tt.Fatal(\"Wrong enqueued.\")\n\t}\n\tjc.enqueued = true\n\tif err := c.Sync(); err != nil {\n\t\tt.Fatalf(\"Error on second sync: %v\", err)\n\t}\n\tif !fc.prowjobs[0].Status.JenkinsEnqueued {\n\t\tt.Fatal(\"Wrong enqueued steady state.\")\n\t}\n\tjc.enqueued = false\n\tif err := c.Sync(); err != nil {\n\t\tt.Fatalf(\"Error on third sync: %v\", err)\n\t}\n\tif fc.prowjobs[0].Status.JenkinsEnqueued {\n\t\tt.Fatal(\"Wrong enqueued after leaving queue.\")\n\t}\n\tjc.status = Status{Building: true}\n\tif err := c.Sync(); err != nil {\n\t\tt.Fatalf(\"Error on fourth sync: %v\", err)\n\t}\n\tif fc.prowjobs[0].Status.State != kube.PendingState {\n\t\tt.Fatalf(\"Wrong state: %v\", fc.prowjobs[0].Status.State)\n\t}\n\tjc.status = Status{\n\t\tBuilding: false,\n\t\tNumber: 42,\n\t}\n\tif err := c.Sync(); err != nil {\n\t\tt.Fatalf(\"Error on fifth sync: %v\", err)\n\t}\n\tif fc.prowjobs[0].Status.PodName != \"pr-some-job-42\" {\n\t\tt.Fatalf(\"Wrong PodName: %s\", fc.prowjobs[0].Status.PodName)\n\t}\n\tif fc.prowjobs[0].Status.State != kube.FailureState {\n\t\tt.Fatalf(\"Wrong state: %v\", fc.prowjobs[0].Status.State)\n\t}\n\n\t// This is what the SQ reads.\n\tif fc.prowjobs[0].Spec.Context != \"Some Job Context\" {\n\t\tt.Fatalf(\"Wrong context: %v\", fc.prowjobs[0].Spec.Context)\n\t}\n}", "func (m *MockDeploymentControllerFactory) Build(mgr mc_manager.AsyncManager, clusterName string) (controller0.DeploymentController, error) 
{\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"Build\", mgr, clusterName)\n\tret0, _ := ret[0].(controller0.DeploymentController)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func TestCancelManyJobs(t *testing.T) {\n\tif testing.Short() {\n\t\tt.Skip(\"Skipping integration tests in short mode\")\n\t}\n\n\tt.Parallel()\n\tc, _ := minikubetestenv.AcquireCluster(t)\n\n\t// Create an input repo\n\trepo := tu.UniqueString(\"TestCancelManyJobs\")\n\trequire.NoError(t, c.CreateRepo(pfs.DefaultProjectName, repo))\n\n\t// Create sleep pipeline\n\tpipeline := tu.UniqueString(\"pipeline\")\n\trequire.NoError(t, c.CreatePipeline(pfs.DefaultProjectName,\n\t\tpipeline,\n\t\t\"\",\n\t\t[]string{\"sleep\", \"600\"},\n\t\tnil,\n\t\t&pps.ParallelismSpec{\n\t\t\tConstant: 1,\n\t\t},\n\t\tclient.NewPFSInput(pfs.DefaultProjectName, repo, \"/*\"),\n\t\t\"\",\n\t\tfalse,\n\t))\n\n\t// Create 10 input commits, to spawn 10 jobs\n\tvar commits []*pfs.Commit\n\tfor i := 0; i < 10; i++ {\n\t\tcommit, err := c.StartCommit(pfs.DefaultProjectName, repo, \"master\")\n\t\trequire.NoError(t, c.PutFile(commit, \"file\", strings.NewReader(\"foo\")))\n\t\trequire.NoError(t, err)\n\t\trequire.NoError(t, c.FinishCommit(pfs.DefaultProjectName, repo, commit.Branch.Name, commit.Id))\n\t\tcommits = append(commits, commit)\n\t}\n\n\t// For each expected job: watch to make sure the input job comes up, make\n\t// sure that it's the only job running, then cancel it\n\tfor _, commit := range commits {\n\t\t// Wait until PPS has started processing commit\n\t\tvar jobInfo *pps.JobInfo\n\t\trequire.NoErrorWithinT(t, 30*time.Second, func() error {\n\t\t\treturn backoff.Retry(func() error {\n\t\t\t\tjobInfos, err := c.ListJob(pfs.DefaultProjectName, pipeline, []*pfs.Commit{commit}, -1, true)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tif len(jobInfos) != 1 {\n\t\t\t\t\treturn errors.Errorf(\"Expected one job, but got %d: %v\", len(jobInfos), jobInfos)\n\t\t\t\t}\n\t\t\t\tjobInfo = 
jobInfos[0]\n\t\t\t\treturn nil\n\t\t\t}, backoff.NewTestingBackOff())\n\t\t})\n\n\t\t// Stop the job\n\t\trequire.NoError(t, c.StopJob(pfs.DefaultProjectName, jobInfo.Job.Pipeline.Name, jobInfo.Job.Id))\n\n\t\t// Check that the job is now killed\n\t\trequire.NoErrorWithinT(t, 30*time.Second, func() error {\n\t\t\treturn backoff.Retry(func() error {\n\t\t\t\t// TODO(msteffen): once github.com/pachyderm/pachyderm/v2/pull/2642 is\n\t\t\t\t// submitted, change ListJob here to filter on commit1 as the input commit,\n\t\t\t\t// rather than inspecting the input in the test\n\t\t\t\tupdatedJobInfo, err := c.InspectJob(pfs.DefaultProjectName, jobInfo.Job.Pipeline.Name, jobInfo.Job.Id, false)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tif updatedJobInfo.State != pps.JobState_JOB_KILLED {\n\t\t\t\t\treturn errors.Errorf(\"job %s is still running, but should be KILLED\", jobInfo.Job.Id)\n\t\t\t\t}\n\t\t\t\treturn nil\n\t\t\t}, backoff.NewTestingBackOff())\n\t\t})\n\t}\n}", "func OperatorRunningTest(bundle *apimanifests.Bundle) scapiv1alpha3.TestStatus {\n\tr := scapiv1alpha3.TestResult{}\n\tr.Name = OperatorRunningTestName\n\tr.State = scapiv1alpha3.PassState\n\tr.Errors = make([]string, 0)\n\tr.Suggestions = make([]string, 0)\n\n\t//\ttime.Sleep(20 * time.Second)\n\n\t//clientset, config, err := util.GetKubeClient()\n\tclientset, _, err := util.GetKubeClient()\n\tif err != nil {\n\t\tr.State = scapiv1alpha3.FailState\n\t\tr.Errors = append(r.Errors, \"unable to connect to kube\")\n\t\treturn wrapResult(r)\n\t}\n\n\tns := \"tekton-pipelines\"\n\n\tnamespaces, err := clientset.CoreV1().Namespaces().List(context.TODO(), metav1.ListOptions{})\n\tif err != nil {\n\t\tr.State = scapiv1alpha3.FailState\n\t\tr.Errors = append(r.Errors, fmt.Sprintf(\"error getting namespaces %s\", err.Error()))\n\t\treturn wrapResult(r)\n\t}\n\n\tfor i := 0; i < len(namespaces.Items); i++ {\n\t\tn := namespaces.Items[i]\n\t\tif n.Name == \"openshift-pipelines\" {\n\t\t\tns = 
\"openshift-pipelines\"\n\t\t\tbreak\n\t\t}\n\t\tif n.Name == \"tekton-pipelines\" {\n\t\t\tns = \"tekton-pipelines\"\n\t\t\tbreak\n\t\t}\n\t}\n\n\tvar pods *corev1.PodList\n\tvar p corev1.Pod\n\n\t// look for a pod with this label\n\t//app=tekton-pipelines-controller\n\tselector := \"app=tekton-pipelines-controller\"\n\tlistOpts := metav1.ListOptions{LabelSelector: selector}\n\tpods, err = clientset.CoreV1().Pods(ns).List(context.TODO(), listOpts)\n\tif err != nil {\n\t\tr.State = scapiv1alpha3.FailState\n\t\tr.Errors = append(r.Errors, fmt.Sprintf(\"error getting pods %s\", err.Error()))\n\t\treturn wrapResult(r)\n\t}\n\tif len(pods.Items) == 0 {\n\t\tr.State = scapiv1alpha3.FailState\n\t\tr.Errors = append(r.Errors, \"tekton-pipelines-controller pod not found\")\n\t\treturn wrapResult(r)\n\t}\n\tp = pods.Items[0]\n\tif p.Status.Phase != corev1.PodRunning {\n\t\tr.State = scapiv1alpha3.FailState\n\t\tr.Errors = append(r.Errors, \"tekton-pipelines-controller pod not running\")\n\t\treturn wrapResult(r)\n\t}\n\n\t// look for a pod with this label\n\t//app=tekton-pipelines-webhook\n\tselector = \"app=tekton-pipelines-webhook\"\n\tlistOpts = metav1.ListOptions{LabelSelector: selector}\n\tpods, err = clientset.CoreV1().Pods(ns).List(context.TODO(), listOpts)\n\tif err != nil {\n\t\tr.State = scapiv1alpha3.FailState\n\t\tr.Errors = append(r.Errors, fmt.Sprintf(\"error getting pods %s\", err.Error()))\n\t\treturn wrapResult(r)\n\t}\n\tif len(pods.Items) == 0 {\n\t\tr.State = scapiv1alpha3.FailState\n\t\tr.Errors = append(r.Errors, \"tekton-pipelines-webhook pod not found\")\n\t\treturn wrapResult(r)\n\t}\n\n\tp = pods.Items[0]\n\n\tif p.Status.Phase != corev1.PodRunning {\n\t\tr.State = scapiv1alpha3.FailState\n\t\tr.Errors = append(r.Errors, \"tekton-pipelines-webhook pod not running\")\n\t\treturn wrapResult(r)\n\t}\n\n\treturn wrapResult(r)\n}", "func TestKnativeServingDeploymentRecreationReady(t *testing.T) {\n\tcancel := logstream.Start(t)\n\tdefer 
cancel()\n\tclients := Setup(t)\n\n\tdpList, err := clients.KubeClient.Kube.AppsV1().Deployments(test.ServingOperatorNamespace).List(metav1.ListOptions{})\n\tif err != nil {\n\t\tt.Fatalf(\"Failed to get any deployment under the namespace %q: %v\",\n\t\t\ttest.ServingOperatorNamespace, err)\n\t}\n\t// Delete the deployments one by one to see if they will be recreated.\n\tfor _, deployment := range dpList.Items {\n\t\tif err := clients.KubeClient.Kube.AppsV1().Deployments(deployment.Namespace).Delete(deployment.Name,\n\t\t\t&metav1.DeleteOptions{}); err != nil {\n\t\t\tt.Fatalf(\"Failed to delete deployment %s/%s: %v\", deployment.Namespace, deployment.Name, err)\n\t\t}\n\t\tif _, err = resources.WaitForDeploymentAvailable(clients, deployment.Name, deployment.Namespace,\n\t\t\tresources.IsDeploymentAvailable); err != nil {\n\t\t\tt.Fatalf(\"The deployment %s/%s failed to reach the desired state: %v\",\n\t\t\t\tdeployment.Namespace, deployment.Name, err)\n\t\t}\n\t\tif _, err := resources.WaitForKnativeServingState(clients.KnativeServingAlphaClient, test.ServingOperatorName,\n\t\t\tresources.IsKnativeServingReady); err != nil {\n\t\t\tt.Fatalf(\"KnativeService %q failed to reach the desired state: %v\", test.ServingOperatorName, err)\n\t\t}\n\t\tt.Logf(\"The deployment %s/%s reached the desired state.\", deployment.Namespace, deployment.Name)\n\t}\n}", "func runSyncTests(t *testing.T, ctx context.Context, tests []controllerTest, storageClasses []*storage.StorageClass, pods []*v1.Pod) {\n\tdoit := func(t *testing.T, test controllerTest) {\n\t\t// Initialize the controller\n\t\tclient := &fake.Clientset{}\n\t\tctrl, err := newTestController(ctx, client, nil, true)\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"Test %q construct persistent volume failed: %v\", test.name, err)\n\t\t}\n\t\treactor := newVolumeReactor(ctx, client, ctrl, nil, nil, test.errors)\n\t\tfor _, claim := range test.initialClaims {\n\t\t\tif metav1.HasAnnotation(claim.ObjectMeta, annSkipLocalStore) 
{\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tctrl.claims.Add(claim)\n\t\t}\n\t\tfor _, volume := range test.initialVolumes {\n\t\t\tif metav1.HasAnnotation(volume.ObjectMeta, annSkipLocalStore) {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tctrl.volumes.store.Add(volume)\n\t\t}\n\t\treactor.AddClaims(test.initialClaims)\n\t\treactor.AddVolumes(test.initialVolumes)\n\n\t\t// Inject classes into controller via a custom lister.\n\t\tindexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{})\n\t\tfor _, class := range storageClasses {\n\t\t\tindexer.Add(class)\n\t\t}\n\t\tctrl.classLister = storagelisters.NewStorageClassLister(indexer)\n\n\t\tpodIndexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{})\n\t\tfor _, pod := range pods {\n\t\t\tpodIndexer.Add(pod)\n\t\t\tctrl.podIndexer.Add(pod)\n\t\t}\n\t\tctrl.podLister = corelisters.NewPodLister(podIndexer)\n\n\t\t// Run the tested functions\n\t\terr = test.test(ctrl, reactor.VolumeReactor, test)\n\t\tif err != nil {\n\t\t\tt.Errorf(\"Test %q failed: %v\", test.name, err)\n\t\t}\n\n\t\t// Wait for the target state\n\t\terr = reactor.waitTest(test)\n\t\tif err != nil {\n\t\t\tt.Errorf(\"Test %q failed: %v\", test.name, err)\n\t\t}\n\n\t\tevaluateTestResults(ctx, ctrl, reactor.VolumeReactor, test, t)\n\t}\n\n\tfor _, test := range tests {\n\t\ttest := test\n\t\tt.Run(test.name, func(t *testing.T) {\n\t\t\tdoit(t, test)\n\t\t})\n\t}\n}", "func (m *MockServiceControllerFactory) Build(mgr mc_manager.AsyncManager, clusterName string) (controller1.ServiceController, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"Build\", mgr, clusterName)\n\tret0, _ := ret[0].(controller1.ServiceController)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func StartControllers(s *options.MCMServer,\n\tcontrolCoreKubeconfig *rest.Config,\n\ttargetCoreKubeconfig *rest.Config,\n\tcontrolMachineClientBuilder machinecontroller.ClientBuilder,\n\tcontrolCoreClientBuilder 
corecontroller.ClientBuilder,\n\ttargetCoreClientBuilder corecontroller.ClientBuilder,\n\trecorder record.EventRecorder,\n\tstop <-chan struct{}) error {\n\n\tklog.V(5).Info(\"Getting available resources\")\n\tavailableResources, err := getAvailableResources(controlCoreClientBuilder)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tcontrolMachineClient := controlMachineClientBuilder.ClientOrDie(controllerManagerAgentName).MachineV1alpha1()\n\n\tcontrolCoreKubeconfig = rest.AddUserAgent(controlCoreKubeconfig, controllerManagerAgentName)\n\tcontrolCoreClient, err := kubernetes.NewForConfig(controlCoreKubeconfig)\n\tif err != nil {\n\t\tklog.Fatal(err)\n\t}\n\n\ttargetCoreKubeconfig = rest.AddUserAgent(targetCoreKubeconfig, controllerManagerAgentName)\n\ttargetCoreClient, err := kubernetes.NewForConfig(targetCoreKubeconfig)\n\tif err != nil {\n\t\tklog.Fatal(err)\n\t}\n\n\tif availableResources[machineGVR] || availableResources[machineSetGVR] || availableResources[machineDeploymentGVR] {\n\t\tklog.V(5).Infof(\"Creating shared informers; resync interval: %v\", s.MinResyncPeriod)\n\n\t\tcontrolMachineInformerFactory := machineinformers.NewFilteredSharedInformerFactory(\n\t\t\tcontrolMachineClientBuilder.ClientOrDie(\"control-machine-shared-informers\"),\n\t\t\ts.MinResyncPeriod.Duration,\n\t\t\ts.Namespace,\n\t\t\tnil,\n\t\t)\n\n\t\tcontrolCoreInformerFactory := coreinformers.NewFilteredSharedInformerFactory(\n\t\t\tcontrolCoreClientBuilder.ClientOrDie(\"control-core-shared-informers\"),\n\t\t\ts.MinResyncPeriod.Duration,\n\t\t\ts.Namespace,\n\t\t\tnil,\n\t\t)\n\n\t\ttargetCoreInformerFactory := coreinformers.NewSharedInformerFactory(\n\t\t\ttargetCoreClientBuilder.ClientOrDie(\"target-core-shared-informers\"),\n\t\t\ts.MinResyncPeriod.Duration,\n\t\t)\n\n\t\t// All shared informers are v1alpha1 API level\n\t\tmachineSharedInformers := controlMachineInformerFactory.Machine().V1alpha1()\n\n\t\tklog.V(5).Infof(\"Creating controllers...\")\n\t\tmcmcontroller, err := 
mcmcontroller.NewController(\n\t\t\ts.Namespace,\n\t\t\tcontrolMachineClient,\n\t\t\tcontrolCoreClient,\n\t\t\ttargetCoreClient,\n\t\t\ttargetCoreInformerFactory.Core().V1().PersistentVolumeClaims(),\n\t\t\ttargetCoreInformerFactory.Core().V1().PersistentVolumes(),\n\t\t\tcontrolCoreInformerFactory.Core().V1().Secrets(),\n\t\t\ttargetCoreInformerFactory.Core().V1().Nodes(),\n\t\t\tmachineSharedInformers.OpenStackMachineClasses(),\n\t\t\tmachineSharedInformers.AWSMachineClasses(),\n\t\t\tmachineSharedInformers.AzureMachineClasses(),\n\t\t\tmachineSharedInformers.GCPMachineClasses(),\n\t\t\tmachineSharedInformers.AlicloudMachineClasses(),\n\t\t\tmachineSharedInformers.PacketMachineClasses(),\n\t\t\tmachineSharedInformers.Machines(),\n\t\t\tmachineSharedInformers.MachineSets(),\n\t\t\tmachineSharedInformers.MachineDeployments(),\n\t\t\trecorder,\n\t\t\ts.SafetyOptions,\n\t\t\ts.NodeConditions,\n\t\t\ts.BootstrapTokenAuthExtraGroups,\n\t\t\ts.DeleteMigratedMachineClass,\n\t\t\ts.AutoscalerScaleDownAnnotationDuringRollout,\n\t\t)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tklog.V(1).Info(\"Starting shared informers\")\n\n\t\tcontrolMachineInformerFactory.Start(stop)\n\t\tcontrolCoreInformerFactory.Start(stop)\n\t\ttargetCoreInformerFactory.Start(stop)\n\n\t\tklog.V(5).Info(\"Running controller\")\n\t\tgo mcmcontroller.Run(int(s.ConcurrentNodeSyncs), stop)\n\n\t} else {\n\t\treturn fmt.Errorf(\"unable to start machine controller: API GroupVersion %q or %q or %q is not available; \\nFound: %#v\", machineGVR, machineSetGVR, machineDeploymentGVR, availableResources)\n\t}\n\n\tselect {}\n}", "func TestCmdDeploy_latestConcurrentRejection(t *testing.T) {\n\tvar existingDeployment *kapi.ReplicationController\n\n\tcommandClient := &deployCommandClientImpl{\n\t\tGetDeploymentFn: func(namespace, name string) (*kapi.ReplicationController, error) {\n\t\t\treturn existingDeployment, nil\n\t\t},\n\t\tUpdateDeploymentConfigFn: func(config *deployapi.DeploymentConfig) 
(*deployapi.DeploymentConfig, error) {\n\t\t\tt.Fatalf(\"unexpected call to UpdateDeploymentConfig\")\n\t\t\treturn nil, nil\n\t\t},\n\t\tUpdateDeploymentFn: func(deployment *kapi.ReplicationController) (*kapi.ReplicationController, error) {\n\t\t\tt.Fatalf(\"unexpected call to UpdateDeployment for %s/%s\", deployment.Namespace, deployment.Name)\n\t\t\treturn nil, nil\n\t\t},\n\t}\n\n\tc := &deployLatestCommand{client: commandClient}\n\n\tinvalidStatusList := []deployapi.DeploymentStatus{\n\t\tdeployapi.DeploymentStatusNew,\n\t\tdeployapi.DeploymentStatusPending,\n\t\tdeployapi.DeploymentStatusRunning,\n\t}\n\n\tfor _, status := range invalidStatusList {\n\t\tconfig := deploytest.OkDeploymentConfig(1)\n\t\texistingDeployment = deploymentFor(config, status)\n\t\terr := c.deploy(config, ioutil.Discard)\n\t\tif err == nil {\n\t\t\tt.Errorf(\"expected an error starting deployment with existing status %s\", status)\n\t\t}\n\t}\n}", "func TestHandle_updateOk(t *testing.T) {\n\tvar (\n\t\tconfig *deployapi.DeploymentConfig\n\t\tdeployed *kapi.ReplicationController\n\t\texistingDeployments *kapi.ReplicationControllerList\n\t)\n\n\tcontroller := &DeploymentConfigController{\n\t\tmakeDeployment: func(config *deployapi.DeploymentConfig) (*kapi.ReplicationController, error) {\n\t\t\treturn deployutil.MakeDeployment(config, api.Codec)\n\t\t},\n\t\tdeploymentClient: &deploymentClientImpl{\n\t\t\tcreateDeploymentFunc: func(namespace string, deployment *kapi.ReplicationController) (*kapi.ReplicationController, error) {\n\t\t\t\tdeployed = deployment\n\t\t\t\treturn deployment, nil\n\t\t\t},\n\t\t\tlistDeploymentsForConfigFunc: func(namespace, configName string) (*kapi.ReplicationControllerList, error) {\n\t\t\t\treturn existingDeployments, nil\n\t\t\t},\n\t\t\tupdateDeploymentFunc: func(namespace string, deployment *kapi.ReplicationController) (*kapi.ReplicationController, error) {\n\t\t\t\tt.Fatalf(\"unexpected update call with deployment %v\", deployment)\n\t\t\t\treturn nil, 
nil\n\t\t\t},\n\t\t},\n\t\trecorder: &record.FakeRecorder{},\n\t}\n\n\ttype existing struct {\n\t\tversion int\n\t\treplicas int\n\t\tstatus deployapi.DeploymentStatus\n\t}\n\n\ttype scenario struct {\n\t\tversion int\n\t\texpectedReplicas int\n\t\texisting []existing\n\t}\n\n\tscenarios := []scenario{\n\t\t{1, 1, []existing{}},\n\t\t{2, 1, []existing{\n\t\t\t{1, 1, deployapi.DeploymentStatusComplete},\n\t\t}},\n\t\t{3, 4, []existing{\n\t\t\t{1, 0, deployapi.DeploymentStatusComplete},\n\t\t\t{2, 4, deployapi.DeploymentStatusComplete},\n\t\t}},\n\t\t{3, 4, []existing{\n\t\t\t{1, 4, deployapi.DeploymentStatusComplete},\n\t\t\t{2, 1, deployapi.DeploymentStatusFailed},\n\t\t}},\n\t\t{4, 2, []existing{\n\t\t\t{1, 0, deployapi.DeploymentStatusComplete},\n\t\t\t{2, 0, deployapi.DeploymentStatusFailed},\n\t\t\t{3, 2, deployapi.DeploymentStatusComplete},\n\t\t}},\n\t\t// Scramble the order of the previous to ensure we still get it right.\n\t\t{4, 2, []existing{\n\t\t\t{2, 0, deployapi.DeploymentStatusFailed},\n\t\t\t{3, 2, deployapi.DeploymentStatusComplete},\n\t\t\t{1, 0, deployapi.DeploymentStatusComplete},\n\t\t}},\n\t}\n\n\tfor _, scenario := range scenarios {\n\t\tdeployed = nil\n\t\tconfig = deploytest.OkDeploymentConfig(scenario.version)\n\t\texistingDeployments = &kapi.ReplicationControllerList{}\n\t\tfor _, e := range scenario.existing {\n\t\t\td, _ := deployutil.MakeDeployment(deploytest.OkDeploymentConfig(e.version), api.Codec)\n\t\t\td.Spec.Replicas = e.replicas\n\t\t\td.Annotations[deployapi.DeploymentStatusAnnotation] = string(e.status)\n\t\t\texistingDeployments.Items = append(existingDeployments.Items, *d)\n\t\t}\n\t\terr := controller.Handle(config)\n\n\t\tif deployed == nil {\n\t\t\tt.Fatalf(\"expected a deployment\")\n\t\t}\n\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"unexpected error: %v\", err)\n\t\t}\n\n\t\tdesired, hasDesired := deployutil.DeploymentDesiredReplicas(deployed)\n\t\tif !hasDesired {\n\t\t\tt.Fatalf(\"expected desired 
replicas\")\n\t\t}\n\t\tif e, a := scenario.expectedReplicas, desired; e != a {\n\t\t\tt.Errorf(\"expected desired replicas %d, got %d\", e, a)\n\t\t}\n\t}\n}", "func (t *JobUpgradeTest) Test(f *framework.Framework, done <-chan struct{}, upgrade UpgradeType) {\n\t<-done\n\tBy(\"Ensuring active pods == parallelism\")\n\trunning, err := framework.CheckForAllJobPodsRunning(f.ClientSet, t.namespace, t.job.Name, 2)\n\tExpect(err).NotTo(HaveOccurred())\n\tExpect(running).To(BeTrue())\n}", "func TestProject_CreateProject_PollsUntilOperationIsSuccessful(t *testing.T) {\n\tctrl := gomock.NewController(t)\n\tdefer ctrl.Finish()\n\n\tcoreClient := azdosdkmocks.NewMockCoreClient(ctrl)\n\toperationsClient := azdosdkmocks.NewMockOperationsClient(ctrl)\n\tclients := &client.AggregatedClient{\n\t\tCoreClient: coreClient,\n\t\tOperationsClient: operationsClient,\n\t\tCtx: context.Background(),\n\t}\n\n\texpectedProjectCreateArgs := core.QueueCreateProjectArgs{ProjectToCreate: &testProject}\n\tmockedOperationReference := operations.OperationReference{Id: &testID}\n\texpectedOperationArgs := operations.GetOperationArgs{OperationId: &testID}\n\n\tcoreClient.\n\t\tEXPECT().\n\t\tQueueCreateProject(clients.Ctx, expectedProjectCreateArgs).\n\t\tReturn(&mockedOperationReference, nil).\n\t\tTimes(1)\n\n\tfirstStatus := operationWithStatus(operations.OperationStatusValues.InProgress)\n\tfirstPoll := operationsClient.\n\t\tEXPECT().\n\t\tGetOperation(clients.Ctx, expectedOperationArgs).\n\t\tReturn(&firstStatus, nil)\n\n\tsecondStatus := operationWithStatus(operations.OperationStatusValues.Succeeded)\n\tsecondPoll := operationsClient.\n\t\tEXPECT().\n\t\tGetOperation(clients.Ctx, expectedOperationArgs).\n\t\tReturn(&secondStatus, nil)\n\n\tgomock.InOrder(firstPoll, secondPoll)\n\n\terr := createProject(clients, &testProject, 10*time.Minute)\n\trequire.Equal(t, nil, err)\n}", "func TestConfigController(t *testing.T) {\n\tvar (\n\t\tname = \"common-service\"\n\t\tnamespace = 
\"ibm-common-service\"\n\t)\n\n\treq := getReconcileRequest(name, namespace)\n\tr := getReconciler(name, namespace)\n\n\tinitReconcile(t, r, req)\n\n}", "func SetupAddControllers(k kubernetes.Interface, namespace string) kubernetes.Interface {\n\td1 := MockDeploy()\n\tif _, err := k.AppsV1().Deployments(namespace).Create(&d1); err != nil {\n\t\tpanic(err)\n\t}\n\n\ts1 := MockStatefulSet()\n\tif _, err := k.AppsV1().StatefulSets(namespace).Create(&s1); err != nil {\n\t\tpanic(err)\n\t}\n\n\tds1 := MockDaemonSet()\n\tif _, err := k.AppsV1().DaemonSets(namespace).Create(&ds1); err != nil {\n\t\tpanic(err)\n\t}\n\n\tj1 := MockJob()\n\tif _, err := k.BatchV1().Jobs(namespace).Create(&j1); err != nil {\n\t\tpanic(err)\n\t}\n\n\tcj1 := MockCronJob()\n\tif _, err := k.BatchV1beta1().CronJobs(namespace).Create(&cj1); err != nil {\n\t\tpanic(err)\n\t}\n\n\trc1 := MockReplicationController()\n\tif _, err := k.CoreV1().ReplicationControllers(namespace).Create(&rc1); err != nil {\n\t\tpanic(err)\n\t}\n\n\tp1 := MockNakedPod()\n\tif _, err := k.CoreV1().Pods(namespace).Create(&p1); err != nil {\n\t\tpanic(err)\n\t}\n\n\treturn k\n}", "func TestCreate(t *testing.T) {\n\t// set up fake web server\n\tr := gin.Default()\n\tbuilds.Mount(r)\n\n\t// test artifacts send to callback URL\n\tr.POST(\"/callback\", func(c *gin.Context) {\n\t\tr, _, err := c.Request.FormFile(\"file\")\n\t\tif err != nil {\n\t\t\tt.Error(err)\n\t\t}\n\t\tres, err := testhelper.ShouldIncludeFileInTar(r, \"app\")\n\t\tif err != nil {\n\t\t\tt.Error(err)\n\t\t}\n\t\tif !res {\n\t\t\tt.Error(\"artifact should be found\")\n\t\t}\n\t})\n\n\t// run web server\n\ts := httptest.NewServer(r)\n\tdefer s.Close()\n\n\t// prepare jobqueue\n\tgo jobqueue.Wait()\n\tdefer jobqueue.Close()\n\n\t// send request\n\tbuild, err := controller_helper.Create(s.URL, \"./example/app.tar\", s.URL+\"/callback\")\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t// wait for finishing build\n\texitCode := make(chan int, 1)\n\tgo func() 
{\n\t\tfor {\n\t\t\tif res, err := controller_helper.Show(s.URL, build.Id); err != nil {\n\t\t\t\tt.Fatal(err)\n\t\t\t} else {\n\t\t\t\tif res.Job.Finished {\n\t\t\t\t\texitCode <- res.Job.ExitCode\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\t\t\ttime.Sleep(200 * time.Millisecond)\n\t\t}\n\t}()\n\n\t// timeout after three seconds\n\tselect {\n\tcase c := <-exitCode:\n\t\tif c != 0 {\n\t\t\tt.Fatal(c)\n\t\t}\n\tcase <-time.After(3 * time.Second):\n\t\tt.Fatal(\"the build should be finished in a few second\")\n\t}\n\n\treq, err := testhelper.Get(s.URL+\"/builds/\"+build.Id+\"/log.txt\", map[string]string{})\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tc := http.Client{}\n\tres, err := c.Do(req)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tdefer res.Body.Close()\n\n\tif b, err := ioutil.ReadAll(res.Body); err != nil {\n\t\tt.Fatal(err)\n\t} else if !strings.Contains(string(b), \"make\") {\n\t\tt.Fatal(\"example build shuold start with make command\")\n\t}\n}", "func TestHandle_existingDeployments(t *testing.T) {\n\tvar updatedDeployments []kapi.ReplicationController\n\tvar (\n\t\tconfig *deployapi.DeploymentConfig\n\t\tdeployed *kapi.ReplicationController\n\t\texistingDeployments *kapi.ReplicationControllerList\n\t)\n\n\tcontroller := &DeploymentConfigController{\n\t\tmakeDeployment: func(config *deployapi.DeploymentConfig) (*kapi.ReplicationController, error) {\n\t\t\treturn deployutil.MakeDeployment(config, api.Codec)\n\t\t},\n\t\tdeploymentClient: &deploymentClientImpl{\n\t\t\tcreateDeploymentFunc: func(namespace string, deployment *kapi.ReplicationController) (*kapi.ReplicationController, error) {\n\t\t\t\tdeployed = deployment\n\t\t\t\treturn deployment, nil\n\t\t\t},\n\t\t\tlistDeploymentsForConfigFunc: func(namespace, configName string) (*kapi.ReplicationControllerList, error) {\n\t\t\t\treturn existingDeployments, nil\n\t\t\t},\n\t\t\tupdateDeploymentFunc: func(namespace string, deployment *kapi.ReplicationController) (*kapi.ReplicationController, error) 
{\n\t\t\t\tupdatedDeployments = append(updatedDeployments, *deployment)\n\t\t\t\t//t.Fatalf(\"unexpected update call with deployment %v\", deployment)\n\t\t\t\treturn deployment, nil\n\t\t\t},\n\t\t},\n\t\trecorder: &record.FakeRecorder{},\n\t}\n\n\ttype existing struct {\n\t\tversion int\n\t\tstatus deployapi.DeploymentStatus\n\t\tshouldCancel bool\n\t}\n\n\ttype scenario struct {\n\t\tversion int\n\t\texisting []existing\n\t\terrorType reflect.Type\n\t\texpectDeployment bool\n\t}\n\n\ttransientErrorType := reflect.TypeOf(transientError(\"\"))\n\tscenarios := []scenario{\n\t\t// No existing deployments\n\t\t{1, []existing{}, nil, true},\n\t\t// A single existing completed deployment\n\t\t{2, []existing{{1, deployapi.DeploymentStatusComplete, false}}, nil, true},\n\t\t// A single existing failed deployment\n\t\t{2, []existing{{1, deployapi.DeploymentStatusFailed, false}}, nil, true},\n\t\t// Multiple existing completed/failed deployments\n\t\t{3, []existing{{2, deployapi.DeploymentStatusFailed, false}, {1, deployapi.DeploymentStatusComplete, false}}, nil, true},\n\n\t\t// A single existing deployment in the default state\n\t\t{2, []existing{{1, \"\", false}}, transientErrorType, false},\n\t\t// A single existing new deployment\n\t\t{2, []existing{{1, deployapi.DeploymentStatusNew, false}}, transientErrorType, false},\n\t\t// A single existing pending deployment\n\t\t{2, []existing{{1, deployapi.DeploymentStatusPending, false}}, transientErrorType, false},\n\t\t// A single existing running deployment\n\t\t{2, []existing{{1, deployapi.DeploymentStatusRunning, false}}, transientErrorType, false},\n\t\t// Multiple existing deployments with one in new/pending/running\n\t\t{4, []existing{{3, deployapi.DeploymentStatusRunning, false}, {2, deployapi.DeploymentStatusComplete, false}, {1, deployapi.DeploymentStatusFailed, false}}, transientErrorType, false},\n\n\t\t// Latest deployment exists and has already failed/completed\n\t\t{2, []existing{{2, 
deployapi.DeploymentStatusFailed, false}, {1, deployapi.DeploymentStatusComplete, false}}, nil, false},\n\t\t// Latest deployment exists and is in new/pending/running state\n\t\t{2, []existing{{2, deployapi.DeploymentStatusRunning, false}, {1, deployapi.DeploymentStatusComplete, false}}, nil, false},\n\n\t\t// Multiple existing deployments with more than one in new/pending/running\n\t\t{4, []existing{{3, deployapi.DeploymentStatusNew, false}, {2, deployapi.DeploymentStatusRunning, true}, {1, deployapi.DeploymentStatusFailed, false}}, transientErrorType, false},\n\t\t// Multiple existing deployments with more than one in new/pending/running\n\t\t// Latest deployment has already failed\n\t\t{6, []existing{{5, deployapi.DeploymentStatusFailed, false}, {4, deployapi.DeploymentStatusRunning, false}, {3, deployapi.DeploymentStatusNew, true}, {2, deployapi.DeploymentStatusComplete, false}, {1, deployapi.DeploymentStatusNew, true}}, transientErrorType, false},\n\t}\n\n\tfor _, scenario := range scenarios {\n\t\tupdatedDeployments = []kapi.ReplicationController{}\n\t\tdeployed = nil\n\t\tconfig = deploytest.OkDeploymentConfig(scenario.version)\n\t\texistingDeployments = &kapi.ReplicationControllerList{}\n\t\tfor _, e := range scenario.existing {\n\t\t\td, _ := deployutil.MakeDeployment(deploytest.OkDeploymentConfig(e.version), api.Codec)\n\t\t\tif e.status != \"\" {\n\t\t\t\td.Annotations[deployapi.DeploymentStatusAnnotation] = string(e.status)\n\t\t\t}\n\t\t\texistingDeployments.Items = append(existingDeployments.Items, *d)\n\t\t}\n\t\terr := controller.Handle(config)\n\n\t\tif scenario.expectDeployment && deployed == nil {\n\t\t\tt.Fatalf(\"expected a deployment\")\n\t\t}\n\n\t\tif scenario.errorType == nil {\n\t\t\tif err != nil {\n\t\t\t\tt.Fatalf(\"unexpected error: %v\", err)\n\t\t\t}\n\t\t} else {\n\t\t\tif err == nil {\n\t\t\t\tt.Fatalf(\"expected error\")\n\t\t\t}\n\t\t\tif reflect.TypeOf(err) != scenario.errorType {\n\t\t\t\tt.Fatalf(\"error expected: %s, got: 
%s\", scenario.errorType, reflect.TypeOf(err))\n\t\t\t}\n\t\t}\n\n\t\texpectedCancellations := []int{}\n\t\tactualCancellations := []int{}\n\t\tfor _, e := range scenario.existing {\n\t\t\tif e.shouldCancel {\n\t\t\t\texpectedCancellations = append(expectedCancellations, e.version)\n\t\t\t}\n\t\t}\n\t\tfor _, d := range updatedDeployments {\n\t\t\tactualCancellations = append(actualCancellations, deployutil.DeploymentVersionFor(&d))\n\t\t}\n\n\t\tsort.Ints(actualCancellations)\n\t\tsort.Ints(expectedCancellations)\n\t\tif !reflect.DeepEqual(actualCancellations, expectedCancellations) {\n\t\t\tt.Fatalf(\"expected cancellations: %v, actual: %v\", expectedCancellations, actualCancellations)\n\t\t}\n\t}\n}", "func TestApplyStatus(t *testing.T) {\n\tserver, err := apiservertesting.StartTestServer(t, apiservertesting.NewDefaultTestServerOptions(), []string{\"--disable-admission-plugins\", \"ServiceAccount,TaintNodesByCondition\"}, framework.SharedEtcd())\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tdefer server.TearDownFn()\n\n\tclient, err := kubernetes.NewForConfig(server.ClientConfig)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tdynamicClient, err := dynamic.NewForConfig(server.ClientConfig)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t// create CRDs so we can make sure that custom resources do not get lost\n\tetcd.CreateTestCRDs(t, apiextensionsclientset.NewForConfigOrDie(server.ClientConfig), false, etcd.GetCustomResourceDefinitionData()...)\n\tif _, err := client.CoreV1().Namespaces().Create(context.TODO(), &v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: testNamespace}}, metav1.CreateOptions{}); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tcreateData := etcd.GetEtcdStorageData()\n\n\t// gather resources to test\n\t_, resourceLists, err := client.Discovery().ServerGroupsAndResources()\n\tif err != nil {\n\t\tt.Fatalf(\"Failed to get ServerGroupsAndResources with error: %+v\", err)\n\t}\n\n\tfor _, resourceList := range resourceLists {\n\t\tfor _, resource := range 
resourceList.APIResources {\n\t\t\tif !strings.HasSuffix(resource.Name, \"/status\") {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tmapping, err := createMapping(resourceList.GroupVersion, resource)\n\t\t\tif err != nil {\n\t\t\t\tt.Fatal(err)\n\t\t\t}\n\t\t\tt.Run(mapping.Resource.String(), func(t *testing.T) {\n\t\t\t\t// both spec and status get wiped for CSRs,\n\t\t\t\t// nothing is expected to be managed for it, skip it\n\t\t\t\tif mapping.Resource.Resource == \"certificatesigningrequests\" {\n\t\t\t\t\tt.Skip()\n\t\t\t\t}\n\n\t\t\t\tstatus, ok := statusData[mapping.Resource]\n\t\t\t\tif !ok {\n\t\t\t\t\tstatus = statusDefault\n\t\t\t\t}\n\t\t\t\tnewResource, ok := createData[mapping.Resource]\n\t\t\t\tif !ok {\n\t\t\t\t\tt.Fatalf(\"no test data for %s. Please add a test for your new type to etcd.GetEtcdStorageData().\", mapping.Resource)\n\t\t\t\t}\n\t\t\t\tnewObj := unstructured.Unstructured{}\n\t\t\t\tif err := json.Unmarshal([]byte(newResource.Stub), &newObj.Object); err != nil {\n\t\t\t\t\tt.Fatal(err)\n\t\t\t\t}\n\n\t\t\t\tnamespace := testNamespace\n\t\t\t\tif mapping.Scope == meta.RESTScopeRoot {\n\t\t\t\t\tnamespace = \"\"\n\t\t\t\t}\n\t\t\t\tname := newObj.GetName()\n\n\t\t\t\t// etcd test stub data doesn't contain apiVersion/kind (!), but apply requires it\n\t\t\t\tnewObj.SetGroupVersionKind(mapping.GroupVersionKind)\n\n\t\t\t\trsc := dynamicClient.Resource(mapping.Resource).Namespace(namespace)\n\t\t\t\t// apply to create\n\t\t\t\t_, err = rsc.Apply(context.TODO(), name, &newObj, metav1.ApplyOptions{FieldManager: \"create_test\"})\n\t\t\t\tif err != nil {\n\t\t\t\t\tt.Fatal(err)\n\t\t\t\t}\n\n\t\t\t\tstatusObj := unstructured.Unstructured{}\n\t\t\t\tif err := json.Unmarshal([]byte(status), &statusObj.Object); err != nil {\n\t\t\t\t\tt.Fatal(err)\n\t\t\t\t}\n\t\t\t\tstatusObj.SetAPIVersion(mapping.GroupVersionKind.GroupVersion().String())\n\t\t\t\tstatusObj.SetKind(mapping.GroupVersionKind.Kind)\n\t\t\t\tstatusObj.SetName(name)\n\n\t\t\t\tobj, err := 
dynamicClient.\n\t\t\t\t\tResource(mapping.Resource).\n\t\t\t\t\tNamespace(namespace).\n\t\t\t\t\tApplyStatus(context.TODO(), name, &statusObj, metav1.ApplyOptions{FieldManager: \"apply_status_test\", Force: true})\n\t\t\t\tif err != nil {\n\t\t\t\t\tt.Fatalf(\"Failed to apply: %v\", err)\n\t\t\t\t}\n\n\t\t\t\taccessor, err := meta.Accessor(obj)\n\t\t\t\tif err != nil {\n\t\t\t\t\tt.Fatalf(\"Failed to get meta accessor: %v:\\n%v\", err, obj)\n\t\t\t\t}\n\n\t\t\t\tmanagedFields := accessor.GetManagedFields()\n\t\t\t\tif managedFields == nil {\n\t\t\t\t\tt.Fatal(\"Empty managed fields\")\n\t\t\t\t}\n\t\t\t\tif !findManager(managedFields, \"apply_status_test\") {\n\t\t\t\t\tt.Fatalf(\"Couldn't find apply_status_test: %v\", managedFields)\n\t\t\t\t}\n\t\t\t\tif !findManager(managedFields, \"create_test\") {\n\t\t\t\t\tt.Fatalf(\"Couldn't find create_test: %v\", managedFields)\n\t\t\t\t}\n\n\t\t\t\tif err := rsc.Delete(context.TODO(), name, *metav1.NewDeleteOptions(0)); err != nil {\n\t\t\t\t\tt.Fatalf(\"deleting final object failed: %v\", err)\n\t\t\t\t}\n\t\t\t})\n\t\t}\n\t}\n}", "func TestShouldReconcileClusterServiceBroker(t *testing.T) {\n\t// Anonymous struct fields:\n\t// name: short description of the test\n\t// broker: broker object to test\n\t// now: what time the interval is calculated with respect to interval\n\t// reconcile: whether or not the reconciler should run, the return of\n\t// shouldReconcileClusterServiceBroker\n\tcases := []struct {\n\t\tname string\n\t\tbroker *v1beta1.ClusterServiceBroker\n\t\tnow time.Time\n\t\treconcile bool\n\t\terr error\n\t}{\n\t\t{\n\t\t\tname: \"no status\",\n\t\t\tbroker: func() *v1beta1.ClusterServiceBroker {\n\t\t\t\tbroker := getTestClusterServiceBroker()\n\t\t\t\tbroker.Spec.RelistDuration = &metav1.Duration{Duration: 3 * time.Minute}\n\t\t\t\treturn broker\n\t\t\t}(),\n\t\t\tnow: time.Now(),\n\t\t\treconcile: true,\n\t\t},\n\t\t{\n\t\t\tname: \"deletionTimestamp set\",\n\t\t\tbroker: func() 
*v1beta1.ClusterServiceBroker {\n\t\t\t\tbroker := getTestClusterServiceBrokerWithStatus(v1beta1.ConditionTrue)\n\t\t\t\tbroker.DeletionTimestamp = &metav1.Time{}\n\t\t\t\tbroker.Spec.RelistDuration = &metav1.Duration{Duration: 3 * time.Hour}\n\t\t\t\treturn broker\n\t\t\t}(),\n\t\t\tnow: time.Now(),\n\t\t\treconcile: true,\n\t\t},\n\t\t{\n\t\t\tname: \"no ready condition\",\n\t\t\tbroker: func() *v1beta1.ClusterServiceBroker {\n\t\t\t\tbroker := getTestClusterServiceBroker()\n\t\t\t\tbroker.Status = v1beta1.ClusterServiceBrokerStatus{\n\t\t\t\t\tCommonServiceBrokerStatus: v1beta1.CommonServiceBrokerStatus{\n\t\t\t\t\t\tConditions: []v1beta1.ServiceBrokerCondition{\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\tType: v1beta1.ServiceBrokerConditionType(\"NotARealCondition\"),\n\t\t\t\t\t\t\t\tStatus: v1beta1.ConditionTrue,\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t}\n\t\t\t\tbroker.Spec.RelistDuration = &metav1.Duration{Duration: 3 * time.Minute}\n\t\t\t\treturn broker\n\t\t\t}(),\n\t\t\tnow: time.Now(),\n\t\t\treconcile: true,\n\t\t},\n\t\t{\n\t\t\tname: \"not ready\",\n\t\t\tbroker: func() *v1beta1.ClusterServiceBroker {\n\t\t\t\tbroker := getTestClusterServiceBrokerWithStatus(v1beta1.ConditionFalse)\n\t\t\t\tbroker.Spec.RelistDuration = &metav1.Duration{Duration: 3 * time.Minute}\n\t\t\t\treturn broker\n\t\t\t}(),\n\t\t\tnow: time.Now(),\n\t\t\treconcile: true,\n\t\t},\n\t\t{\n\t\t\tname: \"ready, interval elapsed\",\n\t\t\tbroker: func() *v1beta1.ClusterServiceBroker {\n\t\t\t\tbroker := getTestClusterServiceBrokerWithStatus(v1beta1.ConditionTrue)\n\t\t\t\tbroker.Spec.RelistDuration = &metav1.Duration{Duration: 3 * time.Minute}\n\t\t\t\treturn broker\n\t\t\t}(),\n\t\t\tnow: time.Now(),\n\t\t\treconcile: true,\n\t\t},\n\t\t{\n\t\t\tname: \"good steady state - ready, interval not elapsed, but last state change was a long time ago\",\n\t\t\tbroker: func() *v1beta1.ClusterServiceBroker {\n\t\t\t\tlastTransitionTime := metav1.NewTime(time.Now().Add(-30 * 
time.Minute))\n\t\t\t\tlastRelistTime := metav1.NewTime(time.Now().Add(-2 * time.Minute))\n\t\t\t\tbroker := getTestClusterServiceBrokerWithStatusAndTime(v1beta1.ConditionTrue, lastTransitionTime, lastRelistTime)\n\t\t\t\tbroker.Spec.RelistDuration = &metav1.Duration{Duration: 3 * time.Minute}\n\t\t\t\treturn broker\n\t\t\t}(),\n\t\t\tnow: time.Now(),\n\t\t\treconcile: false,\n\t\t},\n\t\t{\n\t\t\tname: \"good steady state - ready, interval has elapsed, last state change was a long time ago\",\n\t\t\tbroker: func() *v1beta1.ClusterServiceBroker {\n\t\t\t\tlastTransitionTime := metav1.NewTime(time.Now().Add(-30 * time.Minute))\n\t\t\t\tlastRelistTime := metav1.NewTime(time.Now().Add(-4 * time.Minute))\n\t\t\t\tbroker := getTestClusterServiceBrokerWithStatusAndTime(v1beta1.ConditionTrue, lastTransitionTime, lastRelistTime)\n\t\t\t\tbroker.Spec.RelistDuration = &metav1.Duration{Duration: 3 * time.Minute}\n\t\t\t\treturn broker\n\t\t\t}(),\n\t\t\tnow: time.Now(),\n\t\t\treconcile: true,\n\t\t},\n\t\t{\n\t\t\tname: \"ready, interval not elapsed\",\n\t\t\tbroker: func() *v1beta1.ClusterServiceBroker {\n\t\t\t\tbroker := getTestClusterServiceBrokerWithStatus(v1beta1.ConditionTrue)\n\t\t\t\tbroker.Spec.RelistDuration = &metav1.Duration{Duration: 3 * time.Hour}\n\t\t\t\treturn broker\n\t\t\t}(),\n\t\t\tnow: time.Now(),\n\t\t\treconcile: false,\n\t\t},\n\t\t{\n\t\t\tname: \"ready, interval not elapsed, spec changed\",\n\t\t\tbroker: func() *v1beta1.ClusterServiceBroker {\n\t\t\t\tbroker := getTestClusterServiceBrokerWithStatus(v1beta1.ConditionTrue)\n\t\t\t\tbroker.Generation = 2\n\t\t\t\tbroker.Status.ReconciledGeneration = 1\n\t\t\t\tbroker.Spec.RelistDuration = &metav1.Duration{Duration: 3 * time.Hour}\n\t\t\t\treturn broker\n\t\t\t}(),\n\t\t\tnow: time.Now(),\n\t\t\treconcile: true,\n\t\t},\n\t\t{\n\t\t\tname: \"ready, duration behavior, nil duration\",\n\t\t\tbroker: func() *v1beta1.ClusterServiceBroker {\n\t\t\t\tbroker := 
getTestClusterServiceBrokerWithStatus(v1beta1.ConditionTrue)\n\t\t\t\tbroker.Spec.RelistBehavior = v1beta1.ServiceBrokerRelistBehaviorDuration\n\t\t\t\tbroker.Spec.RelistDuration = nil\n\t\t\t\treturn broker\n\t\t\t}(),\n\t\t\tnow: time.Now(),\n\t\t\treconcile: false,\n\t\t},\n\t\t{\n\t\t\tname: \"ready, manual behavior\",\n\t\t\tbroker: func() *v1beta1.ClusterServiceBroker {\n\t\t\t\tbroker := getTestClusterServiceBrokerWithStatus(v1beta1.ConditionTrue)\n\t\t\t\tbroker.Spec.RelistBehavior = v1beta1.ServiceBrokerRelistBehaviorManual\n\t\t\t\treturn broker\n\t\t\t}(),\n\t\t\tnow: time.Now(),\n\t\t\treconcile: false,\n\t\t},\n\t}\n\n\tfor _, tc := range cases {\n\t\tvar ltt *time.Time\n\t\tif len(tc.broker.Status.Conditions) != 0 {\n\t\t\tltt = &tc.broker.Status.Conditions[0].LastTransitionTime.Time\n\t\t}\n\n\t\tif tc.broker.Spec.RelistDuration != nil {\n\t\t\tinterval := tc.broker.Spec.RelistDuration.Duration\n\t\t\tlastRelistTime := tc.broker.Status.LastCatalogRetrievalTime\n\t\t\tt.Logf(\"%v: now: %v, interval: %v, last transition time: %v, last relist time: %v\", tc.name, tc.now, interval, ltt, lastRelistTime)\n\t\t} else {\n\t\t\tt.Logf(\"broker.Spec.RelistDuration set to nil\")\n\t\t}\n\n\t\tactual := shouldReconcileClusterServiceBroker(tc.broker, tc.now)\n\n\t\tif e, a := tc.reconcile, actual; e != a {\n\t\t\tt.Errorf(\"%v: unexpected result: %s\", tc.name, expectedGot(e, a))\n\t\t}\n\t}\n}", "func waitForPods(cs *framework.ClientSet, expectedTotal, min, max int32) error {\n\terr := wait.PollImmediate(1*time.Second, 5*time.Minute, func() (bool, error) {\n\t\td, err := cs.AppsV1Interface.Deployments(\"openshift-machine-config-operator\").Get(context.TODO(), \"etcd-quorum-guard\", metav1.GetOptions{})\n\t\tif err != nil {\n\t\t\t// By this point the deployment should exist.\n\t\t\tfmt.Printf(\" error waiting for etcd-quorum-guard deployment to exist: %v\\n\", err)\n\t\t\treturn true, err\n\t\t}\n\t\tif d.Status.Replicas < 1 {\n\t\t\tfmt.Println(\"operator 
deployment has no replicas\")\n\t\t\treturn false, nil\n\t\t}\n\t\tif d.Status.Replicas == expectedTotal &&\n\t\t\td.Status.AvailableReplicas >= min &&\n\t\t\td.Status.AvailableReplicas <= max {\n\t\t\tfmt.Printf(\" Deployment is ready! %d %d\\n\", d.Status.Replicas, d.Status.AvailableReplicas)\n\t\t\treturn true, nil\n\t\t}\n\t\treturn false, nil\n\t})\n\tif err != nil {\n\t\treturn err\n\t}\n\tfor pod, info := range pods {\n\t\tif info.status == \"Running\" {\n\t\t\tnode := info.node\n\t\t\tif node == \"\" {\n\t\t\t\treturn fmt.Errorf(\"Pod %s not associated with a node\", pod)\n\t\t\t}\n\t\t\tif _, ok := nodes[node]; !ok {\n\t\t\t\treturn fmt.Errorf(\"pod %s running on %s, not a master\", pod, node)\n\t\t\t}\n\t\t}\n\t}\n\treturn nil\n}", "func TestCmdDeploy_cancelOk(t *testing.T) {\n\tvar (\n\t\tconfig *deployapi.DeploymentConfig\n\t\texistingDeployments *kapi.ReplicationControllerList\n\t\tupdatedDeployments []kapi.ReplicationController\n\t)\n\n\tcommandClient := &deployCommandClientImpl{\n\t\tGetDeploymentFn: func(namespace, name string) (*kapi.ReplicationController, error) {\n\t\t\tt.Fatalf(\"unexpected call to GetDeployment: %s\", name)\n\t\t\treturn nil, nil\n\t\t},\n\t\tListDeploymentsForConfigFn: func(namespace, configName string) (*kapi.ReplicationControllerList, error) {\n\t\t\treturn existingDeployments, nil\n\t\t},\n\t\tUpdateDeploymentConfigFn: func(config *deployapi.DeploymentConfig) (*deployapi.DeploymentConfig, error) {\n\t\t\tt.Fatalf(\"unexpected call to UpdateDeploymentConfig\")\n\t\t\treturn nil, nil\n\t\t},\n\t\tUpdateDeploymentFn: func(deployment *kapi.ReplicationController) (*kapi.ReplicationController, error) {\n\t\t\tupdatedDeployments = append(updatedDeployments, *deployment)\n\t\t\treturn deployment, nil\n\t\t},\n\t}\n\n\ttype existing struct {\n\t\tversion int\n\t\tstatus deployapi.DeploymentStatus\n\t\tshouldCancel bool\n\t}\n\ttype scenario struct {\n\t\tversion int\n\t\texisting []existing\n\t}\n\n\tscenarios := []scenario{\n\t\t// 
No existing deployments\n\t\t{1, []existing{{1, deployapi.DeploymentStatusComplete, false}}},\n\t\t// A single existing failed deployment\n\t\t{1, []existing{{1, deployapi.DeploymentStatusFailed, false}}},\n\t\t// Multiple existing completed/failed deployments\n\t\t{2, []existing{{2, deployapi.DeploymentStatusFailed, false}, {1, deployapi.DeploymentStatusComplete, false}}},\n\t\t// A single existing new deployment\n\t\t{1, []existing{{1, deployapi.DeploymentStatusNew, true}}},\n\t\t// A single existing pending deployment\n\t\t{1, []existing{{1, deployapi.DeploymentStatusPending, true}}},\n\t\t// A single existing running deployment\n\t\t{1, []existing{{1, deployapi.DeploymentStatusRunning, true}}},\n\t\t// Multiple existing deployments with one in new/pending/running\n\t\t{3, []existing{{3, deployapi.DeploymentStatusRunning, true}, {2, deployapi.DeploymentStatusComplete, false}, {1, deployapi.DeploymentStatusFailed, false}}},\n\t\t// Multiple existing deployments with more than one in new/pending/running\n\t\t{3, []existing{{3, deployapi.DeploymentStatusNew, true}, {2, deployapi.DeploymentStatusRunning, true}, {1, deployapi.DeploymentStatusFailed, false}}},\n\t}\n\n\tc := &cancelDeploymentCommand{client: commandClient}\n\tfor _, scenario := range scenarios {\n\t\tupdatedDeployments = []kapi.ReplicationController{}\n\t\tconfig = deploytest.OkDeploymentConfig(scenario.version)\n\t\texistingDeployments = &kapi.ReplicationControllerList{}\n\t\tfor _, e := range scenario.existing {\n\t\t\td, _ := deployutil.MakeDeployment(deploytest.OkDeploymentConfig(e.version), api.Codec)\n\t\t\td.Annotations[deployapi.DeploymentStatusAnnotation] = string(e.status)\n\t\t\texistingDeployments.Items = append(existingDeployments.Items, *d)\n\t\t}\n\n\t\terr := c.cancel(config, ioutil.Discard)\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"unexpected error: %v\", err)\n\t\t}\n\n\t\texpectedCancellations := []int{}\n\t\tactualCancellations := []int{}\n\t\tfor _, e := range scenario.existing 
{\n\t\t\tif e.shouldCancel {\n\t\t\t\texpectedCancellations = append(expectedCancellations, e.version)\n\t\t\t}\n\t\t}\n\t\tfor _, d := range updatedDeployments {\n\t\t\tactualCancellations = append(actualCancellations, deployutil.DeploymentVersionFor(&d))\n\t\t}\n\n\t\tsort.Ints(actualCancellations)\n\t\tsort.Ints(expectedCancellations)\n\t\tif !reflect.DeepEqual(actualCancellations, expectedCancellations) {\n\t\t\tt.Fatalf(\"expected cancellations: %v, actual: %v\", expectedCancellations, actualCancellations)\n\t\t}\n\t}\n}", "func assertPodsPendingForDuration(c clientset.Interface, deployment *appsv1.Deployment, pendingPodsNum int, pendingDuration time.Duration) error {\n\n\tpendingPods := make(map[string]time.Time)\n\n\terr := wait.PollImmediate(pollInterval, pollTimeout+pendingDuration, func() (bool, error) {\n\t\tvar err error\n\t\tcurrentPodList, err := framework_deployment.GetPodsForDeployment(c, deployment)\n\t\tif err != nil {\n\t\t\treturn false, err\n\t\t}\n\n\t\tmissingPods := make(map[string]bool)\n\t\tfor podName := range pendingPods {\n\t\t\tmissingPods[podName] = true\n\t\t}\n\n\t\tnow := time.Now()\n\t\tfor _, pod := range currentPodList.Items {\n\t\t\tdelete(missingPods, pod.Name)\n\t\t\tswitch pod.Status.Phase {\n\t\t\tcase apiv1.PodPending:\n\t\t\t\t_, ok := pendingPods[pod.Name]\n\t\t\t\tif !ok {\n\t\t\t\t\tpendingPods[pod.Name] = now\n\t\t\t\t}\n\t\t\tdefault:\n\t\t\t\tdelete(pendingPods, pod.Name)\n\t\t\t}\n\t\t}\n\n\t\tfor missingPod := range missingPods {\n\t\t\tdelete(pendingPods, missingPod)\n\t\t}\n\n\t\tif len(pendingPods) < pendingPodsNum {\n\t\t\treturn false, nil\n\t\t}\n\n\t\tif len(pendingPods) > pendingPodsNum {\n\t\t\treturn false, fmt.Errorf(\"%v pending pods seen - expecting %v\", len(pendingPods), pendingPodsNum)\n\t\t}\n\n\t\tfor p, t := range pendingPods {\n\t\t\tfmt.Println(\"task\", now, p, t, now.Sub(t), pendingDuration)\n\t\t\tif now.Sub(t) < pendingDuration {\n\t\t\t\treturn false, nil\n\t\t\t}\n\t\t}\n\n\t\treturn 
true, nil\n\t})\n\n\tif err != nil {\n\t\treturn fmt.Errorf(\"assertion failed for pending pods in %v: %v\", deployment.Name, err)\n\t}\n\treturn nil\n}", "func checkConcurrency(t *testing.T, expectedConcurrency *int, run *v1alpha1.Run,\n\texpectedTaskRuns []*v1beta1.TaskRun, actualTaskRunList *v1beta1.TaskRunList) {\n\n\ttype taskRunEventType int\n\tconst (\n\t\ttrCreated taskRunEventType = iota\n\t\ttrCompleted taskRunEventType = iota\n\t)\n\n\ttype taskRunEvent struct {\n\t\teventTime metav1.Time\n\t\teventType taskRunEventType\n\t\ttrName string\n\t}\n\n\tevents := []taskRunEvent{}\n\tfor _, actualTaskRun := range actualTaskRunList.Items {\n\t\t// This shouldn't happen unless something really went wrong but we need to test it to avoid a panic dereferencing the pointer.\n\t\tif actualTaskRun.Status.CompletionTime == nil {\n\t\t\tt.Errorf(\"TaskRun %s does not have a completion time\", actualTaskRun.Name)\n\t\t\tcontinue\n\t\t}\n\t\tevents = append(events,\n\t\t\ttaskRunEvent{eventTime: actualTaskRun.ObjectMeta.CreationTimestamp, eventType: trCreated, trName: actualTaskRun.ObjectMeta.Name},\n\t\t\ttaskRunEvent{eventTime: *actualTaskRun.Status.CompletionTime, eventType: trCompleted, trName: actualTaskRun.ObjectMeta.Name},\n\t\t)\n\t}\n\n\tsort.Slice(events, func(i, j int) bool {\n\t\t// Unfortunately the timestamp resolution is only 1 second which makes event ordering imprecise.\n\t\t// This could trigger a false limit failure if TaskRun B was created a few milliseconds after TaskRun A completed\n\t\t// but the events were sorted the other way. In order to address this, the sort places TaskRun completion\n\t\t// before TaskRun creation when the times are equal and the TaskRun names are different. 
There is a small\n\t\t// chance this could mask a problem but it's the best that can be done with the limited timestamp resolution.\n\t\treturn events[i].eventTime.Before(&events[j].eventTime) ||\n\t\t\t(events[i].eventTime.Equal(&events[j].eventTime) &&\n\t\t\t\t((events[i].trName == events[j].trName && events[i].eventType == trCreated) ||\n\t\t\t\t\t(events[i].trName != events[j].trName && events[i].eventType == trCompleted)))\n\t})\n\n\tt.Logf(\"Sorted taskrun event table: %v\", events)\n\n\t// Determine how many TaskRuns were \"alive\" at any given time, where \"alive\" means the TaskRun was created\n\t// and thus eligible to run and not yet completed.\n\tconcurrency := 0\n\tmaxConcurrency := 0\n\tconcurrencyLimit := 1\n\tif expectedConcurrency != nil {\n\t\tconcurrencyLimit = *expectedConcurrency\n\t}\n\toffender := \"\"\n\tvar firstCreationTime, lastCompletionTime metav1.Time\n\tfirstCreationTime = metav1.Unix(1<<63-62135596801, 999999999) // max time\n\tfor _, event := range events {\n\t\tif event.eventType == trCreated {\n\t\t\tconcurrency++\n\t\t} else {\n\t\t\tconcurrency--\n\t\t}\n\t\t// If the concurrency limit was breached, record the first TaskRun where it happened.\n\t\tif concurrencyLimit > 0 && concurrency > concurrencyLimit && offender == \"\" {\n\t\t\toffender = event.trName\n\t\t}\n\t\t// Track the peak number of active TaskRuns.\n\t\tif concurrency > maxConcurrency {\n\t\t\tmaxConcurrency = concurrency\n\t\t}\n\t\tif event.eventType == trCreated {\n\t\t\tif event.eventTime.Before(&firstCreationTime) {\n\t\t\t\tfirstCreationTime = event.eventTime\n\t\t\t}\n\t\t} else {\n\t\t\tif lastCompletionTime.Before(&event.eventTime) {\n\t\t\t\tlastCompletionTime = event.eventTime\n\t\t\t}\n\t\t}\n\t}\n\n\tt.Logf(\"maxConcurrency=%v\", maxConcurrency)\n\n\tif concurrencyLimit <= 0 {\n\t\t// There is no limit so all of the expected TaskRuns should have been created at once.\n\t\t// This check assumes that the controller can create all of the TaskRuns before 
any of them complete.\n\t\t// It would take a very fast TaskRun and a very slow controller to violate that but using a sleep in\n\t\t// the task helps to make that unlikely.\n\t\tif maxConcurrency < len(expectedTaskRuns) {\n\t\t\tt.Errorf(\"Concurrency is unlimited so all %d expected TaskRuns should have been active at once but only %d were.\",\n\t\t\t\tlen(expectedTaskRuns), maxConcurrency)\n\t\t}\n\t} else {\n\t\t// There is a limit so there shouldn't be more TaskRuns than that alive at any moment.\n\t\tif maxConcurrency > concurrencyLimit {\n\t\t\tt.Errorf(\"Concurrency limit %d was broken. \"+\n\t\t\t\t\"%d TaskRuns were running or eligible to run at one point. \"+\n\t\t\t\t\"The limit was first crossed when TaskRun %s was created.\",\n\t\t\t\tconcurrencyLimit, maxConcurrency, offender)\n\t\t} else {\n\t\t\t// The limit should be equaled when TaskRuns are created for the first set of iterations,\n\t\t\t// unless there are fewer TaskRuns than the limit.\n\t\t\texpectedPeak := concurrencyLimit\n\t\t\tif len(expectedTaskRuns) < concurrencyLimit {\n\t\t\t\texpectedPeak = len(expectedTaskRuns)\n\t\t\t}\n\t\t\tif maxConcurrency < expectedPeak {\n\t\t\t\tt.Errorf(\"Concurrency limit %d was not reached. 
\"+\n\t\t\t\t\t\"At most only %d TaskRuns were running or eligible to run.\",\n\t\t\t\t\tconcurrencyLimit, maxConcurrency)\n\t\t\t}\n\t\t}\n\t}\n\n\t// Check that the Run's start and completion times are set appropriately.\n\tif run.Status.StartTime == nil {\n\t\tt.Errorf(\"The Run start time is not set!\")\n\t} else if firstCreationTime.Before(run.Status.StartTime) {\n\t\tt.Errorf(\"The Run start time %v is after the first TaskRun's creation time %v\", run.Status.StartTime, firstCreationTime)\n\t}\n\tif run.Status.CompletionTime == nil {\n\t\tt.Errorf(\"The Run completion time is not set!\")\n\t} else if run.Status.CompletionTime.Before(&lastCompletionTime) {\n\t\tt.Errorf(\"The Run completion time %v is before the last TaskRun's completion time %v\", run.Status.CompletionTime, lastCompletionTime)\n\t}\n}", "func ConcurrencyTest(t *testing.T, hc HandlerConstructor, requestNumber int, limit int) ConcurrencyTestResult {\n\n\tvar oopsCount uint64\n\n\tcounter := make(chan int)\n\tcodes := make(chan int)\n\n\thandler := hc(requestNumber, limit, counter)\n\n\tts := httptest.NewServer(handler)\n\tdefer ts.Close()\n\n\tvar requestsWg sync.WaitGroup\n\tfor i := 0; i < requestNumber; i++ {\n\t\trequestsWg.Add(1)\n\t\tgo func() {\n\t\t\tres, err := http.Get(ts.URL)\n\t\t\tassert.NoError(t, err)\n\t\t\tif err == nil {\n\t\t\t\tcodes <- res.StatusCode\n\t\t\t} else {\n\t\t\t\tatomic.AddUint64(&oopsCount, 1)\n\t\t\t}\n\t\t\trequestsWg.Done()\n\t\t}()\n\t}\n\n\tvar resultsWg sync.WaitGroup\n\tvar result ConcurrencyTestResult\n\n\tresultsWg.Add(2)\n\tgo func() {\n\t\tvar concurrencyNow int\n\t\tfor number := range counter {\n\t\t\tconcurrencyNow += number\n\t\t\tif concurrencyNow > result.maxConcurrency {\n\t\t\t\tresult.maxConcurrency = concurrencyNow\n\t\t\t}\n\t\t}\n\t\tresultsWg.Done()\n\t}()\n\tgo func() {\n\t\tfor number := range codes {\n\t\t\tswitch number {\n\t\t\tcase http.StatusOK:\n\t\t\t\tresult.accepted++\n\t\t\tcase 
http.StatusTooManyRequests:\n\t\t\t\tresult.denied++\n\t\t\tdefault:\n\t\t\t\tassert.Failf(t, \"bad response\", \"unexpected status code: %v\", number)\n\t\t\t}\n\t\t}\n\t\tresultsWg.Done()\n\t}()\n\n\trequestsWg.Wait()\n\tif oopsCount > 0 {\n\t\tfmt.Printf(\"Saw a number of errors, count is: %d\\n\", oopsCount)\n\t}\n\tclose(counter)\n\tclose(codes)\n\tresultsWg.Wait()\n\n\treturn result\n}", "func TestConcurrentAccessToRelatedVolumes(ctx context.Context, f *framework.Framework, cs clientset.Interface, ns string,\n\tnode e2epod.NodeSelection, pvcs []*v1.PersistentVolumeClaim, expectedContent string) {\n\n\tvar pods []*v1.Pod\n\n\t// Create each pod with pvc\n\tfor i := range pvcs {\n\t\tindex := i + 1\n\t\tginkgo.By(fmt.Sprintf(\"Creating pod%d with a volume on %+v\", index, node))\n\t\tpodConfig := e2epod.Config{\n\t\t\tNS: ns,\n\t\t\tPVCs: []*v1.PersistentVolumeClaim{pvcs[i]},\n\t\t\tSeLinuxLabel: e2epod.GetLinuxLabel(),\n\t\t\tNodeSelection: node,\n\t\t\tPVCsReadOnly: false,\n\t\t\tImageID: e2epod.GetTestImageID(imageutils.JessieDnsutils),\n\t\t}\n\t\tpod, err := e2epod.CreateSecPodWithNodeSelection(ctx, cs, &podConfig, f.Timeouts.PodStart)\n\t\tdefer func() {\n\t\t\tframework.ExpectNoError(e2epod.DeletePodWithWait(ctx, cs, pod))\n\t\t}()\n\t\tframework.ExpectNoError(err)\n\t\tpods = append(pods, pod)\n\t\tactualNodeName := pod.Spec.NodeName\n\n\t\t// Always run the subsequent pods on the same node.\n\t\te2epod.SetAffinity(&node, actualNodeName)\n\t}\n\n\tfor i, pvc := range pvcs {\n\t\tvar commands []string\n\n\t\tif *pvc.Spec.VolumeMode == v1.PersistentVolumeBlock {\n\t\t\tfileName := \"/mnt/volume1\"\n\t\t\tcommands = e2evolume.GenerateReadBlockCmd(fileName, len(expectedContent))\n\t\t\t// Check that all pods have the same content\n\t\t\tindex := i + 1\n\t\t\tginkgo.By(fmt.Sprintf(\"Checking if the volume in pod%d has expected initial content\", index))\n\t\t\t_, err := e2eoutput.LookForStringInPodExec(pods[i].Namespace, pods[i].Name, commands, 
expectedContent, time.Minute)\n\t\t\tframework.ExpectNoError(err, \"failed: finding the contents of the block volume %s.\", fileName)\n\t\t} else {\n\t\t\tfileName := \"/mnt/volume1/index.html\"\n\t\t\tcommands = e2evolume.GenerateReadFileCmd(fileName)\n\t\t\t// Check that all pods have the same content\n\t\t\tindex := i + 1\n\t\t\tginkgo.By(fmt.Sprintf(\"Checking if the volume in pod%d has expected initial content\", index))\n\t\t\t_, err := e2eoutput.LookForStringInPodExec(pods[i].Namespace, pods[i].Name, commands, expectedContent, time.Minute)\n\t\t\tframework.ExpectNoError(err, \"failed: finding the contents of the mounted file %s.\", fileName)\n\t\t}\n\t}\n}", "func TestReconcile(t *testing.T) {\n\n\t//\n\t// Define The KafkaChannel Reconciler Test Cases\n\t//\n\t// Note - Knative testing framework assumes ALL actions will be in the same Namespace\n\t// as the Key so we have to set SkipNamespaceValidation in all tests!\n\t//\n\t// Note - Knative reconciler framework expects Events (not errors) from ReconcileKind()\n\t// so WantErr is only for higher level failures in the injected Reconcile() function.\n\t//\n\tcommontesting.SetTestEnvironment(t)\n\ttableTest := TableTest{\n\n\t\t//\n\t\t// Top Level Use Cases\n\t\t//\n\n\t\t{\n\t\t\tName: \"Bad KafkaChannel Key\",\n\t\t\tKey: \"too/many/parts\",\n\t\t},\n\t\t{\n\t\t\tName: \"KafkaChannel Key Not Found\",\n\t\t\tKey: \"foo/not-found\",\n\t\t},\n\n\t\t//\n\t\t// Full Reconciliation\n\t\t//\n\n\t\t{\n\t\t\tName: \"Complete Reconciliation Success\",\n\t\t\tSkipNamespaceValidation: true,\n\t\t\tKey: controllertesting.KafkaChannelKey,\n\t\t\tObjects: []runtime.Object{\n\t\t\t\tcontrollertesting.NewKafkaChannel(controllertesting.WithInitializedConditions),\n\t\t\t},\n\t\t\tWantCreates: []runtime.Object{\n\t\t\t\tcontrollertesting.NewKafkaChannelService(),\n\t\t\t\tcontrollertesting.NewKafkaChannelDispatcherService(),\n\t\t\t\tcontrollertesting.NewKafkaChannelDispatcherDeployment(),\n\t\t\t},\n\t\t\tWantStatusUpdates: 
[]clientgotesting.UpdateActionImpl{\n\t\t\t\t{\n\t\t\t\t\tObject: controllertesting.NewKafkaChannel(\n\t\t\t\t\t\tcontrollertesting.WithAddress,\n\t\t\t\t\t\tcontrollertesting.WithInitializedConditions,\n\t\t\t\t\t\tcontrollertesting.WithKafkaChannelServiceReady,\n\t\t\t\t\t\tcontrollertesting.WithDispatcherDeploymentReady,\n\t\t\t\t\t\tcontrollertesting.WithTopicReady,\n\t\t\t\t\t),\n\t\t\t\t},\n\t\t\t},\n\t\t\tWantUpdates: []clientgotesting.UpdateActionImpl{\n\t\t\t\tcontrollertesting.NewKafkaChannelLabelUpdate(\n\t\t\t\t\tcontrollertesting.NewKafkaChannel(\n\t\t\t\t\t\tcontrollertesting.WithFinalizer,\n\t\t\t\t\t\tcontrollertesting.WithMetaData,\n\t\t\t\t\t\tcontrollertesting.WithAddress,\n\t\t\t\t\t\tcontrollertesting.WithInitializedConditions,\n\t\t\t\t\t\tcontrollertesting.WithKafkaChannelServiceReady,\n\t\t\t\t\t\tcontrollertesting.WithDispatcherDeploymentReady,\n\t\t\t\t\t\tcontrollertesting.WithTopicReady,\n\t\t\t\t\t),\n\t\t\t\t),\n\t\t\t},\n\t\t\tWantPatches: []clientgotesting.PatchActionImpl{controllertesting.NewFinalizerPatchActionImpl()},\n\t\t\tWantEvents: []string{\n\t\t\t\tcontrollertesting.NewKafkaChannelFinalizerUpdateEvent(),\n\t\t\t\tcontrollertesting.NewKafkaChannelSuccessfulReconciliationEvent(),\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tName: \"Complete Reconciliation Success, No Dispatcher Resource Requests Or Limits\",\n\t\t\tSkipNamespaceValidation: true,\n\t\t\tKey: controllertesting.KafkaChannelKey,\n\t\t\tObjects: []runtime.Object{\n\t\t\t\tcontrollertesting.NewKafkaChannel(controllertesting.WithInitializedConditions),\n\t\t\t},\n\t\t\tWantCreates: []runtime.Object{\n\t\t\t\tcontrollertesting.NewKafkaChannelService(),\n\t\t\t\tcontrollertesting.NewKafkaChannelDispatcherService(),\n\t\t\t\tcontrollertesting.NewKafkaChannelDispatcherDeployment(controllertesting.WithoutResources),\n\t\t\t},\n\t\t\tWantStatusUpdates: []clientgotesting.UpdateActionImpl{\n\t\t\t\t{\n\t\t\t\t\tObject: 
controllertesting.NewKafkaChannel(\n\t\t\t\t\t\tcontrollertesting.WithAddress,\n\t\t\t\t\t\tcontrollertesting.WithInitializedConditions,\n\t\t\t\t\t\tcontrollertesting.WithKafkaChannelServiceReady,\n\t\t\t\t\t\tcontrollertesting.WithDispatcherDeploymentReady,\n\t\t\t\t\t\tcontrollertesting.WithTopicReady,\n\t\t\t\t\t),\n\t\t\t\t},\n\t\t\t},\n\t\t\tWantUpdates: []clientgotesting.UpdateActionImpl{\n\t\t\t\tcontrollertesting.NewKafkaChannelLabelUpdate(\n\t\t\t\t\tcontrollertesting.NewKafkaChannel(\n\t\t\t\t\t\tcontrollertesting.WithFinalizer,\n\t\t\t\t\t\tcontrollertesting.WithMetaData,\n\t\t\t\t\t\tcontrollertesting.WithAddress,\n\t\t\t\t\t\tcontrollertesting.WithInitializedConditions,\n\t\t\t\t\t\tcontrollertesting.WithKafkaChannelServiceReady,\n\t\t\t\t\t\tcontrollertesting.WithDispatcherDeploymentReady,\n\t\t\t\t\t\tcontrollertesting.WithTopicReady,\n\t\t\t\t\t),\n\t\t\t\t),\n\t\t\t},\n\t\t\tWantPatches: []clientgotesting.PatchActionImpl{controllertesting.NewFinalizerPatchActionImpl()},\n\t\t\tWantEvents: []string{\n\t\t\t\tcontrollertesting.NewKafkaChannelFinalizerUpdateEvent(),\n\t\t\t\tcontrollertesting.NewKafkaChannelSuccessfulReconciliationEvent(),\n\t\t\t},\n\t\t\tOtherTestData: map[string]interface{}{\n\t\t\t\t\"configOptions\": []controllertesting.KafkaConfigOption{controllertesting.WithNoDispatcherResources},\n\t\t\t},\n\t\t},\n\n\t\t//\n\t\t// KafkaChannel Deletion (Finalizer)\n\t\t//\n\n\t\t{\n\t\t\tName: \"Finalize Deleted KafkaChannel With Dispatcher\",\n\t\t\tKey: controllertesting.KafkaChannelKey,\n\t\t\tObjects: []runtime.Object{\n\t\t\t\tcontrollertesting.NewKafkaChannel(\n\t\t\t\t\tcontrollertesting.WithInitializedConditions,\n\t\t\t\t\tcontrollertesting.WithLabels,\n\t\t\t\t\tcontrollertesting.WithDeletionTimestamp,\n\t\t\t\t),\n\t\t\t\tcontrollertesting.NewKafkaChannelDispatcherService(),\n\t\t\t\tcontrollertesting.NewKafkaChannelDispatcherDeployment(),\n\t\t\t},\n\t\t\tWantUpdates: 
[]clientgotesting.UpdateActionImpl{\n\t\t\t\tcontrollertesting.NewServiceUpdateActionImpl(controllertesting.NewKafkaChannelDispatcherService(controllertesting.WithoutFinalizersService)),\n\t\t\t\tcontrollertesting.NewDeploymentUpdateActionImpl(controllertesting.NewKafkaChannelDispatcherDeployment(controllertesting.WithoutFinalizersDeployment)),\n\t\t\t},\n\t\t\tWantDeletes: []clientgotesting.DeleteActionImpl{\n\t\t\t\tcontrollertesting.NewServiceDeleteActionImpl(controllertesting.NewKafkaChannelDispatcherService(controllertesting.WithoutFinalizersService)),\n\t\t\t\tcontrollertesting.NewDeploymentDeleteActionImpl(controllertesting.NewKafkaChannelDispatcherDeployment(controllertesting.WithoutFinalizersDeployment)),\n\t\t\t},\n\t\t\tWantEvents: []string{\n\t\t\t\tcontrollertesting.NewKafkaChannelSuccessfulFinalizedEvent(),\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tName: \"Finalize Deleted KafkaChannel Without Dispatcher\",\n\t\t\tKey: controllertesting.KafkaChannelKey,\n\t\t\tObjects: []runtime.Object{\n\t\t\t\tcontrollertesting.NewKafkaChannel(\n\t\t\t\t\tcontrollertesting.WithInitializedConditions,\n\t\t\t\t\tcontrollertesting.WithLabels,\n\t\t\t\t\tcontrollertesting.WithDeletionTimestamp,\n\t\t\t\t),\n\t\t\t},\n\t\t\tWantEvents: []string{\n\t\t\t\tcontrollertesting.NewKafkaChannelSuccessfulFinalizedEvent(),\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tName: \"Finalize Deleted KafkaChannel Errors(Delete)\",\n\t\t\tSkipNamespaceValidation: true,\n\t\t\tKey: controllertesting.KafkaChannelKey,\n\t\t\tObjects: []runtime.Object{\n\t\t\t\tcontrollertesting.NewKafkaChannel(\n\t\t\t\t\tcontrollertesting.WithInitializedConditions,\n\t\t\t\t\tcontrollertesting.WithLabels,\n\t\t\t\t\tcontrollertesting.WithDeletionTimestamp,\n\t\t\t\t),\n\t\t\t\tcontrollertesting.NewKafkaChannelDispatcherService(),\n\t\t\t\tcontrollertesting.NewKafkaChannelDispatcherDeployment(),\n\t\t\t},\n\t\t\tWithReactors: []clientgotesting.ReactionFunc{\n\t\t\t\tInduceFailure(\"delete\", 
\"Services\"),\n\t\t\t\tInduceFailure(\"delete\", \"Deployments\"),\n\t\t\t},\n\t\t\tWantUpdates: []clientgotesting.UpdateActionImpl{\n\t\t\t\tcontrollertesting.NewServiceUpdateActionImpl(controllertesting.NewKafkaChannelDispatcherService(controllertesting.WithoutFinalizersService)),\n\t\t\t\tcontrollertesting.NewDeploymentUpdateActionImpl(controllertesting.NewKafkaChannelDispatcherDeployment(controllertesting.WithoutFinalizersDeployment)),\n\t\t\t},\n\t\t\tWantDeletes: []clientgotesting.DeleteActionImpl{\n\t\t\t\tcontrollertesting.NewServiceDeleteActionImpl(controllertesting.NewKafkaChannelDispatcherService(controllertesting.WithoutFinalizersService)),\n\t\t\t\tcontrollertesting.NewDeploymentDeleteActionImpl(controllertesting.NewKafkaChannelDispatcherDeployment(controllertesting.WithoutFinalizersDeployment)),\n\t\t\t},\n\t\t\tWantErr: true,\n\t\t\tWantEvents: []string{\n\t\t\t\tEventf(corev1.EventTypeWarning, event.DispatcherServiceFinalizationFailed.String(), \"Failed To Finalize Dispatcher Service: inducing failure for delete services\"),\n\t\t\t\tEventf(corev1.EventTypeWarning, event.DispatcherDeploymentFinalizationFailed.String(), \"Failed To Finalize Dispatcher Deployment: inducing failure for delete deployments\"),\n\t\t\t\tcontrollertesting.NewKafkaChannelFailedFinalizationEvent(),\n\t\t\t},\n\t\t},\n\n\t\t//\n\t\t// KafkaChannel Service\n\t\t//\n\n\t\t{\n\t\t\tName: \"Reconcile Missing KafkaChannel Service Success\",\n\t\t\tSkipNamespaceValidation: true,\n\t\t\tKey: controllertesting.KafkaChannelKey,\n\t\t\tObjects: 
[]runtime.Object{\n\t\t\t\tcontrollertesting.NewKafkaChannel(\n\t\t\t\t\tcontrollertesting.WithFinalizer,\n\t\t\t\t\tcontrollertesting.WithMetaData,\n\t\t\t\t\tcontrollertesting.WithAddress,\n\t\t\t\t\tcontrollertesting.WithInitializedConditions,\n\t\t\t\t\tcontrollertesting.WithKafkaChannelServiceReady,\n\t\t\t\t\tcontrollertesting.WithDispatcherDeploymentReady,\n\t\t\t\t\tcontrollertesting.WithTopicReady,\n\t\t\t\t),\n\t\t\t\tcontrollertesting.NewKafkaChannelDispatcherService(),\n\t\t\t\tcontrollertesting.NewKafkaChannelDispatcherDeployment(),\n\t\t\t},\n\t\t\tWantCreates: []runtime.Object{controllertesting.NewKafkaChannelService()},\n\t\t\tWantEvents: []string{controllertesting.NewKafkaChannelSuccessfulReconciliationEvent()},\n\t\t},\n\t\t{\n\t\t\tName: \"Reconcile Missing KafkaChannel Service Error(Create)\",\n\t\t\tSkipNamespaceValidation: true,\n\t\t\tKey: controllertesting.KafkaChannelKey,\n\t\t\tObjects: []runtime.Object{\n\t\t\t\tcontrollertesting.NewKafkaChannel(\n\t\t\t\t\tcontrollertesting.WithFinalizer,\n\t\t\t\t\tcontrollertesting.WithAddress,\n\t\t\t\t\tcontrollertesting.WithInitializedConditions,\n\t\t\t\t\tcontrollertesting.WithKafkaChannelServiceReady,\n\t\t\t\t\tcontrollertesting.WithDispatcherDeploymentReady,\n\t\t\t\t\tcontrollertesting.WithTopicReady,\n\t\t\t\t),\n\t\t\t\tcontrollertesting.NewKafkaChannelDispatcherService(),\n\t\t\t\tcontrollertesting.NewKafkaChannelDispatcherDeployment(),\n\t\t\t},\n\t\t\tWithReactors: []clientgotesting.ReactionFunc{InduceFailure(\"create\", \"Services\")},\n\t\t\tWantErr: true,\n\t\t\tWantCreates: []runtime.Object{controllertesting.NewKafkaChannelService()},\n\t\t\tWantStatusUpdates: []clientgotesting.UpdateActionImpl{\n\t\t\t\t{\n\t\t\t\t\tObject: 
controllertesting.NewKafkaChannel(\n\t\t\t\t\t\tcontrollertesting.WithFinalizer,\n\t\t\t\t\t\tcontrollertesting.WithAddress,\n\t\t\t\t\t\tcontrollertesting.WithInitializedConditions,\n\t\t\t\t\t\tcontrollertesting.WithKafkaChannelServiceFailed,\n\t\t\t\t\t\tcontrollertesting.WithDispatcherDeploymentReady,\n\t\t\t\t\t\tcontrollertesting.WithTopicReady,\n\t\t\t\t\t),\n\t\t\t\t},\n\t\t\t},\n\t\t\tWantEvents: []string{\n\t\t\t\tEventf(corev1.EventTypeWarning, event.KafkaChannelServiceReconciliationFailed.String(), \"Failed To Reconcile KafkaChannel Service: inducing failure for create services\"),\n\t\t\t\tcontrollertesting.NewKafkaChannelFailedReconciliationEvent(),\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tName: \"Reconcile KafkaChannel Service With Deletion Timestamp\",\n\t\t\tSkipNamespaceValidation: true,\n\t\t\tKey: controllertesting.KafkaChannelKey,\n\t\t\tObjects: []runtime.Object{\n\t\t\t\tcontrollertesting.NewKafkaChannel(\n\t\t\t\t\tcontrollertesting.WithFinalizer,\n\t\t\t\t\tcontrollertesting.WithMetaData,\n\t\t\t\t\tcontrollertesting.WithAddress,\n\t\t\t\t\tcontrollertesting.WithInitializedConditions,\n\t\t\t\t\tcontrollertesting.WithKafkaChannelServiceReady,\n\t\t\t\t\tcontrollertesting.WithDispatcherDeploymentReady,\n\t\t\t\t\tcontrollertesting.WithTopicReady,\n\t\t\t\t),\n\t\t\t\tcontrollertesting.NewKafkaChannelService(controllertesting.WithDeletionTimestampService),\n\t\t\t\tcontrollertesting.NewKafkaChannelReceiverService(),\n\t\t\t\tcontrollertesting.NewKafkaChannelReceiverDeployment(),\n\t\t\t\tcontrollertesting.NewKafkaChannelDispatcherService(),\n\t\t\t\tcontrollertesting.NewKafkaChannelDispatcherDeployment(),\n\t\t\t},\n\t\t\tWantErr: true,\n\t\t\tWantEvents: []string{\n\t\t\t\tEventf(corev1.EventTypeWarning, event.KafkaChannelServiceReconciliationFailed.String(), \"Failed To Reconcile KafkaChannel Service: encountered KafkaChannel Service with DeletionTimestamp kafkachannel-namespace/kafkachannel-name-kn-channel - potential race 
condition\"),\n\t\t\t\tcontrollertesting.NewKafkaChannelFailedReconciliationEvent(),\n\t\t\t},\n\t\t},\n\n\t\t//\n\t\t// KafkaChannel Dispatcher Service\n\t\t//\n\n\t\t{\n\t\t\tName: \"Reconcile Missing Dispatcher Service Success\",\n\t\t\tSkipNamespaceValidation: true,\n\t\t\tKey: controllertesting.KafkaChannelKey,\n\t\t\tObjects: []runtime.Object{\n\t\t\t\tcontrollertesting.NewKafkaChannel(\n\t\t\t\t\tcontrollertesting.WithFinalizer,\n\t\t\t\t\tcontrollertesting.WithMetaData,\n\t\t\t\t\tcontrollertesting.WithAddress,\n\t\t\t\t\tcontrollertesting.WithInitializedConditions,\n\t\t\t\t\tcontrollertesting.WithKafkaChannelServiceReady,\n\t\t\t\t\tcontrollertesting.WithReceiverServiceReady,\n\t\t\t\t\tcontrollertesting.WithReceiverDeploymentReady,\n\t\t\t\t\tcontrollertesting.WithDispatcherDeploymentReady,\n\t\t\t\t\tcontrollertesting.WithTopicReady,\n\t\t\t\t),\n\t\t\t\tcontrollertesting.NewKafkaChannelService(),\n\t\t\t\tcontrollertesting.NewKafkaChannelReceiverService(),\n\t\t\t\tcontrollertesting.NewKafkaChannelReceiverDeployment(),\n\t\t\t\tcontrollertesting.NewKafkaChannelDispatcherDeployment(),\n\t\t\t},\n\t\t\tWantCreates: []runtime.Object{controllertesting.NewKafkaChannelDispatcherService()},\n\t\t\tWantEvents: []string{controllertesting.NewKafkaChannelSuccessfulReconciliationEvent()},\n\t\t},\n\t\t{\n\t\t\tName: \"Reconcile Missing Dispatcher Service Error(Create)\",\n\t\t\tSkipNamespaceValidation: true,\n\t\t\tKey: controllertesting.KafkaChannelKey,\n\t\t\tObjects: 
[]runtime.Object{\n\t\t\t\tcontrollertesting.NewKafkaChannel(\n\t\t\t\t\tcontrollertesting.WithFinalizer,\n\t\t\t\t\tcontrollertesting.WithMetaData,\n\t\t\t\t\tcontrollertesting.WithAddress,\n\t\t\t\t\tcontrollertesting.WithInitializedConditions,\n\t\t\t\t\tcontrollertesting.WithKafkaChannelServiceReady,\n\t\t\t\t\tcontrollertesting.WithReceiverServiceReady,\n\t\t\t\t\tcontrollertesting.WithReceiverDeploymentReady,\n\t\t\t\t\tcontrollertesting.WithDispatcherDeploymentReady,\n\t\t\t\t\tcontrollertesting.WithTopicReady,\n\t\t\t\t),\n\t\t\t\tcontrollertesting.NewKafkaChannelService(),\n\t\t\t\tcontrollertesting.NewKafkaChannelReceiverService(),\n\t\t\t\tcontrollertesting.NewKafkaChannelReceiverDeployment(),\n\t\t\t\tcontrollertesting.NewKafkaChannelDispatcherDeployment(),\n\t\t\t},\n\t\t\tWithReactors: []clientgotesting.ReactionFunc{InduceFailure(\"create\", \"Services\")},\n\t\t\tWantErr: true,\n\t\t\tWantCreates: []runtime.Object{controllertesting.NewKafkaChannelDispatcherService()},\n\t\t\tWantStatusUpdates: []clientgotesting.UpdateActionImpl{\n\t\t\t\t// Note - Not currently tracking status for the Dispatcher Service since it is only for Prometheus\n\t\t\t},\n\t\t\tWantEvents: []string{\n\t\t\t\tEventf(corev1.EventTypeWarning, event.DispatcherServiceReconciliationFailed.String(), \"Failed To Reconcile Dispatcher Service: inducing failure for create services\"),\n\t\t\t\tcontrollertesting.NewKafkaChannelFailedReconciliationEvent(),\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tName: \"Reconcile Dispatcher Service With Deletion Timestamp And Finalizer\",\n\t\t\tSkipNamespaceValidation: true,\n\t\t\tKey: controllertesting.KafkaChannelKey,\n\t\t\tObjects: 
[]runtime.Object{\n\t\t\t\tcontrollertesting.NewKafkaChannel(\n\t\t\t\t\tcontrollertesting.WithFinalizer,\n\t\t\t\t\tcontrollertesting.WithMetaData,\n\t\t\t\t\tcontrollertesting.WithAddress,\n\t\t\t\t\tcontrollertesting.WithInitializedConditions,\n\t\t\t\t\tcontrollertesting.WithKafkaChannelServiceReady,\n\t\t\t\t\tcontrollertesting.WithDispatcherDeploymentReady,\n\t\t\t\t\tcontrollertesting.WithTopicReady,\n\t\t\t\t),\n\t\t\t\tcontrollertesting.NewKafkaChannelService(),\n\t\t\t\tcontrollertesting.NewKafkaChannelReceiverService(),\n\t\t\t\tcontrollertesting.NewKafkaChannelReceiverDeployment(),\n\t\t\t\tcontrollertesting.NewKafkaChannelDispatcherService(controllertesting.WithDeletionTimestampService),\n\t\t\t\tcontrollertesting.NewKafkaChannelDispatcherDeployment(),\n\t\t\t},\n\t\t\tWantErr: false,\n\t\t\tWantEvents: []string{\n\t\t\t\tcontrollertesting.NewKafkaChannelSuccessfulReconciliationEvent(),\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tName: \"Reconcile Dispatcher Service With Deletion Timestamp And Missing Finalizer\",\n\t\t\tSkipNamespaceValidation: true,\n\t\t\tKey: controllertesting.KafkaChannelKey,\n\t\t\tObjects: []runtime.Object{\n\t\t\t\tcontrollertesting.NewKafkaChannel(\n\t\t\t\t\tcontrollertesting.WithFinalizer,\n\t\t\t\t\tcontrollertesting.WithMetaData,\n\t\t\t\t\tcontrollertesting.WithAddress,\n\t\t\t\t\tcontrollertesting.WithInitializedConditions,\n\t\t\t\t\tcontrollertesting.WithKafkaChannelServiceReady,\n\t\t\t\t\tcontrollertesting.WithDispatcherDeploymentReady,\n\t\t\t\t\tcontrollertesting.WithTopicReady,\n\t\t\t\t),\n\t\t\t\tcontrollertesting.NewKafkaChannelService(),\n\t\t\t\tcontrollertesting.NewKafkaChannelReceiverService(),\n\t\t\t\tcontrollertesting.NewKafkaChannelReceiverDeployment(),\n\t\t\t\tcontrollertesting.NewKafkaChannelDispatcherService(controllertesting.WithoutFinalizersService, controllertesting.WithDeletionTimestampService),\n\t\t\t\tcontrollertesting.NewKafkaChannelDispatcherDeployment(),\n\t\t\t},\n\t\t\tWantErr: 
false,\n\t\t\tWantEvents: []string{\n\t\t\t\tcontrollertesting.NewKafkaChannelSuccessfulReconciliationEvent(),\n\t\t\t},\n\t\t},\n\n\t\t//\n\t\t// KafkaChannel Dispatcher Deployment\n\t\t//\n\n\t\t{\n\t\t\tName: \"Reconcile Missing Dispatcher Deployment Success\",\n\t\t\tSkipNamespaceValidation: true,\n\t\t\tKey: controllertesting.KafkaChannelKey,\n\t\t\tObjects: []runtime.Object{\n\t\t\t\tcontrollertesting.NewKafkaChannel(\n\t\t\t\t\tcontrollertesting.WithFinalizer,\n\t\t\t\t\tcontrollertesting.WithMetaData,\n\t\t\t\t\tcontrollertesting.WithAddress,\n\t\t\t\t\tcontrollertesting.WithInitializedConditions,\n\t\t\t\t\tcontrollertesting.WithKafkaChannelServiceReady,\n\t\t\t\t\tcontrollertesting.WithReceiverServiceReady,\n\t\t\t\t\tcontrollertesting.WithReceiverDeploymentReady,\n\t\t\t\t\tcontrollertesting.WithDispatcherDeploymentReady,\n\t\t\t\t\tcontrollertesting.WithTopicReady,\n\t\t\t\t),\n\t\t\t\tcontrollertesting.NewKafkaChannelService(),\n\t\t\t\tcontrollertesting.NewKafkaChannelReceiverService(),\n\t\t\t\tcontrollertesting.NewKafkaChannelReceiverDeployment(),\n\t\t\t\tcontrollertesting.NewKafkaChannelDispatcherService(),\n\t\t\t},\n\t\t\tWantCreates: []runtime.Object{controllertesting.NewKafkaChannelDispatcherDeployment()},\n\t\t\tWantEvents: []string{controllertesting.NewKafkaChannelSuccessfulReconciliationEvent()},\n\t\t},\n\t\t{\n\t\t\tName: \"Reconcile Missing Dispatcher Deployment Error(Create)\",\n\t\t\tSkipNamespaceValidation: true,\n\t\t\tKey: controllertesting.KafkaChannelKey,\n\t\t\tObjects: 
[]runtime.Object{\n\t\t\t\tcontrollertesting.NewKafkaChannel(\n\t\t\t\t\tcontrollertesting.WithFinalizer,\n\t\t\t\t\tcontrollertesting.WithMetaData,\n\t\t\t\t\tcontrollertesting.WithAddress,\n\t\t\t\t\tcontrollertesting.WithInitializedConditions,\n\t\t\t\t\tcontrollertesting.WithKafkaChannelServiceReady,\n\t\t\t\t\tcontrollertesting.WithReceiverServiceReady,\n\t\t\t\t\tcontrollertesting.WithReceiverDeploymentReady,\n\t\t\t\t\tcontrollertesting.WithDispatcherDeploymentReady,\n\t\t\t\t\tcontrollertesting.WithTopicReady,\n\t\t\t\t),\n\t\t\t\tcontrollertesting.NewKafkaChannelService(),\n\t\t\t\tcontrollertesting.NewKafkaChannelReceiverService(),\n\t\t\t\tcontrollertesting.NewKafkaChannelReceiverDeployment(),\n\t\t\t\tcontrollertesting.NewKafkaChannelDispatcherService(),\n\t\t\t},\n\t\t\tWithReactors: []clientgotesting.ReactionFunc{InduceFailure(\"create\", \"Deployments\")},\n\t\t\tWantErr: true,\n\t\t\tWantCreates: []runtime.Object{controllertesting.NewKafkaChannelDispatcherDeployment()},\n\t\t\tWantStatusUpdates: []clientgotesting.UpdateActionImpl{\n\t\t\t\t{\n\t\t\t\t\tObject: controllertesting.NewKafkaChannel(\n\t\t\t\t\t\tcontrollertesting.WithFinalizer,\n\t\t\t\t\t\tcontrollertesting.WithMetaData,\n\t\t\t\t\t\tcontrollertesting.WithAddress,\n\t\t\t\t\t\tcontrollertesting.WithInitializedConditions,\n\t\t\t\t\t\tcontrollertesting.WithKafkaChannelServiceReady,\n\t\t\t\t\t\tcontrollertesting.WithReceiverServiceReady,\n\t\t\t\t\t\tcontrollertesting.WithReceiverDeploymentReady,\n\t\t\t\t\t\tcontrollertesting.WithDispatcherFailed,\n\t\t\t\t\t\tcontrollertesting.WithTopicReady,\n\t\t\t\t\t),\n\t\t\t\t},\n\t\t\t},\n\t\t\tWantEvents: []string{\n\t\t\t\tEventf(corev1.EventTypeWarning, event.DispatcherDeploymentReconciliationFailed.String(), \"Failed To Reconcile Dispatcher Deployment: inducing failure for create deployments\"),\n\t\t\t\tcontrollertesting.NewKafkaChannelFailedReconciliationEvent(),\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tName: \"Reconcile Dispatcher Deployment With 
Deletion Timestamp And Finalizer\",\n\t\t\tSkipNamespaceValidation: true,\n\t\t\tKey: controllertesting.KafkaChannelKey,\n\t\t\tObjects: []runtime.Object{\n\t\t\t\tcontrollertesting.NewKafkaChannel(\n\t\t\t\t\tcontrollertesting.WithFinalizer,\n\t\t\t\t\tcontrollertesting.WithMetaData,\n\t\t\t\t\tcontrollertesting.WithAddress,\n\t\t\t\t\tcontrollertesting.WithInitializedConditions,\n\t\t\t\t\tcontrollertesting.WithKafkaChannelServiceReady,\n\t\t\t\t\tcontrollertesting.WithDispatcherDeploymentReady,\n\t\t\t\t\tcontrollertesting.WithTopicReady,\n\t\t\t\t),\n\t\t\t\tcontrollertesting.NewKafkaChannelService(),\n\t\t\t\tcontrollertesting.NewKafkaChannelReceiverService(),\n\t\t\t\tcontrollertesting.NewKafkaChannelReceiverDeployment(),\n\t\t\t\tcontrollertesting.NewKafkaChannelDispatcherService(),\n\t\t\t\tcontrollertesting.NewKafkaChannelDispatcherDeployment(controllertesting.WithDeletionTimestampDeployment),\n\t\t\t},\n\t\t\tWantErr: false,\n\t\t\tWantEvents: []string{\n\t\t\t\tcontrollertesting.NewKafkaChannelSuccessfulReconciliationEvent(),\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tName: \"Reconcile Dispatcher Deployment With Deletion Timestamp And Missing Finalizer\",\n\t\t\tSkipNamespaceValidation: true,\n\t\t\tKey: controllertesting.KafkaChannelKey,\n\t\t\tObjects: 
[]runtime.Object{\n\t\t\t\tcontrollertesting.NewKafkaChannel(\n\t\t\t\t\tcontrollertesting.WithFinalizer,\n\t\t\t\t\tcontrollertesting.WithMetaData,\n\t\t\t\t\tcontrollertesting.WithAddress,\n\t\t\t\t\tcontrollertesting.WithInitializedConditions,\n\t\t\t\t\tcontrollertesting.WithKafkaChannelServiceReady,\n\t\t\t\t\tcontrollertesting.WithDispatcherDeploymentReady,\n\t\t\t\t\tcontrollertesting.WithTopicReady,\n\t\t\t\t),\n\t\t\t\tcontrollertesting.NewKafkaChannelService(),\n\t\t\t\tcontrollertesting.NewKafkaChannelReceiverService(),\n\t\t\t\tcontrollertesting.NewKafkaChannelReceiverDeployment(),\n\t\t\t\tcontrollertesting.NewKafkaChannelDispatcherService(),\n\t\t\t\tcontrollertesting.NewKafkaChannelDispatcherDeployment(controllertesting.WithoutFinalizersDeployment, controllertesting.WithDeletionTimestampDeployment),\n\t\t\t},\n\t\t\tWantErr: false,\n\t\t\tWantEvents: []string{\n\t\t\t\tcontrollertesting.NewKafkaChannelSuccessfulReconciliationEvent(),\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tName: \"Reconcile Dispatcher Deployment - Redeployment on ConfigMapHash change\",\n\t\t\tSkipNamespaceValidation: true,\n\t\t\tKey: controllertesting.KafkaChannelKey,\n\t\t\tObjects: 
[]runtime.Object{\n\t\t\t\tcontrollertesting.NewKafkaChannel(\n\t\t\t\t\tcontrollertesting.WithFinalizer,\n\t\t\t\t\tcontrollertesting.WithMetaData,\n\t\t\t\t\tcontrollertesting.WithAddress,\n\t\t\t\t\tcontrollertesting.WithInitializedConditions,\n\t\t\t\t\tcontrollertesting.WithKafkaChannelServiceReady,\n\t\t\t\t\tcontrollertesting.WithReceiverServiceReady,\n\t\t\t\t\tcontrollertesting.WithReceiverDeploymentReady,\n\t\t\t\t\tcontrollertesting.WithDispatcherDeploymentReady,\n\t\t\t\t\tcontrollertesting.WithTopicReady,\n\t\t\t\t),\n\t\t\t\tcontrollertesting.NewKafkaChannelService(),\n\t\t\t\tcontrollertesting.NewKafkaChannelReceiverService(),\n\t\t\t\tcontrollertesting.NewKafkaChannelReceiverDeployment(),\n\t\t\t\tcontrollertesting.NewKafkaChannelDispatcherService(),\n\t\t\t\tcontrollertesting.NewKafkaChannelDispatcherDeployment(controllertesting.WithConfigMapHash(\"initial-hash-to-be-overridden-by-controller\")),\n\t\t\t},\n\t\t\tWantUpdates: []clientgotesting.UpdateActionImpl{\n\t\t\t\tcontrollertesting.NewDeploymentUpdateActionImpl(controllertesting.NewKafkaChannelDispatcherDeployment()),\n\t\t\t},\n\t\t\tWantEvents: []string{\n\t\t\t\tcontrollertesting.NewKafkaChannelDispatcherDeploymentUpdatedEvent(),\n\t\t\t\tcontrollertesting.NewKafkaChannelSuccessfulReconciliationEvent(),\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tName: \"Missing KafkaSecret - Error\",\n\t\t\tKey: controllertesting.KafkaChannelKey,\n\t\t\tObjects: []runtime.Object{\n\t\t\t\tcontrollertesting.NewKafkaChannel(controllertesting.WithFinalizer),\n\t\t\t},\n\t\t\tWantEvents: []string{\n\t\t\t\tEventf(corev1.EventTypeWarning, \"InternalError\", \"reconciliation failed\"),\n\t\t\t},\n\t\t\tWantStatusUpdates: []clientgotesting.UpdateActionImpl{\n\t\t\t\t{\n\t\t\t\t\tObject: 
controllertesting.NewKafkaChannel(\n\t\t\t\t\t\tcontrollertesting.WithFinalizer,\n\t\t\t\t\t\tcontrollertesting.WithInitializedConditions,\n\t\t\t\t\t\tcontrollertesting.WithKafkaChannelConfigurationFailedNoSecret,\n\t\t\t\t\t\tcontrollertesting.WithTopicReady,\n\t\t\t\t\t),\n\t\t\t\t},\n\t\t\t},\n\t\t\tWantErr: true,\n\t\t\tOtherTestData: map[string]interface{}{\n\t\t\t\t\"reconcilerOptions\": []reconcilerOption{withEmptyKafkaSecret},\n\t\t\t},\n\t\t},\n\n\t\t//\n\t\t// Deployment Updating - Repairing Incorrect Or Missing Fields In Existing Deployments\n\t\t//\n\n\t\tnewDispatcherUpdateTest(\"No Resources\", controllertesting.WithoutResources),\n\t\tnewDispatcherUpdateTest(\"Different Name\", controllertesting.WithDifferentName),\n\t\tnewDispatcherUpdateTest(\"Different Image\", controllertesting.WithDifferentImage),\n\t\tnewDispatcherUpdateTest(\"Different Command\", controllertesting.WithDifferentCommand),\n\t\tnewDispatcherUpdateTest(\"Different Args\", controllertesting.WithDifferentArgs),\n\t\tnewDispatcherUpdateTest(\"Different WorkingDir\", controllertesting.WithDifferentWorkingDir),\n\t\tnewDispatcherUpdateTest(\"Different Ports\", controllertesting.WithDifferentPorts),\n\t\tnewDispatcherUpdateTest(\"Different Environment\", controllertesting.WithMissingEnvironment),\n\t\tnewDispatcherUpdateTest(\"Different Environment\", controllertesting.WithDifferentEnvironment),\n\t\tnewDispatcherUpdateTest(\"Different VolumeMounts\", controllertesting.WithDifferentVolumeMounts),\n\t\tnewDispatcherUpdateTest(\"Different VolumeDevices\", controllertesting.WithDifferentVolumeDevices),\n\t\tnewDispatcherUpdateTest(\"Different LivenessProbe\", controllertesting.WithDifferentLivenessProbe),\n\t\tnewDispatcherUpdateTest(\"Different ReadinessProbe\", controllertesting.WithDifferentReadinessProbe),\n\t\tnewDispatcherUpdateTest(\"Missing Labels\", controllertesting.WithoutLabels),\n\t\tnewDispatcherUpdateTest(\"Missing Annotations\", 
controllertesting.WithoutAnnotations),\n\t\tnewDispatcherNoUpdateTest(\"Different Lifecycle\", controllertesting.WithDifferentLifecycle),\n\t\tnewDispatcherNoUpdateTest(\"Different TerminationPath\", controllertesting.WithDifferentTerminationPath),\n\t\tnewDispatcherNoUpdateTest(\"Different TerminationPolicy\", controllertesting.WithDifferentTerminationPolicy),\n\t\tnewDispatcherNoUpdateTest(\"Different ImagePullPolicy\", controllertesting.WithDifferentImagePullPolicy),\n\t\tnewDispatcherNoUpdateTest(\"Different SecurityContext\", controllertesting.WithDifferentSecurityContext),\n\t\tnewDispatcherNoUpdateTest(\"Different Replicas\", controllertesting.WithDifferentReplicas),\n\t\tnewDispatcherNoUpdateTest(\"Extra Labels\", controllertesting.WithExtraLabels),\n\t\tnewDispatcherNoUpdateTest(\"Extra Annotations\", controllertesting.WithExtraAnnotations),\n\n\t\t//\n\t\t// Deployment Update Failure\n\t\t//\n\n\t\t{\n\t\t\tName: \"Existing Dispatcher Deployment, Different Image, Update Error\",\n\t\t\tKey: controllertesting.KafkaChannelKey,\n\t\t\tObjects: []runtime.Object{\n\t\t\t\tcontrollertesting.NewKafkaChannel(controllertesting.WithFinalizer),\n\t\t\t\tcontrollertesting.NewKafkaChannelService(),\n\t\t\t\tcontrollertesting.NewKafkaChannelDispatcherService(),\n\t\t\t\tcontrollertesting.NewKafkaChannelDispatcherDeployment(controllertesting.WithDifferentImage),\n\t\t\t},\n\t\t\tWantStatusUpdates: []clientgotesting.UpdateActionImpl{\n\t\t\t\t{\n\t\t\t\t\tObject: controllertesting.NewKafkaChannel(\n\t\t\t\t\t\tcontrollertesting.WithFinalizer,\n\t\t\t\t\t\tcontrollertesting.WithAddress,\n\t\t\t\t\t\tcontrollertesting.WithInitializedConditions,\n\t\t\t\t\t\tcontrollertesting.WithKafkaChannelServiceReady,\n\t\t\t\t\t\tcontrollertesting.WithDispatcherUpdateFailed,\n\t\t\t\t\t\tcontrollertesting.WithTopicReady,\n\t\t\t\t\t),\n\t\t\t\t},\n\t\t\t},\n\t\t\tWantUpdates: []clientgotesting.UpdateActionImpl{{Object: 
controllertesting.NewKafkaChannelDispatcherDeployment()}},\n\t\t\tWantEvents: []string{\n\t\t\t\tcontrollertesting.NewKafkaChannelDispatcherDeploymentUpdateFailedEvent(),\n\t\t\t\tEventf(corev1.EventTypeWarning, event.DispatcherDeploymentReconciliationFailed.String(), \"Failed To Reconcile Dispatcher Deployment: inducing failure for update deployments\"),\n\t\t\t\tcontrollertesting.NewKafkaChannelFailedReconciliationEvent(),\n\t\t\t},\n\t\t\tWithReactors: []clientgotesting.ReactionFunc{\n\t\t\t\tInduceFailure(\"update\", \"Deployments\"),\n\t\t\t},\n\t\t\tWantErr: true,\n\t\t},\n\n\t\t//\n\t\t// Service Patching - Repairing Incorrect Or Missing Fields In Existing Services\n\t\t//\n\n\t\tnewServicePatchTest(\"Missing Ports\", controllertesting.WithoutServicePorts),\n\t\tnewServicePatchTest(\"Missing App Label Selector\", controllertesting.WithoutServiceSelector),\n\t\tnewServicePatchTest(\"Missing Labels\", controllertesting.WithoutServiceLabels),\n\t\tnewServiceNoPatchTest(\"Extra Labels\", controllertesting.WithExtraServiceLabels),\n\t\tnewServiceNoPatchTest(\"Different Status\", controllertesting.WithDifferentServiceStatus),\n\n\t\t//\n\t\t// Service Patch Failure\n\t\t//\n\n\t\tnewServicePatchFailureTest(\"Missing Ports\", controllertesting.WithoutServicePorts),\n\t\tnewServicePatchFailureTest(\"Missing Labels\", controllertesting.WithoutServiceLabels),\n\t}\n\n\t// Create A Mock AdminClient\n\tmockAdminClient := &controllertesting.MockAdminClient{}\n\n\t// Stub The Creation Of AdminClient\n\tkafkaadmintesting.StubNewAdminClientFn(kafkaadmintesting.NonValidatingNewAdminClientFn(mockAdminClient))\n\tdefer kafkaadmintesting.RestoreNewAdminClientFn()\n\n\t// Run The TableTest Using The KafkaChannel Reconciler Provided By The Factory\n\tlogger := logtesting.TestLogger(t)\n\ttableTest.Test(t, controllertesting.MakeFactory(func(ctx context.Context, listers *controllertesting.Listers, cmw configmap.Watcher, options map[string]interface{}) controller.Reconciler 
{\n\n\t\tconfigOptionsInt, ok := options[\"configOptions\"]\n\t\tif !ok || configOptionsInt == nil {\n\t\t\tconfigOptionsInt = []controllertesting.KafkaConfigOption{}\n\t\t}\n\t\tconfigOptions := configOptionsInt.([]controllertesting.KafkaConfigOption)\n\n\t\tr := &Reconciler{\n\t\t\tkubeClientset: kubeclient.Get(ctx),\n\t\t\tadminClientType: types.Kafka,\n\t\t\tadminClient: nil,\n\t\t\tenvironment: controllertesting.NewEnvironment(),\n\t\t\tconfig: controllertesting.NewConfig(configOptions...),\n\t\t\tkafkachannelLister: listers.GetKafkaChannelLister(),\n\t\t\tkafkachannelInformer: nil,\n\t\t\tdeploymentLister: listers.GetDeploymentLister(),\n\t\t\tserviceLister: listers.GetServiceLister(),\n\t\t\tkafkaClientSet: fakekafkaclient.Get(ctx),\n\t\t\tadminMutex: &sync.Mutex{},\n\t\t\tkafkaBrokers: controllertesting.KafkaSecretDataValueBrokers,\n\t\t\tkafkaSecret: controllertesting.KafkaSecretName,\n\t\t\tkafkaUsername: controllertesting.KafkaSecretDataValueUsername,\n\t\t\tkafkaPassword: controllertesting.KafkaSecretDataValuePassword,\n\t\t\tkafkaSaslType: controllertesting.KafkaSecretDataValueSaslType,\n\t\t\tkafkaConfigMapHash: controllertesting.ConfigMapHash,\n\t\t}\n\n\t\treconcilerOptions, ok := options[\"reconcilerOptions\"]\n\t\tif ok {\n\t\t\tfor _, option := range reconcilerOptions.([]reconcilerOption) {\n\t\t\t\toption(r)\n\t\t\t}\n\t\t}\n\n\t\treturn kafkachannelreconciler.NewReconciler(ctx, logger, r.kafkaClientSet, listers.GetKafkaChannelLister(), controller.GetEventRecorder(ctx), r)\n\t}, logger.Desugar()))\n}", "func TestConcurrentCreateBigDocuments(t *testing.T) {\n\tif testing.Short() {\n\t\tt.Skip(\"Skip on short tests\")\n\t}\n\n\t// Disable those tests for active failover\n\tif getTestMode() == testModeResilientSingle {\n\t\tt.Skip(\"Disabled in active failover mode\")\n\t}\n\n\t// don't use disallowUnknownFields in this test - we have here custom structs defined\n\tc := createClient(t, &testsClientConfig{skipDisallowUnknownFields: 
true})\n\n\tversion, err := c.Version(nil)\n\tif err != nil {\n\t\tt.Fatalf(\"Version failed: %s\", describe(err))\n\t}\n\tisv33p := version.Version.CompareTo(\"3.3\") >= 0\n\tif !isv33p && os.Getenv(\"TEST_CONNECTION\") == \"vst\" {\n\t\tt.Skip(\"Skipping VST load test on 3.2\")\n\t} else {\n\t\tdb := ensureDatabase(nil, c, \"document_test\", nil, t)\n\t\tcol := ensureCollection(nil, db, \"TestConcurrentCreateBigDocuments\", nil, t)\n\n\t\tdocChan := make(chan driver.DocumentMeta, 16*1024)\n\n\t\tcreator := func(limit, interval int) {\n\t\t\tdata := make([]byte, 1024)\n\t\t\tfor i := 0; i < limit; i++ {\n\t\t\t\trand.Read(data)\n\t\t\t\tctx := context.Background()\n\t\t\t\tdoc := UserDoc{\n\t\t\t\t\t\"Jan\" + strconv.Itoa(i) + hex.EncodeToString(data),\n\t\t\t\t\ti * interval,\n\t\t\t\t}\n\t\t\t\tmeta, err := col.CreateDocument(ctx, doc)\n\t\t\t\tif err != nil {\n\t\t\t\t\tt.Fatalf(\"Failed to create new document: %s\", describe(err))\n\t\t\t\t}\n\t\t\t\tdocChan <- meta\n\t\t\t}\n\t\t}\n\n\t\treader := func() {\n\t\t\tfor {\n\t\t\t\tmeta, ok := <-docChan\n\t\t\t\tif !ok {\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\t// Document must exists now\n\t\t\t\tif found, err := col.DocumentExists(nil, meta.Key); err != nil {\n\t\t\t\t\tt.Fatalf(\"DocumentExists failed for '%s': %s\", meta.Key, describe(err))\n\t\t\t\t} else if !found {\n\t\t\t\t\tt.Errorf(\"DocumentExists returned false for '%s', expected true\", meta.Key)\n\t\t\t\t}\n\t\t\t\t// Read document\n\t\t\t\tvar readDoc UserDoc\n\t\t\t\tif _, err := col.ReadDocument(nil, meta.Key, &readDoc); err != nil {\n\t\t\t\t\tt.Fatalf(\"Failed to read document '%s': %s\", meta.Key, describe(err))\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\tnoCreators := getIntFromEnv(\"NOCREATORS\", 25)\n\t\tnoReaders := getIntFromEnv(\"NOREADERS\", 50)\n\t\tnoDocuments := getIntFromEnv(\"NODOCUMENTS\", 100) // per creator\n\n\t\twgCreators := sync.WaitGroup{}\n\t\t// Run N concurrent creators\n\t\tfor i := 0; i < noCreators; i++ 
{\n\t\t\twgCreators.Add(1)\n\t\t\tgo func() {\n\t\t\t\tdefer wgCreators.Done()\n\t\t\t\tcreator(noDocuments, noCreators)\n\t\t\t}()\n\t\t}\n\t\twgReaders := sync.WaitGroup{}\n\t\t// Run M readers\n\t\tfor i := 0; i < noReaders; i++ {\n\t\t\twgReaders.Add(1)\n\t\t\tgo func() {\n\t\t\t\tdefer wgReaders.Done()\n\t\t\t\treader()\n\t\t\t}()\n\t\t}\n\t\twgCreators.Wait()\n\t\tclose(docChan)\n\t\twgReaders.Wait()\n\t}\n}", "func (m *MockFullNode) Concurrent(arg0 context.Context) int64 {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"Concurrent\", arg0)\n\tret0, _ := ret[0].(int64)\n\treturn ret0\n}", "func TestActiveReplicatorPushFromCheckpointIgnored(t *testing.T) {\n\n\tbase.RequireNumTestBuckets(t, 2)\n\n\tbase.SetUpTestLogging(t, logger.LevelInfo, logger.KeyReplicate, logger.KeyHTTP, logger.KeyHTTPResp, logger.KeySync, logger.KeySyncMsg)\n\n\tconst (\n\t\tchangesBatchSize = 10\n\t\tnumRT1DocsInitial = 13 // 2 batches of changes\n\t\tnumRT1DocsTotal = 24 // 2 more batches\n\t)\n\n\t// Active\n\ttb1 := base.GetTestBucket(t)\n\trt1 := NewRestTester(t, &RestTesterConfig{\n\t\tTestBucket: tb1,\n\t})\n\tdefer rt1.Close()\n\n\t// Passive\n\ttb2 := base.GetTestBucket(t)\n\trt2 := NewRestTester(t, &RestTesterConfig{\n\t\tTestBucket: tb2,\n\t\tDatabaseConfig: &DatabaseConfig{DbConfig: DbConfig{\n\t\t\tUsers: map[string]*db.PrincipalConfig{\n\t\t\t\t\"alice\": {\n\t\t\t\t\tPassword: base.StringPtr(\"pass\"),\n\t\t\t\t\tExplicitChannels: utils.SetOf(\"alice\"),\n\t\t\t\t},\n\t\t\t},\n\t\t}},\n\t})\n\tdefer rt2.Close()\n\n\t// Create first batch of docs\n\tdocIDPrefix := t.Name() + \"doc\"\n\tfor i := 0; i < numRT1DocsInitial; i++ {\n\t\tresp := rt1.SendAdminRequest(http.MethodPut, fmt.Sprintf(\"/db/%s%d\", docIDPrefix, i), `{\"channels\":[\"alice\"]}`)\n\t\tassertStatus(t, resp, http.StatusCreated)\n\t\trt1RevID := respRevID(t, resp)\n\t\tresp = rt2.SendAdminRequest(http.MethodPut, fmt.Sprintf(\"/db/%s%d\", docIDPrefix, i), `{\"channels\":[\"alice\"]}`)\n\t\tassertStatus(t, 
resp, http.StatusCreated)\n\t\trt2RevID := respRevID(t, resp)\n\t\trequire.Equal(t, rt1RevID, rt2RevID)\n\t}\n\n\t// Make rt2 listen on an actual HTTP port, so it can receive the blipsync request from rt1\n\tsrv := httptest.NewServer(rt2.TestPublicHandler())\n\tdefer srv.Close()\n\n\t// Build passiveDBURL with basic auth creds\n\tpassiveDBURL, err := url.Parse(srv.URL + \"/db\")\n\trequire.NoError(t, err)\n\tpassiveDBURL.User = url.UserPassword(\"alice\", \"pass\")\n\n\tarConfig := db.ActiveReplicatorConfig{\n\t\tID: t.Name(),\n\t\tDirection: db.ActiveReplicatorTypePush,\n\t\tRemoteDBURL: passiveDBURL,\n\t\tActiveDB: &db.Database{\n\t\t\tDatabaseContext: rt1.GetDatabase(),\n\t\t},\n\t\tContinuous: true,\n\t\tChangesBatchSize: changesBatchSize,\n\t\tReplicationStatsMap: base.SyncGatewayStats.NewDBStats(t.Name(), false, false, false).DBReplicatorStats(t.Name()),\n\t}\n\n\t// Create the first active replicator to pull from seq:0\n\tar := db.NewActiveReplicator(&arConfig)\n\n\tstartNumChangesRequestedFromZeroTotal := rt1.GetDatabase().DbStats.CBLReplicationPull().NumPullReplSinceZero.Value()\n\n\tassert.NoError(t, ar.Start())\n\n\t_, ok := base.WaitForStat(func() int64 {\n\t\treturn ar.Push.Checkpointer.Stats().AlreadyKnownSequenceCount\n\t}, numRT1DocsInitial)\n\tassert.True(t, ok)\n\n\t// one _changes from seq:0 with initial number of docs sent\n\tnumChangesRequestedFromZeroTotal := rt1.GetDatabase().DbStats.CBLReplicationPull().NumPullReplSinceZero.Value()\n\tassert.Equal(t, startNumChangesRequestedFromZeroTotal+1, numChangesRequestedFromZeroTotal)\n\n\t// rev assertions\n\tnumRevsSentTotal := ar.Push.GetStats().SendRevCount.Value()\n\tassert.Equal(t, int64(0), numRevsSentTotal)\n\tassert.Equal(t, int64(0), ar.Push.Checkpointer.Stats().ProcessedSequenceCount)\n\tassert.Equal(t, int64(0), ar.Push.Checkpointer.Stats().ExpectedSequenceCount)\n\n\t// checkpoint assertions\n\tassert.Equal(t, int64(0), ar.Push.Checkpointer.Stats().GetCheckpointHitCount)\n\tassert.Equal(t, 
int64(1), ar.Push.Checkpointer.Stats().GetCheckpointMissCount)\n\tassert.Equal(t, int64(0), ar.Push.Checkpointer.Stats().SetCheckpointCount)\n\n\tassert.NoError(t, ar.Stop())\n\n\t// Second batch of docs\n\tfor i := numRT1DocsInitial; i < numRT1DocsTotal; i++ {\n\t\tresp := rt1.SendAdminRequest(http.MethodPut, fmt.Sprintf(\"/db/%s%d\", docIDPrefix, i), `{\"channels\":[\"alice\"]}`)\n\t\tassertStatus(t, resp, http.StatusCreated)\n\t\trt1RevID := respRevID(t, resp)\n\t\tresp = rt2.SendAdminRequest(http.MethodPut, fmt.Sprintf(\"/db/%s%d\", docIDPrefix, i), `{\"channels\":[\"alice\"]}`)\n\t\tassertStatus(t, resp, http.StatusCreated)\n\t\trt2RevID := respRevID(t, resp)\n\t\trequire.Equal(t, rt1RevID, rt2RevID)\n\t}\n\n\t// Create a new replicator using the same config, which should use the checkpoint set from the first.\n\tar = db.NewActiveReplicator(&arConfig)\n\tdefer func() { assert.NoError(t, ar.Stop()) }()\n\tassert.NoError(t, ar.Start())\n\n\t_, ok = base.WaitForStat(func() int64 {\n\t\treturn ar.Push.Checkpointer.Stats().AlreadyKnownSequenceCount\n\t}, numRT1DocsTotal-numRT1DocsInitial)\n\tassert.True(t, ok)\n\n\t// Make sure we've not started any more since:0 replications on rt1 since the first one\n\tendNumChangesRequestedFromZeroTotal := rt1.GetDatabase().DbStats.CBLReplicationPull().NumPullReplSinceZero.Value()\n\tassert.Equal(t, numChangesRequestedFromZeroTotal, endNumChangesRequestedFromZeroTotal)\n\n\t// make sure rt1 thinks it has sent all of the revs via a 2.x replicator\n\tnumRevsSentTotal = ar.Push.GetStats().SendRevCount.Value()\n\tassert.Equal(t, int64(0), numRevsSentTotal)\n\tassert.Equal(t, int64(0), ar.Push.Checkpointer.Stats().ProcessedSequenceCount)\n\tassert.Equal(t, int64(0), ar.Push.Checkpointer.Stats().ExpectedSequenceCount)\n\n\t// assert the second active replicator stats\n\tassert.Equal(t, int64(1), ar.Push.Checkpointer.Stats().GetCheckpointHitCount)\n\tassert.Equal(t, int64(0), 
ar.Push.Checkpointer.Stats().GetCheckpointMissCount)\n\tassert.Equal(t, int64(0), ar.Push.Checkpointer.Stats().SetCheckpointCount)\n\tar.Push.Checkpointer.CheckpointNow()\n\tassert.Equal(t, int64(1), ar.Push.Checkpointer.Stats().SetCheckpointCount)\n}", "func updatePodTests() []*SerialTestCase {\n\tsequence1Tests := []*SerialTestCase{\n\t\t{\n\t\t\tDescription: \"Sequence 1: Pod A create --> Policy create --> Pod A cleanup --> Pod B create\",\n\t\t\tActions: []*Action{\n\t\t\t\tFinishBootupPhase(),\n\t\t\t\tCreatePod(\"x\", \"a\", ip1, thisNode, map[string]string{\"k1\": \"v1\"}),\n\t\t\t\tApplyDP(),\n\t\t\t\tUpdatePolicy(policyXBaseOnK1V1()),\n\t\t\t\tUpdatePolicy(policyXBase2OnK2V2()),\n\t\t\t\tDeletePod(\"x\", \"a\", ip1, map[string]string{\"k1\": \"v1\"}),\n\t\t\t\tApplyDP(),\n\t\t\t\tCreatePod(\"x\", \"b\", ip1, thisNode, map[string]string{\"k2\": \"v2\"}),\n\t\t\t\tApplyDP(),\n\t\t\t},\n\t\t\tTestCaseMetadata: &TestCaseMetadata{\n\t\t\t\tTags: []Tag{\n\t\t\t\t\tpodCrudTag,\n\t\t\t\t\tnetpolCrudTag,\n\t\t\t\t},\n\t\t\t\tDpCfg: defaultWindowsDPCfg,\n\t\t\t\tInitialEndpoints: []*hcn.HostComputeEndpoint{\n\t\t\t\t\tdptestutils.Endpoint(endpoint1, ip1),\n\t\t\t\t},\n\t\t\t\tExpectedSetPolicies: []*hcn.SetPolicySetting{\n\t\t\t\t\tdptestutils.SetPolicy(emptySet),\n\t\t\t\t\tdptestutils.SetPolicy(allNamespaces, emptySet.GetHashedName(), nsXSet.GetHashedName()),\n\t\t\t\t\tdptestutils.SetPolicy(nsXSet, ip1),\n\t\t\t\t\t// old labels (not yet garbage collected)\n\t\t\t\t\tdptestutils.SetPolicy(podK1Set),\n\t\t\t\t\tdptestutils.SetPolicy(podK1V1Set),\n\t\t\t\t\t// new labels\n\t\t\t\t\tdptestutils.SetPolicy(podK2Set, ip1),\n\t\t\t\t\tdptestutils.SetPolicy(podK2V2Set, ip1),\n\t\t\t\t},\n\t\t\t\tExpectedEnpdointACLs: map[string][]*hnswrapper.FakeEndpointPolicy{\n\t\t\t\t\tendpoint1: {\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tID: \"azure-acl-x-base2\",\n\t\t\t\t\t\t\tProtocols: \"\",\n\t\t\t\t\t\t\tAction: \"Allow\",\n\t\t\t\t\t\t\tDirection: \"In\",\n\t\t\t\t\t\t\tLocalAddresses: 
\"\",\n\t\t\t\t\t\t\tRemoteAddresses: \"\",\n\t\t\t\t\t\t\tLocalPorts: \"\",\n\t\t\t\t\t\t\tRemotePorts: \"\",\n\t\t\t\t\t\t\tPriority: 222,\n\t\t\t\t\t\t},\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tID: \"azure-acl-x-base2\",\n\t\t\t\t\t\t\tProtocols: \"\",\n\t\t\t\t\t\t\tAction: \"Allow\",\n\t\t\t\t\t\t\tDirection: \"Out\",\n\t\t\t\t\t\t\tLocalAddresses: \"\",\n\t\t\t\t\t\t\tRemoteAddresses: \"\",\n\t\t\t\t\t\t\tLocalPorts: \"\",\n\t\t\t\t\t\t\tRemotePorts: \"\",\n\t\t\t\t\t\t\tPriority: 222,\n\t\t\t\t\t\t},\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tID: \"azure-acl-x-base2\",\n\t\t\t\t\t\t\tAction: \"Allow\",\n\t\t\t\t\t\t\tDirection: \"In\",\n\t\t\t\t\t\t\tRemoteAddresses: testNodeIP,\n\t\t\t\t\t\t\tPriority: 201,\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tDescription: \"Sequence 1: Policy create --> Pod A create --> Pod A cleanup --> Pod B create\",\n\t\t\tActions: []*Action{\n\t\t\t\tFinishBootupPhase(),\n\t\t\t\tUpdatePolicy(policyXBaseOnK1V1()),\n\t\t\t\tUpdatePolicy(policyXBase2OnK2V2()),\n\t\t\t\tCreatePod(\"x\", \"a\", ip1, thisNode, map[string]string{\"k1\": \"v1\"}),\n\t\t\t\tApplyDP(),\n\t\t\t\tDeletePod(\"x\", \"a\", ip1, map[string]string{\"k1\": \"v1\"}),\n\t\t\t\tApplyDP(),\n\t\t\t\tCreatePod(\"x\", \"b\", ip1, thisNode, map[string]string{\"k2\": \"v2\"}),\n\t\t\t\tApplyDP(),\n\t\t\t},\n\t\t\tTestCaseMetadata: &TestCaseMetadata{\n\t\t\t\tTags: []Tag{\n\t\t\t\t\tpodCrudTag,\n\t\t\t\t\tnetpolCrudTag,\n\t\t\t\t},\n\t\t\t\tDpCfg: defaultWindowsDPCfg,\n\t\t\t\tInitialEndpoints: []*hcn.HostComputeEndpoint{\n\t\t\t\t\tdptestutils.Endpoint(endpoint1, ip1),\n\t\t\t\t},\n\t\t\t\tExpectedSetPolicies: []*hcn.SetPolicySetting{\n\t\t\t\t\tdptestutils.SetPolicy(emptySet),\n\t\t\t\t\tdptestutils.SetPolicy(allNamespaces, emptySet.GetHashedName(), nsXSet.GetHashedName()),\n\t\t\t\t\tdptestutils.SetPolicy(nsXSet, ip1),\n\t\t\t\t\t// old labels (not yet garbage 
collected)\n\t\t\t\t\tdptestutils.SetPolicy(podK1Set),\n\t\t\t\t\tdptestutils.SetPolicy(podK1V1Set),\n\t\t\t\t\t// new labels\n\t\t\t\t\tdptestutils.SetPolicy(podK2Set, ip1),\n\t\t\t\t\tdptestutils.SetPolicy(podK2V2Set, ip1),\n\t\t\t\t},\n\t\t\t\tExpectedEnpdointACLs: map[string][]*hnswrapper.FakeEndpointPolicy{\n\t\t\t\t\tendpoint1: {\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tID: \"azure-acl-x-base2\",\n\t\t\t\t\t\t\tProtocols: \"\",\n\t\t\t\t\t\t\tAction: \"Allow\",\n\t\t\t\t\t\t\tDirection: \"In\",\n\t\t\t\t\t\t\tLocalAddresses: \"\",\n\t\t\t\t\t\t\tRemoteAddresses: \"\",\n\t\t\t\t\t\t\tLocalPorts: \"\",\n\t\t\t\t\t\t\tRemotePorts: \"\",\n\t\t\t\t\t\t\tPriority: 222,\n\t\t\t\t\t\t},\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tID: \"azure-acl-x-base2\",\n\t\t\t\t\t\t\tProtocols: \"\",\n\t\t\t\t\t\t\tAction: \"Allow\",\n\t\t\t\t\t\t\tDirection: \"Out\",\n\t\t\t\t\t\t\tLocalAddresses: \"\",\n\t\t\t\t\t\t\tRemoteAddresses: \"\",\n\t\t\t\t\t\t\tLocalPorts: \"\",\n\t\t\t\t\t\t\tRemotePorts: \"\",\n\t\t\t\t\t\t\tPriority: 222,\n\t\t\t\t\t\t},\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tID: \"azure-acl-x-base2\",\n\t\t\t\t\t\t\tAction: \"Allow\",\n\t\t\t\t\t\t\tDirection: \"In\",\n\t\t\t\t\t\t\tRemoteAddresses: testNodeIP,\n\t\t\t\t\t\t\tPriority: 201,\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tDescription: \"Sequence 1: Policy create --> Pod A create --> Pod A cleanup --> Pod B create (skip first apply DP)\",\n\t\t\tActions: []*Action{\n\t\t\t\tFinishBootupPhase(),\n\t\t\t\tUpdatePolicy(policyXBaseOnK1V1()),\n\t\t\t\tUpdatePolicy(policyXBase2OnK2V2()),\n\t\t\t\tCreatePod(\"x\", \"a\", ip1, thisNode, map[string]string{\"k1\": \"v1\"}),\n\t\t\t\tDeletePod(\"x\", \"a\", ip1, map[string]string{\"k1\": \"v1\"}),\n\t\t\t\tApplyDP(),\n\t\t\t\tCreatePod(\"x\", \"b\", ip1, thisNode, map[string]string{\"k2\": \"v2\"}),\n\t\t\t\tApplyDP(),\n\t\t\t},\n\t\t\tTestCaseMetadata: &TestCaseMetadata{\n\t\t\t\tTags: 
[]Tag{\n\t\t\t\t\tpodCrudTag,\n\t\t\t\t\tnetpolCrudTag,\n\t\t\t\t},\n\t\t\t\tDpCfg: defaultWindowsDPCfg,\n\t\t\t\tInitialEndpoints: []*hcn.HostComputeEndpoint{\n\t\t\t\t\tdptestutils.Endpoint(endpoint1, ip1),\n\t\t\t\t},\n\t\t\t\tExpectedSetPolicies: []*hcn.SetPolicySetting{\n\t\t\t\t\tdptestutils.SetPolicy(emptySet),\n\t\t\t\t\tdptestutils.SetPolicy(allNamespaces, emptySet.GetHashedName(), nsXSet.GetHashedName()),\n\t\t\t\t\tdptestutils.SetPolicy(nsXSet, ip1),\n\t\t\t\t\t// old labels (not yet garbage collected)\n\t\t\t\t\tdptestutils.SetPolicy(podK1Set),\n\t\t\t\t\tdptestutils.SetPolicy(podK1V1Set),\n\t\t\t\t\t// new labels\n\t\t\t\t\tdptestutils.SetPolicy(podK2Set, ip1),\n\t\t\t\t\tdptestutils.SetPolicy(podK2V2Set, ip1),\n\t\t\t\t},\n\t\t\t\tExpectedEnpdointACLs: map[string][]*hnswrapper.FakeEndpointPolicy{\n\t\t\t\t\tendpoint1: {\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tID: \"azure-acl-x-base2\",\n\t\t\t\t\t\t\tProtocols: \"\",\n\t\t\t\t\t\t\tAction: \"Allow\",\n\t\t\t\t\t\t\tDirection: \"In\",\n\t\t\t\t\t\t\tLocalAddresses: \"\",\n\t\t\t\t\t\t\tRemoteAddresses: \"\",\n\t\t\t\t\t\t\tLocalPorts: \"\",\n\t\t\t\t\t\t\tRemotePorts: \"\",\n\t\t\t\t\t\t\tPriority: 222,\n\t\t\t\t\t\t},\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tID: \"azure-acl-x-base2\",\n\t\t\t\t\t\t\tProtocols: \"\",\n\t\t\t\t\t\t\tAction: \"Allow\",\n\t\t\t\t\t\t\tDirection: \"Out\",\n\t\t\t\t\t\t\tLocalAddresses: \"\",\n\t\t\t\t\t\t\tRemoteAddresses: \"\",\n\t\t\t\t\t\t\tLocalPorts: \"\",\n\t\t\t\t\t\t\tRemotePorts: \"\",\n\t\t\t\t\t\t\tPriority: 222,\n\t\t\t\t\t\t},\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tID: \"azure-acl-x-base2\",\n\t\t\t\t\t\t\tAction: \"Allow\",\n\t\t\t\t\t\t\tDirection: \"In\",\n\t\t\t\t\t\t\tRemoteAddresses: testNodeIP,\n\t\t\t\t\t\t\tPriority: 201,\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tDescription: \"Sequence 1: Policy create --> Pod A create --> Pod A cleanup --> Pod B create (skip first two apply DP)\",\n\t\t\tActions: 
[]*Action{\n\t\t\t\tFinishBootupPhase(),\n\t\t\t\tUpdatePolicy(policyXBaseOnK1V1()),\n\t\t\t\tUpdatePolicy(policyXBase2OnK2V2()),\n\t\t\t\tCreatePod(\"x\", \"a\", ip1, thisNode, map[string]string{\"k1\": \"v1\"}),\n\t\t\t\tDeletePod(\"x\", \"a\", ip1, map[string]string{\"k1\": \"v1\"}),\n\t\t\t\tCreatePod(\"x\", \"b\", ip1, thisNode, map[string]string{\"k2\": \"v2\"}),\n\t\t\t\tApplyDP(),\n\t\t\t},\n\t\t\tTestCaseMetadata: &TestCaseMetadata{\n\t\t\t\tTags: []Tag{\n\t\t\t\t\tpodCrudTag,\n\t\t\t\t\tnetpolCrudTag,\n\t\t\t\t},\n\t\t\t\tDpCfg: defaultWindowsDPCfg,\n\t\t\t\tInitialEndpoints: []*hcn.HostComputeEndpoint{\n\t\t\t\t\tdptestutils.Endpoint(endpoint1, ip1),\n\t\t\t\t},\n\t\t\t\tExpectedSetPolicies: []*hcn.SetPolicySetting{\n\t\t\t\t\tdptestutils.SetPolicy(emptySet),\n\t\t\t\t\tdptestutils.SetPolicy(allNamespaces, emptySet.GetHashedName(), nsXSet.GetHashedName()),\n\t\t\t\t\tdptestutils.SetPolicy(nsXSet, ip1),\n\t\t\t\t\t// old labels (not yet garbage collected)\n\t\t\t\t\tdptestutils.SetPolicy(podK1Set),\n\t\t\t\t\tdptestutils.SetPolicy(podK1V1Set),\n\t\t\t\t\t// new labels\n\t\t\t\t\tdptestutils.SetPolicy(podK2Set, ip1),\n\t\t\t\t\tdptestutils.SetPolicy(podK2V2Set, ip1),\n\t\t\t\t},\n\t\t\t\tExpectedEnpdointACLs: map[string][]*hnswrapper.FakeEndpointPolicy{\n\t\t\t\t\tendpoint1: {\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tID: \"azure-acl-x-base2\",\n\t\t\t\t\t\t\tProtocols: \"\",\n\t\t\t\t\t\t\tAction: \"Allow\",\n\t\t\t\t\t\t\tDirection: \"In\",\n\t\t\t\t\t\t\tLocalAddresses: \"\",\n\t\t\t\t\t\t\tRemoteAddresses: \"\",\n\t\t\t\t\t\t\tLocalPorts: \"\",\n\t\t\t\t\t\t\tRemotePorts: \"\",\n\t\t\t\t\t\t\tPriority: 222,\n\t\t\t\t\t\t},\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tID: \"azure-acl-x-base2\",\n\t\t\t\t\t\t\tProtocols: \"\",\n\t\t\t\t\t\t\tAction: \"Allow\",\n\t\t\t\t\t\t\tDirection: \"Out\",\n\t\t\t\t\t\t\tLocalAddresses: \"\",\n\t\t\t\t\t\t\tRemoteAddresses: \"\",\n\t\t\t\t\t\t\tLocalPorts: \"\",\n\t\t\t\t\t\t\tRemotePorts: \"\",\n\t\t\t\t\t\t\tPriority: 
222,\n\t\t\t\t\t\t},\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tID: \"azure-acl-x-base2\",\n\t\t\t\t\t\t\tAction: \"Allow\",\n\t\t\t\t\t\t\tDirection: \"In\",\n\t\t\t\t\t\t\tRemoteAddresses: testNodeIP,\n\t\t\t\t\t\t\tPriority: 201,\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\n\tsequence2Tests := []*SerialTestCase{\n\t\t{\n\t\t\tDescription: \"Sequence 2 with Calico network\",\n\t\t\tActions: []*Action{\n\t\t\t\tFinishBootupPhase(),\n\t\t\t\tUpdatePolicy(policyXBaseOnK1V1()),\n\t\t\t\tUpdatePolicy(policyXBase2OnK2V2()),\n\t\t\t\tCreatePod(\"x\", \"a\", ip1, thisNode, map[string]string{\"k1\": \"v1\"}),\n\t\t\t\tApplyDP(),\n\t\t\t\tCreatePod(\"x\", \"b\", ip1, thisNode, map[string]string{\"k2\": \"v2\"}),\n\t\t\t\tApplyDP(),\n\t\t\t},\n\t\t\tTestCaseMetadata: &TestCaseMetadata{\n\t\t\t\tTags: []Tag{\n\t\t\t\t\tpodCrudTag,\n\t\t\t\t\tnetpolCrudTag,\n\t\t\t\t},\n\t\t\t\tDpCfg: windowsCalicoDPCfg,\n\t\t\t\tInitialEndpoints: []*hcn.HostComputeEndpoint{\n\t\t\t\t\tdptestutils.Endpoint(endpoint1, ip1),\n\t\t\t\t},\n\t\t\t\tExpectedSetPolicies: []*hcn.SetPolicySetting{\n\t\t\t\t\tdptestutils.SetPolicy(emptySet),\n\t\t\t\t\tdptestutils.SetPolicy(allNamespaces, emptySet.GetHashedName(), nsXSet.GetHashedName()),\n\t\t\t\t\tdptestutils.SetPolicy(nsXSet, ip1),\n\t\t\t\t\t// IP temporarily associated with IPSets of both pod A and pod B\n\t\t\t\t\t// Pod A sets\n\t\t\t\t\tdptestutils.SetPolicy(podK1Set, ip1),\n\t\t\t\t\tdptestutils.SetPolicy(podK1V1Set, ip1),\n\t\t\t\t\t// Pod B sets\n\t\t\t\t\tdptestutils.SetPolicy(podK2Set, ip1),\n\t\t\t\t\tdptestutils.SetPolicy(podK2V2Set, ip1),\n\t\t\t\t},\n\t\t\t\tExpectedEnpdointACLs: map[string][]*hnswrapper.FakeEndpointPolicy{\n\t\t\t\t\tendpoint1: {\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tID: \"azure-acl-x-base2\",\n\t\t\t\t\t\t\tProtocols: \"\",\n\t\t\t\t\t\t\tAction: \"Allow\",\n\t\t\t\t\t\t\tDirection: \"In\",\n\t\t\t\t\t\t\tLocalAddresses: \"\",\n\t\t\t\t\t\t\tRemoteAddresses: \"\",\n\t\t\t\t\t\t\tLocalPorts: 
\"\",\n\t\t\t\t\t\t\tRemotePorts: \"\",\n\t\t\t\t\t\t\tPriority: 222,\n\t\t\t\t\t\t},\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tID: \"azure-acl-x-base2\",\n\t\t\t\t\t\t\tProtocols: \"\",\n\t\t\t\t\t\t\tAction: \"Allow\",\n\t\t\t\t\t\t\tDirection: \"Out\",\n\t\t\t\t\t\t\tLocalAddresses: \"\",\n\t\t\t\t\t\t\tRemoteAddresses: \"\",\n\t\t\t\t\t\t\tLocalPorts: \"\",\n\t\t\t\t\t\t\tRemotePorts: \"\",\n\t\t\t\t\t\t\tPriority: 222,\n\t\t\t\t\t\t},\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tID: \"azure-acl-x-base2\",\n\t\t\t\t\t\t\tAction: \"Allow\",\n\t\t\t\t\t\t\tDirection: \"In\",\n\t\t\t\t\t\t\tRemoteAddresses: testNodeIP,\n\t\t\t\t\t\t\tPriority: 201,\n\t\t\t\t\t\t},\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tID: \"azure-acl-baseazurewireserver\",\n\t\t\t\t\t\t\tAction: \"Block\",\n\t\t\t\t\t\t\tDirection: \"Out\",\n\t\t\t\t\t\t\tPriority: 200,\n\t\t\t\t\t\t\tRemoteAddresses: \"168.63.129.16/32\",\n\t\t\t\t\t\t\tRemotePorts: \"80\",\n\t\t\t\t\t\t\tProtocols: \"6\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tID: \"azure-acl-baseallowinswitch\",\n\t\t\t\t\t\t\tAction: \"Allow\",\n\t\t\t\t\t\t\tDirection: \"In\",\n\t\t\t\t\t\t\tPriority: 65499,\n\t\t\t\t\t\t},\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tID: \"azure-acl-baseallowoutswitch\",\n\t\t\t\t\t\t\tAction: \"Allow\",\n\t\t\t\t\t\t\tDirection: \"Out\",\n\t\t\t\t\t\t\tPriority: 65499,\n\t\t\t\t\t\t},\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tID: \"azure-acl-baseallowinhost\",\n\t\t\t\t\t\t\tAction: \"Allow\",\n\t\t\t\t\t\t\tDirection: \"In\",\n\t\t\t\t\t\t\tLocalAddresses: \"\",\n\t\t\t\t\t\t\tPriority: 0,\n\t\t\t\t\t\t\tRemoteAddresses: \"\",\n\t\t\t\t\t\t\t// RuleType is unsupported in FakeEndpointPolicy\n\t\t\t\t\t\t\t// RuleType: \"Host\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tID: \"azure-acl-baseallowouthost\",\n\t\t\t\t\t\t\tAction: \"Allow\",\n\t\t\t\t\t\t\tDirection: \"Out\",\n\t\t\t\t\t\t\tLocalAddresses: \"\",\n\t\t\t\t\t\t\tPriority: 0,\n\t\t\t\t\t\t\tRemoteAddresses: \"\",\n\t\t\t\t\t\t\t// RuleType is unsupported in FakeEndpointPolicy\n\t\t\t\t\t\t\t// 
RuleType: \"Host\",\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tDescription: \"Sequence 2: Policy create --> Pod A Create --> Pod B create\",\n\t\t\tActions: []*Action{\n\t\t\t\tFinishBootupPhase(),\n\t\t\t\tUpdatePolicy(policyXBaseOnK1V1()),\n\t\t\t\tUpdatePolicy(policyXBase2OnK2V2()),\n\t\t\t\tCreatePod(\"x\", \"a\", ip1, thisNode, map[string]string{\"k1\": \"v1\"}),\n\t\t\t\tApplyDP(),\n\t\t\t\tCreatePod(\"x\", \"b\", ip1, thisNode, map[string]string{\"k2\": \"v2\"}),\n\t\t\t\tApplyDP(),\n\t\t\t},\n\t\t\tTestCaseMetadata: &TestCaseMetadata{\n\t\t\t\tTags: []Tag{\n\t\t\t\t\tpodCrudTag,\n\t\t\t\t\tnetpolCrudTag,\n\t\t\t\t},\n\t\t\t\tDpCfg: defaultWindowsDPCfg,\n\t\t\t\tInitialEndpoints: []*hcn.HostComputeEndpoint{\n\t\t\t\t\tdptestutils.Endpoint(endpoint1, ip1),\n\t\t\t\t},\n\t\t\t\tExpectedSetPolicies: []*hcn.SetPolicySetting{\n\t\t\t\t\tdptestutils.SetPolicy(emptySet),\n\t\t\t\t\tdptestutils.SetPolicy(allNamespaces, emptySet.GetHashedName(), nsXSet.GetHashedName()),\n\t\t\t\t\tdptestutils.SetPolicy(nsXSet, ip1),\n\t\t\t\t\t// IP temporarily associated with IPSets of both pod A and pod B\n\t\t\t\t\t// Pod A sets\n\t\t\t\t\tdptestutils.SetPolicy(podK1Set, ip1),\n\t\t\t\t\tdptestutils.SetPolicy(podK1V1Set, ip1),\n\t\t\t\t\t// Pod B sets\n\t\t\t\t\tdptestutils.SetPolicy(podK2Set, ip1),\n\t\t\t\t\tdptestutils.SetPolicy(podK2V2Set, ip1),\n\t\t\t\t},\n\t\t\t\tExpectedEnpdointACLs: map[string][]*hnswrapper.FakeEndpointPolicy{\n\t\t\t\t\tendpoint1: {\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tID: \"azure-acl-x-base2\",\n\t\t\t\t\t\t\tProtocols: \"\",\n\t\t\t\t\t\t\tAction: \"Allow\",\n\t\t\t\t\t\t\tDirection: \"In\",\n\t\t\t\t\t\t\tLocalAddresses: \"\",\n\t\t\t\t\t\t\tRemoteAddresses: \"\",\n\t\t\t\t\t\t\tLocalPorts: \"\",\n\t\t\t\t\t\t\tRemotePorts: \"\",\n\t\t\t\t\t\t\tPriority: 222,\n\t\t\t\t\t\t},\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tID: \"azure-acl-x-base2\",\n\t\t\t\t\t\t\tProtocols: \"\",\n\t\t\t\t\t\t\tAction: \"Allow\",\n\t\t\t\t\t\t\tDirection: 
\"Out\",\n\t\t\t\t\t\t\tLocalAddresses: \"\",\n\t\t\t\t\t\t\tRemoteAddresses: \"\",\n\t\t\t\t\t\t\tLocalPorts: \"\",\n\t\t\t\t\t\t\tRemotePorts: \"\",\n\t\t\t\t\t\t\tPriority: 222,\n\t\t\t\t\t\t},\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tID: \"azure-acl-x-base2\",\n\t\t\t\t\t\t\tAction: \"Allow\",\n\t\t\t\t\t\t\tDirection: \"In\",\n\t\t\t\t\t\t\tRemoteAddresses: testNodeIP,\n\t\t\t\t\t\t\tPriority: 201,\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tDescription: \"Sequence 2: Policy create --> Pod A Create --> Pod B create --> Pod A cleanup\",\n\t\t\tActions: []*Action{\n\t\t\t\tFinishBootupPhase(),\n\t\t\t\tUpdatePolicy(policyXBaseOnK1V1()),\n\t\t\t\tUpdatePolicy(policyXBase2OnK2V2()),\n\t\t\t\tCreatePod(\"x\", \"a\", ip1, thisNode, map[string]string{\"k1\": \"v1\"}),\n\t\t\t\tApplyDP(),\n\t\t\t\tCreatePod(\"x\", \"b\", ip1, thisNode, map[string]string{\"k2\": \"v2\"}),\n\t\t\t\tApplyDP(),\n\t\t\t\tDeletePod(\"x\", \"a\", ip1, map[string]string{\"k1\": \"v1\"}),\n\t\t\t\tApplyDP(),\n\t\t\t},\n\t\t\tTestCaseMetadata: &TestCaseMetadata{\n\t\t\t\tTags: []Tag{\n\t\t\t\t\tpodCrudTag,\n\t\t\t\t\tnetpolCrudTag,\n\t\t\t\t},\n\t\t\t\tDpCfg: defaultWindowsDPCfg,\n\t\t\t\tInitialEndpoints: []*hcn.HostComputeEndpoint{\n\t\t\t\t\tdptestutils.Endpoint(endpoint1, ip1),\n\t\t\t\t},\n\t\t\t\tExpectedSetPolicies: []*hcn.SetPolicySetting{\n\t\t\t\t\tdptestutils.SetPolicy(emptySet),\n\t\t\t\t\tdptestutils.SetPolicy(allNamespaces, emptySet.GetHashedName(), nsXSet.GetHashedName()),\n\t\t\t\t\tdptestutils.SetPolicy(nsXSet, ip1),\n\t\t\t\t\t// old labels (not yet garbage collected)\n\t\t\t\t\tdptestutils.SetPolicy(podK1Set),\n\t\t\t\t\tdptestutils.SetPolicy(podK1V1Set),\n\t\t\t\t\t// new labels\n\t\t\t\t\tdptestutils.SetPolicy(podK2Set, ip1),\n\t\t\t\t\tdptestutils.SetPolicy(podK2V2Set, ip1),\n\t\t\t\t},\n\t\t\t\tExpectedEnpdointACLs: map[string][]*hnswrapper.FakeEndpointPolicy{\n\t\t\t\t\tendpoint1: {\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tID: 
\"azure-acl-x-base2\",\n\t\t\t\t\t\t\tProtocols: \"\",\n\t\t\t\t\t\t\tAction: \"Allow\",\n\t\t\t\t\t\t\tDirection: \"In\",\n\t\t\t\t\t\t\tLocalAddresses: \"\",\n\t\t\t\t\t\t\tRemoteAddresses: \"\",\n\t\t\t\t\t\t\tLocalPorts: \"\",\n\t\t\t\t\t\t\tRemotePorts: \"\",\n\t\t\t\t\t\t\tPriority: 222,\n\t\t\t\t\t\t},\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tID: \"azure-acl-x-base2\",\n\t\t\t\t\t\t\tProtocols: \"\",\n\t\t\t\t\t\t\tAction: \"Allow\",\n\t\t\t\t\t\t\tDirection: \"Out\",\n\t\t\t\t\t\t\tLocalAddresses: \"\",\n\t\t\t\t\t\t\tRemoteAddresses: \"\",\n\t\t\t\t\t\t\tLocalPorts: \"\",\n\t\t\t\t\t\t\tRemotePorts: \"\",\n\t\t\t\t\t\t\tPriority: 222,\n\t\t\t\t\t\t},\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tID: \"azure-acl-x-base2\",\n\t\t\t\t\t\t\tAction: \"Allow\",\n\t\t\t\t\t\t\tDirection: \"In\",\n\t\t\t\t\t\t\tRemoteAddresses: testNodeIP,\n\t\t\t\t\t\t\tPriority: 201,\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\t// skipping this test. See PR #1856\n\t\t\tDescription: \"Sequence 2: Policy create --> Pod A Create --> Pod B create (skip first ApplyDP())\",\n\t\t\tActions: []*Action{\n\t\t\t\tFinishBootupPhase(),\n\t\t\t\tUpdatePolicy(policyXBaseOnK1V1()),\n\t\t\t\tUpdatePolicy(policyXBase2OnK2V2()),\n\t\t\t\tCreatePod(\"x\", \"a\", ip1, thisNode, map[string]string{\"k1\": \"v1\"}),\n\t\t\t\tCreatePod(\"x\", \"b\", ip1, thisNode, map[string]string{\"k2\": \"v2\"}),\n\t\t\t\tApplyDP(),\n\t\t\t},\n\t\t\tTestCaseMetadata: &TestCaseMetadata{\n\t\t\t\tTags: []Tag{\n\t\t\t\t\tpodCrudTag,\n\t\t\t\t\tnetpolCrudTag,\n\t\t\t\t},\n\t\t\t\tDpCfg: defaultWindowsDPCfg,\n\t\t\t\tInitialEndpoints: []*hcn.HostComputeEndpoint{\n\t\t\t\t\tdptestutils.Endpoint(endpoint1, ip1),\n\t\t\t\t},\n\t\t\t\tExpectedSetPolicies: []*hcn.SetPolicySetting{\n\t\t\t\t\tdptestutils.SetPolicy(emptySet),\n\t\t\t\t\tdptestutils.SetPolicy(allNamespaces, emptySet.GetHashedName(), nsXSet.GetHashedName()),\n\t\t\t\t\tdptestutils.SetPolicy(nsXSet, ip1),\n\t\t\t\t\t// IP temporarily associated with IPSets of 
both pod A and pod B\n\t\t\t\t\t// Pod A sets\n\t\t\t\t\tdptestutils.SetPolicy(podK1Set, ip1),\n\t\t\t\t\tdptestutils.SetPolicy(podK1V1Set, ip1),\n\t\t\t\t\t// Pod B sets\n\t\t\t\t\tdptestutils.SetPolicy(podK2Set, ip1),\n\t\t\t\t\tdptestutils.SetPolicy(podK2V2Set, ip1),\n\t\t\t\t},\n\t\t\t\tExpectedEnpdointACLs: map[string][]*hnswrapper.FakeEndpointPolicy{\n\t\t\t\t\tendpoint1: {\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tID: \"azure-acl-x-base2\",\n\t\t\t\t\t\t\tProtocols: \"\",\n\t\t\t\t\t\t\tAction: \"Allow\",\n\t\t\t\t\t\t\tDirection: \"In\",\n\t\t\t\t\t\t\tLocalAddresses: \"\",\n\t\t\t\t\t\t\tRemoteAddresses: \"\",\n\t\t\t\t\t\t\tLocalPorts: \"\",\n\t\t\t\t\t\t\tRemotePorts: \"\",\n\t\t\t\t\t\t\tPriority: 222,\n\t\t\t\t\t\t},\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tID: \"azure-acl-x-base2\",\n\t\t\t\t\t\t\tProtocols: \"\",\n\t\t\t\t\t\t\tAction: \"Allow\",\n\t\t\t\t\t\t\tDirection: \"Out\",\n\t\t\t\t\t\t\tLocalAddresses: \"\",\n\t\t\t\t\t\t\tRemoteAddresses: \"\",\n\t\t\t\t\t\t\tLocalPorts: \"\",\n\t\t\t\t\t\t\tRemotePorts: \"\",\n\t\t\t\t\t\t\tPriority: 222,\n\t\t\t\t\t\t},\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tID: \"azure-acl-x-base2\",\n\t\t\t\t\t\t\tAction: \"Allow\",\n\t\t\t\t\t\t\tDirection: \"In\",\n\t\t\t\t\t\t\tRemoteAddresses: testNodeIP,\n\t\t\t\t\t\t\tPriority: 201,\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\t// skipping this test. 
See PR #1856\n\t\t\tDescription: \"Sequence 2: Policy create --> Pod A Create --> Pod B create --> Pod A cleanup (skip first two ApplyDP())\",\n\t\t\tActions: []*Action{\n\t\t\t\tFinishBootupPhase(),\n\t\t\t\tUpdatePolicy(policyXBaseOnK1V1()),\n\t\t\t\tUpdatePolicy(policyXBase2OnK2V2()),\n\t\t\t\tCreatePod(\"x\", \"a\", ip1, thisNode, map[string]string{\"k1\": \"v1\"}),\n\t\t\t\tCreatePod(\"x\", \"b\", ip1, thisNode, map[string]string{\"k2\": \"v2\"}),\n\t\t\t\tDeletePod(\"x\", \"a\", ip1, map[string]string{\"k1\": \"v1\"}),\n\t\t\t\tApplyDP(),\n\t\t\t},\n\t\t\tTestCaseMetadata: &TestCaseMetadata{\n\t\t\t\tTags: []Tag{\n\t\t\t\t\tpodCrudTag,\n\t\t\t\t\tnetpolCrudTag,\n\t\t\t\t},\n\t\t\t\tDpCfg: defaultWindowsDPCfg,\n\t\t\t\tInitialEndpoints: []*hcn.HostComputeEndpoint{\n\t\t\t\t\tdptestutils.Endpoint(endpoint1, ip1),\n\t\t\t\t},\n\t\t\t\tExpectedSetPolicies: []*hcn.SetPolicySetting{\n\t\t\t\t\tdptestutils.SetPolicy(emptySet),\n\t\t\t\t\tdptestutils.SetPolicy(allNamespaces, emptySet.GetHashedName(), nsXSet.GetHashedName()),\n\t\t\t\t\tdptestutils.SetPolicy(nsXSet, ip1),\n\t\t\t\t\t// old labels (not yet garbage collected)\n\t\t\t\t\tdptestutils.SetPolicy(podK1Set),\n\t\t\t\t\tdptestutils.SetPolicy(podK1V1Set),\n\t\t\t\t\t// new labels\n\t\t\t\t\tdptestutils.SetPolicy(podK2Set, ip1),\n\t\t\t\t\tdptestutils.SetPolicy(podK2V2Set, ip1),\n\t\t\t\t},\n\t\t\t\tExpectedEnpdointACLs: map[string][]*hnswrapper.FakeEndpointPolicy{\n\t\t\t\t\tendpoint1: {\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tID: \"azure-acl-x-base2\",\n\t\t\t\t\t\t\tProtocols: \"\",\n\t\t\t\t\t\t\tAction: \"Allow\",\n\t\t\t\t\t\t\tDirection: \"In\",\n\t\t\t\t\t\t\tLocalAddresses: \"\",\n\t\t\t\t\t\t\tRemoteAddresses: \"\",\n\t\t\t\t\t\t\tLocalPorts: \"\",\n\t\t\t\t\t\t\tRemotePorts: \"\",\n\t\t\t\t\t\t\tPriority: 222,\n\t\t\t\t\t\t},\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tID: \"azure-acl-x-base2\",\n\t\t\t\t\t\t\tProtocols: \"\",\n\t\t\t\t\t\t\tAction: \"Allow\",\n\t\t\t\t\t\t\tDirection: \"Out\",\n\t\t\t\t\t\t\tLocalAddresses: 
\"\",\n\t\t\t\t\t\t\tRemoteAddresses: \"\",\n\t\t\t\t\t\t\tLocalPorts: \"\",\n\t\t\t\t\t\t\tRemotePorts: \"\",\n\t\t\t\t\t\t\tPriority: 222,\n\t\t\t\t\t\t},\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tID: \"azure-acl-x-base2\",\n\t\t\t\t\t\t\tAction: \"Allow\",\n\t\t\t\t\t\t\tDirection: \"In\",\n\t\t\t\t\t\t\tRemoteAddresses: testNodeIP,\n\t\t\t\t\t\t\tPriority: 201,\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\n\totherTests := []*SerialTestCase{\n\t\t{\n\t\t\tDescription: \"ignore Pod update if added then deleted before ApplyDP()\",\n\t\t\tActions: []*Action{\n\t\t\t\tFinishBootupPhase(),\n\t\t\t\tUpdatePolicy(policyXBaseOnK1V1()),\n\t\t\t\tCreatePod(\"x\", \"a\", ip1, thisNode, map[string]string{\"k1\": \"v1\"}),\n\t\t\t\tDeletePod(\"x\", \"a\", ip1, map[string]string{\"k1\": \"v1\"}),\n\t\t\t\tApplyDP(),\n\t\t\t},\n\t\t\tTestCaseMetadata: &TestCaseMetadata{\n\t\t\t\tTags: []Tag{\n\t\t\t\t\tpodCrudTag,\n\t\t\t\t\tnetpolCrudTag,\n\t\t\t\t},\n\t\t\t\tDpCfg: defaultWindowsDPCfg,\n\t\t\t\tInitialEndpoints: []*hcn.HostComputeEndpoint{\n\t\t\t\t\tdptestutils.Endpoint(endpoint1, ip1),\n\t\t\t\t},\n\t\t\t\tExpectedSetPolicies: []*hcn.SetPolicySetting{\n\t\t\t\t\tdptestutils.SetPolicy(emptySet),\n\t\t\t\t\tdptestutils.SetPolicy(allNamespaces, emptySet.GetHashedName(), nsXSet.GetHashedName()),\n\t\t\t\t\tdptestutils.SetPolicy(nsXSet),\n\t\t\t\t\tdptestutils.SetPolicy(podK1Set),\n\t\t\t\t\tdptestutils.SetPolicy(podK1V1Set),\n\t\t\t\t},\n\t\t\t\tExpectedEnpdointACLs: map[string][]*hnswrapper.FakeEndpointPolicy{\n\t\t\t\t\tendpoint1: {},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\t// doesn't really enforce behavior in DP, but one could look at logs to make sure we don't make a reset ACL SysCall into HNS\n\t\t\tDescription: \"ignore Pod delete for deleted endpoint\",\n\t\t\tActions: []*Action{\n\t\t\t\tFinishBootupPhase(),\n\t\t\t\tUpdatePolicy(policyXBaseOnK1V1()),\n\t\t\t\tCreatePod(\"x\", \"a\", ip1, thisNode, map[string]string{\"k1\": 
\"v1\"}),\n\t\t\t\tApplyDP(),\n\t\t\t\tDeleteEndpoint(endpoint1),\n\t\t\t\tDeletePod(\"x\", \"a\", ip1, map[string]string{\"k1\": \"v1\"}),\n\t\t\t\tApplyDP(),\n\t\t\t},\n\t\t\tTestCaseMetadata: &TestCaseMetadata{\n\t\t\t\tTags: []Tag{\n\t\t\t\t\tpodCrudTag,\n\t\t\t\t\tnetpolCrudTag,\n\t\t\t\t},\n\t\t\t\tDpCfg: defaultWindowsDPCfg,\n\t\t\t\tInitialEndpoints: []*hcn.HostComputeEndpoint{\n\t\t\t\t\tdptestutils.Endpoint(endpoint1, ip1),\n\t\t\t\t},\n\t\t\t\tExpectedSetPolicies: []*hcn.SetPolicySetting{\n\t\t\t\t\tdptestutils.SetPolicy(emptySet),\n\t\t\t\t\tdptestutils.SetPolicy(allNamespaces, emptySet.GetHashedName(), nsXSet.GetHashedName()),\n\t\t\t\t\tdptestutils.SetPolicy(nsXSet),\n\t\t\t\t\tdptestutils.SetPolicy(podK1Set),\n\t\t\t\t\tdptestutils.SetPolicy(podK1V1Set),\n\t\t\t\t},\n\t\t\t\tExpectedEnpdointACLs: nil,\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\t// doesn't really enforce behavior in DP, but one could look at logs to make sure we don't make a reset ACL SysCall into HNS\n\t\t\tDescription: \"ignore Pod delete for deleted endpoint (skip first ApplyDP())\",\n\t\t\tActions: []*Action{\n\t\t\t\tFinishBootupPhase(),\n\t\t\t\tUpdatePolicy(policyXBaseOnK1V1()),\n\t\t\t\tCreatePod(\"x\", \"a\", ip1, thisNode, map[string]string{\"k1\": \"v1\"}),\n\t\t\t\tDeleteEndpoint(endpoint1),\n\t\t\t\tDeletePod(\"x\", \"a\", ip1, map[string]string{\"k1\": \"v1\"}),\n\t\t\t\tApplyDP(),\n\t\t\t},\n\t\t\tTestCaseMetadata: &TestCaseMetadata{\n\t\t\t\tTags: []Tag{\n\t\t\t\t\tpodCrudTag,\n\t\t\t\t\tnetpolCrudTag,\n\t\t\t\t},\n\t\t\t\tDpCfg: defaultWindowsDPCfg,\n\t\t\t\tInitialEndpoints: []*hcn.HostComputeEndpoint{\n\t\t\t\t\tdptestutils.Endpoint(endpoint1, ip1),\n\t\t\t\t},\n\t\t\t\tExpectedSetPolicies: []*hcn.SetPolicySetting{\n\t\t\t\t\tdptestutils.SetPolicy(emptySet),\n\t\t\t\t\tdptestutils.SetPolicy(allNamespaces, emptySet.GetHashedName(), 
nsXSet.GetHashedName()),\n\t\t\t\t\tdptestutils.SetPolicy(nsXSet),\n\t\t\t\t\tdptestutils.SetPolicy(podK1Set),\n\t\t\t\t\tdptestutils.SetPolicy(podK1V1Set),\n\t\t\t\t},\n\t\t\t\tExpectedEnpdointACLs: nil,\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\t// doesn't really enforce behavior in DP, but one could look at logs to make sure we don't make an add ACL SysCall into HNS\"\n\t\t\tDescription: \"ignore Pod update when there's no corresponding endpoint\",\n\t\t\tActions: []*Action{\n\t\t\t\tFinishBootupPhase(),\n\t\t\t\tUpdatePolicy(policyXBaseOnK1V1()),\n\t\t\t\tCreatePod(\"x\", \"a\", ip1, thisNode, map[string]string{\"k1\": \"v1\"}),\n\t\t\t\tDeleteEndpoint(endpoint1),\n\t\t\t\tApplyDP(),\n\t\t\t},\n\t\t\tTestCaseMetadata: &TestCaseMetadata{\n\t\t\t\tTags: []Tag{\n\t\t\t\t\tpodCrudTag,\n\t\t\t\t\tnetpolCrudTag,\n\t\t\t\t},\n\t\t\t\tDpCfg: defaultWindowsDPCfg,\n\t\t\t\tInitialEndpoints: []*hcn.HostComputeEndpoint{\n\t\t\t\t\tdptestutils.Endpoint(endpoint1, ip1),\n\t\t\t\t},\n\t\t\t\tExpectedSetPolicies: []*hcn.SetPolicySetting{\n\t\t\t\t\tdptestutils.SetPolicy(emptySet),\n\t\t\t\t\tdptestutils.SetPolicy(allNamespaces, emptySet.GetHashedName(), nsXSet.GetHashedName()),\n\t\t\t\t\tdptestutils.SetPolicy(nsXSet, ip1),\n\t\t\t\t\tdptestutils.SetPolicy(podK1Set, ip1),\n\t\t\t\t\tdptestutils.SetPolicy(podK1V1Set, ip1),\n\t\t\t\t},\n\t\t\t\tExpectedEnpdointACLs: nil,\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tDescription: \"two endpoints, one with policy, one without\",\n\t\t\tActions: []*Action{\n\t\t\t\tFinishBootupPhase(),\n\t\t\t\tUpdatePolicy(policyXBase2OnK2V2()),\n\t\t\t\tCreatePod(\"x\", \"a\", ip1, thisNode, map[string]string{\"k1\": \"v1\"}),\n\t\t\t\tCreateEndpoint(endpoint2, ip2),\n\t\t\t\tCreatePod(\"x\", \"b\", ip2, thisNode, map[string]string{\"k2\": \"v2\"}),\n\t\t\t\tApplyDP(),\n\t\t\t},\n\t\t\tTestCaseMetadata: &TestCaseMetadata{\n\t\t\t\tTags: []Tag{\n\t\t\t\t\tpodCrudTag,\n\t\t\t\t\tnetpolCrudTag,\n\t\t\t\t},\n\t\t\t\tDpCfg: defaultWindowsDPCfg,\n\t\t\t\tInitialEndpoints: 
[]*hcn.HostComputeEndpoint{\n\t\t\t\t\tdptestutils.Endpoint(endpoint1, ip1),\n\t\t\t\t},\n\t\t\t\tExpectedSetPolicies: []*hcn.SetPolicySetting{\n\t\t\t\t\tdptestutils.SetPolicy(emptySet),\n\t\t\t\t\tdptestutils.SetPolicy(allNamespaces, emptySet.GetHashedName(), nsXSet.GetHashedName()),\n\t\t\t\t\tdptestutils.SetPolicy(nsXSet, ip1, ip2),\n\t\t\t\t\tdptestutils.SetPolicy(podK1Set, ip1),\n\t\t\t\t\tdptestutils.SetPolicy(podK1V1Set, ip1),\n\t\t\t\t\tdptestutils.SetPolicy(podK2Set, ip2),\n\t\t\t\t\tdptestutils.SetPolicy(podK2V2Set, ip2),\n\t\t\t\t},\n\t\t\t\tExpectedEnpdointACLs: map[string][]*hnswrapper.FakeEndpointPolicy{\n\t\t\t\t\tendpoint1: {},\n\t\t\t\t\tendpoint2: {\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tID: \"azure-acl-x-base2\",\n\t\t\t\t\t\t\tProtocols: \"\",\n\t\t\t\t\t\t\tAction: \"Allow\",\n\t\t\t\t\t\t\tDirection: \"In\",\n\t\t\t\t\t\t\tLocalAddresses: \"\",\n\t\t\t\t\t\t\tRemoteAddresses: \"\",\n\t\t\t\t\t\t\tLocalPorts: \"\",\n\t\t\t\t\t\t\tRemotePorts: \"\",\n\t\t\t\t\t\t\tPriority: 222,\n\t\t\t\t\t\t},\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tID: \"azure-acl-x-base2\",\n\t\t\t\t\t\t\tProtocols: \"\",\n\t\t\t\t\t\t\tAction: \"Allow\",\n\t\t\t\t\t\t\tDirection: \"Out\",\n\t\t\t\t\t\t\tLocalAddresses: \"\",\n\t\t\t\t\t\t\tRemoteAddresses: \"\",\n\t\t\t\t\t\t\tLocalPorts: \"\",\n\t\t\t\t\t\t\tRemotePorts: \"\",\n\t\t\t\t\t\t\tPriority: 222,\n\t\t\t\t\t\t},\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tID: \"azure-acl-x-base2\",\n\t\t\t\t\t\t\tAction: \"Allow\",\n\t\t\t\t\t\t\tDirection: \"In\",\n\t\t\t\t\t\t\tRemoteAddresses: testNodeIP,\n\t\t\t\t\t\t\tPriority: 201,\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\n\tallTests := sequence1Tests\n\tallTests = append(allTests, sequence2Tests...)\n\t// allTests = append(allTests, podAssignmentSequence3Tests()...)\n\t// make golint happy\n\t_ = podAssignmentSequence3Tests()\n\tallTests = append(allTests, otherTests...)\n\treturn allTests\n}", "func TestConsulStateDriverWatchAllStateCreate(t *testing.T) {\n\tdriver := 
setupConsulDriver(t)\n\tcommonTestStateDriverWatchAllStateCreate(t, driver)\n}", "func (factory *DeploymentCancellationControllerFactory) Create() controller.RunnableController {\n\tdeploymentLW := &deployutil.ListWatcherImpl{\n\t\t// TODO: Investigate specifying annotation field selectors to fetch only 'deployments'\n\t\t// Currently field selectors are not supported for replication controllers\n\t\tListFunc: func() (runtime.Object, error) {\n\t\t\treturn factory.KubeClient.ReplicationControllers(kapi.NamespaceAll).List(labels.Everything())\n\t\t},\n\t\tWatchFunc: func(resourceVersion string) (watch.Interface, error) {\n\t\t\treturn factory.KubeClient.ReplicationControllers(kapi.NamespaceAll).Watch(labels.Everything(), fields.Everything(), resourceVersion)\n\t\t},\n\t}\n\tdeploymentQueue := cache.NewFIFO(cache.MetaNamespaceKeyFunc)\n\tcache.NewReflector(deploymentLW, &kapi.ReplicationController{}, deploymentQueue, 2*time.Minute).Run()\n\n\teventBroadcaster := record.NewBroadcaster()\n\teventBroadcaster.StartRecordingToSink(factory.KubeClient.Events(\"\"))\n\n\tdeployController := &DeploymentCancellationController{\n\t\tpodClient: &podClientImpl{\n\t\t\tgetPodFunc: func(namespace, name string) (*kapi.Pod, error) {\n\t\t\t\treturn factory.KubeClient.Pods(namespace).Get(name)\n\t\t\t},\n\t\t\tupdatePodFunc: func(namespace string, pod *kapi.Pod) (*kapi.Pod, error) {\n\t\t\t\treturn factory.KubeClient.Pods(namespace).Update(pod)\n\t\t\t},\n\t\t},\n\t\trecorder: eventBroadcaster.NewRecorder(kapi.EventSource{Component: \"deployer\"}),\n\t}\n\n\treturn &controller.RetryController{\n\t\tQueue: deploymentQueue,\n\t\tRetryManager: controller.NewQueueRetryManager(\n\t\t\tdeploymentQueue,\n\t\t\tcache.MetaNamespaceKeyFunc,\n\t\t\tfunc(obj interface{}, err error, retries controller.Retry) bool { return retries.Count < 1 },\n\t\t\tkutil.NewTokenBucketRateLimiter(1, 10),\n\t\t),\n\t\tHandle: func(obj interface{}) error {\n\t\t\tdeployment := 
obj.(*kapi.ReplicationController)\n\t\t\treturn deployController.Handle(deployment)\n\t\t},\n\t}\n}", "func CreatePods(f *framework.Framework, appName string, ns string, labels map[string]string, spec v1.PodSpec, maxCount int, tuning *TuningSetType) {\n\tfor i := 0; i < maxCount; i++ {\n\t\tframework.Logf(\"%v/%v : Creating pod\", i+1, maxCount)\n\t\t// Retry on pod creation failure\n\t\tfor retryCount := 0; retryCount < maxRetries; retryCount++ {\n\t\t\t_, err := f.ClientSet.Core().Pods(ns).Create(&v1.Pod{\n\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\tName: fmt.Sprintf(appName+\"-pod-%v\", i),\n\t\t\t\t\tNamespace: ns,\n\t\t\t\t\tLabels: labels,\n\t\t\t\t},\n\t\t\t\tSpec: spec,\n\t\t\t})\n\t\t\tif err == nil {\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tframework.ExpectNoError(err)\n\t\t}\n\t\tif tuning != nil {\n\t\t\t// If a rate limit has been defined we wait for N ms between creation\n\t\t\tif tuning.Pods.RateLimit.Delay != 0 {\n\t\t\t\tframework.Logf(\"Sleeping %d ms between podcreation.\", tuning.Pods.RateLimit.Delay)\n\t\t\t\ttime.Sleep(tuning.Pods.RateLimit.Delay * time.Millisecond)\n\t\t\t}\n\t\t\t// If a stepping tuningset has been defined in the config, we wait for the step of pods to be created, and pause\n\t\t\tif tuning.Pods.Stepping.StepSize != 0 && (i+1)%tuning.Pods.Stepping.StepSize == 0 {\n\t\t\t\tverifyRunning := f.NewClusterVerification(\n\t\t\t\t\t&v1.Namespace{\n\t\t\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\t\t\tName: ns,\n\t\t\t\t\t\t},\n\t\t\t\t\t\tStatus: v1.NamespaceStatus{},\n\t\t\t\t\t},\n\t\t\t\t\tframework.PodStateVerification{\n\t\t\t\t\t\tSelectors: labels,\n\t\t\t\t\t\tValidPhases: []v1.PodPhase{v1.PodRunning},\n\t\t\t\t\t},\n\t\t\t\t)\n\n\t\t\t\tpods, err := verifyRunning.WaitFor(i+1, tuning.Pods.Stepping.Timeout*time.Second)\n\t\t\t\tif err != nil {\n\t\t\t\t\tframework.Failf(\"Error in wait... 
%v\", err)\n\t\t\t\t} else if len(pods) < i+1 {\n\t\t\t\t\tframework.Failf(\"Only got %v out of %v\", len(pods), i+1)\n\t\t\t\t}\n\n\t\t\t\tframework.Logf(\"We have created %d pods and are now sleeping for %d seconds\", i+1, tuning.Pods.Stepping.Pause)\n\t\t\t\ttime.Sleep(tuning.Pods.Stepping.Pause * time.Second)\n\t\t\t}\n\t\t}\n\t}\n}", "func TestCoilSetup() {\n\tIt(\"should be deployed successfully\", func() {\n\t\tBy(\"preparing etcd user and certificates\")\n\t\texecSafeAt(boot0, \"ckecli\", \"etcd\", \"user-add\", \"coil\", \"/coil/\")\n\n\t\t_, stderr, err := execAt(boot0, \"ckecli\", \"etcd\", \"issue\", \"coil\", \"--output\", \"file\")\n\t\tExpect(err).ShouldNot(HaveOccurred(), \"stderr=%s\", stderr)\n\n\t\t_, stderr, err = execAt(boot0, \"kubectl\", \"--namespace=kube-system\", \"create\", \"secret\",\n\t\t\t\"generic\", \"coil-etcd-secrets\",\n\t\t\t\"--from-file=etcd-ca.crt\",\n\t\t\t\"--from-file=etcd-coil.crt\",\n\t\t\t\"--from-file=etcd-coil.key\")\n\t\tExpect(err).ShouldNot(HaveOccurred(), \"stderr=%s\", stderr)\n\n\t\tBy(\"waiting for coil-node DaemonSet and coil-controllers Deployment\")\n\t\tcheckCoilNodeDaemonSet()\n\t\tcheckCoilControllersDeployment()\n\n\t\tEventually(func() error {\n\t\t\tstdout, _, err := execAt(boot0, \"kubectl\", \"get\", \"nodes\", \"-o\", \"json\")\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\tvar nl corev1.NodeList\n\t\t\terr = json.Unmarshal(stdout, &nl)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\tOUTER:\n\t\t\tfor _, n := range nl.Items {\n\t\t\t\tfor _, cond := range n.Status.Conditions {\n\t\t\t\t\tif cond.Type != corev1.NodeReady {\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t}\n\t\t\t\t\tif cond.Status != corev1.ConditionTrue {\n\t\t\t\t\t\treturn fmt.Errorf(\"node %s is not ready\", n.Name)\n\t\t\t\t\t}\n\t\t\t\t\tcontinue OUTER\n\t\t\t\t}\n\n\t\t\t\treturn fmt.Errorf(\"node %s has no readiness status\", n.Name)\n\t\t\t}\n\t\t\treturn nil\n\t\t}).Should(Succeed())\n\n\t\tBy(\"waiting for 
kube-system/cke-etcd getting created\")\n\t\tEventually(func() error {\n\t\t\t_, _, err := execAt(boot0, \"kubectl\", \"--namespace=kube-system\", \"get\", \"endpoints/cke-etcd\")\n\t\t\treturn err\n\t\t}).Should(Succeed())\n\n\t\tBy(\"creating IP address pool\")\n\t\tstdout, stderr, err := execAt(boot0, \"kubectl\", \"--namespace=kube-system\", \"get\", \"pods\", \"--selector=app.kubernetes.io/name=coil-controllers\", \"-o=json\")\n\t\tExpect(err).NotTo(HaveOccurred(), \"stdout=%s, stderr=%s\", stdout, stderr)\n\n\t\tpodList := new(corev1.PodList)\n\t\terr = json.Unmarshal(stdout, podList)\n\t\tExpect(err).NotTo(HaveOccurred())\n\t\tExpect(len(podList.Items)).To(Equal(1))\n\t\tpodName := podList.Items[0].Name\n\n\t\t// create non-default pools first to prevent misuse of default pool\n\t\t_, stderr, err = execAt(boot0, \"kubectl\", \"--namespace=kube-system\", \"exec\", podName, \"/coilctl\", \"pool\", \"create\", \"internet-egress\", \"172.19.0.0/28\", \"0\")\n\t\tExpect(err).NotTo(HaveOccurred(), \"stderr=%s\", stderr)\n\t\t_, stderr, err = execAt(boot0, \"kubectl\", \"--namespace=kube-system\", \"exec\", podName, \"/coilctl\", \"pool\", \"create\", \"default\", \"10.64.0.0/14\", \"5\")\n\t\tExpect(err).NotTo(HaveOccurred(), \"stderr=%s\", stderr)\n\t})\n}", "func startServerAndControllers(t *testing.T) (\n\t*kubefake.Clientset,\n\twatch.Interface,\n\tclustopclientset.Interface,\n\tcapiclientset.Interface,\n\t*capifakeclientset.Clientset,\n\tfunc()) {\n\n\t// create a fake kube client\n\tfakePtr := clientgotesting.Fake{}\n\tscheme := runtime.NewScheme()\n\tcodecs := serializer.NewCodecFactory(scheme)\n\tmetav1.AddToGroupVersion(scheme, schema.GroupVersion{Version: \"v1\"})\n\tkubefake.AddToScheme(scheme)\n\tobjectTracker := clientgotesting.NewObjectTracker(scheme, codecs.UniversalDecoder())\n\tkubeWatch := watch.NewRaceFreeFake()\n\t// Add a reactor for sending watch events when a job is modified\n\tobjectReaction := 
clientgotesting.ObjectReaction(objectTracker)\n\tfakePtr.AddReactor(\"*\", \"jobs\", func(action clientgotesting.Action) (bool, runtime.Object, error) {\n\t\tvar deletedObj runtime.Object\n\t\tif action, ok := action.(clientgotesting.DeleteActionImpl); ok {\n\t\t\tdeletedObj, _ = objectTracker.Get(action.GetResource(), action.GetNamespace(), action.GetName())\n\t\t}\n\t\thandled, obj, err := objectReaction(action)\n\t\tswitch action.(type) {\n\t\tcase clientgotesting.CreateActionImpl:\n\t\t\tkubeWatch.Add(obj)\n\t\tcase clientgotesting.UpdateActionImpl:\n\t\t\tkubeWatch.Modify(obj)\n\t\tcase clientgotesting.DeleteActionImpl:\n\t\t\tif deletedObj != nil {\n\t\t\t\tkubeWatch.Delete(deletedObj)\n\t\t\t}\n\t\t}\n\t\treturn handled, obj, err\n\t})\n\tfakePtr.AddWatchReactor(\"*\", clientgotesting.DefaultWatchReactor(kubeWatch, nil))\n\t// Create actual fake kube client\n\tfakeKubeClient := &kubefake.Clientset{Fake: fakePtr}\n\n\t// start the cluster-operator api server\n\tapiServerClientConfig, shutdownServer := servertesting.StartTestServerOrDie(t)\n\n\t// create a cluster-operator client\n\tclustopClient, err := clustopclientset.NewForConfig(apiServerClientConfig)\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error: %v\", err)\n\t}\n\n\t// create a cluster-api client\n\tcapiClient, err := capiclientset.NewForConfig(apiServerClientConfig)\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error: %v\", err)\n\t}\n\n\tfakeCAPIClient := &capifakeclientset.Clientset{}\n\n\t// create informers\n\tkubeInformerFactory := kubeinformers.NewSharedInformerFactory(fakeKubeClient, 10*time.Second)\n\tbatchSharedInformers := kubeInformerFactory.Batch().V1()\n\tclustopInformerFactory := clustopinformers.NewSharedInformerFactory(clustopClient, 10*time.Second)\n\tcapiInformerFactory := capiinformers.NewSharedInformerFactory(capiClient, 10*time.Second)\n\tcapiSharedInformers := capiInformerFactory.Cluster().V1alpha1()\n\n\t// create controllers\n\tstopCh := make(chan 
struct{})\n\tt.Log(\"controller start\")\n\t// Note that controllers must be created prior to starting the informers.\n\t// Otherwise, the controllers will not get the initial sync from the\n\t// informer and will time out waiting to sync.\n\trunControllers := []func(){\n\t\t// infra\n\t\tfunc() func() {\n\t\t\tcontroller := infracontroller.NewController(\n\t\t\t\tcapiSharedInformers.Clusters(),\n\t\t\t\tbatchSharedInformers.Jobs(),\n\t\t\t\tfakeKubeClient,\n\t\t\t\tclustopClient,\n\t\t\t\tcapiClient,\n\t\t\t)\n\t\t\treturn func() { controller.Run(1, stopCh) }\n\t\t}(),\n\t\t// master\n\t\tfunc() func() {\n\t\t\tcontroller := mastercontroller.NewController(\n\t\t\t\tcapiSharedInformers.Clusters(),\n\t\t\t\tcapiSharedInformers.MachineSets(),\n\t\t\t\tbatchSharedInformers.Jobs(),\n\t\t\t\tfakeKubeClient,\n\t\t\t\tclustopClient,\n\t\t\t\tcapiClient,\n\t\t\t)\n\t\t\treturn func() { controller.Run(1, stopCh) }\n\t\t}(),\n\t\t// components\n\t\tfunc() func() {\n\t\t\tcontroller := componentscontroller.NewController(\n\t\t\t\tcapiSharedInformers.Clusters(),\n\t\t\t\tcapiSharedInformers.MachineSets(),\n\t\t\t\tbatchSharedInformers.Jobs(),\n\t\t\t\tfakeKubeClient,\n\t\t\t\tclustopClient,\n\t\t\t\tcapiClient,\n\t\t\t)\n\t\t\treturn func() { controller.Run(1, stopCh) }\n\t\t}(),\n\t\t// nodeconfig\n\t\tfunc() func() {\n\t\t\tcontroller := nodeconfigcontroller.NewController(\n\t\t\t\tcapiSharedInformers.Clusters(),\n\t\t\t\tcapiSharedInformers.MachineSets(),\n\t\t\t\tbatchSharedInformers.Jobs(),\n\t\t\t\tfakeKubeClient,\n\t\t\t\tclustopClient,\n\t\t\t\tcapiClient,\n\t\t\t)\n\t\t\treturn func() { controller.Run(1, stopCh) }\n\t\t}(),\n\t\t// deployclusterapi\n\t\tfunc() func() {\n\t\t\tcontroller := deployclusterapicontroller.NewController(\n\t\t\t\tcapiSharedInformers.Clusters(),\n\t\t\t\tcapiSharedInformers.MachineSets(),\n\t\t\t\tbatchSharedInformers.Jobs(),\n\t\t\t\tfakeKubeClient,\n\t\t\t\tclustopClient,\n\t\t\t\tcapiClient,\n\t\t\t)\n\t\t\treturn func() { 
controller.Run(1, stopCh) }\n\t\t}(),\n\t\t// awselb\n\t\tfunc() func() {\n\t\t\tcontroller := awselb.NewController(\n\t\t\t\tcapiSharedInformers.Machines(),\n\t\t\t\tfakeKubeClient,\n\t\t\t\tclustopClient,\n\t\t\t\tcapiClient,\n\t\t\t)\n\t\t\treturn func() { controller.Run(1, stopCh) }\n\t\t}(),\n\t}\n\tvar wg sync.WaitGroup\n\twg.Add(len(runControllers))\n\tfor _, run := range runControllers {\n\t\tgo func(r func()) {\n\t\t\tdefer wg.Done()\n\t\t\tr()\n\t\t}(run)\n\t}\n\n\tt.Log(\"informers start\")\n\tkubeInformerFactory.Start(stopCh)\n\tclustopInformerFactory.Start(stopCh)\n\tcapiInformerFactory.Start(stopCh)\n\n\tshutdown := func() {\n\t\t// Shut down controller\n\t\tclose(stopCh)\n\t\t// Wait for all controller to stop\n\t\twg.Wait()\n\t\t// Shut down api server\n\t\tshutdownServer()\n\t}\n\n\treturn fakeKubeClient, kubeWatch, clustopClient, capiClient, fakeCAPIClient, shutdown\n}", "func (m *MockMeshServiceControllerFactory) Build(mgr mc_manager.AsyncManager, clusterName string) (controller.MeshServiceController, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"Build\", mgr, clusterName)\n\tret0, _ := ret[0].(controller.MeshServiceController)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func TestCommitLogActiveLogsConcurrency(t *testing.T) {\n\tvar (\n\t\topts, _ = newTestOptions(t, overrides{\n\t\t\tstrategy: StrategyWriteBehind,\n\t\t})\n\t\tnumFilesRequired = 10\n\t)\n\n\tdefer cleanup(t, opts)\n\n\tvar (\n\t\tdoneCh = make(chan struct{})\n\t\tcommitLog = newTestCommitLog(t, opts)\n\t)\n\n\t// One goroutine continuously writing.\n\tgo func() {\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase <-doneCh:\n\t\t\t\treturn\n\t\t\tdefault:\n\t\t\t\ttime.Sleep(time.Millisecond)\n\t\t\t\terr := commitLog.Write(\n\t\t\t\t\tcontext.NewBackground(),\n\t\t\t\t\ttestSeries(t, opts, 0, \"foo.bar\", testTags1, 127),\n\t\t\t\t\tts.Datapoint{},\n\t\t\t\t\txtime.Second,\n\t\t\t\t\tnil)\n\t\t\t\tif err == errCommitLogClosed {\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tif 
err == ErrCommitLogQueueFull {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tif err != nil {\n\t\t\t\t\tpanic(err)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}()\n\n\t// One goroutine continuously rotating the logs.\n\tgo func() {\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase <-doneCh:\n\t\t\t\treturn\n\t\t\tdefault:\n\t\t\t\ttime.Sleep(time.Millisecond)\n\t\t\t\t_, err := commitLog.RotateLogs()\n\t\t\t\tif err == errCommitLogClosed {\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tif err != nil {\n\t\t\t\t\tpanic(err)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}()\n\n\t// One goroutine continuously checking active logs.\n\tgo func() {\n\t\tvar (\n\t\t\tlastSeenFile string\n\t\t\tnumFilesSeen int\n\t\t)\n\t\tfor numFilesSeen < numFilesRequired {\n\t\t\ttime.Sleep(100 * time.Millisecond)\n\t\t\tlogs, err := commitLog.ActiveLogs()\n\t\t\tif err != nil {\n\t\t\t\tpanic(err)\n\t\t\t}\n\t\t\trequire.Equal(t, 2, len(logs))\n\t\t\tif logs[0].FilePath != lastSeenFile {\n\t\t\t\tlastSeenFile = logs[0].FilePath\n\t\t\t\tnumFilesSeen++\n\t\t\t}\n\t\t}\n\t\tclose(doneCh)\n\t}()\n\n\t<-doneCh\n\n\trequire.NoError(t, commitLog.Close())\n}", "func ecsPodTests() map[string]func(ctx context.Context, t *testing.T, pc cocoa.ECSPodCreator, c *ECSClient, smc *SecretsManagerClient) {\n\tmakeSecretEnvVar := func(t *testing.T) *cocoa.EnvironmentVariable {\n\t\treturn cocoa.NewEnvironmentVariable().\n\t\t\tSetName(t.Name()).\n\t\t\tSetSecretOptions(*cocoa.NewSecretOptions().\n\t\t\t\tSetName(t.Name()).\n\t\t\t\tSetValue(utility.RandomString()).\n\t\t\t\tSetOwned(true))\n\t}\n\tmakeContainerDef := func(t *testing.T) *cocoa.ECSContainerDefinition {\n\t\treturn cocoa.NewECSContainerDefinition().\n\t\t\tSetImage(\"image\").\n\t\t\tSetMemoryMB(128).\n\t\t\tSetCPU(128).\n\t\t\tSetName(\"container\")\n\t}\n\n\tmakePodCreationOpts := func(t *testing.T) *cocoa.ECSPodCreationOptions {\n\t\treturn 
cocoa.NewECSPodCreationOptions().\n\t\t\tSetName(testutil.NewTaskDefinitionFamily(t.Name())).\n\t\t\tSetMemoryMB(128).\n\t\t\tSetCPU(128).\n\t\t\tSetTaskRole(testutil.TaskRole()).\n\t\t\tSetExecutionRole(testutil.ExecutionRole()).\n\t\t\tSetExecutionOptions(*cocoa.NewECSPodExecutionOptions().\n\t\t\t\tSetCluster(testutil.ECSClusterName()))\n\t}\n\n\tcheckPodDeleted := func(ctx context.Context, t *testing.T, p cocoa.ECSPod, c cocoa.ECSClient, smc cocoa.SecretsManagerClient, opts cocoa.ECSPodCreationOptions) {\n\t\tstat := p.StatusInfo()\n\t\tassert.Equal(t, cocoa.StatusDeleted, stat.Status)\n\n\t\tres := p.Resources()\n\n\t\tdescribeTaskDef, err := c.DescribeTaskDefinition(ctx, &awsECS.DescribeTaskDefinitionInput{\n\t\t\tTaskDefinition: res.TaskDefinition.ID,\n\t\t})\n\t\trequire.NoError(t, err)\n\t\trequire.NotZero(t, describeTaskDef.TaskDefinition)\n\t\tassert.Equal(t, utility.FromStringPtr(opts.Name), utility.FromStringPtr(describeTaskDef.TaskDefinition.Family))\n\n\t\tdescribeTasks, err := c.DescribeTasks(ctx, &awsECS.DescribeTasksInput{\n\t\t\tCluster: res.Cluster,\n\t\t\tTasks: []*string{res.TaskID},\n\t\t})\n\t\trequire.NoError(t, err)\n\t\tassert.Empty(t, describeTasks.Failures)\n\t\trequire.Len(t, describeTasks.Tasks, 1)\n\t\tassert.Equal(t, awsECS.DesiredStatusStopped, utility.FromStringPtr(describeTasks.Tasks[0].LastStatus))\n\n\t\tfor _, containerRes := range res.Containers {\n\t\t\tfor _, s := range containerRes.Secrets {\n\t\t\t\t_, err := smc.DescribeSecret(ctx, &secretsmanager.DescribeSecretInput{\n\t\t\t\t\tSecretId: s.Name,\n\t\t\t\t})\n\t\t\t\tassert.NoError(t, err)\n\t\t\t\t_, err = smc.GetSecretValue(ctx, &secretsmanager.GetSecretValueInput{\n\t\t\t\t\tSecretId: s.Name,\n\t\t\t\t})\n\t\t\t\tassert.Error(t, err)\n\t\t\t}\n\t\t}\n\t}\n\n\treturn map[string]func(ctx context.Context, t *testing.T, pc cocoa.ECSPodCreator, c *ECSClient, smc *SecretsManagerClient){\n\t\t\"StopIsIdempotentWhenItFails\": func(ctx context.Context, t *testing.T, pc 
cocoa.ECSPodCreator, c *ECSClient, smc *SecretsManagerClient) {\n\t\t\topts := makePodCreationOpts(t).AddContainerDefinitions(*makeContainerDef(t))\n\t\t\tp, err := pc.CreatePod(ctx, opts)\n\t\t\trequire.NoError(t, err)\n\n\t\t\tc.StopTaskError = errors.New(\"fake error\")\n\n\t\t\trequire.Error(t, p.Stop(ctx))\n\n\t\t\tstat := p.StatusInfo()\n\t\t\tassert.Equal(t, cocoa.StatusStarting, stat.Status)\n\n\t\t\tc.StopTaskError = nil\n\n\t\t\trequire.NoError(t, p.Stop(ctx))\n\t\t\tstat = p.StatusInfo()\n\t\t\tassert.Equal(t, cocoa.StatusStopped, stat.Status)\n\t\t},\n\t\t\"DeleteIsIdempotentWhenStoppingTaskFails\": func(ctx context.Context, t *testing.T, pc cocoa.ECSPodCreator, c *ECSClient, smc *SecretsManagerClient) {\n\t\t\topts := makePodCreationOpts(t).AddContainerDefinitions(\n\t\t\t\t*makeContainerDef(t).AddEnvironmentVariables(\n\t\t\t\t\t*makeSecretEnvVar(t),\n\t\t\t\t),\n\t\t\t)\n\t\t\tp, err := pc.CreatePod(ctx, opts)\n\t\t\trequire.NoError(t, err)\n\n\t\t\tc.StopTaskError = errors.New(\"fake error\")\n\n\t\t\trequire.Error(t, p.Delete(ctx))\n\n\t\t\tstat := p.StatusInfo()\n\t\t\trequire.NoError(t, err)\n\t\t\tassert.Equal(t, cocoa.StatusStarting, stat.Status)\n\n\t\t\tc.StopTaskError = nil\n\n\t\t\trequire.NoError(t, p.Delete(ctx))\n\n\t\t\tcheckPodDeleted(ctx, t, p, c, smc, *opts)\n\t\t},\n\t\t\"DeleteIsIdempotentWhenDeregisteringTaskDefinitionFails\": func(ctx context.Context, t *testing.T, pc cocoa.ECSPodCreator, c *ECSClient, smc *SecretsManagerClient) {\n\t\t\topts := makePodCreationOpts(t).AddContainerDefinitions(\n\t\t\t\t*makeContainerDef(t).AddEnvironmentVariables(\n\t\t\t\t\t*makeSecretEnvVar(t),\n\t\t\t\t),\n\t\t\t)\n\t\t\tp, err := pc.CreatePod(ctx, opts)\n\t\t\trequire.NoError(t, err)\n\n\t\t\tc.DeregisterTaskDefinitionError = errors.New(\"fake error\")\n\n\t\t\trequire.Error(t, p.Delete(ctx))\n\n\t\t\tstat := p.StatusInfo()\n\t\t\trequire.NoError(t, err)\n\t\t\tassert.Equal(t, cocoa.StatusStopped, 
stat.Status)\n\n\t\t\tc.DeregisterTaskDefinitionError = nil\n\n\t\t\trequire.NoError(t, p.Delete(ctx))\n\n\t\t\tcheckPodDeleted(ctx, t, p, c, smc, *opts)\n\t\t},\n\t\t\"DeleteIsIdempotentWhenDeletingSecretsFails\": func(ctx context.Context, t *testing.T, pc cocoa.ECSPodCreator, c *ECSClient, smc *SecretsManagerClient) {\n\t\t\topts := makePodCreationOpts(t).AddContainerDefinitions(\n\t\t\t\t*makeContainerDef(t).AddEnvironmentVariables(\n\t\t\t\t\t*makeSecretEnvVar(t),\n\t\t\t\t),\n\t\t\t)\n\t\t\tp, err := pc.CreatePod(ctx, opts)\n\t\t\trequire.NoError(t, err)\n\n\t\t\tsmc.DeleteSecretError = errors.New(\"fake error\")\n\n\t\t\trequire.Error(t, p.Delete(ctx))\n\n\t\t\tstat := p.StatusInfo()\n\t\t\tassert.Equal(t, cocoa.StatusStopped, stat.Status)\n\n\t\t\tsmc.DeleteSecretError = nil\n\n\t\t\trequire.NoError(t, p.Delete(ctx))\n\n\t\t\tcheckPodDeleted(ctx, t, p, c, smc, *opts)\n\t\t},\n\t}\n}", "func TestHandleStoppedToSteadyStateTransition(t *testing.T) {\n\ttaskEngine := &DockerTaskEngine{}\n\tfirstContainerName := \"container1\"\n\tfirstContainer := &apicontainer.Container{\n\t\tKnownStatusUnsafe: apicontainerstatus.ContainerStopped,\n\t\tName: firstContainerName,\n\t}\n\tsecondContainerName := \"container2\"\n\tsecondContainer := &apicontainer.Container{\n\t\tKnownStatusUnsafe: apicontainerstatus.ContainerRunning,\n\t\tDesiredStatusUnsafe: apicontainerstatus.ContainerRunning,\n\t\tName: secondContainerName,\n\t}\n\n\tctx, cancel := context.WithCancel(context.TODO())\n\tdefer cancel()\n\n\tmTask := &managedTask{\n\t\tTask: &apitask.Task{\n\t\t\tContainers: []*apicontainer.Container{\n\t\t\t\tfirstContainer,\n\t\t\t\tsecondContainer,\n\t\t\t},\n\t\t\tArn: \"task1\",\n\t\t},\n\t\tengine: taskEngine,\n\t\tacsMessages: make(chan acsTransition),\n\t\tdockerMessages: make(chan dockerContainerChange),\n\t\tctx: ctx,\n\t}\n\ttaskEngine.managedTasks = make(map[string]*managedTask)\n\ttaskEngine.managedTasks[\"task1\"] = mTask\n\n\tvar waitForTransitionFunctionInvocation 
sync.WaitGroup\n\twaitForTransitionFunctionInvocation.Add(1)\n\ttransitionFunction := func(task *apitask.Task, container *apicontainer.Container) dockerapi.DockerContainerMetadata {\n\t\tassert.Equal(t, firstContainerName, container.Name,\n\t\t\t\"Mismatch in container reference in transition function\")\n\t\twaitForTransitionFunctionInvocation.Done()\n\t\treturn dockerapi.DockerContainerMetadata{}\n\t}\n\n\ttaskEngine.containerStatusToTransitionFunction = map[apicontainerstatus.ContainerStatus]transitionApplyFunc{\n\t\tapicontainerstatus.ContainerStopped: transitionFunction,\n\t}\n\n\t// Received RUNNING event, known status is not STOPPED, expect this to\n\t// be a noop. Assertions in transitionFunction asserts that as well\n\tmTask.handleStoppedToRunningContainerTransition(\n\t\tapicontainerstatus.ContainerRunning, secondContainer)\n\n\t// Start building preconditions and assertions for STOPPED -> RUNNING\n\t// transition that will be triggered by next invocation of\n\t// handleStoppedToRunningContainerTransition\n\n\t// This wait group ensures that a docker message is generated as a\n\t// result of the transition function\n\tvar waitForDockerMessageAssertions sync.WaitGroup\n\twaitForDockerMessageAssertions.Add(1)\n\tgo func() {\n\t\tdockerMessage := <-mTask.dockerMessages\n\t\tassert.Equal(t, apicontainerstatus.ContainerStopped, dockerMessage.event.Status,\n\t\t\t\"Mismatch in event status\")\n\t\tassert.Equal(t, firstContainerName, dockerMessage.container.Name,\n\t\t\t\"Mismatch in container reference in event\")\n\t\twaitForDockerMessageAssertions.Done()\n\t}()\n\t// Received RUNNING, known status is STOPPED, expect this to invoke\n\t// transition function once\n\tmTask.handleStoppedToRunningContainerTransition(\n\t\tapicontainerstatus.ContainerRunning, firstContainer)\n\n\t// Wait for wait groups to be done\n\twaitForTransitionFunctionInvocation.Wait()\n\twaitForDockerMessageAssertions.Wait()\n\n\t// We now have an empty transition function map. 
Any further transitions\n\t// should be noops\n\tdelete(taskEngine.containerStatusToTransitionFunction, apicontainerstatus.ContainerStopped)\n\t// Simulate getting RUNNING event for a STOPPED container 10 times.\n\t// All of these should be noops. 10 is chosen arbitrarily. Any number > 0\n\t// should be fine here\n\tfor i := 0; i < 10; i++ {\n\t\tmTask.handleStoppedToRunningContainerTransition(\n\t\t\tapicontainerstatus.ContainerRunning, firstContainer)\n\t}\n}", "func TestNewAutoscalingPolicyController(t *testing.T) {\n\tapc := initalizeFakeAutoscalingPolicyController()\n\n\t// test to make sure new creating a autoscaling controller is being\n\t// created and returned\n\tassert.Equal(t, autoscalingPolicySyncControllerName, apc.syncController.name)\n}", "func (m *MockMeshWorkloadControllerFactory) Build(mgr mc_manager.AsyncManager, clusterName string) (controller.MeshWorkloadController, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"Build\", mgr, clusterName)\n\tret0, _ := ret[0].(controller.MeshWorkloadController)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func TestCancelJobSomeWorkersOccupied(t *testing.T) {\n\ts := newTestingServer()\n\tmux := SetupRoutes(s)\n\n\tjobs := []fuq.JobDescription{\n\t\t{\n\t\t\tName: \"job1\",\n\t\t\tNumTasks: 6,\n\t\t\tWorkingDir: \"/foo/bar\",\n\t\t\tLoggingDir: \"/foo/bar/logs\",\n\t\t\tCommand: \"/foo/foo_it.sh\",\n\t\t},\n\t}\n\n\tfor i, j := range jobs {\n\t\tjobs[i].JobId = addJob(t, s.Foreman, j)\n\t\tjobs[i].Status = fuq.Waiting\n\t}\n\n\torigJobs := make([]fuq.JobDescription, len(jobs))\n\tcopy(origJobs, jobs)\n\n\twsConn, client := newTestClient(t, s)\n\tdefer wsConn.Close()\n\tdefer client.Close()\n\n\tni := client.NodeInfo\n\t_ = ni\n\n\tmsgCh := make(chan proto.Message)\n\ttaskCh := make(chan []fuq.Task)\n\n\tvar nproc, nrun uint16 = 8, 0\n\tvar running []fuq.Task\n\tvar toCancel []int\n\n\tclient.OnMessageFunc(proto.MTypeJob, func(msg proto.Message) proto.Message {\n\t\ttasks := 
msg.Data.([]fuq.Task)\n\n\t\tif len(tasks) > int(nproc) {\n\t\t\tpanic(\"invalid number of tasks\")\n\t\t}\n\n\t\tt.Logf(\"onJob received %d tasks: %v\", len(tasks), tasks)\n\t\tnproc -= uint16(len(tasks))\n\t\tnrun += uint16(len(tasks))\n\n\t\trepl := proto.OkayMessage(nproc, nrun, msg.Seq)\n\n\t\trunning = append(running, tasks...)\n\n\t\ttaskCh <- tasks\n\t\treturn repl\n\t})\n\n\tclient.OnMessageFunc(proto.MTypeCancel, func(msg proto.Message) proto.Message {\n\t\tpairs := msg.Data.([]fuq.TaskPair)\n\n\t\tncancel := 0\n\t\tfor i, t := range running {\n\t\t\tfor _, p := range pairs {\n\t\t\t\tif t.JobId != p.JobId {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\n\t\t\t\tif p.Task >= 0 && t.Task != p.Task {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\n\t\t\t\ttoCancel = append(toCancel, i)\n\t\t\t\tncancel++\n\t\t\t}\n\t\t}\n\t\tmsgCh <- msg\n\n\t\trepl := proto.OkayMessage(uint16(ncancel), 0, msg.Seq)\n\t\treturn repl\n\t})\n\n\tctx, cancel := context.WithCancel(context.Background())\n\tdefer cancel()\n\tfuqtest.GoPanicOnError(ctx, client.runConversationLoop)\n\n\tmsg, err := client.SendHello(ctx, proto.HelloData{\n\t\tNumProcs: 8,\n\t\tRunning: nil,\n\t})\n\n\tif err != nil {\n\t\tt.Fatalf(\"error in HELLO: %v\", err)\n\t}\n\n\tnp, nr := msg.AsOkay()\n\tif np != 8 || nr != 0 {\n\t\tt.Fatalf(\"expected OK(8|0), but received OK(%d|%d)\", nproc, nrun)\n\t}\n\n\ttasks := <-taskCh\n\t// JOB message received\n\tif len(tasks) != 6 {\n\t\tt.Fatalf(\"expected 8 task, but received %d tasks\", len(tasks))\n\t}\n\n\t/** Cancel job **/\n\tenv := ClientRequestEnvelope{\n\t\tAuth: fuq.Client{Password: testingPass, Client: \"testing\"},\n\t\tMsg: fuq.ClientStateChangeReq{\n\t\t\tJobIds: []fuq.JobId{jobs[0].JobId},\n\t\t\tAction: \"cancel\",\n\t\t},\n\t}\n\n\trepl := []fuq.JobStateChangeResponse{}\n\n\troundTrip{\n\t\tT: t,\n\t\tMsg: env,\n\t\tDst: &repl,\n\t\tTarget: \"/\" + ClientJobStatePath,\n\t}.ExpectOK(mux.ServeHTTP)\n\tt.Logf(\"response is %v\", repl)\n\n\texpectedRepl := 
[]fuq.JobStateChangeResponse{\n\t\t{jobs[0].JobId, fuq.Running, fuq.Cancelled},\n\t}\n\n\tif !reflect.DeepEqual(repl, expectedRepl) {\n\t\tt.Fatalf(\"expected response '%v' but found '%v'\",\n\t\t\texpectedRepl, repl)\n\t}\n\n\t/** Receive CANCEL message **/\n\tmsg = <-msgCh\n\t// expect CANCEL message\n\n\texpected := proto.Message{\n\t\tType: proto.MTypeCancel,\n\t\tSeq: msg.Seq,\n\t\tData: []fuq.TaskPair{{jobs[0].JobId, -1}},\n\t}\n\n\tif !reflect.DeepEqual(msg, expected) {\n\t\tt.Fatalf(\"expected '%v', but found '%v'\", expected, msg)\n\t}\n}", "func validateBuildRunToSucceed(testBuild *utils.TestBuild, testBuildRun *buildv1alpha1.BuildRun) {\n\ttrueCondition := corev1.ConditionTrue\n\tfalseCondition := corev1.ConditionFalse\n\n\t// Ensure the BuildRun has been created\n\terr := testBuild.CreateBR(testBuildRun)\n\tExpect(err).ToNot(HaveOccurred(), \"Failed to create BuildRun\")\n\n\t// Ensure a BuildRun eventually moves to a succeeded TRUE status\n\tnextStatusLog := time.Now().Add(60 * time.Second)\n\tEventually(func() corev1.ConditionStatus {\n\t\ttestBuildRun, err = testBuild.LookupBuildRun(types.NamespacedName{Name: testBuildRun.Name, Namespace: testBuild.Namespace})\n\t\tExpect(err).ToNot(HaveOccurred(), \"Error retrieving a buildRun\")\n\n\t\tif testBuildRun.Status.GetCondition(buildv1alpha1.Succeeded) == nil {\n\t\t\treturn corev1.ConditionUnknown\n\t\t}\n\n\t\tExpect(testBuildRun.Status.GetCondition(buildv1alpha1.Succeeded).Status).ToNot(Equal(falseCondition), \"BuildRun status doesn't move to Succeeded\")\n\n\t\tnow := time.Now()\n\t\tif now.After(nextStatusLog) {\n\t\t\tLogf(\"Still waiting for build run '%s' to succeed.\", testBuildRun.Name)\n\t\t\tnextStatusLog = time.Now().Add(60 * time.Second)\n\t\t}\n\n\t\treturn testBuildRun.Status.GetCondition(buildv1alpha1.Succeeded).Status\n\n\t}, time.Duration(1100*getTimeoutMultiplier())*time.Second, 5*time.Second).Should(Equal(trueCondition), \"BuildRun did not succeed\")\n\n\t// Verify that the BuildSpec 
is still available in the status\n\tExpect(testBuildRun.Status.BuildSpec).ToNot(BeNil(), \"BuildSpec is not available in the status\")\n\n\tLogf(\"Test build '%s' is completed after %v !\", testBuildRun.GetName(), testBuildRun.Status.CompletionTime.Time.Sub(testBuildRun.Status.StartTime.Time))\n}", "func TestDeviceController(t *testing.T) {\n\n\t// Set the logger to development mode for verbose logs.\n\tlogf.SetLogger(zap.New(zap.UseDevMode(true)))\n\n\t// Create a fake client to mock API calls.\n\tcl, s := CreateFakeClient(t)\n\n\t// Create a ReconcileBlockDevice object with the scheme and fake client.\n\tr := &BlockDeviceReconciler{Client: cl, Scheme: s, Recorder: fakeRecorder}\n\n\t// Mock request to simulate Reconcile() being called on an event for a\n\t// watched resource .\n\treq := reconcile.Request{\n\t\tNamespacedName: types.NamespacedName{\n\t\t\tName: deviceName,\n\t\t\tNamespace: namespace,\n\t\t},\n\t}\n\n\tres, err := r.Reconcile(context.TODO(), req)\n\tif err != nil {\n\t\tt.Fatalf(\"reconcile: (%v)\", err)\n\t}\n\n\t// Check the result of reconciliation to make sure it has the desired state.\n\tif !res.Requeue {\n\t\tt.Log(\"reconcile did not requeue request as expected\")\n\t}\n\n\tdeviceInstance := &openebsv1alpha1.BlockDevice{}\n\terr = r.Client.Get(context.TODO(), req.NamespacedName, deviceInstance)\n\tif err != nil {\n\t\tt.Errorf(\"get deviceInstance : (%v)\", err)\n\t}\n\n\t// Disk Status state should be Active as expected.\n\tif deviceInstance.Status.State == ndm.NDMActive {\n\t\tt.Logf(\"BlockDevice Object state:%v match expected state:%v\", deviceInstance.Status.State, ndm.NDMActive)\n\t} else {\n\t\tt.Fatalf(\"BlockDevice Object state:%v did not match expected state:%v\", deviceInstance.Status.State, ndm.NDMActive)\n\t}\n}", "func (r *TestWorkloadReconciler) reconcilePending(ctx context.Context, workload *naglfarv1.TestWorkload) (ctrl.Result, error) {\n\tclusterTopologies := make(map[types.NamespacedName]struct{})\n\tfor _, item := range 
workload.Spec.ClusterTopologiesRefs {\n\t\tclusterTopologies[types.NamespacedName{\n\t\t\tNamespace: workload.Namespace,\n\t\t\tName: item.Name,\n\t\t}] = struct{}{}\n\t}\n\ttopologies, allReady, err := r.checkTopologiesReady(ctx, clusterTopologies)\n\tif err != nil {\n\t\treturn ctrl.Result{}, err\n\t}\n\tif !allReady {\n\t\tr.Recorder.Event(workload, \"Warning\", \"Precondition\", \"not all clusters are ready\")\n\t\treturn ctrl.Result{RequeueAfter: 5 * time.Second}, nil\n\t}\n\tvar resourceList naglfarv1.TestResourceList\n\tif err := r.List(ctx, &resourceList, client.InNamespace(workload.Namespace)); err != nil {\n\t\treturn ctrl.Result{}, err\n\t}\n\tvar installedCount = 0\n\tfor _, item := range workload.Spec.Workloads {\n\t\tworkloadNode, err := r.getWorkloadRequestNode(ctx, workload.Namespace, item.DockerContainer)\n\t\tif err != nil {\n\t\t\treturn ctrl.Result{}, err\n\t\t}\n\t\tif workloadNode == nil {\n\t\t\terr := fmt.Errorf(\"cannot find the resource %s\", item.DockerContainer.ResourceRequest.Name)\n\t\t\tr.Recorder.Event(workload, \"Warning\", \"Precondition\", err.Error())\n\t\t\treturn ctrl.Result{}, err\n\t\t}\n\t\tif workloadNode.Status.ClaimRef != nil && *workloadNode.Status.ClaimRef != ref.CreateRef(&workload.ObjectMeta) {\n\t\t\tr.Recorder.Eventf(workload, \"Warning\", \"Precondition\", \"node %s is occupied by %s\",\n\t\t\t\tref.CreateRef(&workloadNode.ObjectMeta).Key(),\n\t\t\t\tworkloadNode.Status.ClaimRef.Key())\n\t\t\treturn ctrl.Result{RequeueAfter: 5 * time.Second}, nil\n\t\t}\n\t\tswitch workloadNode.Status.State {\n\t\tcase naglfarv1.ResourceDestroy:\n\t\t\treturn ctrl.Result{RequeueAfter: time.Second}, nil\n\t\tcase naglfarv1.ResourcePending:\n\t\t\tpanic(fmt.Sprintf(\"there's a bug, it shouldn't see the `%s` state\", workloadNode.Status.State))\n\t\tcase naglfarv1.ResourceUninitialized:\n\t\t\tif workloadNode.Status.Image == \"\" {\n\t\t\t\tself := ref.CreateRef(&workload.ObjectMeta)\n\t\t\t\t// claim 
occupy\n\t\t\t\tworkloadNode.Status.ClaimRef = &self\n\n\t\t\t\tr.setContainerSpec(&workloadNode.Status, workload, &item)\n\t\t\t\ttopologyEnvs, err := r.buildTopologyEnvs(&workload.Spec, topologies, resourceList)\n\t\t\t\tif err != nil {\n\t\t\t\t\tr.Recorder.Event(workload, \"Warning\", \"Precondition\", err.Error())\n\t\t\t\t\treturn ctrl.Result{}, err\n\t\t\t\t}\n\t\t\t\tworkloadNode.Status.Envs = append(workloadNode.Status.Envs, topologyEnvs...)\n\t\t\t\tif err := r.Status().Update(ctx, workloadNode); err != nil {\n\t\t\t\t\treturn ctrl.Result{}, err\n\t\t\t\t}\n\t\t\t\tr.Recorder.Event(workload, \"Normal\", \"Install\", fmt.Sprintf(\"preparing the workload: %s\", item.Name))\n\t\t\t} else if workloadNode.Status.Image != item.DockerContainer.Image {\n\t\t\t\terr := fmt.Errorf(\"resource %s has installed a conflict image\", workloadNode.Name)\n\t\t\t\tr.Recorder.Event(workload, \"Warning\", \"Precondition\", err.Error())\n\t\t\t\treturn ctrl.Result{}, err\n\t\t\t}\n\t\tcase naglfarv1.ResourceReady, naglfarv1.ResourceFinish:\n\t\t\tself := ref.CreateRef(&workload.ObjectMeta)\n\t\t\t// check if the workload node resource is used by a tct(ClaimRef == nil)\n\t\t\t// or has occupied by another workload (Claim != self)\n\t\t\tif workloadNode.Status.ClaimRef == nil || *workloadNode.Status.ClaimRef != self {\n\t\t\t\terr := fmt.Errorf(\"resource %s is used by others now, wait\", workloadNode.Name)\n\t\t\t\tr.Recorder.Eventf(workload, \"Warning\", \"Precondition\", err.Error())\n\t\t\t\treturn ctrl.Result{}, err\n\t\t\t}\n\t\t\tinstalledCount += 1\n\t\tdefault:\n\t\t\tpanic(fmt.Sprintf(\"there's a bug, forget to process the `%s` state\", workloadNode.Status.State))\n\t\t}\n\t}\n\t// TODO: we can record installed workloads on the `status` field\n\tif installedCount == len(workload.Spec.Workloads) {\n\t\tworkload.Status.State = naglfarv1.TestWorkloadStateRunning\n\t\tif err := r.Status().Update(ctx, workload); err != nil {\n\t\t\treturn ctrl.Result{}, 
err\n\t\t}\n\t\tr.Recorder.Event(workload, \"Normal\", \"Install\", \"all workload has been installed\")\n\t\treturn ctrl.Result{}, nil\n\t}\n\t// otherwise, we are still pending\n\treturn ctrl.Result{RequeueAfter: 5 * time.Second}, nil\n}", "func TestAdmissionLifecycle(t *testing.T) {\n\tnamespaceObj := &kapi.Namespace{\n\t\tObjectMeta: kapi.ObjectMeta{\n\t\t\tName: \"test\",\n\t\t\tNamespace: \"\",\n\t\t},\n\t\tStatus: kapi.NamespaceStatus{\n\t\t\tPhase: kapi.NamespaceActive,\n\t\t},\n\t}\n\tstore := cache.NewStore(cache.IndexFuncToKeyFuncAdapter(cache.MetaNamespaceIndexFunc))\n\tstore.Add(namespaceObj)\n\tmockClient := &testclient.Fake{}\n\tprojectcache.FakeProjectCache(mockClient, store, \"\")\n\thandler := &lifecycle{client: mockClient}\n\tbuild := &buildapi.Build{\n\t\tObjectMeta: kapi.ObjectMeta{Name: \"buildid\", Namespace: \"other\"},\n\t\tSpec: buildapi.BuildSpec{\n\t\t\tSource: buildapi.BuildSource{\n\t\t\t\tGit: &buildapi.GitBuildSource{\n\t\t\t\t\tURI: \"http://github.com/my/repository\",\n\t\t\t\t},\n\t\t\t\tContextDir: \"context\",\n\t\t\t},\n\t\t\tStrategy: buildapi.BuildStrategy{\n\t\t\t\tDockerStrategy: &buildapi.DockerBuildStrategy{},\n\t\t\t},\n\t\t\tOutput: buildapi.BuildOutput{\n\t\t\t\tTo: &kapi.ObjectReference{\n\t\t\t\t\tKind: \"DockerImage\",\n\t\t\t\t\tName: \"repository/data\",\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\tStatus: buildapi.BuildStatus{\n\t\t\tPhase: buildapi.BuildPhaseNew,\n\t\t},\n\t}\n\terr := handler.Admit(admission.NewAttributesRecord(build, \"Build\", build.Namespace, \"name\", \"builds\", \"\", \"CREATE\", nil))\n\tif err != nil {\n\t\tt.Errorf(\"Unexpected error returned from admission handler: %v\", err)\n\t}\n\n\t// change namespace state to terminating\n\tnamespaceObj.Status.Phase = kapi.NamespaceTerminating\n\tstore.Add(namespaceObj)\n\n\t// verify create operations in the namespace cause an error\n\terr = handler.Admit(admission.NewAttributesRecord(build, \"Build\", build.Namespace, \"name\", \"builds\", \"\", 
\"CREATE\", nil))\n\tif err == nil {\n\t\tt.Errorf(\"Expected error rejecting creates in a namespace when it is terminating\")\n\t}\n\n\t// verify update operations in the namespace can proceed\n\terr = handler.Admit(admission.NewAttributesRecord(build, \"Build\", build.Namespace, \"name\", \"builds\", \"\", \"UPDATE\", nil))\n\tif err != nil {\n\t\tt.Errorf(\"Unexpected error returned from admission handler: %v\", err)\n\t}\n\n\t// verify delete operations in the namespace can proceed\n\terr = handler.Admit(admission.NewAttributesRecord(nil, \"Build\", build.Namespace, \"name\", \"builds\", \"\", \"DELETE\", nil))\n\tif err != nil {\n\t\tt.Errorf(\"Unexpected error returned from admission handler: %v\", err)\n\t}\n\n}", "func Test_Pod_Checker(t *testing.T) {\n\tworkflow := func(name string) string {\n\t\treturn workflowPath(\"pod\", name)\n\t}\n\tconst (\n\t\tadded = \"added\"\n\t\tcontainerTerminatedError = \"containerTerminatedError\"\n\t\tcontainerTerminatedSuccess = \"containerTerminatedSuccess\"\n\t\tcontainerTerminatedSuccessRestartNever = \"containerTerminatedSuccessRestartNever\"\n\t\tcreateSuccess = \"createSuccess\"\n\t\timagePullError = \"imagePullError\"\n\t\timagePullErrorResolved = \"imagePullErrorResolved\"\n\t\tscheduled = \"scheduled\"\n\t\tunready = \"unready\"\n\t\tunscheduled = \"unscheduled\"\n\t)\n\n\ttests := []struct {\n\t\tname string\n\t\trecordingPaths []string\n\t\t// TODO: optional message validator function to check returned messages\n\t\texpectReady bool\n\t}{\n\t\t{\n\t\t\tname: \"Pod added but not ready\",\n\t\t\trecordingPaths: []string{workflow(added)},\n\t\t\texpectReady: false,\n\t\t},\n\t\t{\n\t\t\tname: \"Pod scheduled but not ready\",\n\t\t\trecordingPaths: []string{workflow(scheduled)},\n\t\t\texpectReady: false,\n\t\t},\n\t\t{\n\t\t\tname: \"Pod create success\",\n\t\t\trecordingPaths: []string{workflow(createSuccess)},\n\t\t\texpectReady: true,\n\t\t},\n\t\t{\n\t\t\tname: \"Pod image pull error\",\n\t\t\trecordingPaths: 
[]string{workflow(imagePullError)},\n\t\t\texpectReady: false,\n\t\t},\n\t\t{\n\t\t\tname: \"Pod create success after image pull failure resolved\",\n\t\t\trecordingPaths: []string{workflow(imagePullError), workflow(imagePullErrorResolved)},\n\t\t\texpectReady: true,\n\t\t},\n\t\t{\n\t\t\tname: \"Pod unscheduled\",\n\t\t\trecordingPaths: []string{workflow(unscheduled)},\n\t\t\texpectReady: false,\n\t\t},\n\t\t{\n\t\t\tname: \"Pod unready\",\n\t\t\trecordingPaths: []string{workflow(unready)},\n\t\t\texpectReady: false,\n\t\t},\n\t\t{\n\t\t\tname: \"Pod container terminated with error\",\n\t\t\trecordingPaths: []string{workflow(containerTerminatedError)},\n\t\t\texpectReady: false,\n\t\t},\n\t\t{\n\t\t\tname: \"Pod container terminated successfully\",\n\t\t\trecordingPaths: []string{workflow(containerTerminatedSuccess)},\n\t\t\texpectReady: false,\n\t\t},\n\t\t{\n\t\t\tname: \"Pod container terminated successfully with restartPolicy: Never\",\n\t\t\trecordingPaths: []string{workflow(containerTerminatedSuccessRestartNever)},\n\t\t\texpectReady: true,\n\t\t},\n\t}\n\tfor _, tt := range tests {\n\t\tt.Run(tt.name, func(t *testing.T) {\n\t\t\tchecker := NewPodChecker()\n\n\t\t\tready, messages := mustCheckIfRecordingsReady(tt.recordingPaths, checker)\n\t\t\tif ready != tt.expectReady {\n\t\t\t\tt.Errorf(\"Ready() = %t, want %t\\nMessages: %s\", ready, tt.expectReady, messages)\n\t\t\t}\n\t\t})\n\t}\n}", "func TestCancelJob(t *testing.T) {\n\tif testing.Short() {\n\t\tt.Skip(\"Skipping integration tests in short mode\")\n\t}\n\n\tt.Parallel()\n\tc, _ := minikubetestenv.AcquireCluster(t)\n\n\t// Create an input repo\n\trepo := tu.UniqueString(\"TestCancelJob\")\n\trequire.NoError(t, c.CreateRepo(pfs.DefaultProjectName, repo))\n\n\t// Create an input commit\n\tcommit, err := c.StartCommit(pfs.DefaultProjectName, repo, \"master\")\n\trequire.NoError(t, err)\n\trequire.NoError(t, c.PutFile(commit, \"/time\", strings.NewReader(\"600\"), 
client.WithAppendPutFile()))\n\trequire.NoError(t, c.PutFile(commit, \"/data\", strings.NewReader(\"commit data\"), client.WithAppendPutFile()))\n\trequire.NoError(t, c.FinishCommit(pfs.DefaultProjectName, repo, commit.Branch.Name, commit.Id))\n\n\t// Create sleep + copy pipeline\n\tpipeline := tu.UniqueString(\"pipeline\")\n\trequire.NoError(t, c.CreatePipeline(pfs.DefaultProjectName,\n\t\tpipeline,\n\t\t\"\",\n\t\t[]string{\"bash\"},\n\t\t[]string{\n\t\t\t\"sleep `cat /pfs/*/time`\",\n\t\t\t\"cp /pfs/*/data /pfs/out/\",\n\t\t},\n\t\t&pps.ParallelismSpec{\n\t\t\tConstant: 1,\n\t\t},\n\t\tclient.NewPFSInput(pfs.DefaultProjectName, repo, \"/\"),\n\t\t\"\",\n\t\tfalse,\n\t))\n\n\t// Wait until PPS has started processing commit\n\tvar jobInfo *pps.JobInfo\n\trequire.NoErrorWithinT(t, 30*time.Second, func() error {\n\t\treturn backoff.Retry(func() error {\n\t\t\tjobInfos, err := c.ListJob(pfs.DefaultProjectName, pipeline, nil, -1, true)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tif len(jobInfos) != 1 {\n\t\t\t\treturn errors.Errorf(\"Expected one job, but got %d: %v\", len(jobInfos), jobInfos)\n\t\t\t}\n\t\t\tjobInfo = jobInfos[0]\n\t\t\treturn nil\n\t\t}, backoff.NewTestingBackOff())\n\t})\n\n\t// stop the job\n\trequire.NoError(t, c.StopJob(pfs.DefaultProjectName, jobInfo.Job.Pipeline.Name, jobInfo.Job.Id))\n\n\t// Wait until the job is cancelled\n\trequire.NoErrorWithinT(t, 30*time.Second, func() error {\n\t\treturn backoff.Retry(func() error {\n\t\t\tupdatedJobInfo, err := c.InspectJob(pfs.DefaultProjectName, jobInfo.Job.Pipeline.Name, jobInfo.Job.Id, false)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tif updatedJobInfo.State != pps.JobState_JOB_KILLED {\n\t\t\t\treturn errors.Errorf(\"job %s is still running, but should be KILLED\", jobInfo.Job.Id)\n\t\t\t}\n\t\t\treturn nil\n\t\t}, backoff.NewTestingBackOff())\n\t})\n\n\t// Create one more commit to make sure the pipeline can still process input\n\t// commits\n\tcommit2, err := 
c.StartCommit(pfs.DefaultProjectName, repo, \"master\")\n\trequire.NoError(t, err)\n\trequire.NoError(t, c.DeleteFile(commit2, \"/time\"))\n\trequire.NoError(t, c.PutFile(commit2, \"/time\", strings.NewReader(\"1\"), client.WithAppendPutFile()))\n\trequire.NoError(t, c.DeleteFile(commit2, \"/data\"))\n\trequire.NoError(t, c.PutFile(commit2, \"/data\", strings.NewReader(\"commit 2 data\"), client.WithAppendPutFile()))\n\trequire.NoError(t, c.FinishCommit(pfs.DefaultProjectName, repo, commit2.Branch.Name, commit2.Id))\n\n\t// Flush commit2, and make sure the output is as expected\n\tcommitInfo, err := c.WaitCommit(pfs.DefaultProjectName, pipeline, \"master\", commit2.Id)\n\trequire.NoError(t, err)\n\n\tbuf := bytes.Buffer{}\n\terr = c.GetFile(commitInfo.Commit, \"/data\", &buf)\n\trequire.NoError(t, err)\n\trequire.Equal(t, \"commit 2 data\", buf.String())\n}", "func add(ctx context.Context, mgr manager.Manager, r reconcile.Reconciler) error {\n\t// Create a new controller\n\tc, err := controller.New(\"buildrun-controller\", mgr, controller.Options{Reconciler: r})\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tpredBuildRun := predicate.Funcs{\n\t\tCreateFunc: func(e event.CreateEvent) bool {\n\t\t\to := e.Object.(*buildv1alpha1.BuildRun)\n\n\t\t\t// The CreateFunc is also called when the controller is started and iterates over all objects. For those BuildRuns that have a TaskRun referenced already,\n\t\t\t// we do not need to do a further reconciliation. 
BuildRun updates then only happen from the TaskRun.\n\t\t\treturn o.Status.LatestTaskRunRef == nil\n\t\t},\n\t\tUpdateFunc: func(e event.UpdateEvent) bool {\n\t\t\t// Ignore updates to CR status in which case metadata.Generation does not change\n\t\t\to := e.ObjectOld.(*buildv1alpha1.BuildRun)\n\n\t\t\t// Avoid reconciling when for updates on the BuildRun, the build.build.dev/name\n\t\t\t// label is set, and when a BuildRun already have a referenced TaskRun.\n\t\t\tif o.GetLabels()[buildv1alpha1.LabelBuild] == \"\" || o.Status.LatestTaskRunRef != nil {\n\t\t\t\treturn false\n\t\t\t}\n\n\t\t\treturn e.MetaOld.GetGeneration() != e.MetaNew.GetGeneration()\n\t\t},\n\t\tDeleteFunc: func(e event.DeleteEvent) bool {\n\t\t\t// Evaluates to false if the object has been confirmed deleted.\n\t\t\treturn !e.DeleteStateUnknown\n\t\t},\n\t}\n\n\tpredTaskRun := predicate.Funcs{\n\t\tUpdateFunc: func(e event.UpdateEvent) bool {\n\t\t\to := e.ObjectOld.(*v1beta1.TaskRun)\n\t\t\tn := e.ObjectNew.(*v1beta1.TaskRun)\n\n\t\t\t// Process an update event when the old TR resource is not yet started and the new TR resource got a\n\t\t\t// condition of the type Succeeded\n\t\t\tif o.Status.StartTime.IsZero() && n.Status.GetCondition(apis.ConditionSucceeded) != nil {\n\t\t\t\treturn true\n\t\t\t}\n\n\t\t\t// Process an update event for every change in the condition.Reason between the old and new TR resource\n\t\t\tif o.Status.GetCondition(apis.ConditionSucceeded) != nil && n.Status.GetCondition(apis.ConditionSucceeded) != nil {\n\t\t\t\tif o.Status.GetCondition(apis.ConditionSucceeded).Reason != n.Status.GetCondition(apis.ConditionSucceeded).Reason {\n\t\t\t\t\treturn true\n\t\t\t\t}\n\t\t\t}\n\t\t\treturn false\n\t\t},\n\t\tDeleteFunc: func(e event.DeleteEvent) bool {\n\t\t\t// Evaluates to false if the object has been confirmed deleted.\n\t\t\treturn !e.DeleteStateUnknown\n\t\t},\n\t}\n\n\t// Watch for changes to primary resource BuildRun\n\terr = c.Watch(&source.Kind{Type: 
&buildv1alpha1.BuildRun{}}, &handler.EnqueueRequestForObject{}, predBuildRun)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// enqueue Reconciles requests only for events where a TaskRun already exists and that is related\n\t// to a BuildRun\n\treturn c.Watch(&source.Kind{Type: &v1beta1.TaskRun{}}, &handler.EnqueueRequestsFromMapFunc{\n\t\tToRequests: handler.ToRequestsFunc(func(o handler.MapObject) []reconcile.Request {\n\n\t\t\ttaskRun := o.Object.(*v1beta1.TaskRun)\n\n\t\t\t// check if TaskRun is related to BuildRun\n\t\t\tif taskRun.GetLabels() == nil || taskRun.GetLabels()[buildv1alpha1.LabelBuildRun] == \"\" {\n\t\t\t\treturn []reconcile.Request{}\n\t\t\t}\n\n\t\t\treturn []reconcile.Request{\n\t\t\t\t{\n\t\t\t\t\tNamespacedName: types.NamespacedName{\n\t\t\t\t\t\tName: taskRun.Name,\n\t\t\t\t\t\tNamespace: taskRun.Namespace,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t}\n\t\t}),\n\t}, predTaskRun)\n}", "func (k *Kubernetes) Test(ctx context.Context) error {\n\tk.l.Lock()\n\tdefer k.l.Unlock()\n\n\treturn k.updatePods(ctx)\n}", "func controllerSubtest(name string, tc *sessionTestCase) func(t *testing.T) {\n\treturn func(t *testing.T) {\n\t\tt.Parallel()\n\n\t\t// This test uses the same controller to manage two sessions that are communicating with\n\t\t// each other (basically, both the \"local\" and \"remote\" session are on the same system).\n\t\t// This is possible because the local discriminators are chosen such that they are\n\t\t// different.\n\t\t//\n\t\t// While this is something that rarely (if ever) occurs in practice, it makes test setup\n\t\t// much simpler here. 
In the real world, BFD would configured between two systems and each\n\t\t// system would have its own controller which is in charge only of sessions on that system.\n\t\tcontroller := &bfd.Controller{\n\t\t\tSessions: []*bfd.Session{tc.sessionA, tc.sessionB},\n\t\t\tReceiveQueueSize: 10,\n\t\t}\n\n\t\t// both sessions send their messages through the same controller\n\t\tmessageQueue := &redirectSender{Destination: controller.Messages()}\n\t\ttc.sessionA.Sender = messageQueue\n\t\ttc.sessionB.Sender = messageQueue\n\t\ttc.sessionA.Logger = testlog.NewLogger(t).New(\"session\", \"a\")\n\t\ttc.sessionB.Logger = testlog.NewLogger(t).New(\"session\", \"b\")\n\n\t\t// the wait group is not used for synchronization, but rather to check that the controller\n\t\t// returns\n\t\tvar wg sync.WaitGroup\n\t\twg.Add(1)\n\n\t\tgo func() {\n\t\t\terr := controller.Run()\n\t\t\trequire.NoError(t, err)\n\t\t\twg.Done()\n\t\t}()\n\n\t\t// second argument is not used because we have a single queue\n\t\ttc.testBehavior(messageQueue, nil)\n\n\t\tassert.Equal(t, tc.expectedUpA, controller.IsUp(tc.sessionA.LocalDiscriminator))\n\t\tassert.Equal(t, tc.expectedUpB, controller.IsUp(tc.sessionB.LocalDiscriminator))\n\n\t\tmessageQueue.Close()\n\n\t\tfor i := 0; i < 2; i++ {\n\t\t\terr := <-controller.Errors()\n\t\t\tassert.NoError(t, err)\n\t\t}\n\t\twg.Wait()\n\t}\n}", "func TestReconcile(t *testing.T) {\n\n\t//\n\t// Define The KafkaChannel Reconciler Test Cases\n\t//\n\t// Note - Knative testing framework assumes ALL actions will be in the same Namespace\n\t// as the Key so we have to set SkipNamespaceValidation in all tests!\n\t//\n\t// Note - Knative reconciler framework expects Events (not errors) from ReconcileKind()\n\t// so WantErr is only for higher level failures in the injected Reconcile() function.\n\t//\n\tcommontesting.SetTestEnvironment(t)\n\ttableTest := TableTest{\n\n\t\t//\n\t\t// Top Level Use Cases\n\t\t//\n\n\t\t{\n\t\t\tName: \"Bad Secret Key\",\n\t\t\tKey: 
\"too/many/parts\",\n\t\t},\n\t\t{\n\t\t\tName: \"Secret Key Not Found\",\n\t\t\tKey: \"foo/not-found\",\n\t\t},\n\n\t\t//\n\t\t// Full Reconciliation\n\t\t//\n\n\t\t{\n\t\t\tName: \"Complete Reconciliation Without KafkaChannel\",\n\t\t\tKey: controllertesting.KafkaSecretKey,\n\t\t\tObjects: []runtime.Object{\n\t\t\t\tcontrollertesting.NewKafkaSecret(),\n\t\t\t},\n\t\t\tWantCreates: []runtime.Object{\n\t\t\t\tcontrollertesting.NewKafkaChannelReceiverService(),\n\t\t\t\tcontrollertesting.NewKafkaChannelReceiverDeployment(),\n\t\t\t},\n\t\t\tWantPatches: []clientgotesting.PatchActionImpl{controllertesting.NewKafkaSecretFinalizerPatchActionImpl()},\n\t\t\tWantEvents: []string{\n\t\t\t\tcontrollertesting.NewKafkaSecretFinalizerUpdateEvent(),\n\t\t\t\tcontrollertesting.NewKafkaSecretSuccessfulReconciliationEvent(),\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tName: \"Complete Reconciliation With KafkaChannel\",\n\t\t\tKey: controllertesting.KafkaSecretKey,\n\t\t\tObjects: []runtime.Object{\n\t\t\t\tcontrollertesting.NewKafkaSecret(),\n\t\t\t\tcontrollertesting.NewKafkaChannel(),\n\t\t\t},\n\t\t\tWantCreates: []runtime.Object{\n\t\t\t\tcontrollertesting.NewKafkaChannelReceiverService(),\n\t\t\t\tcontrollertesting.NewKafkaChannelReceiverDeployment(),\n\t\t\t},\n\t\t\tWantStatusUpdates: []clientgotesting.UpdateActionImpl{\n\t\t\t\t{\n\t\t\t\t\tObject: controllertesting.NewKafkaChannel(\n\t\t\t\t\t\tcontrollertesting.WithReceiverServiceReady,\n\t\t\t\t\t\tcontrollertesting.WithReceiverDeploymentReady,\n\t\t\t\t\t),\n\t\t\t\t},\n\t\t\t},\n\t\t\tWantPatches: []clientgotesting.PatchActionImpl{controllertesting.NewKafkaSecretFinalizerPatchActionImpl()},\n\t\t\tWantEvents: []string{\n\t\t\t\tcontrollertesting.NewKafkaSecretFinalizerUpdateEvent(),\n\t\t\t\tcontrollertesting.NewKafkaSecretSuccessfulReconciliationEvent(),\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tName: \"Complete Reconciliation With KafkaChannel, No Receiver Resource Requests Or Limits\",\n\t\t\tKey: 
controllertesting.KafkaSecretKey,\n\t\t\tObjects: []runtime.Object{\n\t\t\t\tcontrollertesting.NewKafkaSecret(),\n\t\t\t\tcontrollertesting.NewKafkaChannel(),\n\t\t\t},\n\t\t\tWantCreates: []runtime.Object{\n\t\t\t\tcontrollertesting.NewKafkaChannelReceiverService(),\n\t\t\t\tcontrollertesting.NewKafkaChannelReceiverDeployment(controllertesting.WithoutResources),\n\t\t\t},\n\t\t\tWantStatusUpdates: []clientgotesting.UpdateActionImpl{\n\t\t\t\t{\n\t\t\t\t\tObject: controllertesting.NewKafkaChannel(\n\t\t\t\t\t\tcontrollertesting.WithReceiverServiceReady,\n\t\t\t\t\t\tcontrollertesting.WithReceiverDeploymentReady,\n\t\t\t\t\t),\n\t\t\t\t},\n\t\t\t},\n\t\t\tWantPatches: []clientgotesting.PatchActionImpl{controllertesting.NewKafkaSecretFinalizerPatchActionImpl()},\n\t\t\tWantEvents: []string{\n\t\t\t\tcontrollertesting.NewKafkaSecretFinalizerUpdateEvent(),\n\t\t\t\tcontrollertesting.NewKafkaSecretSuccessfulReconciliationEvent(),\n\t\t\t},\n\t\t\tOtherTestData: map[string]interface{}{\n\t\t\t\t\"configOptions\": []controllertesting.KafkaConfigOption{controllertesting.WithNoReceiverResources},\n\t\t\t},\n\t\t},\n\n\t\t//\n\t\t// KafkaChannel Secret Deletion (Finalizer)\n\t\t//\n\n\t\t{\n\t\t\tName: \"Finalize Deleted Secret\",\n\t\t\tKey: controllertesting.KafkaSecretKey,\n\t\t\tObjects: []runtime.Object{\n\t\t\t\tcontrollertesting.NewKafkaSecret(controllertesting.WithKafkaSecretDeleted),\n\t\t\t\tcontrollertesting.NewKafkaChannel(\n\t\t\t\t\tcontrollertesting.WithReceiverServiceReady,\n\t\t\t\t\tcontrollertesting.WithReceiverDeploymentReady,\n\t\t\t\t),\n\t\t\t},\n\t\t\tWantStatusUpdates: []clientgotesting.UpdateActionImpl{\n\t\t\t\t{\n\t\t\t\t\tObject: controllertesting.NewKafkaChannel(\n\t\t\t\t\t\tcontrollertesting.WithReceiverServiceFinalized,\n\t\t\t\t\t\tcontrollertesting.WithReceiverDeploymentFinalized,\n\t\t\t\t\t),\n\t\t\t\t},\n\t\t\t},\n\t\t\tWantEvents: 
[]string{\n\t\t\t\tcontrollertesting.NewKafkaSecretSuccessfulFinalizedEvent(),\n\t\t\t},\n\t\t},\n\n\t\t//\n\t\t// KafkaSecret Receiver Service\n\t\t//\n\n\t\t{\n\t\t\tName: \"Reconcile Missing Receiver Service Success\",\n\t\t\tKey: controllertesting.KafkaSecretKey,\n\t\t\tObjects: []runtime.Object{\n\t\t\t\tcontrollertesting.NewKafkaSecret(controllertesting.WithKafkaSecretFinalizer),\n\t\t\t\tcontrollertesting.NewKafkaChannel(\n\t\t\t\t\tcontrollertesting.WithReceiverServiceReady,\n\t\t\t\t\tcontrollertesting.WithReceiverDeploymentReady,\n\t\t\t\t),\n\t\t\t\tcontrollertesting.NewKafkaChannelReceiverDeployment(),\n\t\t\t},\n\t\t\tWantCreates: []runtime.Object{controllertesting.NewKafkaChannelReceiverService()},\n\t\t\tWantEvents: []string{\n\t\t\t\tcontrollertesting.NewKafkaSecretSuccessfulReconciliationEvent(),\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tName: \"Reconcile Missing Receiver Service Error(Create)\",\n\t\t\tKey: controllertesting.KafkaSecretKey,\n\t\t\tObjects: []runtime.Object{\n\t\t\t\tcontrollertesting.NewKafkaSecret(controllertesting.WithKafkaSecretFinalizer),\n\t\t\t\tcontrollertesting.NewKafkaChannel(\n\t\t\t\t\tcontrollertesting.WithReceiverServiceReady,\n\t\t\t\t\tcontrollertesting.WithReceiverDeploymentReady,\n\t\t\t\t),\n\t\t\t\tcontrollertesting.NewKafkaChannelService(),\n\t\t\t\tcontrollertesting.NewKafkaChannelReceiverDeployment(),\n\t\t\t\tcontrollertesting.NewKafkaChannelDispatcherService(),\n\t\t\t\tcontrollertesting.NewKafkaChannelDispatcherDeployment(),\n\t\t\t},\n\t\t\tWithReactors: []clientgotesting.ReactionFunc{InduceFailure(\"create\", \"services\")},\n\t\t\tWantErr: true,\n\t\t\tWantCreates: []runtime.Object{controllertesting.NewKafkaChannelReceiverService()},\n\t\t\tWantStatusUpdates: []clientgotesting.UpdateActionImpl{\n\t\t\t\t{\n\t\t\t\t\tObject: 
controllertesting.NewKafkaChannel(\n\t\t\t\t\t\tcontrollertesting.WithReceiverServiceFailed,\n\t\t\t\t\t\tcontrollertesting.WithReceiverDeploymentReady,\n\t\t\t\t\t),\n\t\t\t\t},\n\t\t\t},\n\t\t\tWantEvents: []string{\n\t\t\t\tEventf(corev1.EventTypeWarning, event.ReceiverServiceReconciliationFailed.String(), \"Failed To Reconcile Receiver Service: inducing failure for create services\"),\n\t\t\t\tcontrollertesting.NewKafkaSecretFailedReconciliationEvent(),\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tName: \"Reconcile Receiver Service With Deletion Timestamp\",\n\t\t\tKey: controllertesting.KafkaSecretKey,\n\t\t\tObjects: []runtime.Object{\n\t\t\t\tcontrollertesting.NewKafkaSecret(controllertesting.WithKafkaSecretFinalizer),\n\t\t\t\tcontrollertesting.NewKafkaChannelReceiverService(controllertesting.WithDeletionTimestampService),\n\t\t\t\tcontrollertesting.NewKafkaChannelReceiverDeployment(),\n\t\t\t},\n\t\t\tWantErr: true,\n\t\t\tWantEvents: []string{\n\t\t\t\tEventf(corev1.EventTypeWarning, event.ReceiverServiceReconciliationFailed.String(), \"Failed To Reconcile Receiver Service: encountered Receiver Service with DeletionTimestamp \"+controllertesting.KafkaSecretNamespace+\"/kafkasecret-name-b9176d5f-receiver - potential race condition\"),\n\t\t\t\tcontrollertesting.NewKafkaSecretFailedReconciliationEvent(),\n\t\t\t},\n\t\t},\n\n\t\t//\n\t\t// KafkaSecret Receiver Deployment\n\t\t//\n\n\t\t{\n\t\t\tName: \"Reconcile Missing Receiver Deployment Success\",\n\t\t\tKey: controllertesting.KafkaSecretKey,\n\t\t\tObjects: []runtime.Object{\n\t\t\t\tcontrollertesting.NewKafkaSecret(controllertesting.WithKafkaSecretFinalizer),\n\t\t\t\tcontrollertesting.NewKafkaChannel(\n\t\t\t\t\tcontrollertesting.WithReceiverServiceReady,\n\t\t\t\t\tcontrollertesting.WithReceiverDeploymentReady,\n\t\t\t\t),\n\t\t\t\tcontrollertesting.NewKafkaChannelService(),\n\t\t\t\tcontrollertesting.NewKafkaChannelReceiverService(),\n\t\t\t},\n\t\t\tWantCreates: 
[]runtime.Object{controllertesting.NewKafkaChannelReceiverDeployment()},\n\t\t\tWantEvents: []string{controllertesting.NewKafkaSecretSuccessfulReconciliationEvent()},\n\t\t},\n\t\t{\n\t\t\tName: \"Reconcile Missing Receiver Deployment Error(Create)\",\n\t\t\tKey: controllertesting.KafkaSecretKey,\n\t\t\tObjects: []runtime.Object{\n\t\t\t\tcontrollertesting.NewKafkaSecret(controllertesting.WithKafkaSecretFinalizer),\n\t\t\t\tcontrollertesting.NewKafkaChannel(\n\t\t\t\t\tcontrollertesting.WithReceiverServiceReady,\n\t\t\t\t\tcontrollertesting.WithReceiverDeploymentReady,\n\t\t\t\t),\n\t\t\t\tcontrollertesting.NewKafkaChannelService(),\n\t\t\t\tcontrollertesting.NewKafkaChannelReceiverService(),\n\t\t\t},\n\t\t\tWithReactors: []clientgotesting.ReactionFunc{InduceFailure(\"create\", \"deployments\")},\n\t\t\tWantErr: true,\n\t\t\tWantCreates: []runtime.Object{controllertesting.NewKafkaChannelReceiverDeployment()},\n\t\t\tWantStatusUpdates: []clientgotesting.UpdateActionImpl{\n\t\t\t\t{\n\t\t\t\t\tObject: controllertesting.NewKafkaChannel(\n\t\t\t\t\t\tcontrollertesting.WithReceiverServiceReady,\n\t\t\t\t\t\tcontrollertesting.WithReceiverDeploymentFailed,\n\t\t\t\t\t),\n\t\t\t\t},\n\t\t\t},\n\t\t\tWantEvents: []string{\n\t\t\t\tEventf(corev1.EventTypeWarning, event.ReceiverDeploymentReconciliationFailed.String(), \"Failed To Reconcile Receiver Deployment: inducing failure for create deployments\"),\n\t\t\t\tcontrollertesting.NewKafkaSecretFailedReconciliationEvent(),\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tName: \"Reconcile Receiver Deployment With Deletion Timestamp\",\n\t\t\tKey: controllertesting.KafkaSecretKey,\n\t\t\tObjects: []runtime.Object{\n\t\t\t\tcontrollertesting.NewKafkaSecret(controllertesting.WithKafkaSecretFinalizer),\n\t\t\t\tcontrollertesting.NewKafkaChannelReceiverService(),\n\t\t\t\tcontrollertesting.NewKafkaChannelReceiverDeployment(controllertesting.WithDeletionTimestampDeployment),\n\t\t\t},\n\t\t\tWantErr: true,\n\t\t\tWantEvents: 
[]string{\n\t\t\t\tEventf(corev1.EventTypeWarning, event.ReceiverDeploymentReconciliationFailed.String(), \"Failed To Reconcile Receiver Deployment: encountered Receiver Deployment with DeletionTimestamp \"+controllertesting.KafkaSecretNamespace+\"/kafkasecret-name-b9176d5f-receiver - potential race condition\"),\n\t\t\t\tcontrollertesting.NewKafkaSecretFailedReconciliationEvent(),\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tName: \"Reconcile Receiver Deployment - Redeployment on ConfigMapHash change\",\n\t\t\tKey: controllertesting.KafkaSecretKey,\n\t\t\tObjects: []runtime.Object{\n\t\t\t\tcontrollertesting.NewKafkaSecret(controllertesting.WithKafkaSecretFinalizer),\n\t\t\t\tcontrollertesting.NewKafkaChannel(\n\t\t\t\t\tcontrollertesting.WithReceiverServiceReady,\n\t\t\t\t\tcontrollertesting.WithReceiverDeploymentReady,\n\t\t\t\t),\n\t\t\t\tcontrollertesting.NewKafkaChannelService(),\n\t\t\t\tcontrollertesting.NewKafkaChannelReceiverService(),\n\t\t\t\tcontrollertesting.NewKafkaChannelReceiverDeployment(controllertesting.WithConfigMapHash(\"initial-hash-to-be-overridden-by-controller\")),\n\t\t\t},\n\t\t\tWantUpdates: []clientgotesting.UpdateActionImpl{\n\t\t\t\tcontrollertesting.NewDeploymentUpdateActionImpl(controllertesting.NewKafkaChannelReceiverDeployment()),\n\t\t\t},\n\t\t\tWantEvents: []string{controllertesting.NewKafkaSecretSuccessfulReconciliationEvent()},\n\t\t},\n\t}\n\n\t// Run The TableTest Using The KafkaChannel Reconciler Provided By The Factory\n\tlogger := logtesting.TestLogger(t)\n\ttableTest.Test(t, controllertesting.MakeFactory(func(ctx context.Context, listers *controllertesting.Listers, cmw configmap.Watcher, configOptions []controllertesting.KafkaConfigOption) controller.Reconciler {\n\t\tr := &Reconciler{\n\t\t\tkubeClientset: kubeclient.Get(ctx),\n\t\t\tenvironment: controllertesting.NewEnvironment(),\n\t\t\tconfig: controllertesting.NewConfig(configOptions...),\n\t\t\tkafkaChannelClient: fakekafkaclient.Get(ctx),\n\t\t\tkafkachannelLister: 
listers.GetKafkaChannelLister(),\n\t\t\tdeploymentLister: listers.GetDeploymentLister(),\n\t\t\tserviceLister: listers.GetServiceLister(),\n\t\t\tkafkaConfigMapHash: controllertesting.ConfigMapHash,\n\t\t}\n\t\treturn kafkasecretinjection.NewReconciler(ctx, r.kubeClientset.CoreV1(), listers.GetSecretLister(), controller.GetEventRecorder(ctx), r)\n\t}, logger.Desugar()))\n}", "func TestServicesAPIWrongControllerGWClass(t *testing.T) {\n\t// create gateway, nothing happens\n\t// create gatewayclass, VS created\n\t// update to bad gatewayclass (wrong controller), VS deleted\n\tg := gomega.NewGomegaWithT(t)\n\n\tgwClassName, gatewayName, ns := \"avi-lb\", \"my-gateway\", \"default\"\n\tmodelName := \"admin/cluster--default-my-gateway\"\n\n\tSetupGateway(t, gatewayName, ns, gwClassName)\n\tSetupSvcApiService(t, \"svc\", ns, gatewayName, ns, \"TCP\")\n\n\tSetupGatewayClass(t, gwClassName, lib.SvcApiAviGatewayController, \"\")\n\n\tg.Eventually(func() string {\n\t\tgw, _ := SvcAPIClient.NetworkingV1alpha1().Gateways(ns).Get(context.TODO(), gatewayName, metav1.GetOptions{})\n\t\tif len(gw.Status.Addresses) > 0 {\n\t\t\treturn gw.Status.Addresses[0].Value\n\t\t}\n\t\treturn \"\"\n\t}, 40*time.Second).Should(gomega.Equal(\"10.250.250.1\"))\n\n\tgwclassUpdate := FakeGWClass{\n\t\tName: gwClassName,\n\t\tController: \"xyz\",\n\t}.GatewayClass()\n\tgwclassUpdate.ResourceVersion = \"2\"\n\tif _, err := lib.AKOControlConfig().ServicesAPIClientset().NetworkingV1alpha1().GatewayClasses().Update(context.TODO(), gwclassUpdate, metav1.UpdateOptions{}); err != nil {\n\t\tt.Fatalf(\"error in updating GatewayClass: %v\", err)\n\t}\n\n\tg.Eventually(func() int {\n\t\tgw, _ := SvcAPIClient.NetworkingV1alpha1().Gateways(ns).Get(context.TODO(), gatewayName, metav1.GetOptions{})\n\t\treturn len(gw.Status.Addresses)\n\t}, 40*time.Second).Should(gomega.Equal(0))\n\tg.Eventually(func() int {\n\t\tsvc, _ := KubeClient.CoreV1().Services(ns).Get(context.TODO(), \"svc\", 
metav1.GetOptions{})\n\t\treturn len(svc.Status.LoadBalancer.Ingress)\n\t}, 40*time.Second).Should(gomega.Equal(0))\n\n\tTeardownAdvLBService(t, \"svc\", ns)\n\tTeardownGateway(t, gatewayName, ns)\n\tTeardownGatewayClass(t, gwClassName)\n\tVerifyGatewayVSNodeDeletion(g, modelName)\n}", "func TestLoad(t *testing.T) {\n\tclientset, err := k8sutils.MustGetClientset()\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tctx, cancel := context.WithTimeout(context.Background(), 30*time.Minute)\n\tdefer cancel()\n\n\t// Create namespace if it doesn't exist\n\tnamespaceExists, err := k8sutils.NamespaceExists(ctx, clientset, namespace)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tif !namespaceExists {\n\t\terr = k8sutils.MustCreateNamespace(ctx, clientset, namespace)\n\t\tif err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t}\n\n\tdeployment, err := k8sutils.MustParseDeployment(noopDeploymentMap[*osType])\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tdeploymentsClient := clientset.AppsV1().Deployments(namespace)\n\terr = k8sutils.MustCreateDeployment(ctx, deploymentsClient, deployment)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tt.Log(\"Checking pods are running\")\n\terr = k8sutils.WaitForPodsRunning(ctx, clientset, namespace, podLabelSelector)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tt.Log(\"Repeating the scale up/down cycle\")\n\tfor i := 0; i < *iterations; i++ {\n\t\tt.Log(\"Iteration \", i)\n\t\tt.Log(\"Scale down deployment\")\n\t\terr = k8sutils.MustScaleDeployment(ctx, deploymentsClient, deployment, clientset, namespace, podLabelSelector, *scaleDownReplicas, *skipWait)\n\t\tif err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t\tt.Log(\"Scale up deployment\")\n\t\terr = k8sutils.MustScaleDeployment(ctx, deploymentsClient, deployment, clientset, namespace, podLabelSelector, *scaleUpReplicas, *skipWait)\n\t\tif err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t}\n\tt.Log(\"Checking pods are running and IP assigned\")\n\terr = k8sutils.WaitForPodsRunning(ctx, clientset, \"\", 
\"\")\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tif *validateStateFile {\n\t\tt.Run(\"Validate state file\", TestValidateState)\n\t}\n\n\tif *validateDualStack {\n\t\tt.Run(\"Validate dualstack overlay\", TestDualStackProperties)\n\t}\n}", "func CreatePods(c kclientset.Interface, appName string, ns string, labels map[string]string, spec kapiv1.PodSpec, maxCount int, tuning *TuningSetType) {\n\tfor i := 0; i < maxCount; i++ {\n\t\tframework.Logf(\"%v/%v : Creating pod\", i+1, maxCount)\n\t\t// Retry on pod creation failure\n\t\tfor retryCount := 0; retryCount < maxRetries; retryCount++ {\n\t\t\t_, err := c.CoreV1().Pods(ns).Create(&kapiv1.Pod{\n\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\tName: fmt.Sprintf(appName+\"-pod-%v\", i),\n\t\t\t\t\tNamespace: ns,\n\t\t\t\t\tLabels: labels,\n\t\t\t\t},\n\t\t\t\tSpec: spec,\n\t\t\t})\n\t\t\tif err == nil {\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tframework.ExpectNoError(err)\n\t\t}\n\t\tif tuning != nil {\n\t\t\t// If a rate limit has been defined we wait for N ms between creation\n\t\t\tif tuning.Pods.RateLimit.Delay != 0 {\n\t\t\t\tframework.Logf(\"Sleeping %d ms between podcreation.\", tuning.Pods.RateLimit.Delay)\n\t\t\t\ttime.Sleep(tuning.Pods.RateLimit.Delay * time.Millisecond)\n\t\t\t}\n\t\t\t// If a stepping tuningset has been defined in the config, we wait for the step of pods to be created, and pause\n\t\t\tif tuning.Pods.Stepping.StepSize != 0 && (i+1)%tuning.Pods.Stepping.StepSize == 0 {\n\t\t\t\tframework.Logf(\"Waiting for pods created this step to be running\")\n\t\t\t\tpods, err := exutil.WaitForPods(c.CoreV1().Pods(ns), exutil.ParseLabelsOrDie(mapToString(labels)), exutil.CheckPodIsRunningFn, i+1, tuning.Pods.Stepping.Timeout*time.Second)\n\t\t\t\tif err != nil {\n\t\t\t\t\tframework.Failf(\"Error in wait... 
%v\", err)\n\t\t\t\t} else if len(pods) < i+1 {\n\t\t\t\t\tframework.Failf(\"Only got %v out of %v\", len(pods), i+1)\n\t\t\t\t}\n\n\t\t\t\tframework.Logf(\"We have created %d pods and are now sleeping for %d seconds\", i+1, tuning.Pods.Stepping.Pause)\n\t\t\t\ttime.Sleep(tuning.Pods.Stepping.Pause * time.Second)\n\t\t\t}\n\t\t}\n\t}\n}", "func TestUpdateServiceBrokerCondition(t *testing.T) {\n\t// Anonymous struct fields:\n\t// name: short description of the test\n\t// input: broker object to test\n\t// status: new condition status\n\t// reason: condition reason\n\t// message: condition message\n\t// transitionTimeChanged: true if the test conditions should result in transition time change\n\tcases := []struct {\n\t\tname string\n\t\tinput *v1beta1.ClusterServiceBroker\n\t\tstatus v1beta1.ConditionStatus\n\t\treason string\n\t\tmessage string\n\t\ttransitionTimeChanged bool\n\t}{\n\n\t\t{\n\t\t\tname: \"initially unset\",\n\t\t\tinput: getTestClusterServiceBroker(),\n\t\t\tstatus: v1beta1.ConditionFalse,\n\t\t\ttransitionTimeChanged: true,\n\t\t},\n\t\t{\n\t\t\tname: \"not ready -> not ready\",\n\t\t\tinput: getTestClusterServiceBrokerWithStatus(v1beta1.ConditionFalse),\n\t\t\tstatus: v1beta1.ConditionFalse,\n\t\t\ttransitionTimeChanged: false,\n\t\t},\n\t\t{\n\t\t\tname: \"not ready -> not ready with reason and message change\",\n\t\t\tinput: getTestClusterServiceBrokerWithStatus(v1beta1.ConditionFalse),\n\t\t\tstatus: v1beta1.ConditionFalse,\n\t\t\treason: \"foo\",\n\t\t\tmessage: \"bar\",\n\t\t\ttransitionTimeChanged: false,\n\t\t},\n\t\t{\n\t\t\tname: \"not ready -> ready\",\n\t\t\tinput: getTestClusterServiceBrokerWithStatus(v1beta1.ConditionFalse),\n\t\t\tstatus: v1beta1.ConditionTrue,\n\t\t\ttransitionTimeChanged: true,\n\t\t},\n\t\t{\n\t\t\tname: \"ready -> ready\",\n\t\t\tinput: getTestClusterServiceBrokerWithStatus(v1beta1.ConditionTrue),\n\t\t\tstatus: v1beta1.ConditionTrue,\n\t\t\ttransitionTimeChanged: false,\n\t\t},\n\t\t{\n\t\t\tname: \"ready -> not 
ready\",\n\t\t\tinput: getTestClusterServiceBrokerWithStatus(v1beta1.ConditionTrue),\n\t\t\tstatus: v1beta1.ConditionFalse,\n\t\t\ttransitionTimeChanged: true,\n\t\t},\n\t}\n\n\tfor _, tc := range cases {\n\t\t_, fakeCatalogClient, _, testController, _ := newTestController(t, getTestCatalogConfig())\n\n\t\tinputClone := tc.input.DeepCopy()\n\n\t\terr := testController.updateClusterServiceBrokerCondition(tc.input, v1beta1.ServiceBrokerConditionReady, tc.status, tc.reason, tc.message)\n\t\tif err != nil {\n\t\t\tt.Errorf(\"%v: error updating broker condition: %v\", tc.name, err)\n\t\t\tcontinue\n\t\t}\n\n\t\tif !reflect.DeepEqual(tc.input, inputClone) {\n\t\t\tt.Errorf(\"%v: updating broker condition mutated input: %s\", tc.name, expectedGot(inputClone, tc.input))\n\t\t\tcontinue\n\t\t}\n\n\t\tactions := fakeCatalogClient.Actions()\n\t\tif ok := expectNumberOfActions(t, tc.name, actions, 1); !ok {\n\t\t\tcontinue\n\t\t}\n\n\t\tupdatedClusterServiceBroker, ok := expectUpdateStatus(t, tc.name, actions[0], tc.input)\n\t\tif !ok {\n\t\t\tcontinue\n\t\t}\n\n\t\tupdateActionObject, ok := updatedClusterServiceBroker.(*v1beta1.ClusterServiceBroker)\n\t\tif !ok {\n\t\t\tt.Errorf(\"%v: couldn't convert to broker\", tc.name)\n\t\t\tcontinue\n\t\t}\n\n\t\tvar initialTs metav1.Time\n\t\tif len(inputClone.Status.Conditions) != 0 {\n\t\t\tinitialTs = inputClone.Status.Conditions[0].LastTransitionTime\n\t\t}\n\n\t\tif e, a := 1, len(updateActionObject.Status.Conditions); e != a {\n\t\t\tt.Errorf(\"%v: %s\", tc.name, expectedGot(e, a))\n\t\t}\n\n\t\toutputCondition := updateActionObject.Status.Conditions[0]\n\t\tnewTs := outputCondition.LastTransitionTime\n\n\t\tif tc.transitionTimeChanged && initialTs == newTs {\n\t\t\tt.Errorf(\"%v: transition time didn't change when it should have\", tc.name)\n\t\t\tcontinue\n\t\t} else if !tc.transitionTimeChanged && initialTs != newTs {\n\t\t\tt.Errorf(\"%v: transition time changed when it shouldn't have\", 
tc.name)\n\t\t\tcontinue\n\t\t}\n\t\tif e, a := tc.reason, outputCondition.Reason; e != \"\" && e != a {\n\t\t\tt.Errorf(\"%v: condition reasons didn't match; %s\", tc.name, expectedGot(e, a))\n\t\t\tcontinue\n\t\t}\n\t\tif e, a := tc.message, outputCondition.Message; e != \"\" && e != a {\n\t\t\tt.Errorf(\"%v: condition message didn't match; %s\", tc.name, expectedGot(e, a))\n\t\t}\n\t}\n}", "func TestIssue351MultipleJobRun(t *testing.T) {\n\tctx, err := NewContext(t)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tdefer ctx.Cleanup()\n\n\t// Create initial CR to generate an initial job and create the initial k8s resources\n\n\tgitops := &gitopsv1alpha1.GitOpsConfig{\n\t\tTypeMeta: metav1.TypeMeta{\n\t\t\tKind: \"GitOpsConfig\",\n\t\t\tAPIVersion: \"eunomia.kohls.io/v1alpha1\",\n\t\t},\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tName: \"gitops-issue351\",\n\t\t\tNamespace: ctx.namespace,\n\t\t},\n\t\tSpec: gitopsv1alpha1.GitOpsConfigSpec{\n\t\t\tTemplateSource: gitopsv1alpha1.GitConfig{\n\t\t\t\tURI: ctx.eunomiaURI,\n\t\t\t\tRef: ctx.eunomiaRef,\n\t\t\t\tContextDir: \"test/e2e/testdata/hello-a\",\n\t\t\t},\n\t\t\tParameterSource: gitopsv1alpha1.GitConfig{\n\t\t\t\tURI: ctx.eunomiaURI,\n\t\t\t\tRef: ctx.eunomiaRef,\n\t\t\t\tContextDir: \"test/e2e/testdata/empty-yaml\",\n\t\t\t},\n\t\t\tTriggers: []gitopsv1alpha1.GitOpsTrigger{\n\t\t\t\t{Type: \"Change\"},\n\t\t\t},\n\t\t\tTemplateProcessorImage: \"quay.io/kohlstechnology/eunomia-base:dev\",\n\t\t\tResourceHandlingMode: \"Apply\",\n\t\t\tResourceDeletionMode: \"Delete\",\n\t\t\tServiceAccountRef: \"eunomia-operator\",\n\t\t},\n\t}\n\n\terr = framework.Global.Client.Create(ctx, gitops, &framework.CleanupOptions{TestContext: ctx.TestCtx, Timeout: timeout, RetryInterval: retryInterval})\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t// When the initial job is created, we will use it as a template to create two additional jobs at the same time\n\terr = WaitForJobCreation(ctx.namespace, \"gitopsconfig-gitops-issue351-\", 
framework.Global.KubeClient)\n\tif err != nil {\n\t\tt.Error(err)\n\t}\n\n\tgitopsJob, err := GetJob(ctx.namespace, \"gitopsconfig-gitops-issue351-\", framework.Global.KubeClient)\n\n\tfirstJob := &batchv1.Job{\n\t\tTypeMeta: metav1.TypeMeta{\n\t\t\tKind: \"GitOpsConfig\",\n\t\t\tAPIVersion: \"eunomia.kohls.io/v1alpha1\",\n\t\t},\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tName: \"first-job\",\n\t\t\tNamespace: ctx.namespace,\n\t\t},\n\t\tSpec: gitopsJob.Spec,\n\t}\n\t// The deep copy of the job keeps the selector and selector label that has to be generated by k8s.\n\t// Trying to create a job with those set will fail.\n\tfirstJob.Spec.Template.SetLabels(map[string]string{})\n\tfirstJob.Spec.Selector.Reset()\n\n\terr = framework.Global.Client.Create(ctx, firstJob, &framework.CleanupOptions{TestContext: ctx.TestCtx, Timeout: timeout, RetryInterval: retryInterval})\n\tif err != nil {\n\t\tt.Error(err)\n\t}\n\n\tsecondJob := &batchv1.Job{\n\t\tTypeMeta: metav1.TypeMeta{\n\t\t\tKind: \"GitOpsConfig\",\n\t\t\tAPIVersion: \"eunomia.kohls.io/v1alpha1\",\n\t\t},\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tName: \"second-job\",\n\t\t\tNamespace: ctx.namespace,\n\t\t},\n\t\tSpec: gitopsJob.Spec,\n\t}\n\tsecondJob.Spec.Template.SetLabels(map[string]string{})\n\tsecondJob.Spec.Selector.Reset()\n\n\terr = framework.Global.Client.Create(ctx, secondJob, &framework.CleanupOptions{TestContext: ctx.TestCtx, Timeout: timeout, RetryInterval: retryInterval})\n\tif err != nil {\n\t\tt.Error(err)\n\t}\n\n\t// Wait to make sure both of the jobs finish running\n\terr = wait.Poll(retryInterval, 60*time.Second, func() (done bool, err error) {\n\t\tjobOne, _ := GetJob(ctx.namespace, \"first-job\", framework.Global.KubeClient)\n\t\tjobTwo, _ := GetJob(ctx.namespace, \"second-job\", framework.Global.KubeClient)\n\n\t\tswitch {\n\t\tcase jobOne.Status.Succeeded == 1 && jobTwo.Status.Succeeded == 1:\n\t\t\tt.Logf(\"Both jobs are done\")\n\t\t\treturn true, nil\n\t\tdefault:\n\t\t\tt.Logf(\"Both 
jobs are not done\")\n\t\t\treturn false, nil\n\t\t}\n\t})\n\tif err != nil {\n\t\tt.Error(err)\n\t}\n\n\tdeploymentList, err := framework.Global.KubeClient.AppsV1().Deployments(ctx.namespace).List(metav1.ListOptions{})\n\tif err != nil {\n\t\tt.Error(err)\n\t}\n\tif len(deploymentList.Items) != 1 {\n\t\tt.Errorf(\"There was only %d deployments when we were expecting 1\", len(deploymentList.Items))\n\t}\n\tif deploymentList.Items[0].GetDeletionTimestamp() != nil {\n\t\tt.Errorf(\"The deployment has been marked for deletion\")\n\t}\n}", "func TestProject_CreateProject_ReportsErrorIfNoSuccessForLongTime(t *testing.T) {\n\tctrl := gomock.NewController(t)\n\tdefer ctrl.Finish()\n\n\tcoreClient := azdosdkmocks.NewMockCoreClient(ctrl)\n\toperationsClient := azdosdkmocks.NewMockOperationsClient(ctrl)\n\tclients := &client.AggregatedClient{\n\t\tCoreClient: coreClient,\n\t\tOperationsClient: operationsClient,\n\t\tCtx: context.Background(),\n\t}\n\n\texpectedProjectCreateArgs := core.QueueCreateProjectArgs{ProjectToCreate: &testProject}\n\tmockedOperationReference := operations.OperationReference{Id: &testID}\n\texpectedOperationArgs := operations.GetOperationArgs{OperationId: &testID}\n\n\tcoreClient.\n\t\tEXPECT().\n\t\tQueueCreateProject(clients.Ctx, expectedProjectCreateArgs).\n\t\tReturn(&mockedOperationReference, nil).\n\t\tTimes(1)\n\n\t// the operation will forever be \"in progress\"\n\tstatus := operationWithStatus(operations.OperationStatusValues.InProgress)\n\toperationsClient.\n\t\tEXPECT().\n\t\tGetOperation(clients.Ctx, expectedOperationArgs).\n\t\tReturn(&status, nil).\n\t\tMinTimes(1)\n\n\terr := createProject(clients, &testProject, 20*time.Second)\n\trequire.NotNil(t, err, \"Expected error indicating timeout\")\n}", "func TestConstructSlowGo(t *testing.T) {\n\tpathEnv := testComponentSlowPathEnv(t)\n\n\t// TODO[pulumi/pulumi#5455]: Dynamic providers fail to load when used from multi-lang components.\n\t// Until we've addressed this, set 
PULUMI_TEST_YARN_LINK_PULUMI, which tells the integration test\n\t// module to run `yarn install && yarn link @pulumi/pulumi` in the Go program's directory, allowing\n\t// the Node.js dynamic provider plugin to load.\n\t// When the underlying issue has been fixed, the use of this environment variable inside the integration\n\t// test module should be removed.\n\tconst testYarnLinkPulumiEnv = \"PULUMI_TEST_YARN_LINK_PULUMI=true\"\n\n\topts := &integration.ProgramTestOptions{\n\t\tEnv: []string{pathEnv, testYarnLinkPulumiEnv},\n\t\tDir: filepath.Join(\"construct_component_slow\", \"go\"),\n\t\tDependencies: []string{\n\t\t\t\"github.com/pulumi/pulumi/sdk/v3\",\n\t\t},\n\t\tQuick: true,\n\t\tExtraRuntimeValidation: func(t *testing.T, stackInfo integration.RuntimeValidationStackInfo) {\n\t\t\tassert.NotNil(t, stackInfo.Deployment)\n\t\t\tif assert.Equal(t, 5, len(stackInfo.Deployment.Resources)) {\n\t\t\t\tstackRes := stackInfo.Deployment.Resources[0]\n\t\t\t\tassert.NotNil(t, stackRes)\n\t\t\t\tassert.Equal(t, resource.RootStackType, stackRes.Type)\n\t\t\t\tassert.Equal(t, \"\", string(stackRes.Parent))\n\t\t\t}\n\t\t},\n\t}\n\tintegration.ProgramTest(t, opts)\n}", "func TestSetReadyConcurrent(t *testing.T) {\n\tvar ready readiness\n\twg := sync.WaitGroup{}\n\n\twg.Add(1)\n\tgo func() {\n\t\tdefer wg.Done()\n\t\t_ = ready.IsReady()\n\t}()\n\n\twg.Add(1)\n\tgo func() {\n\t\tdefer wg.Done()\n\t\tready.SetReady(true)\n\t}()\n\n\twg.Wait()\n\trequire.True(t, ready.IsReady())\n}", "func TestConcurrencyLimit(t *testing.T) {\n\tt.Parallel()\n\tctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)\n\tt.Cleanup(cancel)\n\n\tconfig := Config{MaxConcurrency: 4}\n\tcountdown := NewCountdown(config.MaxConcurrency * 2)\n\tprocess := NewMockEventsProcess(ctx, t, config, func(ctx context.Context, event types.Event) error {\n\t\tdefer countdown.Decrement()\n\t\ttime.Sleep(time.Second)\n\t\treturn trace.Wrap(ctx.Err())\n\t})\n\n\ttimeBefore := time.Now()\n\tfor i := 
0; i < config.MaxConcurrency; i++ {\n\t\tresource, err := types.NewAccessRequest(fmt.Sprintf(\"REQ-%v\", i+1), \"foo\", \"admin\")\n\t\trequire.NoError(t, err)\n\n\t\tfor j := 0; j < 2; j++ {\n\t\t\tprocess.Events.Fire(types.Event{Type: types.OpPut, Resource: resource})\n\t\t}\n\t}\n\trequire.NoError(t, countdown.Wait(ctx))\n\n\ttimeAfter := time.Now()\n\tassert.InDelta(t, 4*time.Second, timeAfter.Sub(timeBefore), float64(750*time.Millisecond))\n}", "func TestProject_CreateProject_DoesNotSwallowErrorFromFailedAsyncStatusCheckCall(t *testing.T) {\n\tctrl := gomock.NewController(t)\n\tdefer ctrl.Finish()\n\n\tcoreClient := azdosdkmocks.NewMockCoreClient(ctrl)\n\toperationsClient := azdosdkmocks.NewMockOperationsClient(ctrl)\n\tclients := &client.AggregatedClient{\n\t\tCoreClient: coreClient,\n\t\tOperationsClient: operationsClient,\n\t\tCtx: context.Background(),\n\t}\n\n\texpectedProjectCreateArgs := core.QueueCreateProjectArgs{ProjectToCreate: &testProject}\n\tmockedOperationReference := operations.OperationReference{Id: &testID}\n\texpectedOperationArgs := operations.GetOperationArgs{OperationId: &testID}\n\n\tcoreClient.\n\t\tEXPECT().\n\t\tQueueCreateProject(clients.Ctx, expectedProjectCreateArgs).\n\t\tReturn(&mockedOperationReference, nil).\n\t\tTimes(1)\n\n\toperationsClient.\n\t\tEXPECT().\n\t\tGetOperation(clients.Ctx, expectedOperationArgs).\n\t\tReturn(nil, errors.New(\"GetOperation() failed\")).\n\t\tTimes(1)\n\n\terr := createProject(clients, &testProject, 10*time.Minute)\n\trequire.Equal(t, \" waiting for project ready. 
GetOperation() failed \", err.Error())\n}", "func TestNamespacePreExisting(t *testing.T) {\n\ttestName := \"TestNamespacePreExisting\"\n\tbeforeTest()\n\t// kinds to check for status\n\tvar kindsToCheckStatus = map[string]bool{\n\t\tAPPLICATION: true,\n\t\t\"Deployment\": true,\n\t\t\"Service\": true,\n\t}\n\n\t// starting resources to pre-populate\n\tvar files = []string{\n\t\t/* 0 */ KappnavConfigFile,\n\t\t/* 1 */ CrdApplication,\n\t\t/* 2 */ ns1Service,\n\t\t/* 3 */ ns1Deployment,\n\t\t/* 4 */ ns2Service,\n\t\t/* 5 */ ns2Deployment,\n\t}\n\titeration0IDs, err := readResourceIDs(files)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t/* Iteration 0: no applications. No resources should have status */\n\ttestActions := newTestActions(testName, kindsToCheckStatus)\n\tvar emptyIDs = []resourceID{}\n\n\t// status should not be checked when there are not applications\n\titeration0IDs[2].expectedStatus = NoStatus\n\titeration0IDs[3].expectedStatus = NoStatus\n\titeration0IDs[4].expectedStatus = NoStatus\n\titeration0IDs[5].expectedStatus = NoStatus\n\ttestActions.addIteration(iteration0IDs, emptyIDs)\n\n\t// iteration 1: add application to NS_1. 
All in NS_1 is normal.\n\t// All in NS_2 remains NoStatus\n\tres, err := readOneResourceID(ns1App)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tarrayLength := len(iteration0IDs)\n\tvar iteration1IDs = make([]resourceID, arrayLength, arrayLength)\n\tcopy(iteration1IDs, iteration0IDs)\n\titeration1IDs = append(iteration1IDs, res)\n\tarrayLength++\n\titeration1IDs[2].expectedStatus = Normal\n\titeration1IDs[3].expectedStatus = Normal\n\titeration1IDs[6].expectedStatus = Normal\n\ttestActions.addIteration(iteration1IDs, emptyIDs)\n\n\t/* iteration 4: clean up */\n\ttestActions.addIteration(emptyIDs, emptyIDs)\n\n\tclusterWatcher, err := createClusterWatcher(iteration0IDs, testActions, StatusFailureRate)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tdefer clusterWatcher.shutDown()\n\n\t// make all trasition of testAction\n\terr = testActions.transitionAll()\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n}", "func (tc *testContext) testNodesBecomeReadyAndSchedulable(t *testing.T) {\n\tnodes := gc.allNodes()\n\tfor _, node := range nodes {\n\t\tt.Run(node.GetName(), func(t *testing.T) {\n\t\t\terr := wait.PollImmediate(retry.Interval, retry.ResourceChangeTimeout, func() (done bool, err error) {\n\t\t\t\tfoundNode, err := tc.client.K8s.CoreV1().Nodes().Get(context.TODO(), node.GetName(), meta.GetOptions{})\n\t\t\t\trequire.NoError(t, err)\n\t\t\t\treturn tc.nodeReadyAndSchedulable(*foundNode), nil\n\t\t\t})\n\t\t\tassert.NoError(t, err)\n\t\t})\n\t}\n}", "func TestConcurrencyAndRaceConditions(t *testing.T) {\n\t//\tdefer profile.Start(profile.MemProfile).Stop()\n\tfmt.Println(\"Testing for concurrency and race conditions\")\n\tq := New()\n\tq.Sched()\n\tnotifyChannel := make(chan bool)\n\tfor i := 0; i < 1000; i++ {\n\t\tgo func(j int) {\n\t\t\tfor j := 0; j < 2000; j++ {\n\t\t\t\tjd := jobData{\n\t\t\t\t\tstatusChannel: notifyChannel,\n\t\t\t\t\tdata: int32(j),\n\t\t\t\t}\n\t\t\t\tjob := Job{\n\t\t\t\t\tJobData: jd,\n\t\t\t\t\tTaskCreator: 
taskHandler{},\n\t\t\t\t}\n\t\t\t\tq.PushChannel() <- job\n\t\t\t}\n\t\t}(i)\n\t}\n\tcounter := 0\n\tdone := false\n\tfor {\n\t\tselect {\n\t\tcase <-notifyChannel:\n\t\t\tcounter++\n\t\t\tif counter == 2000000 {\n\t\t\t\tdone = true\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\tif done {\n\t\t\tbreak\n\t\t}\n\t}\n\tassert.Equal(t, 1999000000, int(sum))\n\tassert.Equal(t, 2000000, counter)\n}", "func containerGCTest(f *framework.Framework, test testRun) {\n\tvar runtime internalapi.RuntimeService\n\tginkgo.BeforeEach(func() {\n\t\tvar err error\n\t\truntime, _, err = getCRIClient()\n\t\tframework.ExpectNoError(err)\n\t})\n\tfor _, pod := range test.testPods {\n\t\t// Initialize the getContainerNames function to use CRI runtime client.\n\t\tpod.getContainerNames = func() ([]string, error) {\n\t\t\trelevantContainers := []string{}\n\t\t\tcontainers, err := runtime.ListContainers(context.Background(), &runtimeapi.ContainerFilter{\n\t\t\t\tLabelSelector: map[string]string{\n\t\t\t\t\ttypes.KubernetesPodNameLabel: pod.podName,\n\t\t\t\t\ttypes.KubernetesPodNamespaceLabel: f.Namespace.Name,\n\t\t\t\t},\n\t\t\t})\n\t\t\tif err != nil {\n\t\t\t\treturn relevantContainers, err\n\t\t\t}\n\t\t\tfor _, container := range containers {\n\t\t\t\trelevantContainers = append(relevantContainers, container.Labels[types.KubernetesContainerNameLabel])\n\t\t\t}\n\t\t\treturn relevantContainers, nil\n\t\t}\n\t}\n\n\tginkgo.Context(fmt.Sprintf(\"Garbage Collection Test: %s\", test.testName), func() {\n\t\tginkgo.BeforeEach(func(ctx context.Context) {\n\t\t\trealPods := getPods(test.testPods)\n\t\t\te2epod.NewPodClient(f).CreateBatch(ctx, realPods)\n\t\t\tginkgo.By(\"Making sure all containers restart the specified number of times\")\n\t\t\tgomega.Eventually(ctx, func(ctx context.Context) error {\n\t\t\t\tfor _, podSpec := range test.testPods {\n\t\t\t\t\terr := verifyPodRestartCount(ctx, f, podSpec.podName, podSpec.numContainers, podSpec.restartCount)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\treturn 
err\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\treturn nil\n\t\t\t}, setupDuration, runtimePollInterval).Should(gomega.BeNil())\n\t\t})\n\n\t\tginkgo.It(\"Should eventually garbage collect containers when we exceed the number of dead containers per container\", func(ctx context.Context) {\n\t\t\ttotalContainers := 0\n\t\t\tfor _, pod := range test.testPods {\n\t\t\t\ttotalContainers += pod.numContainers*2 + 1\n\t\t\t}\n\t\t\tgomega.Eventually(ctx, func() error {\n\t\t\t\ttotal := 0\n\t\t\t\tfor _, pod := range test.testPods {\n\t\t\t\t\tcontainerNames, err := pod.getContainerNames()\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\treturn err\n\t\t\t\t\t}\n\t\t\t\t\ttotal += len(containerNames)\n\t\t\t\t\t// Check maxPerPodContainer for each container in the pod\n\t\t\t\t\tfor i := 0; i < pod.numContainers; i++ {\n\t\t\t\t\t\tcontainerCount := 0\n\t\t\t\t\t\tfor _, containerName := range containerNames {\n\t\t\t\t\t\t\tif containerName == pod.getContainerName(i) {\n\t\t\t\t\t\t\t\tcontainerCount++\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\t\t\tif containerCount > maxPerPodContainer+1 {\n\t\t\t\t\t\t\treturn fmt.Errorf(\"expected number of copies of container: %s, to be <= maxPerPodContainer: %d; list of containers: %v\",\n\t\t\t\t\t\t\t\tpod.getContainerName(i), maxPerPodContainer, containerNames)\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\t//Check maxTotalContainers. 
Currently, the default is -1, so this will never happen until we can configure maxTotalContainers\n\t\t\t\tif maxTotalContainers > 0 && totalContainers <= maxTotalContainers && total > maxTotalContainers {\n\t\t\t\t\treturn fmt.Errorf(\"expected total number of containers: %v, to be <= maxTotalContainers: %v\", total, maxTotalContainers)\n\t\t\t\t}\n\t\t\t\treturn nil\n\t\t\t}, garbageCollectDuration, runtimePollInterval).Should(gomega.BeNil())\n\n\t\t\tif maxPerPodContainer >= 2 && maxTotalContainers < 0 { // make sure constraints wouldn't make us gc old containers\n\t\t\t\tginkgo.By(\"Making sure the kubelet consistently keeps around an extra copy of each container.\")\n\t\t\t\tgomega.Consistently(ctx, func() error {\n\t\t\t\t\tfor _, pod := range test.testPods {\n\t\t\t\t\t\tcontainerNames, err := pod.getContainerNames()\n\t\t\t\t\t\tif err != nil {\n\t\t\t\t\t\t\treturn err\n\t\t\t\t\t\t}\n\t\t\t\t\t\tfor i := 0; i < pod.numContainers; i++ {\n\t\t\t\t\t\t\tcontainerCount := 0\n\t\t\t\t\t\t\tfor _, containerName := range containerNames {\n\t\t\t\t\t\t\t\tif containerName == pod.getContainerName(i) {\n\t\t\t\t\t\t\t\t\tcontainerCount++\n\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\tif pod.restartCount > 0 && containerCount < maxPerPodContainer+1 {\n\t\t\t\t\t\t\t\treturn fmt.Errorf(\"expected pod %v to have extra copies of old containers\", pod.podName)\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t\treturn nil\n\t\t\t\t}, garbageCollectDuration, runtimePollInterval).Should(gomega.BeNil())\n\t\t\t}\n\t\t})\n\n\t\tginkgo.AfterEach(func(ctx context.Context) {\n\t\t\tfor _, pod := range test.testPods {\n\t\t\t\tginkgo.By(fmt.Sprintf(\"Deleting Pod %v\", pod.podName))\n\t\t\t\te2epod.NewPodClient(f).DeleteSync(ctx, pod.podName, metav1.DeleteOptions{}, e2epod.DefaultPodDeletionTimeout)\n\t\t\t}\n\n\t\t\tginkgo.By(\"Making sure all containers get cleaned up\")\n\t\t\tgomega.Eventually(ctx, func() error {\n\t\t\t\tfor _, pod := range test.testPods 
{\n\t\t\t\t\tcontainerNames, err := pod.getContainerNames()\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\treturn err\n\t\t\t\t\t}\n\t\t\t\t\tif len(containerNames) > 0 {\n\t\t\t\t\t\treturn fmt.Errorf(\"%v containers still remain\", containerNames)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\treturn nil\n\t\t\t}, garbageCollectDuration, runtimePollInterval).Should(gomega.BeNil())\n\n\t\t\tif ginkgo.CurrentSpecReport().Failed() && framework.TestContext.DumpLogsOnFailure {\n\t\t\t\tlogNodeEvents(ctx, f)\n\t\t\t\tlogPodEvents(ctx, f)\n\t\t\t}\n\t\t})\n\t})\n}", "func DeploymentStatusCheck(experimentsDetails *experimentTypes.ExperimentDetails, clients clients.ClientSets, appsUnderTest []experimentTypes.ApplicationUnderTest, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails) error {\n\n\t//Record start timestamp\n\tChaosStartTimeStamp := time.Now().Unix()\n\tisFailed := false\n\n\terr = retry.\n\t\tTimes(uint(experimentsDetails.ChaosDuration / experimentsDetails.Delay)).\n\t\tWait(time.Duration(experimentsDetails.Delay) * time.Second).\n\t\tTry(func(attempt uint) error {\n\t\t\tfor _, app := range appsUnderTest {\n\t\t\t\tdeployment, err := appsv1DeploymentClient.Get(app.AppName, metav1.GetOptions{})\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn errors.Errorf(\"Unable to find the deployment with name %v, err: %v\", app.AppName, err)\n\t\t\t\t}\n\t\t\t\tlog.Infof(\"Deployment's Available Replica Count is %v\", deployment.Status.AvailableReplicas)\n\t\t\t\tif int(deployment.Status.AvailableReplicas) != app.ReplicaCount {\n\t\t\t\t\tisFailed = true\n\t\t\t\t\treturn errors.Errorf(\"Application %s is not scaled yet, err: %v\", app.AppName, err)\n\t\t\t\t}\n\t\t\t}\n\t\t\tisFailed = false\n\t\t\treturn nil\n\t\t})\n\n\tif isFailed {\n\t\terr = AutoscalerRecoveryInDeployment(experimentsDetails, clients, appsUnderTest)\n\t\tif err != nil {\n\t\t\treturn errors.Errorf(\"Unable to perform autoscaling, err: %v\", err)\n\t\t}\n\t\treturn 
errors.Errorf(\"Failed to scale the application\")\n\t}\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// run the probes during chaos\n\tif len(resultDetails.ProbeDetails) != 0 {\n\t\tif err = probe.RunProbes(chaosDetails, clients, resultDetails, \"DuringChaos\", eventsDetails); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\t//ChaosCurrentTimeStamp contains the current timestamp\n\tChaosCurrentTimeStamp := time.Now().Unix()\n\tif int(ChaosCurrentTimeStamp-ChaosStartTimeStamp) <= experimentsDetails.ChaosDuration {\n\t\tlog.Info(\"[Wait]: Waiting for completion of chaos duration\")\n\t\ttime.Sleep(time.Duration(experimentsDetails.ChaosDuration-int(ChaosCurrentTimeStamp-ChaosStartTimeStamp)) * time.Second)\n\t}\n\n\treturn nil\n}", "func TestCancelJobAllWorkersOccupied(t *testing.T) {\n\ts := newTestingServer()\n\tmux := SetupRoutes(s)\n\n\tjobs := []fuq.JobDescription{\n\t\t{\n\t\t\tName: \"job1\",\n\t\t\tNumTasks: 16,\n\t\t\tWorkingDir: \"/foo/bar\",\n\t\t\tLoggingDir: \"/foo/bar/logs\",\n\t\t\tCommand: \"/foo/foo_it.sh\",\n\t\t},\n\t\t{\n\t\t\tName: \"job2\",\n\t\t\tNumTasks: 27,\n\t\t\tWorkingDir: \"/foo/baz\",\n\t\t\tLoggingDir: \"/foo/baz/logs\",\n\t\t\tCommand: \"/foo/baz_it.sh\",\n\t\t},\n\t}\n\n\tfor i, j := range jobs {\n\t\tjobs[i].JobId = addJob(t, s.Foreman, j)\n\t\tjobs[i].Status = fuq.Waiting\n\t}\n\n\torigJobs := make([]fuq.JobDescription, len(jobs))\n\tcopy(origJobs, jobs)\n\n\twsConn, client := newTestClient(t, s)\n\tdefer wsConn.Close()\n\tdefer client.Close()\n\n\tni := client.NodeInfo\n\t_ = ni\n\n\tmsgCh := make(chan proto.Message)\n\ttaskCh := make(chan []fuq.Task)\n\n\tvar nproc, nrun uint16 = 8, 0\n\tvar running []fuq.Task\n\tvar toCancel []int\n\n\tclient.OnMessageFunc(proto.MTypeJob, func(msg proto.Message) proto.Message {\n\t\ttasks := msg.Data.([]fuq.Task)\n\n\t\tif len(tasks) > int(nproc) {\n\t\t\tpanic(\"invalid number of tasks\")\n\t\t}\n\n\t\tt.Logf(\"onJob received %d tasks: %v\", len(tasks), tasks)\n\t\tnproc -= 
uint16(len(tasks))\n\t\tnrun += uint16(len(tasks))\n\n\t\trepl := proto.OkayMessage(nproc, nrun, msg.Seq)\n\n\t\trunning = append(running, tasks...)\n\n\t\ttaskCh <- tasks\n\t\treturn repl\n\t})\n\n\tclient.OnMessageFunc(proto.MTypeCancel, func(msg proto.Message) proto.Message {\n\t\tpairs := msg.Data.([]fuq.TaskPair)\n\n\t\tncancel := 0\n\t\tfor i, t := range running {\n\t\t\tfor _, p := range pairs {\n\t\t\t\tif t.JobId != p.JobId {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\n\t\t\t\tif p.Task >= 0 && t.Task != p.Task {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\n\t\t\t\ttoCancel = append(toCancel, i)\n\t\t\t\tncancel++\n\t\t\t}\n\t\t}\n\t\tmsgCh <- msg\n\n\t\trepl := proto.OkayMessage(uint16(ncancel), 0, msg.Seq)\n\t\treturn repl\n\t})\n\n\tctx, cancel := context.WithCancel(context.Background())\n\tdefer cancel()\n\tfuqtest.GoPanicOnError(ctx, client.runConversationLoop)\n\n\tmsg, err := client.SendHello(ctx, proto.HelloData{\n\t\tNumProcs: 8,\n\t\tRunning: nil,\n\t})\n\n\tif err != nil {\n\t\tt.Fatalf(\"error in HELLO: %v\", err)\n\t}\n\n\tnp, nr := msg.AsOkay()\n\tif np != 8 || nr != 0 {\n\t\tt.Fatalf(\"expected OK(8|0), but received OK(%d|%d)\", nproc, nrun)\n\t}\n\n\ttasks := <-taskCh\n\t// JOB message received\n\tif len(tasks) != 8 {\n\t\tt.Fatalf(\"expected 8 task, but received %d tasks\", len(tasks))\n\t}\n\n\t/** Cancel job **/\n\tenv := ClientRequestEnvelope{\n\t\tAuth: fuq.Client{Password: testingPass, Client: \"testing\"},\n\t\tMsg: fuq.ClientStateChangeReq{\n\t\t\tJobIds: []fuq.JobId{jobs[0].JobId},\n\t\t\tAction: \"cancel\",\n\t\t},\n\t}\n\n\trepl := []fuq.JobStateChangeResponse{}\n\n\troundTrip{\n\t\tT: t,\n\t\tMsg: env,\n\t\tDst: &repl,\n\t\tTarget: \"/\" + ClientJobStatePath,\n\t}.ExpectOK(mux.ServeHTTP)\n\tt.Logf(\"response is %v\", repl)\n\n\texpectedRepl := []fuq.JobStateChangeResponse{\n\t\t{jobs[0].JobId, fuq.Running, fuq.Cancelled},\n\t}\n\n\tif !reflect.DeepEqual(repl, expectedRepl) {\n\t\tt.Fatalf(\"expected response '%v' but found 
'%v'\",\n\t\t\texpectedRepl, repl)\n\t}\n\n\t/** Receive CANCEL message **/\n\tmsg = <-msgCh\n\t// expect CANCEL message\n\n\texpected := proto.Message{\n\t\tType: proto.MTypeCancel,\n\t\tSeq: msg.Seq,\n\t\tData: []fuq.TaskPair{{jobs[0].JobId, -1}},\n\t}\n\n\tif !reflect.DeepEqual(msg, expected) {\n\t\tt.Fatalf(\"expected '%v', but found '%v'\", expected, msg)\n\t}\n}", "func TestUnitToContainerStatus(t *testing.T) {\n\ttestCases := []struct {\n\t\tunitState api.UnitState\n\t}{\n\t\t{\n\t\t\tunitState: api.UnitState{\n\t\t\t\tWaiting: &api.UnitStateWaiting{\n\t\t\t\t\tReason: \"waiting to start\",\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tunitState: api.UnitState{\n\t\t\t\tRunning: &api.UnitStateRunning{\n\t\t\t\t\tStartedAt: api.Now(),\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tunitState: api.UnitState{\n\t\t\t\tTerminated: &api.UnitStateTerminated{\n\t\t\t\t\tExitCode: int32(rand.Intn(256)),\n\t\t\t\t\tFinishedAt: api.Now(),\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\tfor _, tc := range testCases {\n\t\tus := api.UnitStatus{\n\t\t\tName: \"myunit\",\n\t\t\tRestartCount: 0,\n\t\t\tImage: \"elotl/myimage\",\n\t\t\tState: tc.unitState,\n\t\t}\n\t\tcs := unitToContainerStatus(us)\n\t\tif us.State.Waiting != nil {\n\t\t\tassert.NotNil(t, cs.State.Waiting)\n\t\t\tassert.Nil(t, cs.State.Running)\n\t\t\tassert.Nil(t, cs.State.Terminated)\n\t\t\tassert.Equal(t, us.State.Waiting.Reason, cs.State.Waiting.Reason)\n\t\t}\n\t\tif us.State.Running != nil {\n\t\t\tassert.NotNil(t, cs.State.Running)\n\t\t\tassert.Nil(t, cs.State.Waiting)\n\t\t\tassert.Nil(t, cs.State.Terminated)\n\t\t\tassert.Equal(\n\t\t\t\tt,\n\t\t\t\tus.State.Running.StartedAt.Time,\n\t\t\t\tcs.State.Running.StartedAt.Time)\n\t\t}\n\t\tif us.State.Terminated != nil {\n\t\t\tassert.NotNil(t, cs.State.Terminated)\n\t\t\tassert.Nil(t, cs.State.Running)\n\t\t\tassert.Nil(t, 
cs.State.Waiting)\n\t\t\tassert.Equal(\n\t\t\t\tt,\n\t\t\t\tus.State.Terminated.ExitCode,\n\t\t\t\tcs.State.Terminated.ExitCode)\n\t\t\tassert.Equal(\n\t\t\t\tt,\n\t\t\t\tus.State.Terminated.FinishedAt.Time,\n\t\t\t\tcs.State.Terminated.FinishedAt.Time)\n\t\t}\n\t}\n}", "func (d *deploymentTester) markUpdatedPodsReady(wg *sync.WaitGroup) {\n\tdefer wg.Done()\n\n\tns := d.deployment.Namespace\n\terr := wait.PollImmediate(pollInterval, pollTimeout, func() (bool, error) {\n\t\t// We're done when the deployment is complete\n\t\tif completed, err := d.deploymentComplete(); err != nil {\n\t\t\treturn false, err\n\t\t} else if completed {\n\t\t\treturn true, nil\n\t\t}\n\t\t// Otherwise, mark remaining pods as ready\n\t\tpods, err := d.listUpdatedPods()\n\t\tif err != nil {\n\t\t\td.t.Log(err)\n\t\t\treturn false, nil\n\t\t}\n\t\td.t.Logf(\"%d/%d of deployment pods are created\", len(pods), *d.deployment.Spec.Replicas)\n\t\tfor i := range pods {\n\t\t\tpod := pods[i]\n\t\t\tif podutil.IsPodReady(&pod) {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif err = markPodReady(d.c, ns, &pod); err != nil {\n\t\t\t\td.t.Logf(\"failed to update Deployment pod %s, will retry later: %v\", pod.Name, err)\n\t\t\t}\n\t\t}\n\t\treturn false, nil\n\t})\n\tif err != nil {\n\t\td.t.Errorf(\"failed to mark updated Deployment pods to ready: %v\", err)\n\t}\n}", "func (s *DomainSuite) TestUpdateConfigConcurrently() {\n\tid := \"test\"\n\tuserMech := \"h2\"\n\tuserId := \"dave\"\n\tinitialData := `{ \"a\": 0, \"b\": 0 }`\n\n\ttestRepo := &memoryRepository{\n\t\tdata: map[string]*ChangeSet{\n\t\t\tid: &ChangeSet{\n\t\t\t\tId: id,\n\t\t\t\tBody: []byte(initialData),\n\t\t\t\tTimestamp: time.Now(),\n\t\t\t},\n\t\t},\n\t}\n\n\tDefaultRepository = testRepo\n\n\tlock := &mockLock{}\n\ts.zk.\n\t\tOn(\"NewLock\", lockPath(id), gozk.WorldACL(gozk.PermAll)).\n\t\tReturn(lock)\n\n\tpaths := []string{\"a\", \"b\"}\n\n\tfor i := 1; i <= 100; i++ {\n\t\twg := sync.WaitGroup{}\n\t\twg.Add(len(paths))\n\n\t\tfor _, path 
:= range paths {\n\t\t\tgo func(path string) {\n\t\t\t\tmsg := fmt.Sprintf(\"Setting %s to %d\", path, i)\n\t\t\t\terr := CreateOrUpdateConfig(fmt.Sprintf(\"%s:%d\", path, i), id, path, userMech, userId, msg, []byte(fmt.Sprintf(\"%d\", i)))\n\t\t\t\ts.NoError(err)\n\n\t\t\t\twg.Done()\n\t\t\t}(path)\n\t\t}\n\n\t\twg.Wait()\n\t\tdata, _, err := ReadConfig(id, \"\")\n\t\ts.NoError(err, \"Failed to update config\")\n\n\t\tconfig := map[string]int{}\n\t\terr = json.Unmarshal(data, &config)\n\t\ts.NoError(err, \"Failed to unmarshal config\")\n\n\t\tfor _, path := range paths {\n\t\t\ts.Equal(config[path], i)\n\t\t}\n\t}\n}", "func testReconcileDeploymentReadyRestartRequired(t *testing.T) {\n\treaper := createReaper()\n\tdeployment := createReadyDeployment(reaper)\n\n\tobjs := []runtime.Object{reaper, deployment}\n\n\tSetConfigurationUpdatedCondition(&reaper.Status)\n\n\tr := createDeploymentReconciler(objs...)\n\tresult, err := r.ReconcileDeployment(context.TODO(), reaper)\n\n\tif result == nil {\n\t\tt.Errorf(\"expected non-nil result\")\n\t} else if !result.Requeue {\n\t\tt.Errorf(\"expected requeue\")\n\t}\n\n\tif err != nil {\n\t\tt.Errorf(\"expected err (nil), got (%s)\", err)\n\t}\n\n\tcond := GetCondition(&reaper.Status, v1alpha1.ConfigurationUpdated)\n\tif cond == nil {\n\t\tt.Errorf(\"expected to find condition (%s)\", v1alpha1.ConfigurationUpdated)\n\t} else if cond.Reason != RestartRequiredReason {\n\t\tt.Errorf(\"condition %s reason is wrong: expected (%s), got (%s)\", v1alpha1.ConfigurationUpdated, RestartRequiredReason, cond.Reason)\n\t}\n\n\tdeployment = &appsv1.Deployment{}\n\tif err := r.client.Get(context.TODO(), namespaceName, deployment); err != nil {\n\t\tt.Errorf(\"failed to get deployment: (%s)\", err)\n\t} else if _, found := deployment.Spec.Template.Annotations[reaperRestartedAt]; !found {\n\t\tt.Errorf(\"expected to find deployment annotation: (%s)\", reaperRestartedAt)\n\t}\n}", "func GetBuildMockClientToRun2Times() MockClient {\n\tmc := 
NewMockClient()\n\n\tmc.AddData(buildHead)\n\tmc.AddData(buildHead)\n\n\tmc.AddData(buildPost)\n\tmc.AddData(buildPost)\n\n\tmc.AddData(buildGet1)\n\tmc.AddData(buildGet2)\n\tmc.AddData(buildGet1)\n\tmc.AddData(buildGet2)\n\n\tmc.AddData(buildGetTasks)\n\tmc.AddData(buildGetTasks)\n\n\tmc.AddData(buildGetTask0Logs)\n\tmc.AddData(buildGetTask1Logs)\n\tmc.AddData(buildGetTask2Logs)\n\tmc.AddData(buildGetTask3Logs)\n\tmc.AddData(buildGetTask4Logs)\n\tmc.AddData(buildGetTask5Logs)\n\tmc.AddData(buildGetTask6Logs)\n\tmc.AddData(buildGetTask7Logs)\n\tmc.AddData(buildGetTask8Logs)\n\tmc.AddData(buildGetTask9Logs)\n\tmc.AddData(buildGetTask10Logs)\n\tmc.AddData(buildGetTask11Logs)\n\tmc.AddData(buildGetTask12Logs)\n\n\tmc.AddData(buildGetTask0Logs)\n\tmc.AddData(buildGetTask1Logs)\n\tmc.AddData(buildGetTask2Logs)\n\tmc.AddData(buildGetTask3Logs)\n\tmc.AddData(buildGetTask4Logs)\n\tmc.AddData(buildGetTask5Logs)\n\tmc.AddData(buildGetTask6Logs)\n\tmc.AddData(buildGetTask7Logs)\n\tmc.AddData(buildGetTask8Logs)\n\tmc.AddData(buildGetTask9Logs)\n\tmc.AddData(buildGetTask10Logs)\n\tmc.AddData(buildGetTask11Logs)\n\tmc.AddData(buildGetTask12Logs)\n\n\tmc.AddData(buildGetTask0Result)\n\tmc.AddData(buildGetTask1Result)\n\tmc.AddData(buildGetTask2Result)\n\tmc.AddData(buildGetTask3Result)\n\tmc.AddData(buildGetTask4Result)\n\tmc.AddData(buildGetTask5Result)\n\tmc.AddData(buildGetTask6Result)\n\tmc.AddData(buildGetTask7Result)\n\tmc.AddData(buildGetTask8Result)\n\tmc.AddData(buildGetTask9Result)\n\tmc.AddData(buildGetTask10Result)\n\tmc.AddData(buildGetTask11Result)\n\tmc.AddData(buildGetTask12Result)\n\n\tmc.AddData(buildGetTask0Result)\n\tmc.AddData(buildGetTask1Result)\n\tmc.AddData(buildGetTask2Result)\n\tmc.AddData(buildGetTask3Result)\n\tmc.AddData(buildGetTask4Result)\n\tmc.AddData(buildGetTask5Result)\n\tmc.AddData(buildGetTask6Result)\n\tmc.AddData(buildGetTask7Result)\n\tmc.AddData(buildGetTask8Result)\n\tmc.AddData(buildGetTask9Result)\n\tmc.AddData(buildGetTask10Result)\n\
tmc.AddData(buildGetTask11Result)\n\tmc.AddData(buildGetTask12Result)\n\n\tmc.AddData(buildGetTask11ResultMedia)\n\tmc.AddData(buildGetTask11ResultMedia)\n\n\tmc.AddData(buildGetValues)\n\tmc.AddData(buildGetValues)\n\n\treturn mc\n}", "func checkPods(podClient v1.CoreV1Interface, logger *log.Logger, filters ...PodPredicate) ([]kubev1.Pod, error) {\n\tlogger = logging.CreateNewStdLoggerOrUseExistingLogger(logger)\n\n\tlogger.Print(\"Checking that all Pods are running or completed...\")\n\n\tlist, err := podClient.Pods(metav1.NamespaceAll).List(context.TODO(), metav1.ListOptions{})\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"error getting pod list: %v\", err)\n\t}\n\n\tif len(list.Items) == 0 {\n\t\treturn nil, fmt.Errorf(\"pod list is empty. this should NOT happen\")\n\t}\n\n\tpods := filterPods(list, filters...)\n\n\t// Keep track of all pending pods that are not associated with a job\n\t// and store all pods associated with a job for further analysis\n\tpendingPods := []kubev1.Pod{}\n\tjobPods := []kubev1.Pod{}\n\tfor _, pod := range pods.Items {\n\t\tif IsNotControlledByJob(pod) {\n\t\t\t// Completed pod not associated with a job, e.g. a standalone pod\n\t\t\tif pod.Status.Phase == kubev1.PodSucceeded {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tif pod.Status.Phase != kubev1.PodPending {\n\t\t\t\treturn nil, fmt.Errorf(\"pod %s/%s in unexpected phase %s: reason: %s message: %s\", pod.Namespace, pod.Name, pod.Status.Phase, pod.Status.Reason, pod.Status.Message)\n\t\t\t}\n\t\t\tlogger.Printf(\"pod %s/%s is not ready. 
Phase: %s, Reason: %s, Message: %s\", pod.Namespace, pod.Name, pod.Status.Phase, pod.Status.Reason, pod.Status.Message)\n\t\t\tpendingPods = append(pendingPods, pod)\n\t\t} else {\n\t\t\tjobPods = append(jobPods, pod)\n\t\t}\n\t}\n\n\tpendingJobPods, err := checkJobPods(jobPods, logger)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tlogger.Printf(\"%v pods are currently not running or complete:\", len(pendingPods)+len(pendingJobPods))\n\n\treturn append(pendingPods, pendingJobPods...), nil\n}", "func TestConcurrent2(t *testing.T) {\n\tfmt.Printf(\"Test: more concurrent puts and configuration changes...\\n\")\n\n\tcfg := makeConfig(t, 3, false, -1)\n\tdefer cfg.cleanup()\n\n\tck := cfg.makeClient()\n\n\tcfg.join(1)\n\tcfg.join(0)\n\tcfg.join(2)\n\n\tn := 10\n\tka := make([]string, n)\n\tva := make([]string, n)\n\tfor i := 0; i < n; i++ {\n\t\tka[i] = strconv.Itoa(i) // ensure multiple shards\n\t\tva[i] = randstring(1)\n\t\tck.Put(ka[i], va[i])\n\t}\n\n\tvar done int32\n\tch := make(chan bool)\n\n\tff := func(i int, ck1 *Clerk) {\n\t\tdefer func() { ch <- true }()\n\t\tfor atomic.LoadInt32(&done) == 0 {\n\t\t\tx := randstring(1)\n\t\t\tck1.Append(ka[i], x)\n\t\t\tva[i] += x\n\t\t\ttime.Sleep(50 * time.Millisecond)\n\t\t}\n\t}\n\n\tfor i := 0; i < n; i++ {\n\t\tck1 := cfg.makeClient()\n\t\tgo ff(i, ck1)\n\t}\n\n\tcfg.leave(0)\n\tcfg.leave(2)\n\ttime.Sleep(3000 * time.Millisecond)\n\tcfg.join(0)\n\tcfg.join(2)\n\tcfg.leave(1)\n\ttime.Sleep(3000 * time.Millisecond)\n\tcfg.join(1)\n\tcfg.leave(0)\n\tcfg.leave(2)\n\ttime.Sleep(3000 * time.Millisecond)\n\n\tcfg.ShutdownGroup(1)\n\tcfg.ShutdownGroup(2)\n\ttime.Sleep(1000 * time.Millisecond)\n\tcfg.StartGroup(1)\n\tcfg.StartGroup(2)\n\n\ttime.Sleep(2 * time.Second)\n\n\tatomic.StoreInt32(&done, 1)\n\tfor i := 0; i < n; i++ {\n\t\t<-ch\n\t}\n\n\tfor i := 0; i < n; i++ {\n\t\tcheck(t, ck, ka[i], va[i])\n\t}\n\n\tfmt.Printf(\" ... 
Passed\\n\")\n}", "func TestMaintenanceCodecov(t *testing.T) {\n\t// For Codecov - this does not test anything\n\tf := NewFactory()\n\tv, command := config.Viperize(f.AddFlags)\n\t// Lets speed up the maintenance ticker..\n\tcommand.ParseFlags([]string{\n\t\t\"--badger.maintenance-interval=10ms\",\n\t})\n\tf.InitFromViper(v, zap.NewNop())\n\tmFactory := metricstest.NewFactory(0)\n\tf.Initialize(mFactory, zap.NewNop())\n\tdefer f.Close()\n\n\twaiter := func() {\n\t\tfor sleeps := 0; sleeps < 8; sleeps++ {\n\t\t\t// Wait for the scheduler\n\t\t\ttime.Sleep(time.Duration(50) * time.Millisecond)\n\t\t}\n\t}\n\n\terr := f.store.Close()\n\tassert.NoError(t, err)\n\twaiter() // This should trigger the logging of error\n}", "func (t *TestSpec) RunTest(kub *helpers.Kubectl) {\n\tdefer func() { go t.Destroy(destroyDelay, kub.BasePath()) }()\n\n\tt.Kub = kub\n\terr := t.CreateManifests()\n\tgomega.Expect(err).To(gomega.BeNil(), \"cannot create pods manifest for %s\", t.Prefix)\n\n\tmanifest, err := t.ApplyManifest(kub.BasePath())\n\tgomega.Expect(err).To(gomega.BeNil(), \"cannot apply pods manifest for %s\", t.Prefix)\n\tlog.WithField(\"prefix\", t.Prefix).Infof(\"Manifest '%s' is created correctly\", manifest)\n\n\terr = t.Destination.CreateApplyManifest(t, kub.BasePath())\n\tgomega.Expect(err).To(gomega.BeNil(), \"cannot apply destination for %s\", t.Prefix)\n\n\tif t.IsPolicyInvalid() {\n\t\t// Some policies cannot be applied correctly because of different\n\t\t// rules. 
This code makes sure that the status of the policy has a error\n\t\t// in the status.\n\t\tcnp, err := t.InvalidNetworkPolicyApply(kub.BasePath())\n\t\tkub.Exec(fmt.Sprintf(\"%s delete cnp %s\", helpers.KubectlCmd, t.Prefix))\n\t\tgomega.Expect(err).To(gomega.BeNil(), \"Cannot apply network policy\")\n\t\tgomega.Expect(cnp).NotTo(gomega.BeNil(), \"CNP is not a valid struct\")\n\t\tgomega.Expect(cnp.Status.Nodes).NotTo(gomega.BeEmpty(), \"CNP Status is empty\")\n\n\t\tfor node, status := range cnp.Status.Nodes {\n\t\t\tgomega.Expect(status.Error).NotTo(gomega.BeEmpty(),\n\t\t\t\t\"Node %q applied invalid policy and do not raise an error\", node)\n\t\t}\n\t\treturn\n\t}\n\n\terr = t.NetworkPolicyApply(kub.BasePath())\n\tgomega.Expect(err).To(gomega.BeNil(), \"cannot apply network policy for %s\", t.Prefix)\n\n\terr = kub.CiliumEndpointWaitReady()\n\tgomega.Expect(err).To(gomega.BeNil(), \"Endpoints are not ready after timeout\")\n\n\terr = t.ExecTest()\n\tgomega.Expect(err).To(gomega.BeNil(), \"cannot execute test for %s\", t.Prefix)\n}", "func TestGetConcurrentAPIEndpoints(t *testing.T) {\n\tfor _, tc := range []struct {\n\t\tname string\n\t\tddURL, eventsDDURL, apiKey string\n\t\tadditionalEndpoints map[string][]string\n\t\tadditionalEventsEndpoints map[string][]string\n\t\texpectedEndpoints []apicfg.Endpoint\n\t\texpectedEventsEndpoints []apicfg.Endpoint\n\t}{\n\t\t{\n\t\t\tname: \"default\",\n\t\t\tapiKey: \"test\",\n\t\t\texpectedEndpoints: []apicfg.Endpoint{\n\t\t\t\t{\n\t\t\t\t\tAPIKey: \"test\",\n\t\t\t\t\tEndpoint: mkurl(config.DefaultProcessEndpoint),\n\t\t\t\t},\n\t\t\t},\n\t\t\texpectedEventsEndpoints: []apicfg.Endpoint{\n\t\t\t\t{\n\t\t\t\t\tAPIKey: \"test\",\n\t\t\t\t\tEndpoint: mkurl(config.DefaultProcessEventsEndpoint),\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"set only process endpoint\",\n\t\t\tddURL: \"https://process.datadoghq.eu\",\n\t\t\tapiKey: \"test\",\n\t\t\texpectedEndpoints: []apicfg.Endpoint{\n\t\t\t\t{\n\t\t\t\t\tAPIKey: 
\"test\",\n\t\t\t\t\tEndpoint: mkurl(\"https://process.datadoghq.eu\"),\n\t\t\t\t},\n\t\t\t},\n\t\t\texpectedEventsEndpoints: []apicfg.Endpoint{\n\t\t\t\t{\n\t\t\t\t\tAPIKey: \"test\",\n\t\t\t\t\tEndpoint: mkurl(config.DefaultProcessEventsEndpoint),\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"set only process-events endpoint\",\n\t\t\teventsDDURL: \"https://process-events.datadoghq.eu\",\n\t\t\tapiKey: \"test\",\n\t\t\texpectedEndpoints: []apicfg.Endpoint{\n\t\t\t\t{\n\t\t\t\t\tAPIKey: \"test\",\n\t\t\t\t\tEndpoint: mkurl(config.DefaultProcessEndpoint),\n\t\t\t\t},\n\t\t\t},\n\t\t\texpectedEventsEndpoints: []apicfg.Endpoint{\n\t\t\t\t{\n\t\t\t\t\tAPIKey: \"test\",\n\t\t\t\t\tEndpoint: mkurl(\"https://process-events.datadoghq.eu\"),\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"multiple eps\",\n\t\t\tapiKey: \"test\",\n\t\t\tadditionalEndpoints: map[string][]string{\n\t\t\t\t\"https://mock.datadoghq.com\": {\n\t\t\t\t\t\"key1\",\n\t\t\t\t\t\"key2\",\n\t\t\t\t},\n\t\t\t\t\"https://mock2.datadoghq.com\": {\n\t\t\t\t\t\"key3\",\n\t\t\t\t},\n\t\t\t},\n\t\t\tadditionalEventsEndpoints: map[string][]string{\n\t\t\t\t\"https://mock-events.datadoghq.com\": {\n\t\t\t\t\t\"key2\",\n\t\t\t\t},\n\t\t\t\t\"https://mock2-events.datadoghq.com\": {\n\t\t\t\t\t\"key3\",\n\t\t\t\t},\n\t\t\t},\n\t\t\texpectedEndpoints: []apicfg.Endpoint{\n\t\t\t\t{\n\t\t\t\t\tEndpoint: mkurl(config.DefaultProcessEndpoint),\n\t\t\t\t\tAPIKey: \"test\",\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tEndpoint: mkurl(\"https://mock.datadoghq.com\"),\n\t\t\t\t\tAPIKey: \"key1\",\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tEndpoint: mkurl(\"https://mock.datadoghq.com\"),\n\t\t\t\t\tAPIKey: \"key2\",\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tEndpoint: mkurl(\"https://mock2.datadoghq.com\"),\n\t\t\t\t\tAPIKey: \"key3\",\n\t\t\t\t},\n\t\t\t},\n\t\t\texpectedEventsEndpoints: []apicfg.Endpoint{\n\t\t\t\t{\n\t\t\t\t\tEndpoint: mkurl(config.DefaultProcessEventsEndpoint),\n\t\t\t\t\tAPIKey: 
\"test\",\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tEndpoint: mkurl(\"https://mock-events.datadoghq.com\"),\n\t\t\t\t\tAPIKey: \"key2\",\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tEndpoint: mkurl(\"https://mock2-events.datadoghq.com\"),\n\t\t\t\t\tAPIKey: \"key3\",\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t} {\n\t\tt.Run(tc.name, func(t *testing.T) {\n\t\t\tcfg := config.Mock(t)\n\t\t\tcfg.Set(\"api_key\", tc.apiKey)\n\t\t\tif tc.ddURL != \"\" {\n\t\t\t\tcfg.Set(\"process_config.process_dd_url\", tc.ddURL)\n\t\t\t}\n\n\t\t\tif tc.eventsDDURL != \"\" {\n\t\t\t\tcfg.Set(\"process_config.events_dd_url\", tc.eventsDDURL)\n\t\t\t}\n\n\t\t\tif tc.additionalEndpoints != nil {\n\t\t\t\tcfg.Set(\"process_config.additional_endpoints\", tc.additionalEndpoints)\n\t\t\t}\n\n\t\t\tif tc.additionalEventsEndpoints != nil {\n\t\t\t\tcfg.Set(\"process_config.events_additional_endpoints\", tc.additionalEventsEndpoints)\n\t\t\t}\n\n\t\t\teps, err := endpoint.GetAPIEndpoints(cfg)\n\t\t\tassert.NoError(t, err)\n\t\t\tassert.ElementsMatch(t, tc.expectedEndpoints, eps)\n\n\t\t\teventsEps, err := endpoint.GetEventsAPIEndpoints(cfg)\n\t\t\tassert.NoError(t, err)\n\t\t\tassert.ElementsMatch(t, tc.expectedEventsEndpoints, eventsEps)\n\t\t})\n\t}\n}", "func (rm *ReplicationManager) watchControllers(resourceVersion *string) {\n\twatching, err := rm.kubeClient.ReplicationControllers(api.NamespaceAll).Watch(\n\t\tlabels.Everything(),\n\t\tlabels.Everything(),\n\t\t*resourceVersion,\n\t)\n\tif err != nil {\n\t\tutil.HandleError(fmt.Errorf(\"unable to watch: %v\", err))\n\t\ttime.Sleep(5 * time.Second)\n\t\treturn\n\t}\n\n\tfor {\n\t\tselect {\n\t\tcase <-rm.syncTime:\n\t\t\trm.synchronize()\n\t\tcase event, open := <-watching.ResultChan():\n\t\t\tif !open {\n\t\t\t\t// watchChannel has been closed, or something else went\n\t\t\t\t// wrong with our etcd watch call. 
Let the util.Forever()\n\t\t\t\t// that called us call us again.\n\t\t\t\treturn\n\t\t\t}\n\t\t\tif event.Type == watch.Error {\n\t\t\t\tutil.HandleError(fmt.Errorf(\"error from watch during sync: %v\", errors.FromObject(event.Object)))\n\t\t\t\t// Clear the resource version, this may cause us to skip some elements on the watch,\n\t\t\t\t// but we'll catch them on the synchronize() call, so it works out.\n\t\t\t\t*resourceVersion = \"\"\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tglog.V(4).Infof(\"Got watch: %#v\", event)\n\t\t\trc, ok := event.Object.(*api.ReplicationController)\n\t\t\tif !ok {\n\t\t\t\tif status, ok := event.Object.(*api.Status); ok {\n\t\t\t\t\tif status.Status == api.StatusFailure {\n\t\t\t\t\t\tglog.Errorf(\"failed to watch: %v\", status)\n\t\t\t\t\t\t// Clear resource version here, as above, this won't hurt consistency, but we\n\t\t\t\t\t\t// should consider introspecting more carefully here. (or make the apiserver smarter)\n\t\t\t\t\t\t// \"why not both?\"\n\t\t\t\t\t\t*resourceVersion = \"\"\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tutil.HandleError(fmt.Errorf(\"unexpected object: %#v\", event.Object))\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\t// If we get disconnected, start where we left off.\n\t\t\t*resourceVersion = rc.ResourceVersion\n\t\t\t// Sync even if this is a deletion event, to ensure that we leave\n\t\t\t// it in the desired state.\n\t\t\tglog.V(4).Infof(\"About to sync from watch: %v\", rc.Name)\n\t\t\tif err := rm.syncHandler(*rc); err != nil {\n\t\t\t\tutil.HandleError(fmt.Errorf(\"unexpected sync error: %v\", err))\n\t\t\t}\n\t\t}\n\t}\n}", "func (c *InstallerController) manageInstallationPods(ctx context.Context, operatorSpec *operatorv1.StaticPodOperatorSpec, originalOperatorStatus *operatorv1.StaticPodOperatorStatus) (bool, time.Duration, error) {\n\toperatorStatus := originalOperatorStatus.DeepCopy()\n\n\tif len(operatorStatus.NodeStatuses) == 0 {\n\t\treturn false, 0, nil\n\t}\n\n\t// start with node which is in worst state 
(instead of terminating healthy pods first)\n\tstartNode, nodeChoiceReason, err := nodeToStartRevisionWith(ctx, c.getStaticPodState, operatorStatus.NodeStatuses)\n\tif err != nil {\n\t\treturn true, 0, err\n\t}\n\n\t// determine the amount of time to delay before creating the next installer pod. We delay to avoid an LB outage (see godoc on minReadySeconds)\n\trequeueAfter := c.timeToWaitBeforeInstallingNextPod(ctx, operatorStatus.NodeStatuses)\n\tif requeueAfter > 0 {\n\t\treturn true, requeueAfter, nil\n\t}\n\n\tfor l := 0; l < len(operatorStatus.NodeStatuses); l++ {\n\t\ti := (startNode + l) % len(operatorStatus.NodeStatuses)\n\n\t\tvar currNodeState *operatorv1.NodeStatus\n\t\tvar prevNodeState *operatorv1.NodeStatus\n\t\tcurrNodeState = &operatorStatus.NodeStatuses[i]\n\t\tif l > 0 {\n\t\t\tprev := (startNode + l - 1) % len(operatorStatus.NodeStatuses)\n\t\t\tprevNodeState = &operatorStatus.NodeStatuses[prev]\n\t\t\tnodeChoiceReason = fmt.Sprintf(\"node %s is the next node in the line\", currNodeState.NodeName)\n\t\t}\n\n\t\t// if we are in a transition, check to see whether our installer pod completed\n\t\tif currNodeState.TargetRevision > currNodeState.CurrentRevision {\n\t\t\tif operatorStatus.LatestAvailableRevision > currNodeState.TargetRevision {\n\t\t\t\t// no backoff if new revision is pending\n\t\t\t} else {\n\t\t\t\tif currNodeState.LastFailedRevision == currNodeState.TargetRevision && currNodeState.LastFailedTime != nil && !currNodeState.LastFailedTime.IsZero() {\n\t\t\t\t\tvar delay time.Duration\n\t\t\t\t\tif currNodeState.LastFailedReason == nodeStatusOperandFailedFallbackReason {\n\t\t\t\t\t\tdelay = c.fallbackBackOff(currNodeState.LastFallbackCount)\n\t\t\t\t\t} else {\n\t\t\t\t\t\tdelay = c.installerBackOff(currNodeState.LastFailedCount)\n\t\t\t\t\t}\n\t\t\t\t\tearliestRetry := currNodeState.LastFailedTime.Add(delay)\n\t\t\t\t\tif !c.now().After(earliestRetry) {\n\t\t\t\t\t\tklog.V(4).Infof(\"Backing off node %s installer retry %d until %v\", 
currNodeState.NodeName, currNodeState.LastFailedCount+1, earliestRetry)\n\t\t\t\t\t\treturn true, earliestRetry.Sub(c.now()), nil\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\tif err := c.ensureInstallerPod(ctx, operatorSpec, currNodeState); err != nil {\n\t\t\t\t\tc.eventRecorder.Warningf(\"InstallerPodFailed\", \"Failed to create installer pod for revision %d count %d on node %q: %v\",\n\t\t\t\t\t\tcurrNodeState.TargetRevision, currNodeState.LastFailedCount, currNodeState.NodeName, err)\n\t\t\t\t\t// if a newer revision is pending, continue, so we retry later with the latest available revision\n\t\t\t\t\tif !(operatorStatus.LatestAvailableRevision > currNodeState.TargetRevision) {\n\t\t\t\t\t\treturn true, 0, err\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tnewCurrNodeState, _, reason, err := c.newNodeStateForInstallInProgress(ctx, currNodeState, operatorStatus.LatestAvailableRevision)\n\t\t\tif err != nil {\n\t\t\t\treturn true, 0, err\n\t\t\t}\n\t\t\tif newCurrNodeState.LastFailedReason == nodeStatusInstalledFailedReason && newCurrNodeState.LastFailedCount != currNodeState.LastFailedCount {\n\t\t\t\tklog.Infof(\"Will retry %q for revision %d for the %s time because %s\", currNodeState.NodeName, currNodeState.TargetRevision, nthTimeOr1st(newCurrNodeState.LastFailedCount), reason)\n\t\t\t}\n\t\t\tif newCurrNodeState.LastFailedReason == nodeStatusOperandFailedFallbackReason && newCurrNodeState.LastFallbackCount != currNodeState.LastFallbackCount {\n\t\t\t\tklog.Infof(\"Will fallback %q for revision %d to last-known-good revision for the %s time because %s\", currNodeState.NodeName, currNodeState.TargetRevision, nthTimeOr1st(newCurrNodeState.LastFallbackCount), reason)\n\t\t\t}\n\n\t\t\t// if we make a change to this status, we want to write it out to the API before we commence work on the next node.\n\t\t\t// it's an extra write/read, but it makes the state debuggable from outside this process\n\t\t\tif !equality.Semantic.DeepEqual(newCurrNodeState, currNodeState) 
{\n\t\t\t\tklog.Infof(\"%q moving to %v because %s\", currNodeState.NodeName, spew.Sdump(*newCurrNodeState), reason)\n\t\t\t\t_, updated, updateError := v1helpers.UpdateStaticPodStatus(ctx, c.operatorClient, setNodeStatusFn(newCurrNodeState), setAvailableProgressingNodeInstallerFailingConditions)\n\t\t\t\tif updateError != nil {\n\t\t\t\t\treturn false, 0, updateError\n\t\t\t\t} else if updated && currNodeState.CurrentRevision != newCurrNodeState.CurrentRevision {\n\t\t\t\t\tc.eventRecorder.Eventf(\"NodeCurrentRevisionChanged\", \"Updated node %q from revision %d to %d because %s\", currNodeState.NodeName,\n\t\t\t\t\t\tcurrNodeState.CurrentRevision, newCurrNodeState.CurrentRevision, reason)\n\t\t\t\t}\n\n\t\t\t\treturn false, 0, nil // no requeue because UpdateStaticPodStatus triggers an external event anyway\n\t\t\t}\n\n\t\t\tklog.V(2).Infof(\"%q is in transition to %d, but has not made progress because %s\", currNodeState.NodeName, currNodeState.TargetRevision, reasonWithBlame(reason))\n\t\t\treturn false, 0, nil\n\t\t}\n\n\t\t// here we are not in transition, i.e. 
there is no install pod running\n\n\t\trevisionToStart := c.getRevisionToStart(currNodeState, prevNodeState, operatorStatus)\n\t\tif revisionToStart == 0 {\n\t\t\tklog.V(4).Infof(\"%s, but node %s does not need update\", nodeChoiceReason, currNodeState.NodeName)\n\t\t\tcontinue\n\t\t}\n\n\t\tklog.Infof(\"%s and needs new revision %d\", nodeChoiceReason, revisionToStart)\n\n\t\tnewCurrNodeState := currNodeState.DeepCopy()\n\t\tnewCurrNodeState.TargetRevision = revisionToStart\n\n\t\t// if we make a change to this status, we want to write it out to the API before we commence work on the next node.\n\t\t// it's an extra write/read, but it makes the state debuggable from outside this process\n\t\tif !equality.Semantic.DeepEqual(newCurrNodeState, currNodeState) {\n\t\t\tklog.Infof(\"%q moving to %v\", currNodeState.NodeName, spew.Sdump(*newCurrNodeState))\n\t\t\tif _, updated, updateError := v1helpers.UpdateStaticPodStatus(ctx, c.operatorClient, setNodeStatusFn(newCurrNodeState), setAvailableProgressingNodeInstallerFailingConditions); updateError != nil {\n\t\t\t\treturn false, 0, updateError\n\t\t\t} else if updated && currNodeState.TargetRevision != newCurrNodeState.TargetRevision && newCurrNodeState.TargetRevision != 0 {\n\t\t\t\tc.eventRecorder.Eventf(\"NodeTargetRevisionChanged\", \"Updating node %q from revision %d to %d because %s\", currNodeState.NodeName,\n\t\t\t\t\tcurrNodeState.CurrentRevision, newCurrNodeState.TargetRevision, nodeChoiceReason)\n\t\t\t}\n\n\t\t\treturn false, 0, nil // no requeue because UpdateStaticPodStatus triggers an external event anyway\n\t\t}\n\t\tbreak\n\t}\n\n\treturn false, 0, nil\n}", "func SetupAddExtraControllerVersions(k kubernetes.Interface, namespace string) kubernetes.Interface {\n\tp := MockPod()\n\n\tdv1b1 := appsv1beta1.Deployment{\n\t\tSpec: appsv1beta1.DeploymentSpec{\n\t\t\tTemplate: corev1.PodTemplateSpec{Spec: p.Spec},\n\t\t},\n\t}\n\tif _, err := k.AppsV1beta1().Deployments(namespace).Create(&dv1b1); err != nil 
{\n\t\tpanic(err)\n\t}\n\n\tdv1b2 := appsv1beta2.Deployment{\n\t\tSpec: appsv1beta2.DeploymentSpec{\n\t\t\tTemplate: corev1.PodTemplateSpec{Spec: p.Spec},\n\t\t},\n\t}\n\tif _, err := k.AppsV1beta2().Deployments(namespace).Create(&dv1b2); err != nil {\n\t\tpanic(err)\n\t}\n\n\tssv1b1 := appsv1beta1.StatefulSet{\n\t\tSpec: appsv1beta1.StatefulSetSpec{\n\t\t\tTemplate: corev1.PodTemplateSpec{Spec: p.Spec},\n\t\t},\n\t}\n\tif _, err := k.AppsV1beta1().StatefulSets(namespace).Create(&ssv1b1); err != nil {\n\t\tpanic(err)\n\t}\n\n\tssv1b2 := appsv1beta2.StatefulSet{\n\t\tSpec: appsv1beta2.StatefulSetSpec{\n\t\t\tTemplate: corev1.PodTemplateSpec{Spec: p.Spec},\n\t\t},\n\t}\n\tif _, err := k.AppsV1beta2().StatefulSets(namespace).Create(&ssv1b2); err != nil {\n\t\tpanic(err)\n\t}\n\n\tdsv1b2 := appsv1beta2.DaemonSet{\n\t\tSpec: appsv1beta2.DaemonSetSpec{\n\t\t\tTemplate: corev1.PodTemplateSpec{Spec: p.Spec},\n\t\t},\n\t}\n\tif _, err := k.AppsV1beta2().DaemonSets(namespace).Create(&dsv1b2); err != nil {\n\t\tpanic(err)\n\t}\n\treturn k\n}", "func TestFrameworkFlagMetaReady(t *testing.T) {\n\tappName := \"framework_test_flagmetaready\"\n\t// launch testing etcd server\n\tm := etcdutil.MustNewMember(t, appName)\n\tm.Launch()\n\tdefer m.Terminate(t)\n\turl := fmt.Sprintf(\"http://%s\", m.ClientListeners[0].Addr().String())\n\n\t// launch controller to setup etcd layout\n\tctl := controller.New(appName, etcd.NewClient([]string{url}), 2)\n\tif err := ctl.InitEtcdLayout(); err != nil {\n\t\tt.Fatalf(\"initEtcdLayout failed: %v\", err)\n\t}\n\tdefer ctl.DestroyEtcdLayout()\n\n\tpDataChan := make(chan *tDataBundle, 1)\n\tcDataChan := make(chan *tDataBundle, 1)\n\n\t// simulate two tasks on two nodes -- 0 and 1\n\t// 0 is parent, 1 is child\n\tf0 := &framework{\n\t\tname: appName,\n\t\tetcdURLs: []string{url},\n\t\tln: createListener(t),\n\t}\n\tf1 := &framework{\n\t\tname: appName,\n\t\tetcdURLs: []string{url},\n\t\tln: createListener(t),\n\t}\n\n\tvar wg 
sync.WaitGroup\n\ttaskBuilder := &testableTaskBuilder{\n\t\tdataMap: nil,\n\t\tcDataChan: cDataChan,\n\t\tpDataChan: pDataChan,\n\t\tsetupLatch: &wg,\n\t}\n\tf0.SetTaskBuilder(taskBuilder)\n\tf0.SetTopology(example.NewTreeTopology(2, 2))\n\tf1.SetTaskBuilder(taskBuilder)\n\tf1.SetTopology(example.NewTreeTopology(2, 2))\n\n\ttaskBuilder.setupLatch.Add(2)\n\tgo f0.Start()\n\tgo f1.Start()\n\ttaskBuilder.setupLatch.Wait()\n\tif f0.GetTaskID() != 0 {\n\t\tf0, f1 = f1, f0\n\t}\n\n\tdefer f0.ShutdownJob()\n\n\ttests := []struct {\n\t\tcMeta string\n\t\tpMeta string\n\t}{\n\t\t{\"parent\", \"child\"},\n\t\t{\"ParamReady\", \"GradientReady\"},\n\t}\n\n\tfor i, tt := range tests {\n\t\t// 0: F#FlagChildMetaReady -> 1: T#ParentMetaReady\n\t\tf0.flagMetaToChild(tt.cMeta, 0)\n\t\t// from child(1)'s view\n\t\tdata := <-pDataChan\n\t\texpected := &tDataBundle{0, tt.cMeta, \"\", nil}\n\t\tif !reflect.DeepEqual(data, expected) {\n\t\t\tt.Errorf(\"#%d: data bundle want = %v, get = %v\", i, expected, data)\n\t\t}\n\n\t\t// 1: F#FlagParentMetaReady -> 0: T#ChildMetaReady\n\t\tf1.flagMetaToParent(tt.pMeta, 0)\n\t\t// from parent(0)'s view\n\t\tdata = <-cDataChan\n\t\texpected = &tDataBundle{1, tt.pMeta, \"\", nil}\n\t\tif !reflect.DeepEqual(data, expected) {\n\t\t\tt.Errorf(\"#%d: data bundle want = %v, get = %v\", i, expected, data)\n\t\t}\n\t}\n}", "func runMultisyncTests(t *testing.T, ctx context.Context, tests []controllerTest, storageClasses []*storage.StorageClass, defaultStorageClass string) {\n\tlogger := klog.FromContext(ctx)\n\trun := func(t *testing.T, test controllerTest) {\n\t\tlogger.V(4).Info(\"Starting multisync test\", \"testName\", test.name)\n\n\t\t// Initialize the controller\n\t\tclient := &fake.Clientset{}\n\t\tctrl, err := newTestController(ctx, client, nil, true)\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"Test %q construct persistent volume failed: %v\", test.name, err)\n\t\t}\n\n\t\t// Inject classes into controller via a custom lister.\n\t\tindexer := 
cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{})\n\t\tfor _, class := range storageClasses {\n\t\t\tindexer.Add(class)\n\t\t}\n\t\tctrl.classLister = storagelisters.NewStorageClassLister(indexer)\n\n\t\treactor := newVolumeReactor(ctx, client, ctrl, nil, nil, test.errors)\n\t\tfor _, claim := range test.initialClaims {\n\t\t\tctrl.claims.Add(claim)\n\t\t}\n\t\tfor _, volume := range test.initialVolumes {\n\t\t\tctrl.volumes.store.Add(volume)\n\t\t}\n\t\treactor.AddClaims(test.initialClaims)\n\t\treactor.AddVolumes(test.initialVolumes)\n\n\t\t// Run the tested function\n\t\terr = test.test(ctrl, reactor.VolumeReactor, test)\n\t\tif err != nil {\n\t\t\tt.Errorf(\"Test %q failed: %v\", test.name, err)\n\t\t}\n\n\t\t// Simulate any \"changed\" events and \"periodical sync\" until we reach a\n\t\t// stable state.\n\t\tfirstSync := true\n\t\tcounter := 0\n\t\tfor {\n\t\t\tcounter++\n\t\t\tlogger.V(4).Info(\"Test\", \"testName\", test.name, \"iteration\", counter)\n\n\t\t\tif counter > 100 {\n\t\t\t\tt.Errorf(\"Test %q failed: too many iterations\", test.name)\n\t\t\t\tbreak\n\t\t\t}\n\n\t\t\t// Wait for all goroutines to finish\n\t\t\treactor.waitForIdle()\n\n\t\t\tobj := reactor.PopChange(ctx)\n\t\t\tif obj == nil {\n\t\t\t\t// Nothing was changed, should we exit?\n\t\t\t\tif firstSync || reactor.GetChangeCount() > 0 {\n\t\t\t\t\t// There were some changes after the last \"periodic sync\".\n\t\t\t\t\t// Simulate \"periodic sync\" of everything (until it produces\n\t\t\t\t\t// no changes).\n\t\t\t\t\tfirstSync = false\n\t\t\t\t\tlogger.V(4).Info(\"Test simulating periodical sync of all claims and volumes\", \"testName\", test.name)\n\t\t\t\t\treactor.SyncAll()\n\t\t\t\t} else {\n\t\t\t\t\t// Last sync did not produce any updates, the test reached\n\t\t\t\t\t// stable state -> finish.\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\t\t\t// waiting here cools down exponential backoff\n\t\t\ttime.Sleep(600 * time.Millisecond)\n\n\t\t\t// There were some changes, 
process them\n\t\t\tswitch obj.(type) {\n\t\t\tcase *v1.PersistentVolumeClaim:\n\t\t\t\tclaim := obj.(*v1.PersistentVolumeClaim)\n\t\t\t\t// Simulate \"claim updated\" event\n\t\t\t\tctrl.claims.Update(claim)\n\t\t\t\terr = ctrl.syncClaim(context.TODO(), claim)\n\t\t\t\tif err != nil {\n\t\t\t\t\tif err == pvtesting.ErrVersionConflict {\n\t\t\t\t\t\t// Ignore version errors\n\t\t\t\t\t\tlogger.V(4).Info(\"Test intentionally ignores version error\")\n\t\t\t\t\t} else {\n\t\t\t\t\t\tt.Errorf(\"Error calling syncClaim: %v\", err)\n\t\t\t\t\t\t// Finish the loop on the first error\n\t\t\t\t\t\tbreak\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\t// Process generated changes\n\t\t\t\tcontinue\n\t\t\tcase *v1.PersistentVolume:\n\t\t\t\tvolume := obj.(*v1.PersistentVolume)\n\t\t\t\t// Simulate \"volume updated\" event\n\t\t\t\tctrl.volumes.store.Update(volume)\n\t\t\t\terr = ctrl.syncVolume(context.TODO(), volume)\n\t\t\t\tif err != nil {\n\t\t\t\t\tif err == pvtesting.ErrVersionConflict {\n\t\t\t\t\t\t// Ignore version errors\n\t\t\t\t\t\tlogger.V(4).Info(\"Test intentionally ignores version error\")\n\t\t\t\t\t} else {\n\t\t\t\t\t\tt.Errorf(\"Error calling syncVolume: %v\", err)\n\t\t\t\t\t\t// Finish the loop on the first error\n\t\t\t\t\t\tbreak\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\t// Process generated changes\n\t\t\t\tcontinue\n\t\t\t}\n\t\t}\n\t\tevaluateTestResults(ctx, ctrl, reactor.VolumeReactor, test, t)\n\t\tlogger.V(4).Info(\"Test finished after iterations\", \"testName\", test.name, \"iterations\", counter)\n\t}\n\n\tfor _, test := range tests {\n\t\ttest := test\n\t\tt.Run(test.name, func(t *testing.T) {\n\t\t\tt.Parallel()\n\t\t\trun(t, test)\n\t\t})\n\t}\n}", "func TestDevPortForwardDeletePod(t *testing.T) {\n\tMarkIntegrationTest(t, CanRunWithoutGcp)\n\ttests := []struct {\n\t\tdir string\n\t}{\n\t\t{dir: \"examples/microservices\"},\n\t\t{dir: \"examples/multi-config-microservices\"},\n\t}\n\tfor _, test := range tests {\n\t\t// pre-build images to avoid tripping the 
1-minute timeout in getLocalPortFromPortForwardEvent()\n\t\tskaffold.Build().InDir(test.dir).RunOrFail(t)\n\n\t\tns, client := SetupNamespace(t)\n\n\t\trpcAddr := randomPort()\n\t\tskaffold.Dev(\"--port-forward\", \"--rpc-port\", rpcAddr).InDir(test.dir).InNs(ns.Name).RunBackground(t)\n\t\tclient.WaitForDeploymentsToStabilize(\"leeroy-app\")\n\n\t\t_, entries := apiEvents(t, rpcAddr)\n\n\t\taddress, localPort := getLocalPortFromPortForwardEvent(t, entries, \"leeroy-app\", \"service\", ns.Name)\n\t\tassertResponseFromPort(t, address, localPort, constants.LeeroyAppResponse)\n\n\t\t// now, delete all pods in this namespace.\n\t\tRun(t, \".\", \"kubectl\", \"delete\", \"pods\", \"--all\", \"-n\", ns.Name)\n\n\t\tassertResponseFromPort(t, address, localPort, constants.LeeroyAppResponse)\n\t}\n}", "func ChaosPodStatus(experimentsDetails *types.ExperimentDetails, clients environment.ClientSets) error {\n\n\tfor count := 0; count < (experimentsDetails.Duration / experimentsDetails.Delay); count++ {\n\n\t\tchaosEngine, err := clients.LitmusClient.ChaosEngines(experimentsDetails.ChaosNamespace).Get(experimentsDetails.EngineName, metav1.GetOptions{})\n\t\tif err != nil {\n\t\t\treturn errors.Errorf(\"fail to get the chaosengine %v err: %v\", experimentsDetails.EngineName, err)\n\t\t}\n\t\tif len(chaosEngine.Status.Experiments) == 0 {\n\t\t\ttime.Sleep(time.Duration(experimentsDetails.Delay) * time.Second)\n\t\t\tlog.Info(\"[Status]: Experiment initializing\")\n\t\t\tif count == ((experimentsDetails.Duration / experimentsDetails.Delay) - 1) {\n\t\t\t\treturn errors.Errorf(\"Experiment pod fail to initialise, due to %v\", err)\n\t\t\t}\n\n\t\t} else if len(chaosEngine.Status.Experiments[0].ExpPod) == 0 {\n\t\t\ttime.Sleep(time.Duration(experimentsDetails.Delay) * time.Second)\n\t\t\tif count == ((experimentsDetails.Duration / experimentsDetails.Delay) - 1) {\n\t\t\t\treturn errors.Errorf(\"Experiment pod fails to create, due to %v\", err)\n\t\t\t}\n\t\t} else if 
chaosEngine.Status.Experiments[0].Status != \"Running\" {\n\t\t\ttime.Sleep(time.Duration(experimentsDetails.Delay) * time.Second)\n\t\t\tlog.Infof(\"[Status]: Currently, the Chaos Pod is in %v state, Please Wait...\", chaosEngine.Status.Experiments[0].Status)\n\t\t\tif count == ((experimentsDetails.Duration / experimentsDetails.Delay) - 1) {\n\t\t\t\treturn errors.Errorf(\"Experiment pod fails to get in running state, due to %v\", err)\n\t\t\t}\n\t\t} else {\n\t\t\tbreak\n\t\t}\n\t}\n\tlog.Info(\"[Status]: Chaos pod initiated successfully\")\n\treturn nil\n}", "func TestServiceCreateWithMultipleContainers(t *testing.T) {\n\tif test.ServingFlags.DisableOptionalAPI {\n\t\tt.Skip(\"Multiple containers support is not required by Knative Serving API Specification\")\n\t}\n\tif !test.ServingFlags.EnableBetaFeatures {\n\t\tt.Skip()\n\t}\n\tt.Parallel()\n\tclients := test.Setup(t)\n\n\tnames := test.ResourceNames{\n\t\tService: test.ObjectNameForTest(t),\n\t\tImage: test.ServingContainer,\n\t\tSidecars: []string{\n\t\t\ttest.SidecarContainer,\n\t\t},\n\t}\n\n\t// Clean up on test failure or interrupt\n\ttest.EnsureTearDown(t, clients, &names)\n\tcontainers := []corev1.Container{{\n\t\tImage: pkgtest.ImagePath(names.Image),\n\t\tPorts: []corev1.ContainerPort{{\n\t\t\tContainerPort: 8881,\n\t\t}},\n\t}, {\n\t\tImage: pkgtest.ImagePath(names.Sidecars[0]),\n\t}}\n\n\t// Setup initial Service\n\tif _, err := v1test.CreateServiceReady(t, clients, &names, func(svc *v1.Service) {\n\t\tsvc.Spec.Template.Spec.Containers = containers\n\t}); err != nil {\n\t\tt.Fatalf(\"Failed to create initial Service %v: %v\", names.Service, err)\n\t}\n\n\t// Validate State after Creation\n\tif err := validateControlPlane(t, clients, names, \"1\" /*1 is the expected generation value*/); err != nil {\n\t\tt.Error(err)\n\t}\n\n\tif err := validateDataPlane(t, clients, names, test.MultiContainerResponse); err != nil {\n\t\tt.Error(err)\n\t}\n}" ]
[ "0.8165727", "0.80359614", "0.79608786", "0.5846746", "0.5815157", "0.5800607", "0.5719953", "0.55622447", "0.55490845", "0.55087394", "0.55010694", "0.5484525", "0.5433796", "0.5399612", "0.53902775", "0.53603613", "0.5314078", "0.53021586", "0.52998984", "0.52553374", "0.5234496", "0.52341104", "0.51682353", "0.514461", "0.51414704", "0.51267165", "0.5114701", "0.50759375", "0.5050211", "0.50471246", "0.50381887", "0.50329536", "0.500362", "0.4990736", "0.4979914", "0.49641868", "0.49480525", "0.4946543", "0.49372634", "0.49074292", "0.490263", "0.4901867", "0.4898448", "0.48831004", "0.48828962", "0.488163", "0.4876671", "0.48685402", "0.48486874", "0.48321512", "0.48219407", "0.4820718", "0.48184946", "0.48110706", "0.480729", "0.48004287", "0.47969425", "0.47917008", "0.47881517", "0.47854638", "0.47851896", "0.47836116", "0.4780199", "0.47764817", "0.4765915", "0.47632578", "0.47583282", "0.47495988", "0.47403494", "0.47396177", "0.47374162", "0.47293258", "0.4723519", "0.47147277", "0.47114336", "0.47090712", "0.4706003", "0.47010234", "0.46962607", "0.4691396", "0.46893394", "0.46865094", "0.46864063", "0.46858487", "0.46815282", "0.46812484", "0.4677864", "0.46731976", "0.4658944", "0.46509907", "0.46443397", "0.46411046", "0.4633852", "0.46325597", "0.46298867", "0.4623521", "0.46234474", "0.46212965", "0.46029004", "0.4602257" ]
0.8077532
1
TestConcurrentBuildPodControllers tests the lifecycle of a build pod when running multiple controllers.
TestConcurrentBuildPodControllers проверяет жизненный цикл сборочного пода при запуске нескольких контроллеров.
func TestConcurrentBuildPodControllers(t *testing.T) { defer testutil.DumpEtcdOnFailure(t) // Start a master with multiple BuildPodControllers osClient, kClient := setupBuildControllerTest(controllerCount{BuildPodControllers: 5}, t) build.RunBuildPodControllerTest(t, osClient, kClient) }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func TestConcurrentBuildControllersPodSync(t *testing.T) {\n\t// Start a master with multiple BuildControllers\n\tbuildClient, _, kClient, fn := setupBuildControllerTest(controllerCount{BuildControllers: 5}, t)\n\tdefer fn()\n\tbuild.RunBuildControllerPodSyncTest(t, buildClient, kClient)\n}", "func TestConcurrentBuildControllers(t *testing.T) {\n\tdefer testutil.DumpEtcdOnFailure(t)\n\t// Start a master with multiple BuildControllers\n\tosClient, kClient := setupBuildControllerTest(controllerCount{BuildControllers: 5}, t)\n\tbuild.RunBuildControllerTest(t, osClient, kClient)\n}", "func TestConcurrentBuildControllers(t *testing.T) {\n\t// Start a master with multiple BuildControllers\n\tbuildClient, _, kClient, fn := setupBuildControllerTest(controllerCount{BuildControllers: 5}, t)\n\tdefer fn()\n\tbuild.RunBuildControllerTest(t, buildClient, kClient)\n}", "func validateController(ctx context.Context, c clientset.Interface, containerImage string, replicas int, containername string, testname string, validator validatorFn, ns string) {\n\tcontainerImage = trimDockerRegistry(containerImage)\n\tgetPodsTemplate := \"--template={{range.items}}{{.metadata.name}} {{end}}\"\n\n\tgetContainerStateTemplate := fmt.Sprintf(`--template={{if (exists . \"status\" \"containerStatuses\")}}{{range .status.containerStatuses}}{{if (and (eq .name \"%s\") (exists . \"state\" \"running\"))}}true{{end}}{{end}}{{end}}`, containername)\n\n\tgetImageTemplate := fmt.Sprintf(`--template={{if (exists . 
\"spec\" \"containers\")}}{{range .spec.containers}}{{if eq .name \"%s\"}}{{.image}}{{end}}{{end}}{{end}}`, containername)\n\n\tginkgo.By(fmt.Sprintf(\"waiting for all containers in %s pods to come up.\", testname)) //testname should be selector\nwaitLoop:\n\tfor start := time.Now(); time.Since(start) < framework.PodStartTimeout && ctx.Err() == nil; time.Sleep(5 * time.Second) {\n\t\tgetPodsOutput := e2ekubectl.RunKubectlOrDie(ns, \"get\", \"pods\", \"-o\", \"template\", getPodsTemplate, \"-l\", testname)\n\t\tpods := strings.Fields(getPodsOutput)\n\t\tif numPods := len(pods); numPods != replicas {\n\t\t\tginkgo.By(fmt.Sprintf(\"Replicas for %s: expected=%d actual=%d\", testname, replicas, numPods))\n\t\t\tcontinue\n\t\t}\n\t\tvar runningPods []string\n\t\tfor _, podID := range pods {\n\t\t\trunning := e2ekubectl.RunKubectlOrDie(ns, \"get\", \"pods\", podID, \"-o\", \"template\", getContainerStateTemplate)\n\t\t\tif running != \"true\" {\n\t\t\t\tframework.Logf(\"%s is created but not running\", podID)\n\t\t\t\tcontinue waitLoop\n\t\t\t}\n\n\t\t\tcurrentImage := e2ekubectl.RunKubectlOrDie(ns, \"get\", \"pods\", podID, \"-o\", \"template\", getImageTemplate)\n\t\t\tcurrentImage = trimDockerRegistry(currentImage)\n\t\t\tif currentImage != containerImage {\n\t\t\t\tframework.Logf(\"%s is created but running wrong image; expected: %s, actual: %s\", podID, containerImage, currentImage)\n\t\t\t\tcontinue waitLoop\n\t\t\t}\n\n\t\t\t// Call the generic validator function here.\n\t\t\t// This might validate for example, that (1) getting a url works and (2) url is serving correct content.\n\t\t\tif err := validator(ctx, c, podID); err != nil {\n\t\t\t\tframework.Logf(\"%s is running right image but validator function failed: %v\", podID, err)\n\t\t\t\tcontinue waitLoop\n\t\t\t}\n\n\t\t\tframework.Logf(\"%s is verified up and running\", podID)\n\t\t\trunningPods = append(runningPods, podID)\n\t\t}\n\t\t// If we reach here, then all our checks passed.\n\t\tif len(runningPods) 
== replicas {\n\t\t\treturn\n\t\t}\n\t}\n\t// Reaching here means that one of more checks failed multiple times. Assuming its not a race condition, something is broken.\n\tframework.Failf(\"Timed out after %v seconds waiting for %s pods to reach valid state\", framework.PodStartTimeout.Seconds(), testname)\n}", "func TestController(t *testing.T) {\n\tctx, _ := rtesting.SetupFakeContext(t)\n\tctx, cancel := context.WithTimeout(ctx, 30*time.Second)\n\tdefer cancel()\n\n\t// Create reconcilers, start controller.\n\tresults := test.NewResultsClient(t)\n\n\ttrctrl := taskrun.NewController(ctx, results)\n\tprctrl := pipelinerun.NewController(ctx, results)\n\tgo controller.StartAll(ctx, trctrl, prctrl)\n\n\t// Start informers - this notifies the controller of new events.\n\tgo taskruninformer.Get(ctx).Informer().Run(ctx.Done())\n\tgo pipelineruninformer.Get(ctx).Informer().Run(ctx.Done())\n\n\tpipeline := fakepipelineclient.Get(ctx)\n\tt.Run(\"taskrun\", func(t *testing.T) {\n\t\ttr := &v1beta1.TaskRun{\n\t\t\tTypeMeta: metav1.TypeMeta{\n\t\t\t\tAPIVersion: \"tekton.dev/v1beta1\",\n\t\t\t\tKind: \"TaskRun\",\n\t\t\t},\n\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\tName: \"taskrun\",\n\t\t\t\tNamespace: \"ns\",\n\t\t\t\tAnnotations: map[string]string{\n\t\t\t\t\t\"demo\": \"demo\",\n\t\t\t\t\t// This TaskRun belongs to a PipelineRun, so the record should\n\t\t\t\t\t// be associated with the PipelineRun result.\n\t\t\t\t\t\"tekton.dev/pipelineRun\": \"pr\",\n\t\t\t\t},\n\t\t\t\tOwnerReferences: []metav1.OwnerReference{{\n\t\t\t\t\tAPIVersion: \"tekton.dev/v1beta1\",\n\t\t\t\t\tKind: \"PipelineRun\",\n\t\t\t\t\tUID: \"pr-id\",\n\t\t\t\t}},\n\t\t\t\tUID: \"tr-id\",\n\t\t\t},\n\t\t}\n\n\t\t// The following is a hack to make the fake clients play nice with\n\t\t// each other. 
While the controller uses the typed informer that uses\n\t\t// the fake pipeline client to receive events, the controller uses the\n\t\t// fake dynamic client to fetch and update objects during reconcile.\n\t\t// These fake clients store objects independently, so we create the\n\t\t// object in each client to make sure the data is populated in both\n\t\t// places.\n\t\tif _, err := pipeline.TektonV1beta1().TaskRuns(tr.GetNamespace()).Create(tr); err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t\tdata, err := runtime.DefaultUnstructuredConverter.ToUnstructured(tr)\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"ToUnstructured: %v\", err)\n\t\t}\n\t\t_, err = dynamicinject.Get(ctx).Resource(apis.KindToResource(tr.GroupVersionKind())).Namespace(tr.GetNamespace()).Create(&unstructured.Unstructured{Object: data}, metav1.CreateOptions{})\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"Create: %v\", err)\n\t\t}\n\n\t\twait(ctx, t, tr, \"ns/results/pr-id\")\n\t})\n\n\tt.Run(\"pipelinerun\", func(t *testing.T) {\n\t\tpr := &v1beta1.PipelineRun{\n\t\t\tTypeMeta: metav1.TypeMeta{\n\t\t\t\tAPIVersion: \"tekton.dev/v1beta1\",\n\t\t\t\tKind: \"PipelineRun\",\n\t\t\t},\n\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\tName: \"pr\",\n\t\t\t\tNamespace: \"ns\",\n\t\t\t\tAnnotations: map[string]string{\"demo\": \"demo\"},\n\t\t\t\tUID: \"pr-id\",\n\t\t\t},\n\t\t}\n\n\t\t// Same create hack as taskrun (see above).\n\t\tif _, err := pipeline.TektonV1beta1().PipelineRuns(pr.GetNamespace()).Create(pr); err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t\tdata, err := runtime.DefaultUnstructuredConverter.ToUnstructured(pr)\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"ToUnstructured: %v\", err)\n\t\t}\n\t\t_, err = dynamicinject.Get(ctx).Resource(apis.KindToResource(pr.GroupVersionKind())).Namespace(pr.GetNamespace()).Create(&unstructured.Unstructured{Object: data}, metav1.CreateOptions{})\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"Create: %v\", err)\n\t\t}\n\n\t\twait(ctx, t, pr, \"ns/results/pr-id\")\n\t})\n}", "func 
TestBuildControllerNoBuildManifestsFirst(t *testing.T) {\n\tf := newTestFixture(t)\n\tdefer f.TearDown()\n\n\tmanifests := make([]model.Manifest, 10)\n\tfor i := 0; i < 10; i++ {\n\t\tsync := model.Sync{LocalPath: f.Path(), ContainerPath: \"/go\"}\n\t\tmanifests[i] = f.newManifest(fmt.Sprintf(\"built%d\", i+1), []model.Sync{sync})\n\t}\n\n\tfor _, i := range []int{3, 7, 8} {\n\t\tmanifests[i] = assembleK8sManifest(\n\t\t\tmodel.Manifest{\n\t\t\t\tName: model.ManifestName(fmt.Sprintf(\"unbuilt%d\", i+1))},\n\t\t\tmodel.K8sTarget{YAML: \"fake-yaml\"})\n\t}\n\tf.Start(manifests, true)\n\n\tvar observedBuildOrder []string\n\tfor i := 0; i < len(manifests); i++ {\n\t\tcall := f.nextCall()\n\t\tobservedBuildOrder = append(observedBuildOrder, call.k8s().Name.String())\n\t}\n\n\t// throwing a bunch of elements at it to increase confidence we maintain order between built and unbuilt\n\t// this might miss bugs since we might just get these elements back in the right order via luck\n\texpectedBuildOrder := []string{\n\t\t\"unbuilt4\",\n\t\t\"unbuilt8\",\n\t\t\"unbuilt9\",\n\t\t\"built1\",\n\t\t\"built2\",\n\t\t\"built3\",\n\t\t\"built5\",\n\t\t\"built6\",\n\t\t\"built7\",\n\t\t\"built10\",\n\t}\n\tassert.Equal(t, expectedBuildOrder, observedBuildOrder)\n}", "func (m *MockPodControllerFactory) Build(mgr mc_manager.AsyncManager, clusterName string) (controller1.PodController, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"Build\", mgr, clusterName)\n\tret0, _ := ret[0].(controller1.PodController)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func SetupAddControllers(k kubernetes.Interface, namespace string) kubernetes.Interface {\n\td1 := MockDeploy()\n\tif _, err := k.AppsV1().Deployments(namespace).Create(&d1); err != nil {\n\t\tpanic(err)\n\t}\n\n\ts1 := MockStatefulSet()\n\tif _, err := k.AppsV1().StatefulSets(namespace).Create(&s1); err != nil {\n\t\tpanic(err)\n\t}\n\n\tds1 := MockDaemonSet()\n\tif _, err := 
k.AppsV1().DaemonSets(namespace).Create(&ds1); err != nil {\n\t\tpanic(err)\n\t}\n\n\tj1 := MockJob()\n\tif _, err := k.BatchV1().Jobs(namespace).Create(&j1); err != nil {\n\t\tpanic(err)\n\t}\n\n\tcj1 := MockCronJob()\n\tif _, err := k.BatchV1beta1().CronJobs(namespace).Create(&cj1); err != nil {\n\t\tpanic(err)\n\t}\n\n\trc1 := MockReplicationController()\n\tif _, err := k.CoreV1().ReplicationControllers(namespace).Create(&rc1); err != nil {\n\t\tpanic(err)\n\t}\n\n\tp1 := MockNakedPod()\n\tif _, err := k.CoreV1().Pods(namespace).Create(&p1); err != nil {\n\t\tpanic(err)\n\t}\n\n\treturn k\n}", "func runSyncTests(t *testing.T, ctx context.Context, tests []controllerTest, storageClasses []*storage.StorageClass, pods []*v1.Pod) {\n\tdoit := func(t *testing.T, test controllerTest) {\n\t\t// Initialize the controller\n\t\tclient := &fake.Clientset{}\n\t\tctrl, err := newTestController(ctx, client, nil, true)\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"Test %q construct persistent volume failed: %v\", test.name, err)\n\t\t}\n\t\treactor := newVolumeReactor(ctx, client, ctrl, nil, nil, test.errors)\n\t\tfor _, claim := range test.initialClaims {\n\t\t\tif metav1.HasAnnotation(claim.ObjectMeta, annSkipLocalStore) {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tctrl.claims.Add(claim)\n\t\t}\n\t\tfor _, volume := range test.initialVolumes {\n\t\t\tif metav1.HasAnnotation(volume.ObjectMeta, annSkipLocalStore) {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tctrl.volumes.store.Add(volume)\n\t\t}\n\t\treactor.AddClaims(test.initialClaims)\n\t\treactor.AddVolumes(test.initialVolumes)\n\n\t\t// Inject classes into controller via a custom lister.\n\t\tindexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{})\n\t\tfor _, class := range storageClasses {\n\t\t\tindexer.Add(class)\n\t\t}\n\t\tctrl.classLister = storagelisters.NewStorageClassLister(indexer)\n\n\t\tpodIndexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{})\n\t\tfor _, pod := range pods 
{\n\t\t\tpodIndexer.Add(pod)\n\t\t\tctrl.podIndexer.Add(pod)\n\t\t}\n\t\tctrl.podLister = corelisters.NewPodLister(podIndexer)\n\n\t\t// Run the tested functions\n\t\terr = test.test(ctrl, reactor.VolumeReactor, test)\n\t\tif err != nil {\n\t\t\tt.Errorf(\"Test %q failed: %v\", test.name, err)\n\t\t}\n\n\t\t// Wait for the target state\n\t\terr = reactor.waitTest(test)\n\t\tif err != nil {\n\t\t\tt.Errorf(\"Test %q failed: %v\", test.name, err)\n\t\t}\n\n\t\tevaluateTestResults(ctx, ctrl, reactor.VolumeReactor, test, t)\n\t}\n\n\tfor _, test := range tests {\n\t\ttest := test\n\t\tt.Run(test.name, func(t *testing.T) {\n\t\t\tdoit(t, test)\n\t\t})\n\t}\n}", "func TestControllerInitPrepare_Parallel(t *testing.T) {\n\t_ = testlib.IntegrationEnv(t)\n\n\tt.Run(\"with parent context that is never canceled\", func(t *testing.T) {\n\t\tt.Parallel()\n\n\t\t// the nil params should never be used in this case\n\t\tbuildControllers := controllerinit.Prepare(nil, nil, buildBrokenInformer(t))\n\n\t\tstart := time.Now()\n\t\trunControllers, err := buildControllers(context.Background()) // we expect this to not block forever even with a context.Background()\n\t\tdelta := time.Since(start)\n\n\t\trequire.EqualError(t, err,\n\t\t\t\"failed to sync informers of k8s.io/client-go/informers.sharedInformerFactory: \"+\n\t\t\t\t\"[k8s.io/api/core/v1.Namespace k8s.io/api/core/v1.Node]\")\n\t\trequire.Nil(t, runControllers)\n\n\t\trequire.InDelta(t, time.Minute, delta, float64(30*time.Second))\n\t})\n\n\tt.Run(\"with parent context that is canceled early\", func(t *testing.T) {\n\t\tt.Parallel()\n\n\t\t// the nil params should never be used in this case\n\t\tbuildControllers := controllerinit.Prepare(nil, nil, buildBrokenInformer(t))\n\n\t\t// we expect this to exit sooner because the parent context is shorter\n\t\tctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)\n\t\tt.Cleanup(cancel)\n\n\t\tstart := time.Now()\n\t\trunControllers, err := 
buildControllers(ctx)\n\t\tdelta := time.Since(start)\n\n\t\trequire.EqualError(t, err,\n\t\t\t\"failed to sync informers of k8s.io/client-go/informers.sharedInformerFactory: \"+\n\t\t\t\t\"[k8s.io/api/core/v1.Namespace k8s.io/api/core/v1.Node]\")\n\t\trequire.Nil(t, runControllers)\n\n\t\trequire.InDelta(t, 10*time.Second, delta, float64(15*time.Second))\n\t})\n}", "func OperatorRunningTest(bundle *apimanifests.Bundle) scapiv1alpha3.TestStatus {\n\tr := scapiv1alpha3.TestResult{}\n\tr.Name = OperatorRunningTestName\n\tr.State = scapiv1alpha3.PassState\n\tr.Errors = make([]string, 0)\n\tr.Suggestions = make([]string, 0)\n\n\t//\ttime.Sleep(20 * time.Second)\n\n\t//clientset, config, err := util.GetKubeClient()\n\tclientset, _, err := util.GetKubeClient()\n\tif err != nil {\n\t\tr.State = scapiv1alpha3.FailState\n\t\tr.Errors = append(r.Errors, \"unable to connect to kube\")\n\t\treturn wrapResult(r)\n\t}\n\n\tns := \"tekton-pipelines\"\n\n\tnamespaces, err := clientset.CoreV1().Namespaces().List(context.TODO(), metav1.ListOptions{})\n\tif err != nil {\n\t\tr.State = scapiv1alpha3.FailState\n\t\tr.Errors = append(r.Errors, fmt.Sprintf(\"error getting namespaces %s\", err.Error()))\n\t\treturn wrapResult(r)\n\t}\n\n\tfor i := 0; i < len(namespaces.Items); i++ {\n\t\tn := namespaces.Items[i]\n\t\tif n.Name == \"openshift-pipelines\" {\n\t\t\tns = \"openshift-pipelines\"\n\t\t\tbreak\n\t\t}\n\t\tif n.Name == \"tekton-pipelines\" {\n\t\t\tns = \"tekton-pipelines\"\n\t\t\tbreak\n\t\t}\n\t}\n\n\tvar pods *corev1.PodList\n\tvar p corev1.Pod\n\n\t// look for a pod with this label\n\t//app=tekton-pipelines-controller\n\tselector := \"app=tekton-pipelines-controller\"\n\tlistOpts := metav1.ListOptions{LabelSelector: selector}\n\tpods, err = clientset.CoreV1().Pods(ns).List(context.TODO(), listOpts)\n\tif err != nil {\n\t\tr.State = scapiv1alpha3.FailState\n\t\tr.Errors = append(r.Errors, fmt.Sprintf(\"error getting pods %s\", err.Error()))\n\t\treturn wrapResult(r)\n\t}\n\tif 
len(pods.Items) == 0 {\n\t\tr.State = scapiv1alpha3.FailState\n\t\tr.Errors = append(r.Errors, \"tekton-pipelines-controller pod not found\")\n\t\treturn wrapResult(r)\n\t}\n\tp = pods.Items[0]\n\tif p.Status.Phase != corev1.PodRunning {\n\t\tr.State = scapiv1alpha3.FailState\n\t\tr.Errors = append(r.Errors, \"tekton-pipelines-controller pod not running\")\n\t\treturn wrapResult(r)\n\t}\n\n\t// look for a pod with this label\n\t//app=tekton-pipelines-webhook\n\tselector = \"app=tekton-pipelines-webhook\"\n\tlistOpts = metav1.ListOptions{LabelSelector: selector}\n\tpods, err = clientset.CoreV1().Pods(ns).List(context.TODO(), listOpts)\n\tif err != nil {\n\t\tr.State = scapiv1alpha3.FailState\n\t\tr.Errors = append(r.Errors, fmt.Sprintf(\"error getting pods %s\", err.Error()))\n\t\treturn wrapResult(r)\n\t}\n\tif len(pods.Items) == 0 {\n\t\tr.State = scapiv1alpha3.FailState\n\t\tr.Errors = append(r.Errors, \"tekton-pipelines-webhook pod not found\")\n\t\treturn wrapResult(r)\n\t}\n\n\tp = pods.Items[0]\n\n\tif p.Status.Phase != corev1.PodRunning {\n\t\tr.State = scapiv1alpha3.FailState\n\t\tr.Errors = append(r.Errors, \"tekton-pipelines-webhook pod not running\")\n\t\treturn wrapResult(r)\n\t}\n\n\treturn wrapResult(r)\n}", "func updatePodTests() []*SerialTestCase {\n\tsequence1Tests := []*SerialTestCase{\n\t\t{\n\t\t\tDescription: \"Sequence 1: Pod A create --> Policy create --> Pod A cleanup --> Pod B create\",\n\t\t\tActions: []*Action{\n\t\t\t\tFinishBootupPhase(),\n\t\t\t\tCreatePod(\"x\", \"a\", ip1, thisNode, map[string]string{\"k1\": \"v1\"}),\n\t\t\t\tApplyDP(),\n\t\t\t\tUpdatePolicy(policyXBaseOnK1V1()),\n\t\t\t\tUpdatePolicy(policyXBase2OnK2V2()),\n\t\t\t\tDeletePod(\"x\", \"a\", ip1, map[string]string{\"k1\": \"v1\"}),\n\t\t\t\tApplyDP(),\n\t\t\t\tCreatePod(\"x\", \"b\", ip1, thisNode, map[string]string{\"k2\": \"v2\"}),\n\t\t\t\tApplyDP(),\n\t\t\t},\n\t\t\tTestCaseMetadata: &TestCaseMetadata{\n\t\t\t\tTags: 
[]Tag{\n\t\t\t\t\tpodCrudTag,\n\t\t\t\t\tnetpolCrudTag,\n\t\t\t\t},\n\t\t\t\tDpCfg: defaultWindowsDPCfg,\n\t\t\t\tInitialEndpoints: []*hcn.HostComputeEndpoint{\n\t\t\t\t\tdptestutils.Endpoint(endpoint1, ip1),\n\t\t\t\t},\n\t\t\t\tExpectedSetPolicies: []*hcn.SetPolicySetting{\n\t\t\t\t\tdptestutils.SetPolicy(emptySet),\n\t\t\t\t\tdptestutils.SetPolicy(allNamespaces, emptySet.GetHashedName(), nsXSet.GetHashedName()),\n\t\t\t\t\tdptestutils.SetPolicy(nsXSet, ip1),\n\t\t\t\t\t// old labels (not yet garbage collected)\n\t\t\t\t\tdptestutils.SetPolicy(podK1Set),\n\t\t\t\t\tdptestutils.SetPolicy(podK1V1Set),\n\t\t\t\t\t// new labels\n\t\t\t\t\tdptestutils.SetPolicy(podK2Set, ip1),\n\t\t\t\t\tdptestutils.SetPolicy(podK2V2Set, ip1),\n\t\t\t\t},\n\t\t\t\tExpectedEnpdointACLs: map[string][]*hnswrapper.FakeEndpointPolicy{\n\t\t\t\t\tendpoint1: {\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tID: \"azure-acl-x-base2\",\n\t\t\t\t\t\t\tProtocols: \"\",\n\t\t\t\t\t\t\tAction: \"Allow\",\n\t\t\t\t\t\t\tDirection: \"In\",\n\t\t\t\t\t\t\tLocalAddresses: \"\",\n\t\t\t\t\t\t\tRemoteAddresses: \"\",\n\t\t\t\t\t\t\tLocalPorts: \"\",\n\t\t\t\t\t\t\tRemotePorts: \"\",\n\t\t\t\t\t\t\tPriority: 222,\n\t\t\t\t\t\t},\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tID: \"azure-acl-x-base2\",\n\t\t\t\t\t\t\tProtocols: \"\",\n\t\t\t\t\t\t\tAction: \"Allow\",\n\t\t\t\t\t\t\tDirection: \"Out\",\n\t\t\t\t\t\t\tLocalAddresses: \"\",\n\t\t\t\t\t\t\tRemoteAddresses: \"\",\n\t\t\t\t\t\t\tLocalPorts: \"\",\n\t\t\t\t\t\t\tRemotePorts: \"\",\n\t\t\t\t\t\t\tPriority: 222,\n\t\t\t\t\t\t},\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tID: \"azure-acl-x-base2\",\n\t\t\t\t\t\t\tAction: \"Allow\",\n\t\t\t\t\t\t\tDirection: \"In\",\n\t\t\t\t\t\t\tRemoteAddresses: testNodeIP,\n\t\t\t\t\t\t\tPriority: 201,\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tDescription: \"Sequence 1: Policy create --> Pod A create --> Pod A cleanup --> Pod B create\",\n\t\t\tActions: 
[]*Action{\n\t\t\t\tFinishBootupPhase(),\n\t\t\t\tUpdatePolicy(policyXBaseOnK1V1()),\n\t\t\t\tUpdatePolicy(policyXBase2OnK2V2()),\n\t\t\t\tCreatePod(\"x\", \"a\", ip1, thisNode, map[string]string{\"k1\": \"v1\"}),\n\t\t\t\tApplyDP(),\n\t\t\t\tDeletePod(\"x\", \"a\", ip1, map[string]string{\"k1\": \"v1\"}),\n\t\t\t\tApplyDP(),\n\t\t\t\tCreatePod(\"x\", \"b\", ip1, thisNode, map[string]string{\"k2\": \"v2\"}),\n\t\t\t\tApplyDP(),\n\t\t\t},\n\t\t\tTestCaseMetadata: &TestCaseMetadata{\n\t\t\t\tTags: []Tag{\n\t\t\t\t\tpodCrudTag,\n\t\t\t\t\tnetpolCrudTag,\n\t\t\t\t},\n\t\t\t\tDpCfg: defaultWindowsDPCfg,\n\t\t\t\tInitialEndpoints: []*hcn.HostComputeEndpoint{\n\t\t\t\t\tdptestutils.Endpoint(endpoint1, ip1),\n\t\t\t\t},\n\t\t\t\tExpectedSetPolicies: []*hcn.SetPolicySetting{\n\t\t\t\t\tdptestutils.SetPolicy(emptySet),\n\t\t\t\t\tdptestutils.SetPolicy(allNamespaces, emptySet.GetHashedName(), nsXSet.GetHashedName()),\n\t\t\t\t\tdptestutils.SetPolicy(nsXSet, ip1),\n\t\t\t\t\t// old labels (not yet garbage collected)\n\t\t\t\t\tdptestutils.SetPolicy(podK1Set),\n\t\t\t\t\tdptestutils.SetPolicy(podK1V1Set),\n\t\t\t\t\t// new labels\n\t\t\t\t\tdptestutils.SetPolicy(podK2Set, ip1),\n\t\t\t\t\tdptestutils.SetPolicy(podK2V2Set, ip1),\n\t\t\t\t},\n\t\t\t\tExpectedEnpdointACLs: map[string][]*hnswrapper.FakeEndpointPolicy{\n\t\t\t\t\tendpoint1: {\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tID: \"azure-acl-x-base2\",\n\t\t\t\t\t\t\tProtocols: \"\",\n\t\t\t\t\t\t\tAction: \"Allow\",\n\t\t\t\t\t\t\tDirection: \"In\",\n\t\t\t\t\t\t\tLocalAddresses: \"\",\n\t\t\t\t\t\t\tRemoteAddresses: \"\",\n\t\t\t\t\t\t\tLocalPorts: \"\",\n\t\t\t\t\t\t\tRemotePorts: \"\",\n\t\t\t\t\t\t\tPriority: 222,\n\t\t\t\t\t\t},\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tID: \"azure-acl-x-base2\",\n\t\t\t\t\t\t\tProtocols: \"\",\n\t\t\t\t\t\t\tAction: \"Allow\",\n\t\t\t\t\t\t\tDirection: \"Out\",\n\t\t\t\t\t\t\tLocalAddresses: \"\",\n\t\t\t\t\t\t\tRemoteAddresses: \"\",\n\t\t\t\t\t\t\tLocalPorts: \"\",\n\t\t\t\t\t\t\tRemotePorts: 
\"\",\n\t\t\t\t\t\t\tPriority: 222,\n\t\t\t\t\t\t},\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tID: \"azure-acl-x-base2\",\n\t\t\t\t\t\t\tAction: \"Allow\",\n\t\t\t\t\t\t\tDirection: \"In\",\n\t\t\t\t\t\t\tRemoteAddresses: testNodeIP,\n\t\t\t\t\t\t\tPriority: 201,\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tDescription: \"Sequence 1: Policy create --> Pod A create --> Pod A cleanup --> Pod B create (skip first apply DP)\",\n\t\t\tActions: []*Action{\n\t\t\t\tFinishBootupPhase(),\n\t\t\t\tUpdatePolicy(policyXBaseOnK1V1()),\n\t\t\t\tUpdatePolicy(policyXBase2OnK2V2()),\n\t\t\t\tCreatePod(\"x\", \"a\", ip1, thisNode, map[string]string{\"k1\": \"v1\"}),\n\t\t\t\tDeletePod(\"x\", \"a\", ip1, map[string]string{\"k1\": \"v1\"}),\n\t\t\t\tApplyDP(),\n\t\t\t\tCreatePod(\"x\", \"b\", ip1, thisNode, map[string]string{\"k2\": \"v2\"}),\n\t\t\t\tApplyDP(),\n\t\t\t},\n\t\t\tTestCaseMetadata: &TestCaseMetadata{\n\t\t\t\tTags: []Tag{\n\t\t\t\t\tpodCrudTag,\n\t\t\t\t\tnetpolCrudTag,\n\t\t\t\t},\n\t\t\t\tDpCfg: defaultWindowsDPCfg,\n\t\t\t\tInitialEndpoints: []*hcn.HostComputeEndpoint{\n\t\t\t\t\tdptestutils.Endpoint(endpoint1, ip1),\n\t\t\t\t},\n\t\t\t\tExpectedSetPolicies: []*hcn.SetPolicySetting{\n\t\t\t\t\tdptestutils.SetPolicy(emptySet),\n\t\t\t\t\tdptestutils.SetPolicy(allNamespaces, emptySet.GetHashedName(), nsXSet.GetHashedName()),\n\t\t\t\t\tdptestutils.SetPolicy(nsXSet, ip1),\n\t\t\t\t\t// old labels (not yet garbage collected)\n\t\t\t\t\tdptestutils.SetPolicy(podK1Set),\n\t\t\t\t\tdptestutils.SetPolicy(podK1V1Set),\n\t\t\t\t\t// new labels\n\t\t\t\t\tdptestutils.SetPolicy(podK2Set, ip1),\n\t\t\t\t\tdptestutils.SetPolicy(podK2V2Set, ip1),\n\t\t\t\t},\n\t\t\t\tExpectedEnpdointACLs: map[string][]*hnswrapper.FakeEndpointPolicy{\n\t\t\t\t\tendpoint1: {\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tID: \"azure-acl-x-base2\",\n\t\t\t\t\t\t\tProtocols: \"\",\n\t\t\t\t\t\t\tAction: \"Allow\",\n\t\t\t\t\t\t\tDirection: \"In\",\n\t\t\t\t\t\t\tLocalAddresses: 
\"\",\n\t\t\t\t\t\t\tRemoteAddresses: \"\",\n\t\t\t\t\t\t\tLocalPorts: \"\",\n\t\t\t\t\t\t\tRemotePorts: \"\",\n\t\t\t\t\t\t\tPriority: 222,\n\t\t\t\t\t\t},\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tID: \"azure-acl-x-base2\",\n\t\t\t\t\t\t\tProtocols: \"\",\n\t\t\t\t\t\t\tAction: \"Allow\",\n\t\t\t\t\t\t\tDirection: \"Out\",\n\t\t\t\t\t\t\tLocalAddresses: \"\",\n\t\t\t\t\t\t\tRemoteAddresses: \"\",\n\t\t\t\t\t\t\tLocalPorts: \"\",\n\t\t\t\t\t\t\tRemotePorts: \"\",\n\t\t\t\t\t\t\tPriority: 222,\n\t\t\t\t\t\t},\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tID: \"azure-acl-x-base2\",\n\t\t\t\t\t\t\tAction: \"Allow\",\n\t\t\t\t\t\t\tDirection: \"In\",\n\t\t\t\t\t\t\tRemoteAddresses: testNodeIP,\n\t\t\t\t\t\t\tPriority: 201,\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tDescription: \"Sequence 1: Policy create --> Pod A create --> Pod A cleanup --> Pod B create (skip first two apply DP)\",\n\t\t\tActions: []*Action{\n\t\t\t\tFinishBootupPhase(),\n\t\t\t\tUpdatePolicy(policyXBaseOnK1V1()),\n\t\t\t\tUpdatePolicy(policyXBase2OnK2V2()),\n\t\t\t\tCreatePod(\"x\", \"a\", ip1, thisNode, map[string]string{\"k1\": \"v1\"}),\n\t\t\t\tDeletePod(\"x\", \"a\", ip1, map[string]string{\"k1\": \"v1\"}),\n\t\t\t\tCreatePod(\"x\", \"b\", ip1, thisNode, map[string]string{\"k2\": \"v2\"}),\n\t\t\t\tApplyDP(),\n\t\t\t},\n\t\t\tTestCaseMetadata: &TestCaseMetadata{\n\t\t\t\tTags: []Tag{\n\t\t\t\t\tpodCrudTag,\n\t\t\t\t\tnetpolCrudTag,\n\t\t\t\t},\n\t\t\t\tDpCfg: defaultWindowsDPCfg,\n\t\t\t\tInitialEndpoints: []*hcn.HostComputeEndpoint{\n\t\t\t\t\tdptestutils.Endpoint(endpoint1, ip1),\n\t\t\t\t},\n\t\t\t\tExpectedSetPolicies: []*hcn.SetPolicySetting{\n\t\t\t\t\tdptestutils.SetPolicy(emptySet),\n\t\t\t\t\tdptestutils.SetPolicy(allNamespaces, emptySet.GetHashedName(), nsXSet.GetHashedName()),\n\t\t\t\t\tdptestutils.SetPolicy(nsXSet, ip1),\n\t\t\t\t\t// old labels (not yet garbage collected)\n\t\t\t\t\tdptestutils.SetPolicy(podK1Set),\n\t\t\t\t\tdptestutils.SetPolicy(podK1V1Set),\n\t\t\t\t\t// 
new labels\n\t\t\t\t\tdptestutils.SetPolicy(podK2Set, ip1),\n\t\t\t\t\tdptestutils.SetPolicy(podK2V2Set, ip1),\n\t\t\t\t},\n\t\t\t\tExpectedEnpdointACLs: map[string][]*hnswrapper.FakeEndpointPolicy{\n\t\t\t\t\tendpoint1: {\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tID: \"azure-acl-x-base2\",\n\t\t\t\t\t\t\tProtocols: \"\",\n\t\t\t\t\t\t\tAction: \"Allow\",\n\t\t\t\t\t\t\tDirection: \"In\",\n\t\t\t\t\t\t\tLocalAddresses: \"\",\n\t\t\t\t\t\t\tRemoteAddresses: \"\",\n\t\t\t\t\t\t\tLocalPorts: \"\",\n\t\t\t\t\t\t\tRemotePorts: \"\",\n\t\t\t\t\t\t\tPriority: 222,\n\t\t\t\t\t\t},\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tID: \"azure-acl-x-base2\",\n\t\t\t\t\t\t\tProtocols: \"\",\n\t\t\t\t\t\t\tAction: \"Allow\",\n\t\t\t\t\t\t\tDirection: \"Out\",\n\t\t\t\t\t\t\tLocalAddresses: \"\",\n\t\t\t\t\t\t\tRemoteAddresses: \"\",\n\t\t\t\t\t\t\tLocalPorts: \"\",\n\t\t\t\t\t\t\tRemotePorts: \"\",\n\t\t\t\t\t\t\tPriority: 222,\n\t\t\t\t\t\t},\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tID: \"azure-acl-x-base2\",\n\t\t\t\t\t\t\tAction: \"Allow\",\n\t\t\t\t\t\t\tDirection: \"In\",\n\t\t\t\t\t\t\tRemoteAddresses: testNodeIP,\n\t\t\t\t\t\t\tPriority: 201,\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\n\tsequence2Tests := []*SerialTestCase{\n\t\t{\n\t\t\tDescription: \"Sequence 2 with Calico network\",\n\t\t\tActions: []*Action{\n\t\t\t\tFinishBootupPhase(),\n\t\t\t\tUpdatePolicy(policyXBaseOnK1V1()),\n\t\t\t\tUpdatePolicy(policyXBase2OnK2V2()),\n\t\t\t\tCreatePod(\"x\", \"a\", ip1, thisNode, map[string]string{\"k1\": \"v1\"}),\n\t\t\t\tApplyDP(),\n\t\t\t\tCreatePod(\"x\", \"b\", ip1, thisNode, map[string]string{\"k2\": \"v2\"}),\n\t\t\t\tApplyDP(),\n\t\t\t},\n\t\t\tTestCaseMetadata: &TestCaseMetadata{\n\t\t\t\tTags: []Tag{\n\t\t\t\t\tpodCrudTag,\n\t\t\t\t\tnetpolCrudTag,\n\t\t\t\t},\n\t\t\t\tDpCfg: windowsCalicoDPCfg,\n\t\t\t\tInitialEndpoints: []*hcn.HostComputeEndpoint{\n\t\t\t\t\tdptestutils.Endpoint(endpoint1, ip1),\n\t\t\t\t},\n\t\t\t\tExpectedSetPolicies: 
[]*hcn.SetPolicySetting{\n\t\t\t\t\tdptestutils.SetPolicy(emptySet),\n\t\t\t\t\tdptestutils.SetPolicy(allNamespaces, emptySet.GetHashedName(), nsXSet.GetHashedName()),\n\t\t\t\t\tdptestutils.SetPolicy(nsXSet, ip1),\n\t\t\t\t\t// IP temporarily associated with IPSets of both pod A and pod B\n\t\t\t\t\t// Pod A sets\n\t\t\t\t\tdptestutils.SetPolicy(podK1Set, ip1),\n\t\t\t\t\tdptestutils.SetPolicy(podK1V1Set, ip1),\n\t\t\t\t\t// Pod B sets\n\t\t\t\t\tdptestutils.SetPolicy(podK2Set, ip1),\n\t\t\t\t\tdptestutils.SetPolicy(podK2V2Set, ip1),\n\t\t\t\t},\n\t\t\t\tExpectedEnpdointACLs: map[string][]*hnswrapper.FakeEndpointPolicy{\n\t\t\t\t\tendpoint1: {\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tID: \"azure-acl-x-base2\",\n\t\t\t\t\t\t\tProtocols: \"\",\n\t\t\t\t\t\t\tAction: \"Allow\",\n\t\t\t\t\t\t\tDirection: \"In\",\n\t\t\t\t\t\t\tLocalAddresses: \"\",\n\t\t\t\t\t\t\tRemoteAddresses: \"\",\n\t\t\t\t\t\t\tLocalPorts: \"\",\n\t\t\t\t\t\t\tRemotePorts: \"\",\n\t\t\t\t\t\t\tPriority: 222,\n\t\t\t\t\t\t},\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tID: \"azure-acl-x-base2\",\n\t\t\t\t\t\t\tProtocols: \"\",\n\t\t\t\t\t\t\tAction: \"Allow\",\n\t\t\t\t\t\t\tDirection: \"Out\",\n\t\t\t\t\t\t\tLocalAddresses: \"\",\n\t\t\t\t\t\t\tRemoteAddresses: \"\",\n\t\t\t\t\t\t\tLocalPorts: \"\",\n\t\t\t\t\t\t\tRemotePorts: \"\",\n\t\t\t\t\t\t\tPriority: 222,\n\t\t\t\t\t\t},\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tID: \"azure-acl-x-base2\",\n\t\t\t\t\t\t\tAction: \"Allow\",\n\t\t\t\t\t\t\tDirection: \"In\",\n\t\t\t\t\t\t\tRemoteAddresses: testNodeIP,\n\t\t\t\t\t\t\tPriority: 201,\n\t\t\t\t\t\t},\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tID: \"azure-acl-baseazurewireserver\",\n\t\t\t\t\t\t\tAction: \"Block\",\n\t\t\t\t\t\t\tDirection: \"Out\",\n\t\t\t\t\t\t\tPriority: 200,\n\t\t\t\t\t\t\tRemoteAddresses: \"168.63.129.16/32\",\n\t\t\t\t\t\t\tRemotePorts: \"80\",\n\t\t\t\t\t\t\tProtocols: \"6\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tID: \"azure-acl-baseallowinswitch\",\n\t\t\t\t\t\t\tAction: \"Allow\",\n\t\t\t\t\t\t\tDirection: 
\"In\",\n\t\t\t\t\t\t\tPriority: 65499,\n\t\t\t\t\t\t},\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tID: \"azure-acl-baseallowoutswitch\",\n\t\t\t\t\t\t\tAction: \"Allow\",\n\t\t\t\t\t\t\tDirection: \"Out\",\n\t\t\t\t\t\t\tPriority: 65499,\n\t\t\t\t\t\t},\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tID: \"azure-acl-baseallowinhost\",\n\t\t\t\t\t\t\tAction: \"Allow\",\n\t\t\t\t\t\t\tDirection: \"In\",\n\t\t\t\t\t\t\tLocalAddresses: \"\",\n\t\t\t\t\t\t\tPriority: 0,\n\t\t\t\t\t\t\tRemoteAddresses: \"\",\n\t\t\t\t\t\t\t// RuleType is unsupported in FakeEndpointPolicy\n\t\t\t\t\t\t\t// RuleType: \"Host\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tID: \"azure-acl-baseallowouthost\",\n\t\t\t\t\t\t\tAction: \"Allow\",\n\t\t\t\t\t\t\tDirection: \"Out\",\n\t\t\t\t\t\t\tLocalAddresses: \"\",\n\t\t\t\t\t\t\tPriority: 0,\n\t\t\t\t\t\t\tRemoteAddresses: \"\",\n\t\t\t\t\t\t\t// RuleType is unsupported in FakeEndpointPolicy\n\t\t\t\t\t\t\t// RuleType: \"Host\",\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tDescription: \"Sequence 2: Policy create --> Pod A Create --> Pod B create\",\n\t\t\tActions: []*Action{\n\t\t\t\tFinishBootupPhase(),\n\t\t\t\tUpdatePolicy(policyXBaseOnK1V1()),\n\t\t\t\tUpdatePolicy(policyXBase2OnK2V2()),\n\t\t\t\tCreatePod(\"x\", \"a\", ip1, thisNode, map[string]string{\"k1\": \"v1\"}),\n\t\t\t\tApplyDP(),\n\t\t\t\tCreatePod(\"x\", \"b\", ip1, thisNode, map[string]string{\"k2\": \"v2\"}),\n\t\t\t\tApplyDP(),\n\t\t\t},\n\t\t\tTestCaseMetadata: &TestCaseMetadata{\n\t\t\t\tTags: []Tag{\n\t\t\t\t\tpodCrudTag,\n\t\t\t\t\tnetpolCrudTag,\n\t\t\t\t},\n\t\t\t\tDpCfg: defaultWindowsDPCfg,\n\t\t\t\tInitialEndpoints: []*hcn.HostComputeEndpoint{\n\t\t\t\t\tdptestutils.Endpoint(endpoint1, ip1),\n\t\t\t\t},\n\t\t\t\tExpectedSetPolicies: []*hcn.SetPolicySetting{\n\t\t\t\t\tdptestutils.SetPolicy(emptySet),\n\t\t\t\t\tdptestutils.SetPolicy(allNamespaces, emptySet.GetHashedName(), nsXSet.GetHashedName()),\n\t\t\t\t\tdptestutils.SetPolicy(nsXSet, ip1),\n\t\t\t\t\t// IP 
temporarily associated with IPSets of both pod A and pod B\n\t\t\t\t\t// Pod A sets\n\t\t\t\t\tdptestutils.SetPolicy(podK1Set, ip1),\n\t\t\t\t\tdptestutils.SetPolicy(podK1V1Set, ip1),\n\t\t\t\t\t// Pod B sets\n\t\t\t\t\tdptestutils.SetPolicy(podK2Set, ip1),\n\t\t\t\t\tdptestutils.SetPolicy(podK2V2Set, ip1),\n\t\t\t\t},\n\t\t\t\tExpectedEnpdointACLs: map[string][]*hnswrapper.FakeEndpointPolicy{\n\t\t\t\t\tendpoint1: {\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tID: \"azure-acl-x-base2\",\n\t\t\t\t\t\t\tProtocols: \"\",\n\t\t\t\t\t\t\tAction: \"Allow\",\n\t\t\t\t\t\t\tDirection: \"In\",\n\t\t\t\t\t\t\tLocalAddresses: \"\",\n\t\t\t\t\t\t\tRemoteAddresses: \"\",\n\t\t\t\t\t\t\tLocalPorts: \"\",\n\t\t\t\t\t\t\tRemotePorts: \"\",\n\t\t\t\t\t\t\tPriority: 222,\n\t\t\t\t\t\t},\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tID: \"azure-acl-x-base2\",\n\t\t\t\t\t\t\tProtocols: \"\",\n\t\t\t\t\t\t\tAction: \"Allow\",\n\t\t\t\t\t\t\tDirection: \"Out\",\n\t\t\t\t\t\t\tLocalAddresses: \"\",\n\t\t\t\t\t\t\tRemoteAddresses: \"\",\n\t\t\t\t\t\t\tLocalPorts: \"\",\n\t\t\t\t\t\t\tRemotePorts: \"\",\n\t\t\t\t\t\t\tPriority: 222,\n\t\t\t\t\t\t},\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tID: \"azure-acl-x-base2\",\n\t\t\t\t\t\t\tAction: \"Allow\",\n\t\t\t\t\t\t\tDirection: \"In\",\n\t\t\t\t\t\t\tRemoteAddresses: testNodeIP,\n\t\t\t\t\t\t\tPriority: 201,\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tDescription: \"Sequence 2: Policy create --> Pod A Create --> Pod B create --> Pod A cleanup\",\n\t\t\tActions: []*Action{\n\t\t\t\tFinishBootupPhase(),\n\t\t\t\tUpdatePolicy(policyXBaseOnK1V1()),\n\t\t\t\tUpdatePolicy(policyXBase2OnK2V2()),\n\t\t\t\tCreatePod(\"x\", \"a\", ip1, thisNode, map[string]string{\"k1\": \"v1\"}),\n\t\t\t\tApplyDP(),\n\t\t\t\tCreatePod(\"x\", \"b\", ip1, thisNode, map[string]string{\"k2\": \"v2\"}),\n\t\t\t\tApplyDP(),\n\t\t\t\tDeletePod(\"x\", \"a\", ip1, map[string]string{\"k1\": \"v1\"}),\n\t\t\t\tApplyDP(),\n\t\t\t},\n\t\t\tTestCaseMetadata: &TestCaseMetadata{\n\t\t\t\tTags: 
[]Tag{\n\t\t\t\t\tpodCrudTag,\n\t\t\t\t\tnetpolCrudTag,\n\t\t\t\t},\n\t\t\t\tDpCfg: defaultWindowsDPCfg,\n\t\t\t\tInitialEndpoints: []*hcn.HostComputeEndpoint{\n\t\t\t\t\tdptestutils.Endpoint(endpoint1, ip1),\n\t\t\t\t},\n\t\t\t\tExpectedSetPolicies: []*hcn.SetPolicySetting{\n\t\t\t\t\tdptestutils.SetPolicy(emptySet),\n\t\t\t\t\tdptestutils.SetPolicy(allNamespaces, emptySet.GetHashedName(), nsXSet.GetHashedName()),\n\t\t\t\t\tdptestutils.SetPolicy(nsXSet, ip1),\n\t\t\t\t\t// old labels (not yet garbage collected)\n\t\t\t\t\tdptestutils.SetPolicy(podK1Set),\n\t\t\t\t\tdptestutils.SetPolicy(podK1V1Set),\n\t\t\t\t\t// new labels\n\t\t\t\t\tdptestutils.SetPolicy(podK2Set, ip1),\n\t\t\t\t\tdptestutils.SetPolicy(podK2V2Set, ip1),\n\t\t\t\t},\n\t\t\t\tExpectedEnpdointACLs: map[string][]*hnswrapper.FakeEndpointPolicy{\n\t\t\t\t\tendpoint1: {\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tID: \"azure-acl-x-base2\",\n\t\t\t\t\t\t\tProtocols: \"\",\n\t\t\t\t\t\t\tAction: \"Allow\",\n\t\t\t\t\t\t\tDirection: \"In\",\n\t\t\t\t\t\t\tLocalAddresses: \"\",\n\t\t\t\t\t\t\tRemoteAddresses: \"\",\n\t\t\t\t\t\t\tLocalPorts: \"\",\n\t\t\t\t\t\t\tRemotePorts: \"\",\n\t\t\t\t\t\t\tPriority: 222,\n\t\t\t\t\t\t},\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tID: \"azure-acl-x-base2\",\n\t\t\t\t\t\t\tProtocols: \"\",\n\t\t\t\t\t\t\tAction: \"Allow\",\n\t\t\t\t\t\t\tDirection: \"Out\",\n\t\t\t\t\t\t\tLocalAddresses: \"\",\n\t\t\t\t\t\t\tRemoteAddresses: \"\",\n\t\t\t\t\t\t\tLocalPorts: \"\",\n\t\t\t\t\t\t\tRemotePorts: \"\",\n\t\t\t\t\t\t\tPriority: 222,\n\t\t\t\t\t\t},\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tID: \"azure-acl-x-base2\",\n\t\t\t\t\t\t\tAction: \"Allow\",\n\t\t\t\t\t\t\tDirection: \"In\",\n\t\t\t\t\t\t\tRemoteAddresses: testNodeIP,\n\t\t\t\t\t\t\tPriority: 201,\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\t// skipping this test. 
See PR #1856\n\t\t\tDescription: \"Sequence 2: Policy create --> Pod A Create --> Pod B create (skip first ApplyDP())\",\n\t\t\tActions: []*Action{\n\t\t\t\tFinishBootupPhase(),\n\t\t\t\tUpdatePolicy(policyXBaseOnK1V1()),\n\t\t\t\tUpdatePolicy(policyXBase2OnK2V2()),\n\t\t\t\tCreatePod(\"x\", \"a\", ip1, thisNode, map[string]string{\"k1\": \"v1\"}),\n\t\t\t\tCreatePod(\"x\", \"b\", ip1, thisNode, map[string]string{\"k2\": \"v2\"}),\n\t\t\t\tApplyDP(),\n\t\t\t},\n\t\t\tTestCaseMetadata: &TestCaseMetadata{\n\t\t\t\tTags: []Tag{\n\t\t\t\t\tpodCrudTag,\n\t\t\t\t\tnetpolCrudTag,\n\t\t\t\t},\n\t\t\t\tDpCfg: defaultWindowsDPCfg,\n\t\t\t\tInitialEndpoints: []*hcn.HostComputeEndpoint{\n\t\t\t\t\tdptestutils.Endpoint(endpoint1, ip1),\n\t\t\t\t},\n\t\t\t\tExpectedSetPolicies: []*hcn.SetPolicySetting{\n\t\t\t\t\tdptestutils.SetPolicy(emptySet),\n\t\t\t\t\tdptestutils.SetPolicy(allNamespaces, emptySet.GetHashedName(), nsXSet.GetHashedName()),\n\t\t\t\t\tdptestutils.SetPolicy(nsXSet, ip1),\n\t\t\t\t\t// IP temporarily associated with IPSets of both pod A and pod B\n\t\t\t\t\t// Pod A sets\n\t\t\t\t\tdptestutils.SetPolicy(podK1Set, ip1),\n\t\t\t\t\tdptestutils.SetPolicy(podK1V1Set, ip1),\n\t\t\t\t\t// Pod B sets\n\t\t\t\t\tdptestutils.SetPolicy(podK2Set, ip1),\n\t\t\t\t\tdptestutils.SetPolicy(podK2V2Set, ip1),\n\t\t\t\t},\n\t\t\t\tExpectedEnpdointACLs: map[string][]*hnswrapper.FakeEndpointPolicy{\n\t\t\t\t\tendpoint1: {\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tID: \"azure-acl-x-base2\",\n\t\t\t\t\t\t\tProtocols: \"\",\n\t\t\t\t\t\t\tAction: \"Allow\",\n\t\t\t\t\t\t\tDirection: \"In\",\n\t\t\t\t\t\t\tLocalAddresses: \"\",\n\t\t\t\t\t\t\tRemoteAddresses: \"\",\n\t\t\t\t\t\t\tLocalPorts: \"\",\n\t\t\t\t\t\t\tRemotePorts: \"\",\n\t\t\t\t\t\t\tPriority: 222,\n\t\t\t\t\t\t},\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tID: \"azure-acl-x-base2\",\n\t\t\t\t\t\t\tProtocols: \"\",\n\t\t\t\t\t\t\tAction: \"Allow\",\n\t\t\t\t\t\t\tDirection: \"Out\",\n\t\t\t\t\t\t\tLocalAddresses: \"\",\n\t\t\t\t\t\t\tRemoteAddresses: 
\"\",\n\t\t\t\t\t\t\tLocalPorts: \"\",\n\t\t\t\t\t\t\tRemotePorts: \"\",\n\t\t\t\t\t\t\tPriority: 222,\n\t\t\t\t\t\t},\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tID: \"azure-acl-x-base2\",\n\t\t\t\t\t\t\tAction: \"Allow\",\n\t\t\t\t\t\t\tDirection: \"In\",\n\t\t\t\t\t\t\tRemoteAddresses: testNodeIP,\n\t\t\t\t\t\t\tPriority: 201,\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\t// skipping this test. See PR #1856\n\t\t\tDescription: \"Sequence 2: Policy create --> Pod A Create --> Pod B create --> Pod A cleanup (skip first two ApplyDP())\",\n\t\t\tActions: []*Action{\n\t\t\t\tFinishBootupPhase(),\n\t\t\t\tUpdatePolicy(policyXBaseOnK1V1()),\n\t\t\t\tUpdatePolicy(policyXBase2OnK2V2()),\n\t\t\t\tCreatePod(\"x\", \"a\", ip1, thisNode, map[string]string{\"k1\": \"v1\"}),\n\t\t\t\tCreatePod(\"x\", \"b\", ip1, thisNode, map[string]string{\"k2\": \"v2\"}),\n\t\t\t\tDeletePod(\"x\", \"a\", ip1, map[string]string{\"k1\": \"v1\"}),\n\t\t\t\tApplyDP(),\n\t\t\t},\n\t\t\tTestCaseMetadata: &TestCaseMetadata{\n\t\t\t\tTags: []Tag{\n\t\t\t\t\tpodCrudTag,\n\t\t\t\t\tnetpolCrudTag,\n\t\t\t\t},\n\t\t\t\tDpCfg: defaultWindowsDPCfg,\n\t\t\t\tInitialEndpoints: []*hcn.HostComputeEndpoint{\n\t\t\t\t\tdptestutils.Endpoint(endpoint1, ip1),\n\t\t\t\t},\n\t\t\t\tExpectedSetPolicies: []*hcn.SetPolicySetting{\n\t\t\t\t\tdptestutils.SetPolicy(emptySet),\n\t\t\t\t\tdptestutils.SetPolicy(allNamespaces, emptySet.GetHashedName(), nsXSet.GetHashedName()),\n\t\t\t\t\tdptestutils.SetPolicy(nsXSet, ip1),\n\t\t\t\t\t// old labels (not yet garbage collected)\n\t\t\t\t\tdptestutils.SetPolicy(podK1Set),\n\t\t\t\t\tdptestutils.SetPolicy(podK1V1Set),\n\t\t\t\t\t// new labels\n\t\t\t\t\tdptestutils.SetPolicy(podK2Set, ip1),\n\t\t\t\t\tdptestutils.SetPolicy(podK2V2Set, ip1),\n\t\t\t\t},\n\t\t\t\tExpectedEnpdointACLs: map[string][]*hnswrapper.FakeEndpointPolicy{\n\t\t\t\t\tendpoint1: {\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tID: \"azure-acl-x-base2\",\n\t\t\t\t\t\t\tProtocols: \"\",\n\t\t\t\t\t\t\tAction: 
\"Allow\",\n\t\t\t\t\t\t\tDirection: \"In\",\n\t\t\t\t\t\t\tLocalAddresses: \"\",\n\t\t\t\t\t\t\tRemoteAddresses: \"\",\n\t\t\t\t\t\t\tLocalPorts: \"\",\n\t\t\t\t\t\t\tRemotePorts: \"\",\n\t\t\t\t\t\t\tPriority: 222,\n\t\t\t\t\t\t},\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tID: \"azure-acl-x-base2\",\n\t\t\t\t\t\t\tProtocols: \"\",\n\t\t\t\t\t\t\tAction: \"Allow\",\n\t\t\t\t\t\t\tDirection: \"Out\",\n\t\t\t\t\t\t\tLocalAddresses: \"\",\n\t\t\t\t\t\t\tRemoteAddresses: \"\",\n\t\t\t\t\t\t\tLocalPorts: \"\",\n\t\t\t\t\t\t\tRemotePorts: \"\",\n\t\t\t\t\t\t\tPriority: 222,\n\t\t\t\t\t\t},\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tID: \"azure-acl-x-base2\",\n\t\t\t\t\t\t\tAction: \"Allow\",\n\t\t\t\t\t\t\tDirection: \"In\",\n\t\t\t\t\t\t\tRemoteAddresses: testNodeIP,\n\t\t\t\t\t\t\tPriority: 201,\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\n\totherTests := []*SerialTestCase{\n\t\t{\n\t\t\tDescription: \"ignore Pod update if added then deleted before ApplyDP()\",\n\t\t\tActions: []*Action{\n\t\t\t\tFinishBootupPhase(),\n\t\t\t\tUpdatePolicy(policyXBaseOnK1V1()),\n\t\t\t\tCreatePod(\"x\", \"a\", ip1, thisNode, map[string]string{\"k1\": \"v1\"}),\n\t\t\t\tDeletePod(\"x\", \"a\", ip1, map[string]string{\"k1\": \"v1\"}),\n\t\t\t\tApplyDP(),\n\t\t\t},\n\t\t\tTestCaseMetadata: &TestCaseMetadata{\n\t\t\t\tTags: []Tag{\n\t\t\t\t\tpodCrudTag,\n\t\t\t\t\tnetpolCrudTag,\n\t\t\t\t},\n\t\t\t\tDpCfg: defaultWindowsDPCfg,\n\t\t\t\tInitialEndpoints: []*hcn.HostComputeEndpoint{\n\t\t\t\t\tdptestutils.Endpoint(endpoint1, ip1),\n\t\t\t\t},\n\t\t\t\tExpectedSetPolicies: []*hcn.SetPolicySetting{\n\t\t\t\t\tdptestutils.SetPolicy(emptySet),\n\t\t\t\t\tdptestutils.SetPolicy(allNamespaces, emptySet.GetHashedName(), nsXSet.GetHashedName()),\n\t\t\t\t\tdptestutils.SetPolicy(nsXSet),\n\t\t\t\t\tdptestutils.SetPolicy(podK1Set),\n\t\t\t\t\tdptestutils.SetPolicy(podK1V1Set),\n\t\t\t\t},\n\t\t\t\tExpectedEnpdointACLs: map[string][]*hnswrapper.FakeEndpointPolicy{\n\t\t\t\t\tendpoint1: 
{},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\t// doesn't really enforce behavior in DP, but one could look at logs to make sure we don't make a reset ACL SysCall into HNS\n\t\t\tDescription: \"ignore Pod delete for deleted endpoint\",\n\t\t\tActions: []*Action{\n\t\t\t\tFinishBootupPhase(),\n\t\t\t\tUpdatePolicy(policyXBaseOnK1V1()),\n\t\t\t\tCreatePod(\"x\", \"a\", ip1, thisNode, map[string]string{\"k1\": \"v1\"}),\n\t\t\t\tApplyDP(),\n\t\t\t\tDeleteEndpoint(endpoint1),\n\t\t\t\tDeletePod(\"x\", \"a\", ip1, map[string]string{\"k1\": \"v1\"}),\n\t\t\t\tApplyDP(),\n\t\t\t},\n\t\t\tTestCaseMetadata: &TestCaseMetadata{\n\t\t\t\tTags: []Tag{\n\t\t\t\t\tpodCrudTag,\n\t\t\t\t\tnetpolCrudTag,\n\t\t\t\t},\n\t\t\t\tDpCfg: defaultWindowsDPCfg,\n\t\t\t\tInitialEndpoints: []*hcn.HostComputeEndpoint{\n\t\t\t\t\tdptestutils.Endpoint(endpoint1, ip1),\n\t\t\t\t},\n\t\t\t\tExpectedSetPolicies: []*hcn.SetPolicySetting{\n\t\t\t\t\tdptestutils.SetPolicy(emptySet),\n\t\t\t\t\tdptestutils.SetPolicy(allNamespaces, emptySet.GetHashedName(), nsXSet.GetHashedName()),\n\t\t\t\t\tdptestutils.SetPolicy(nsXSet),\n\t\t\t\t\tdptestutils.SetPolicy(podK1Set),\n\t\t\t\t\tdptestutils.SetPolicy(podK1V1Set),\n\t\t\t\t},\n\t\t\t\tExpectedEnpdointACLs: nil,\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\t// doesn't really enforce behavior in DP, but one could look at logs to make sure we don't make a reset ACL SysCall into HNS\n\t\t\tDescription: \"ignore Pod delete for deleted endpoint (skip first ApplyDP())\",\n\t\t\tActions: []*Action{\n\t\t\t\tFinishBootupPhase(),\n\t\t\t\tUpdatePolicy(policyXBaseOnK1V1()),\n\t\t\t\tCreatePod(\"x\", \"a\", ip1, thisNode, map[string]string{\"k1\": \"v1\"}),\n\t\t\t\tDeleteEndpoint(endpoint1),\n\t\t\t\tDeletePod(\"x\", \"a\", ip1, map[string]string{\"k1\": \"v1\"}),\n\t\t\t\tApplyDP(),\n\t\t\t},\n\t\t\tTestCaseMetadata: &TestCaseMetadata{\n\t\t\t\tTags: []Tag{\n\t\t\t\t\tpodCrudTag,\n\t\t\t\t\tnetpolCrudTag,\n\t\t\t\t},\n\t\t\t\tDpCfg: defaultWindowsDPCfg,\n\t\t\t\tInitialEndpoints: 
[]*hcn.HostComputeEndpoint{\n\t\t\t\t\tdptestutils.Endpoint(endpoint1, ip1),\n\t\t\t\t},\n\t\t\t\tExpectedSetPolicies: []*hcn.SetPolicySetting{\n\t\t\t\t\tdptestutils.SetPolicy(emptySet),\n\t\t\t\t\tdptestutils.SetPolicy(allNamespaces, emptySet.GetHashedName(), nsXSet.GetHashedName()),\n\t\t\t\t\tdptestutils.SetPolicy(nsXSet),\n\t\t\t\t\tdptestutils.SetPolicy(podK1Set),\n\t\t\t\t\tdptestutils.SetPolicy(podK1V1Set),\n\t\t\t\t},\n\t\t\t\tExpectedEnpdointACLs: nil,\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\t// doesn't really enforce behavior in DP, but one could look at logs to make sure we don't make an add ACL SysCall into HNS\"\n\t\t\tDescription: \"ignore Pod update when there's no corresponding endpoint\",\n\t\t\tActions: []*Action{\n\t\t\t\tFinishBootupPhase(),\n\t\t\t\tUpdatePolicy(policyXBaseOnK1V1()),\n\t\t\t\tCreatePod(\"x\", \"a\", ip1, thisNode, map[string]string{\"k1\": \"v1\"}),\n\t\t\t\tDeleteEndpoint(endpoint1),\n\t\t\t\tApplyDP(),\n\t\t\t},\n\t\t\tTestCaseMetadata: &TestCaseMetadata{\n\t\t\t\tTags: []Tag{\n\t\t\t\t\tpodCrudTag,\n\t\t\t\t\tnetpolCrudTag,\n\t\t\t\t},\n\t\t\t\tDpCfg: defaultWindowsDPCfg,\n\t\t\t\tInitialEndpoints: []*hcn.HostComputeEndpoint{\n\t\t\t\t\tdptestutils.Endpoint(endpoint1, ip1),\n\t\t\t\t},\n\t\t\t\tExpectedSetPolicies: []*hcn.SetPolicySetting{\n\t\t\t\t\tdptestutils.SetPolicy(emptySet),\n\t\t\t\t\tdptestutils.SetPolicy(allNamespaces, emptySet.GetHashedName(), nsXSet.GetHashedName()),\n\t\t\t\t\tdptestutils.SetPolicy(nsXSet, ip1),\n\t\t\t\t\tdptestutils.SetPolicy(podK1Set, ip1),\n\t\t\t\t\tdptestutils.SetPolicy(podK1V1Set, ip1),\n\t\t\t\t},\n\t\t\t\tExpectedEnpdointACLs: nil,\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tDescription: \"two endpoints, one with policy, one without\",\n\t\t\tActions: []*Action{\n\t\t\t\tFinishBootupPhase(),\n\t\t\t\tUpdatePolicy(policyXBase2OnK2V2()),\n\t\t\t\tCreatePod(\"x\", \"a\", ip1, thisNode, map[string]string{\"k1\": \"v1\"}),\n\t\t\t\tCreateEndpoint(endpoint2, ip2),\n\t\t\t\tCreatePod(\"x\", \"b\", ip2, thisNode, 
map[string]string{\"k2\": \"v2\"}),\n\t\t\t\tApplyDP(),\n\t\t\t},\n\t\t\tTestCaseMetadata: &TestCaseMetadata{\n\t\t\t\tTags: []Tag{\n\t\t\t\t\tpodCrudTag,\n\t\t\t\t\tnetpolCrudTag,\n\t\t\t\t},\n\t\t\t\tDpCfg: defaultWindowsDPCfg,\n\t\t\t\tInitialEndpoints: []*hcn.HostComputeEndpoint{\n\t\t\t\t\tdptestutils.Endpoint(endpoint1, ip1),\n\t\t\t\t},\n\t\t\t\tExpectedSetPolicies: []*hcn.SetPolicySetting{\n\t\t\t\t\tdptestutils.SetPolicy(emptySet),\n\t\t\t\t\tdptestutils.SetPolicy(allNamespaces, emptySet.GetHashedName(), nsXSet.GetHashedName()),\n\t\t\t\t\tdptestutils.SetPolicy(nsXSet, ip1, ip2),\n\t\t\t\t\tdptestutils.SetPolicy(podK1Set, ip1),\n\t\t\t\t\tdptestutils.SetPolicy(podK1V1Set, ip1),\n\t\t\t\t\tdptestutils.SetPolicy(podK2Set, ip2),\n\t\t\t\t\tdptestutils.SetPolicy(podK2V2Set, ip2),\n\t\t\t\t},\n\t\t\t\tExpectedEnpdointACLs: map[string][]*hnswrapper.FakeEndpointPolicy{\n\t\t\t\t\tendpoint1: {},\n\t\t\t\t\tendpoint2: {\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tID: \"azure-acl-x-base2\",\n\t\t\t\t\t\t\tProtocols: \"\",\n\t\t\t\t\t\t\tAction: \"Allow\",\n\t\t\t\t\t\t\tDirection: \"In\",\n\t\t\t\t\t\t\tLocalAddresses: \"\",\n\t\t\t\t\t\t\tRemoteAddresses: \"\",\n\t\t\t\t\t\t\tLocalPorts: \"\",\n\t\t\t\t\t\t\tRemotePorts: \"\",\n\t\t\t\t\t\t\tPriority: 222,\n\t\t\t\t\t\t},\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tID: \"azure-acl-x-base2\",\n\t\t\t\t\t\t\tProtocols: \"\",\n\t\t\t\t\t\t\tAction: \"Allow\",\n\t\t\t\t\t\t\tDirection: \"Out\",\n\t\t\t\t\t\t\tLocalAddresses: \"\",\n\t\t\t\t\t\t\tRemoteAddresses: \"\",\n\t\t\t\t\t\t\tLocalPorts: \"\",\n\t\t\t\t\t\t\tRemotePorts: \"\",\n\t\t\t\t\t\t\tPriority: 222,\n\t\t\t\t\t\t},\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tID: \"azure-acl-x-base2\",\n\t\t\t\t\t\t\tAction: \"Allow\",\n\t\t\t\t\t\t\tDirection: \"In\",\n\t\t\t\t\t\t\tRemoteAddresses: testNodeIP,\n\t\t\t\t\t\t\tPriority: 201,\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\n\tallTests := sequence1Tests\n\tallTests = append(allTests, sequence2Tests...)\n\t// allTests = 
append(allTests, podAssignmentSequence3Tests()...)\n\t// make golint happy\n\t_ = podAssignmentSequence3Tests()\n\tallTests = append(allTests, otherTests...)\n\treturn allTests\n}", "func TestDevPortForwardDeletePod(t *testing.T) {\n\tMarkIntegrationTest(t, CanRunWithoutGcp)\n\ttests := []struct {\n\t\tdir string\n\t}{\n\t\t{dir: \"examples/microservices\"},\n\t\t{dir: \"examples/multi-config-microservices\"},\n\t}\n\tfor _, test := range tests {\n\t\t// pre-build images to avoid tripping the 1-minute timeout in getLocalPortFromPortForwardEvent()\n\t\tskaffold.Build().InDir(test.dir).RunOrFail(t)\n\n\t\tns, client := SetupNamespace(t)\n\n\t\trpcAddr := randomPort()\n\t\tskaffold.Dev(\"--port-forward\", \"--rpc-port\", rpcAddr).InDir(test.dir).InNs(ns.Name).RunBackground(t)\n\t\tclient.WaitForDeploymentsToStabilize(\"leeroy-app\")\n\n\t\t_, entries := apiEvents(t, rpcAddr)\n\n\t\taddress, localPort := getLocalPortFromPortForwardEvent(t, entries, \"leeroy-app\", \"service\", ns.Name)\n\t\tassertResponseFromPort(t, address, localPort, constants.LeeroyAppResponse)\n\n\t\t// now, delete all pods in this namespace.\n\t\tRun(t, \".\", \"kubectl\", \"delete\", \"pods\", \"--all\", \"-n\", ns.Name)\n\n\t\tassertResponseFromPort(t, address, localPort, constants.LeeroyAppResponse)\n\t}\n}", "func StartControllers(s *options.MCMServer,\n\tcontrolCoreKubeconfig *rest.Config,\n\ttargetCoreKubeconfig *rest.Config,\n\tcontrolMachineClientBuilder machinecontroller.ClientBuilder,\n\tcontrolCoreClientBuilder corecontroller.ClientBuilder,\n\ttargetCoreClientBuilder corecontroller.ClientBuilder,\n\trecorder record.EventRecorder,\n\tstop <-chan struct{}) error {\n\n\tklog.V(5).Info(\"Getting available resources\")\n\tavailableResources, err := getAvailableResources(controlCoreClientBuilder)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tcontrolMachineClient := controlMachineClientBuilder.ClientOrDie(controllerManagerAgentName).MachineV1alpha1()\n\n\tcontrolCoreKubeconfig = 
rest.AddUserAgent(controlCoreKubeconfig, controllerManagerAgentName)\n\tcontrolCoreClient, err := kubernetes.NewForConfig(controlCoreKubeconfig)\n\tif err != nil {\n\t\tklog.Fatal(err)\n\t}\n\n\ttargetCoreKubeconfig = rest.AddUserAgent(targetCoreKubeconfig, controllerManagerAgentName)\n\ttargetCoreClient, err := kubernetes.NewForConfig(targetCoreKubeconfig)\n\tif err != nil {\n\t\tklog.Fatal(err)\n\t}\n\n\tif availableResources[machineGVR] || availableResources[machineSetGVR] || availableResources[machineDeploymentGVR] {\n\t\tklog.V(5).Infof(\"Creating shared informers; resync interval: %v\", s.MinResyncPeriod)\n\n\t\tcontrolMachineInformerFactory := machineinformers.NewFilteredSharedInformerFactory(\n\t\t\tcontrolMachineClientBuilder.ClientOrDie(\"control-machine-shared-informers\"),\n\t\t\ts.MinResyncPeriod.Duration,\n\t\t\ts.Namespace,\n\t\t\tnil,\n\t\t)\n\n\t\tcontrolCoreInformerFactory := coreinformers.NewFilteredSharedInformerFactory(\n\t\t\tcontrolCoreClientBuilder.ClientOrDie(\"control-core-shared-informers\"),\n\t\t\ts.MinResyncPeriod.Duration,\n\t\t\ts.Namespace,\n\t\t\tnil,\n\t\t)\n\n\t\ttargetCoreInformerFactory := coreinformers.NewSharedInformerFactory(\n\t\t\ttargetCoreClientBuilder.ClientOrDie(\"target-core-shared-informers\"),\n\t\t\ts.MinResyncPeriod.Duration,\n\t\t)\n\n\t\t// All shared informers are v1alpha1 API level\n\t\tmachineSharedInformers := controlMachineInformerFactory.Machine().V1alpha1()\n\n\t\tklog.V(5).Infof(\"Creating controllers...\")\n\t\tmcmcontroller, err := 
mcmcontroller.NewController(\n\t\t\ts.Namespace,\n\t\t\tcontrolMachineClient,\n\t\t\tcontrolCoreClient,\n\t\t\ttargetCoreClient,\n\t\t\ttargetCoreInformerFactory.Core().V1().PersistentVolumeClaims(),\n\t\t\ttargetCoreInformerFactory.Core().V1().PersistentVolumes(),\n\t\t\tcontrolCoreInformerFactory.Core().V1().Secrets(),\n\t\t\ttargetCoreInformerFactory.Core().V1().Nodes(),\n\t\t\tmachineSharedInformers.OpenStackMachineClasses(),\n\t\t\tmachineSharedInformers.AWSMachineClasses(),\n\t\t\tmachineSharedInformers.AzureMachineClasses(),\n\t\t\tmachineSharedInformers.GCPMachineClasses(),\n\t\t\tmachineSharedInformers.AlicloudMachineClasses(),\n\t\t\tmachineSharedInformers.PacketMachineClasses(),\n\t\t\tmachineSharedInformers.Machines(),\n\t\t\tmachineSharedInformers.MachineSets(),\n\t\t\tmachineSharedInformers.MachineDeployments(),\n\t\t\trecorder,\n\t\t\ts.SafetyOptions,\n\t\t\ts.NodeConditions,\n\t\t\ts.BootstrapTokenAuthExtraGroups,\n\t\t\ts.DeleteMigratedMachineClass,\n\t\t\ts.AutoscalerScaleDownAnnotationDuringRollout,\n\t\t)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tklog.V(1).Info(\"Starting shared informers\")\n\n\t\tcontrolMachineInformerFactory.Start(stop)\n\t\tcontrolCoreInformerFactory.Start(stop)\n\t\ttargetCoreInformerFactory.Start(stop)\n\n\t\tklog.V(5).Info(\"Running controller\")\n\t\tgo mcmcontroller.Run(int(s.ConcurrentNodeSyncs), stop)\n\n\t} else {\n\t\treturn fmt.Errorf(\"unable to start machine controller: API GroupVersion %q or %q or %q is not available; \\nFound: %#v\", machineGVR, machineSetGVR, machineDeploymentGVR, availableResources)\n\t}\n\n\tselect {}\n}", "func TestConcurrentAccessToRelatedVolumes(ctx context.Context, f *framework.Framework, cs clientset.Interface, ns string,\n\tnode e2epod.NodeSelection, pvcs []*v1.PersistentVolumeClaim, expectedContent string) {\n\n\tvar pods []*v1.Pod\n\n\t// Create each pod with pvc\n\tfor i := range pvcs {\n\t\tindex := i + 1\n\t\tginkgo.By(fmt.Sprintf(\"Creating pod%d with a volume on %+v\", 
index, node))\n\t\tpodConfig := e2epod.Config{\n\t\t\tNS: ns,\n\t\t\tPVCs: []*v1.PersistentVolumeClaim{pvcs[i]},\n\t\t\tSeLinuxLabel: e2epod.GetLinuxLabel(),\n\t\t\tNodeSelection: node,\n\t\t\tPVCsReadOnly: false,\n\t\t\tImageID: e2epod.GetTestImageID(imageutils.JessieDnsutils),\n\t\t}\n\t\tpod, err := e2epod.CreateSecPodWithNodeSelection(ctx, cs, &podConfig, f.Timeouts.PodStart)\n\t\tdefer func() {\n\t\t\tframework.ExpectNoError(e2epod.DeletePodWithWait(ctx, cs, pod))\n\t\t}()\n\t\tframework.ExpectNoError(err)\n\t\tpods = append(pods, pod)\n\t\tactualNodeName := pod.Spec.NodeName\n\n\t\t// Always run the subsequent pods on the same node.\n\t\te2epod.SetAffinity(&node, actualNodeName)\n\t}\n\n\tfor i, pvc := range pvcs {\n\t\tvar commands []string\n\n\t\tif *pvc.Spec.VolumeMode == v1.PersistentVolumeBlock {\n\t\t\tfileName := \"/mnt/volume1\"\n\t\t\tcommands = e2evolume.GenerateReadBlockCmd(fileName, len(expectedContent))\n\t\t\t// Check that all pods have the same content\n\t\t\tindex := i + 1\n\t\t\tginkgo.By(fmt.Sprintf(\"Checking if the volume in pod%d has expected initial content\", index))\n\t\t\t_, err := e2eoutput.LookForStringInPodExec(pods[i].Namespace, pods[i].Name, commands, expectedContent, time.Minute)\n\t\t\tframework.ExpectNoError(err, \"failed: finding the contents of the block volume %s.\", fileName)\n\t\t} else {\n\t\t\tfileName := \"/mnt/volume1/index.html\"\n\t\t\tcommands = e2evolume.GenerateReadFileCmd(fileName)\n\t\t\t// Check that all pods have the same content\n\t\t\tindex := i + 1\n\t\t\tginkgo.By(fmt.Sprintf(\"Checking if the volume in pod%d has expected initial content\", index))\n\t\t\t_, err := e2eoutput.LookForStringInPodExec(pods[i].Namespace, pods[i].Name, commands, expectedContent, time.Minute)\n\t\t\tframework.ExpectNoError(err, \"failed: finding the contents of the mounted file %s.\", fileName)\n\t\t}\n\t}\n}", "func TestControllerHandleEvents(t *testing.T) {\n\ttests := []struct {\n\t\tname string\n\t\taddServices 
[]*corev1.Service\n\t\tupdateServices []string\n\t\tdelServices []string\n\t\texpAddedServices []string\n\t\texpDeletedServices []string\n\t}{\n\t\t{\n\t\t\tname: \"If a controller is watching services it should react to the service change events.\",\n\t\t\taddServices: []*corev1.Service{\n\t\t\t\t{\n\t\t\t\t\tObjectMeta: metav1.ObjectMeta{Name: \"svc1\"},\n\t\t\t\t\tSpec: corev1.ServiceSpec{\n\t\t\t\t\t\tType: \"ClusterIP\",\n\t\t\t\t\t\tPorts: []corev1.ServicePort{\n\t\t\t\t\t\t\tcorev1.ServicePort{Name: \"port1\", Port: 8080},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tObjectMeta: metav1.ObjectMeta{Name: \"svc2\"},\n\t\t\t\t\tSpec: corev1.ServiceSpec{\n\t\t\t\t\t\tType: \"ClusterIP\",\n\t\t\t\t\t\tPorts: []corev1.ServicePort{\n\t\t\t\t\t\t\tcorev1.ServicePort{Name: \"port1\", Port: 8080},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tupdateServices: []string{\"svc1\"},\n\t\t\tdelServices: []string{\"svc1\", \"svc2\"},\n\t\t\texpAddedServices: []string{\"svc1\", \"svc2\", \"svc1\"},\n\t\t\texpDeletedServices: []string{\"svc1\", \"svc2\"},\n\t\t},\n\t}\n\n\tfor _, test := range tests {\n\t\tt.Run(test.name, func(t *testing.T) {\n\t\t\trequire := require.New(t)\n\t\t\tassert := assert.New(t)\n\t\t\tresync := 30 * time.Second\n\t\t\tstopC := make(chan struct{})\n\t\t\tvar gotAddedServices []string\n\t\t\tvar gotDeletedServices []string\n\n\t\t\t// Create the kubernetes client.\n\t\t\tk8scli, _, _, err := cli.GetK8sClients(\"\")\n\n\t\t\trequire.NoError(err, \"kubernetes client is required\")\n\n\t\t\t// Prepare the environment on the cluster.\n\t\t\tprep := prepare.New(k8scli, t)\n\t\t\tprep.SetUp()\n\t\t\tdefer prep.TearDown()\n\n\t\t\t// Create the reitrever.\n\t\t\trt := &retrieve.Resource{\n\t\t\t\tListerWatcher: cache.NewListWatchFromClient(k8scli.CoreV1().RESTClient(), \"services\", prep.Namespace().Name, fields.Everything()),\n\t\t\t\tObject: &corev1.Service{},\n\t\t\t}\n\n\t\t\t// Call times are the number of times the 
handler should be called before sending the termination signal.\n\t\t\tstopCallTimes := len(test.addServices) + len(test.updateServices) + len(test.delServices)\n\t\t\tcalledTimes := 0\n\t\t\tvar mx sync.Mutex\n\n\t\t\t// Create the handler.\n\t\t\thl := &handler.HandlerFunc{\n\t\t\t\tAddFunc: func(_ context.Context, obj runtime.Object) error {\n\t\t\t\t\tmx.Lock()\n\t\t\t\t\tcalledTimes++\n\t\t\t\t\tmx.Unlock()\n\n\t\t\t\t\tsvc := obj.(*corev1.Service)\n\t\t\t\t\tgotAddedServices = append(gotAddedServices, svc.Name)\n\t\t\t\t\tif calledTimes >= stopCallTimes {\n\t\t\t\t\t\tclose(stopC)\n\t\t\t\t\t}\n\t\t\t\t\treturn nil\n\t\t\t\t},\n\t\t\t\tDeleteFunc: func(_ context.Context, id string) error {\n\t\t\t\t\tmx.Lock()\n\t\t\t\t\tcalledTimes++\n\t\t\t\t\tmx.Unlock()\n\n\t\t\t\t\t// Ignore namespace.\n\t\t\t\t\tid = strings.Split(id, \"/\")[1]\n\t\t\t\t\tgotDeletedServices = append(gotDeletedServices, id)\n\t\t\t\t\tif calledTimes >= stopCallTimes {\n\t\t\t\t\t\tclose(stopC)\n\t\t\t\t\t}\n\t\t\t\t\treturn nil\n\t\t\t\t},\n\t\t\t}\n\n\t\t\t// Create a Pod controller.\n\t\t\tctrl := controller.NewSequential(resync, hl, rt, nil, log.Dummy)\n\t\t\trequire.NotNil(ctrl, \"controller is required\")\n\t\t\tgo ctrl.Run(stopC)\n\n\t\t\t// Create the required services.\n\t\t\tfor _, svc := range test.addServices {\n\t\t\t\t_, err := k8scli.CoreV1().Services(prep.Namespace().Name).Create(svc)\n\t\t\t\tassert.NoError(err)\n\t\t\t\ttime.Sleep(1 * time.Second)\n\t\t\t}\n\n\t\t\tfor _, svc := range test.updateServices {\n\t\t\t\torigSvc, err := k8scli.CoreV1().Services(prep.Namespace().Name).Get(svc, metav1.GetOptions{})\n\t\t\t\tif assert.NoError(err) {\n\t\t\t\t\t// Change something\n\t\t\t\t\torigSvc.Spec.Ports = append(origSvc.Spec.Ports, corev1.ServicePort{Name: \"updateport\", Port: 9876})\n\t\t\t\t\t_, err := k8scli.CoreV1().Services(prep.Namespace().Name).Update(origSvc)\n\t\t\t\t\tassert.NoError(err)\n\t\t\t\t\ttime.Sleep(1 * time.Second)\n\t\t\t\t}\n\t\t\t}\n\n\t\t\t// 
Delete the required services.\n\t\t\tfor _, svc := range test.delServices {\n\t\t\t\terr := k8scli.CoreV1().Services(prep.Namespace().Name).Delete(svc, &metav1.DeleteOptions{})\n\t\t\t\tassert.NoError(err)\n\t\t\t\ttime.Sleep(1 * time.Second)\n\t\t\t}\n\n\t\t\t// Wait until we have finished.\n\t\t\tselect {\n\t\t\t// Timeout.\n\t\t\tcase <-time.After(20 * time.Second):\n\t\t\t// Finished.\n\t\t\tcase <-stopC:\n\t\t\t}\n\n\t\t\t// Check.\n\t\t\tassert.Equal(test.expAddedServices, gotAddedServices)\n\t\t\tassert.Equal(test.expDeletedServices, gotDeletedServices)\n\t\t})\n\t}\n}", "func TestController(t *testing.T) {\n\tfakeKubeClient, catalogClient, fakeBrokerCatalog, _, _, testController, _, stopCh := newTestController(t)\n\tdefer close(stopCh)\n\n\tt.Log(fakeKubeClient, catalogClient, fakeBrokerCatalog, testController, stopCh)\n\n\tfakeBrokerCatalog.RetCatalog = &brokerapi.Catalog{\n\t\tServices: []*brokerapi.Service{\n\t\t\t{\n\t\t\t\tName: \"test-service\",\n\t\t\t\tID: \"12345\",\n\t\t\t\tDescription: \"a test service\",\n\t\t\t\tPlans: []brokerapi.ServicePlan{\n\t\t\t\t\t{\n\t\t\t\t\t\tName: \"test-plan\",\n\t\t\t\t\t\tFree: true,\n\t\t\t\t\t\tID: \"34567\",\n\t\t\t\t\t\tDescription: \"a test plan\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\tname := \"test-name\"\n\tbroker := &v1alpha1.Broker{\n\t\tObjectMeta: v1.ObjectMeta{Name: name},\n\t\tSpec: v1alpha1.BrokerSpec{\n\t\t\tURL: \"https://example.com\",\n\t\t},\n\t}\n\tbrokerClient := catalogClient.Servicecatalog().Brokers()\n\n\tbrokerServer, err := brokerClient.Create(broker)\n\tif nil != err {\n\t\tt.Fatalf(\"error creating the broker %q (%q)\", broker, err)\n\t}\n\n\tif err := wait.PollImmediate(500*time.Millisecond, wait.ForeverTestTimeout,\n\t\tfunc() (bool, error) {\n\t\t\tbrokerServer, err = brokerClient.Get(name)\n\t\t\tif nil != err {\n\t\t\t\treturn false,\n\t\t\t\t\tfmt.Errorf(\"error getting broker %s (%s)\",\n\t\t\t\t\t\tname, err)\n\t\t\t} else if len(brokerServer.Status.Conditions) > 
0 {\n\t\t\t\tt.Log(brokerServer)\n\t\t\t\treturn true, nil\n\t\t\t} else {\n\t\t\t\treturn false, nil\n\t\t\t}\n\t\t},\n\t); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t// check\n\tserviceClassClient := catalogClient.Servicecatalog().ServiceClasses()\n\t_, err = serviceClassClient.Get(\"test-service\")\n\tif nil != err {\n\t\tt.Fatal(\"could not find the test service\", err)\n\t}\n\n\t// cleanup our broker\n\terr = brokerClient.Delete(name, &v1.DeleteOptions{})\n\tif nil != err {\n\t\tt.Fatalf(\"broker should be deleted (%s)\", err)\n\t}\n\n\t// uncomment if/when deleting a broker deletes the associated service\n\t// if class, err := serviceClassClient.Get(\"test-service\"); nil == err {\n\t// \tt.Fatal(\"found the test service that should have been deleted\", err, class)\n\t// }\n}", "func getControllerPods(clientSet kubernetes.Interface, namespace string) (*corev1.PodList, error) {\n\tlabelSelector := metav1.LabelSelector{MatchLabels: map[string]string{constants.AppLabel: constants.OSMControllerName}}\n\tpodClient := clientSet.CoreV1().Pods(namespace)\n\tlistOptions := metav1.ListOptions{\n\t\tLabelSelector: labels.Set(labelSelector.MatchLabels).String(),\n\t}\n\treturn podClient.List(context.TODO(), metav1.ListOptions{LabelSelector: listOptions.LabelSelector})\n}", "func startServerAndControllers(t *testing.T) (\n\t*kubefake.Clientset,\n\twatch.Interface,\n\tclustopclientset.Interface,\n\tcapiclientset.Interface,\n\t*capifakeclientset.Clientset,\n\tfunc()) {\n\n\t// create a fake kube client\n\tfakePtr := clientgotesting.Fake{}\n\tscheme := runtime.NewScheme()\n\tcodecs := serializer.NewCodecFactory(scheme)\n\tmetav1.AddToGroupVersion(scheme, schema.GroupVersion{Version: \"v1\"})\n\tkubefake.AddToScheme(scheme)\n\tobjectTracker := clientgotesting.NewObjectTracker(scheme, codecs.UniversalDecoder())\n\tkubeWatch := watch.NewRaceFreeFake()\n\t// Add a reactor for sending watch events when a job is modified\n\tobjectReaction := 
clientgotesting.ObjectReaction(objectTracker)\n\tfakePtr.AddReactor(\"*\", \"jobs\", func(action clientgotesting.Action) (bool, runtime.Object, error) {\n\t\tvar deletedObj runtime.Object\n\t\tif action, ok := action.(clientgotesting.DeleteActionImpl); ok {\n\t\t\tdeletedObj, _ = objectTracker.Get(action.GetResource(), action.GetNamespace(), action.GetName())\n\t\t}\n\t\thandled, obj, err := objectReaction(action)\n\t\tswitch action.(type) {\n\t\tcase clientgotesting.CreateActionImpl:\n\t\t\tkubeWatch.Add(obj)\n\t\tcase clientgotesting.UpdateActionImpl:\n\t\t\tkubeWatch.Modify(obj)\n\t\tcase clientgotesting.DeleteActionImpl:\n\t\t\tif deletedObj != nil {\n\t\t\t\tkubeWatch.Delete(deletedObj)\n\t\t\t}\n\t\t}\n\t\treturn handled, obj, err\n\t})\n\tfakePtr.AddWatchReactor(\"*\", clientgotesting.DefaultWatchReactor(kubeWatch, nil))\n\t// Create actual fake kube client\n\tfakeKubeClient := &kubefake.Clientset{Fake: fakePtr}\n\n\t// start the cluster-operator api server\n\tapiServerClientConfig, shutdownServer := servertesting.StartTestServerOrDie(t)\n\n\t// create a cluster-operator client\n\tclustopClient, err := clustopclientset.NewForConfig(apiServerClientConfig)\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error: %v\", err)\n\t}\n\n\t// create a cluster-api client\n\tcapiClient, err := capiclientset.NewForConfig(apiServerClientConfig)\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error: %v\", err)\n\t}\n\n\tfakeCAPIClient := &capifakeclientset.Clientset{}\n\n\t// create informers\n\tkubeInformerFactory := kubeinformers.NewSharedInformerFactory(fakeKubeClient, 10*time.Second)\n\tbatchSharedInformers := kubeInformerFactory.Batch().V1()\n\tclustopInformerFactory := clustopinformers.NewSharedInformerFactory(clustopClient, 10*time.Second)\n\tcapiInformerFactory := capiinformers.NewSharedInformerFactory(capiClient, 10*time.Second)\n\tcapiSharedInformers := capiInformerFactory.Cluster().V1alpha1()\n\n\t// create controllers\n\tstopCh := make(chan 
struct{})\n\tt.Log(\"controller start\")\n\t// Note that controllers must be created prior to starting the informers.\n\t// Otherwise, the controllers will not get the initial sync from the\n\t// informer and will time out waiting to sync.\n\trunControllers := []func(){\n\t\t// infra\n\t\tfunc() func() {\n\t\t\tcontroller := infracontroller.NewController(\n\t\t\t\tcapiSharedInformers.Clusters(),\n\t\t\t\tbatchSharedInformers.Jobs(),\n\t\t\t\tfakeKubeClient,\n\t\t\t\tclustopClient,\n\t\t\t\tcapiClient,\n\t\t\t)\n\t\t\treturn func() { controller.Run(1, stopCh) }\n\t\t}(),\n\t\t// master\n\t\tfunc() func() {\n\t\t\tcontroller := mastercontroller.NewController(\n\t\t\t\tcapiSharedInformers.Clusters(),\n\t\t\t\tcapiSharedInformers.MachineSets(),\n\t\t\t\tbatchSharedInformers.Jobs(),\n\t\t\t\tfakeKubeClient,\n\t\t\t\tclustopClient,\n\t\t\t\tcapiClient,\n\t\t\t)\n\t\t\treturn func() { controller.Run(1, stopCh) }\n\t\t}(),\n\t\t// components\n\t\tfunc() func() {\n\t\t\tcontroller := componentscontroller.NewController(\n\t\t\t\tcapiSharedInformers.Clusters(),\n\t\t\t\tcapiSharedInformers.MachineSets(),\n\t\t\t\tbatchSharedInformers.Jobs(),\n\t\t\t\tfakeKubeClient,\n\t\t\t\tclustopClient,\n\t\t\t\tcapiClient,\n\t\t\t)\n\t\t\treturn func() { controller.Run(1, stopCh) }\n\t\t}(),\n\t\t// nodeconfig\n\t\tfunc() func() {\n\t\t\tcontroller := nodeconfigcontroller.NewController(\n\t\t\t\tcapiSharedInformers.Clusters(),\n\t\t\t\tcapiSharedInformers.MachineSets(),\n\t\t\t\tbatchSharedInformers.Jobs(),\n\t\t\t\tfakeKubeClient,\n\t\t\t\tclustopClient,\n\t\t\t\tcapiClient,\n\t\t\t)\n\t\t\treturn func() { controller.Run(1, stopCh) }\n\t\t}(),\n\t\t// deployclusterapi\n\t\tfunc() func() {\n\t\t\tcontroller := deployclusterapicontroller.NewController(\n\t\t\t\tcapiSharedInformers.Clusters(),\n\t\t\t\tcapiSharedInformers.MachineSets(),\n\t\t\t\tbatchSharedInformers.Jobs(),\n\t\t\t\tfakeKubeClient,\n\t\t\t\tclustopClient,\n\t\t\t\tcapiClient,\n\t\t\t)\n\t\t\treturn func() { 
controller.Run(1, stopCh) }\n\t\t}(),\n\t\t// awselb\n\t\tfunc() func() {\n\t\t\tcontroller := awselb.NewController(\n\t\t\t\tcapiSharedInformers.Machines(),\n\t\t\t\tfakeKubeClient,\n\t\t\t\tclustopClient,\n\t\t\t\tcapiClient,\n\t\t\t)\n\t\t\treturn func() { controller.Run(1, stopCh) }\n\t\t}(),\n\t}\n\tvar wg sync.WaitGroup\n\twg.Add(len(runControllers))\n\tfor _, run := range runControllers {\n\t\tgo func(r func()) {\n\t\t\tdefer wg.Done()\n\t\t\tr()\n\t\t}(run)\n\t}\n\n\tt.Log(\"informers start\")\n\tkubeInformerFactory.Start(stopCh)\n\tclustopInformerFactory.Start(stopCh)\n\tcapiInformerFactory.Start(stopCh)\n\n\tshutdown := func() {\n\t\t// Shut down controller\n\t\tclose(stopCh)\n\t\t// Wait for all controller to stop\n\t\twg.Wait()\n\t\t// Shut down api server\n\t\tshutdownServer()\n\t}\n\n\treturn fakeKubeClient, kubeWatch, clustopClient, capiClient, fakeCAPIClient, shutdown\n}", "func runControllersAndInformers(t *testing.T, rm *replicaset.ReplicaSetController, dc *deployment.DeploymentController, informers informers.SharedInformerFactory) func() {\n\tctx, cancelFn := context.WithCancel(context.Background())\n\tinformers.Start(ctx.Done())\n\tgo rm.Run(ctx, 5)\n\tgo dc.Run(ctx, 5)\n\treturn cancelFn\n}", "func SetupAddExtraControllerVersions(k kubernetes.Interface, namespace string) kubernetes.Interface {\n\tp := MockPod()\n\n\tdv1b1 := appsv1beta1.Deployment{\n\t\tSpec: appsv1beta1.DeploymentSpec{\n\t\t\tTemplate: corev1.PodTemplateSpec{Spec: p.Spec},\n\t\t},\n\t}\n\tif _, err := k.AppsV1beta1().Deployments(namespace).Create(&dv1b1); err != nil {\n\t\tpanic(err)\n\t}\n\n\tdv1b2 := appsv1beta2.Deployment{\n\t\tSpec: appsv1beta2.DeploymentSpec{\n\t\t\tTemplate: corev1.PodTemplateSpec{Spec: p.Spec},\n\t\t},\n\t}\n\tif _, err := k.AppsV1beta2().Deployments(namespace).Create(&dv1b2); err != nil {\n\t\tpanic(err)\n\t}\n\n\tssv1b1 := appsv1beta1.StatefulSet{\n\t\tSpec: appsv1beta1.StatefulSetSpec{\n\t\t\tTemplate: corev1.PodTemplateSpec{Spec: 
p.Spec},\n\t\t},\n\t}\n\tif _, err := k.AppsV1beta1().StatefulSets(namespace).Create(&ssv1b1); err != nil {\n\t\tpanic(err)\n\t}\n\n\tssv1b2 := appsv1beta2.StatefulSet{\n\t\tSpec: appsv1beta2.StatefulSetSpec{\n\t\t\tTemplate: corev1.PodTemplateSpec{Spec: p.Spec},\n\t\t},\n\t}\n\tif _, err := k.AppsV1beta2().StatefulSets(namespace).Create(&ssv1b2); err != nil {\n\t\tpanic(err)\n\t}\n\n\tdsv1b2 := appsv1beta2.DaemonSet{\n\t\tSpec: appsv1beta2.DaemonSetSpec{\n\t\t\tTemplate: corev1.PodTemplateSpec{Spec: p.Spec},\n\t\t},\n\t}\n\tif _, err := k.AppsV1beta2().DaemonSets(namespace).Create(&dsv1b2); err != nil {\n\t\tpanic(err)\n\t}\n\treturn k\n}", "func ecsPodTests() map[string]func(ctx context.Context, t *testing.T, pc cocoa.ECSPodCreator, c *ECSClient, smc *SecretsManagerClient) {\n\tmakeSecretEnvVar := func(t *testing.T) *cocoa.EnvironmentVariable {\n\t\treturn cocoa.NewEnvironmentVariable().\n\t\t\tSetName(t.Name()).\n\t\t\tSetSecretOptions(*cocoa.NewSecretOptions().\n\t\t\t\tSetName(t.Name()).\n\t\t\t\tSetValue(utility.RandomString()).\n\t\t\t\tSetOwned(true))\n\t}\n\tmakeContainerDef := func(t *testing.T) *cocoa.ECSContainerDefinition {\n\t\treturn cocoa.NewECSContainerDefinition().\n\t\t\tSetImage(\"image\").\n\t\t\tSetMemoryMB(128).\n\t\t\tSetCPU(128).\n\t\t\tSetName(\"container\")\n\t}\n\n\tmakePodCreationOpts := func(t *testing.T) *cocoa.ECSPodCreationOptions {\n\t\treturn cocoa.NewECSPodCreationOptions().\n\t\t\tSetName(testutil.NewTaskDefinitionFamily(t.Name())).\n\t\t\tSetMemoryMB(128).\n\t\t\tSetCPU(128).\n\t\t\tSetTaskRole(testutil.TaskRole()).\n\t\t\tSetExecutionRole(testutil.ExecutionRole()).\n\t\t\tSetExecutionOptions(*cocoa.NewECSPodExecutionOptions().\n\t\t\t\tSetCluster(testutil.ECSClusterName()))\n\t}\n\n\tcheckPodDeleted := func(ctx context.Context, t *testing.T, p cocoa.ECSPod, c cocoa.ECSClient, smc cocoa.SecretsManagerClient, opts cocoa.ECSPodCreationOptions) {\n\t\tstat := p.StatusInfo()\n\t\tassert.Equal(t, cocoa.StatusDeleted, 
stat.Status)\n\n\t\tres := p.Resources()\n\n\t\tdescribeTaskDef, err := c.DescribeTaskDefinition(ctx, &awsECS.DescribeTaskDefinitionInput{\n\t\t\tTaskDefinition: res.TaskDefinition.ID,\n\t\t})\n\t\trequire.NoError(t, err)\n\t\trequire.NotZero(t, describeTaskDef.TaskDefinition)\n\t\tassert.Equal(t, utility.FromStringPtr(opts.Name), utility.FromStringPtr(describeTaskDef.TaskDefinition.Family))\n\n\t\tdescribeTasks, err := c.DescribeTasks(ctx, &awsECS.DescribeTasksInput{\n\t\t\tCluster: res.Cluster,\n\t\t\tTasks: []*string{res.TaskID},\n\t\t})\n\t\trequire.NoError(t, err)\n\t\tassert.Empty(t, describeTasks.Failures)\n\t\trequire.Len(t, describeTasks.Tasks, 1)\n\t\tassert.Equal(t, awsECS.DesiredStatusStopped, utility.FromStringPtr(describeTasks.Tasks[0].LastStatus))\n\n\t\tfor _, containerRes := range res.Containers {\n\t\t\tfor _, s := range containerRes.Secrets {\n\t\t\t\t_, err := smc.DescribeSecret(ctx, &secretsmanager.DescribeSecretInput{\n\t\t\t\t\tSecretId: s.Name,\n\t\t\t\t})\n\t\t\t\tassert.NoError(t, err)\n\t\t\t\t_, err = smc.GetSecretValue(ctx, &secretsmanager.GetSecretValueInput{\n\t\t\t\t\tSecretId: s.Name,\n\t\t\t\t})\n\t\t\t\tassert.Error(t, err)\n\t\t\t}\n\t\t}\n\t}\n\n\treturn map[string]func(ctx context.Context, t *testing.T, pc cocoa.ECSPodCreator, c *ECSClient, smc *SecretsManagerClient){\n\t\t\"StopIsIdempotentWhenItFails\": func(ctx context.Context, t *testing.T, pc cocoa.ECSPodCreator, c *ECSClient, smc *SecretsManagerClient) {\n\t\t\topts := makePodCreationOpts(t).AddContainerDefinitions(*makeContainerDef(t))\n\t\t\tp, err := pc.CreatePod(ctx, opts)\n\t\t\trequire.NoError(t, err)\n\n\t\t\tc.StopTaskError = errors.New(\"fake error\")\n\n\t\t\trequire.Error(t, p.Stop(ctx))\n\n\t\t\tstat := p.StatusInfo()\n\t\t\tassert.Equal(t, cocoa.StatusStarting, stat.Status)\n\n\t\t\tc.StopTaskError = nil\n\n\t\t\trequire.NoError(t, p.Stop(ctx))\n\t\t\tstat = p.StatusInfo()\n\t\t\tassert.Equal(t, cocoa.StatusStopped, 
stat.Status)\n\t\t},\n\t\t\"DeleteIsIdempotentWhenStoppingTaskFails\": func(ctx context.Context, t *testing.T, pc cocoa.ECSPodCreator, c *ECSClient, smc *SecretsManagerClient) {\n\t\t\topts := makePodCreationOpts(t).AddContainerDefinitions(\n\t\t\t\t*makeContainerDef(t).AddEnvironmentVariables(\n\t\t\t\t\t*makeSecretEnvVar(t),\n\t\t\t\t),\n\t\t\t)\n\t\t\tp, err := pc.CreatePod(ctx, opts)\n\t\t\trequire.NoError(t, err)\n\n\t\t\tc.StopTaskError = errors.New(\"fake error\")\n\n\t\t\trequire.Error(t, p.Delete(ctx))\n\n\t\t\tstat := p.StatusInfo()\n\t\t\trequire.NoError(t, err)\n\t\t\tassert.Equal(t, cocoa.StatusStarting, stat.Status)\n\n\t\t\tc.StopTaskError = nil\n\n\t\t\trequire.NoError(t, p.Delete(ctx))\n\n\t\t\tcheckPodDeleted(ctx, t, p, c, smc, *opts)\n\t\t},\n\t\t\"DeleteIsIdempotentWhenDeregisteringTaskDefinitionFails\": func(ctx context.Context, t *testing.T, pc cocoa.ECSPodCreator, c *ECSClient, smc *SecretsManagerClient) {\n\t\t\topts := makePodCreationOpts(t).AddContainerDefinitions(\n\t\t\t\t*makeContainerDef(t).AddEnvironmentVariables(\n\t\t\t\t\t*makeSecretEnvVar(t),\n\t\t\t\t),\n\t\t\t)\n\t\t\tp, err := pc.CreatePod(ctx, opts)\n\t\t\trequire.NoError(t, err)\n\n\t\t\tc.DeregisterTaskDefinitionError = errors.New(\"fake error\")\n\n\t\t\trequire.Error(t, p.Delete(ctx))\n\n\t\t\tstat := p.StatusInfo()\n\t\t\trequire.NoError(t, err)\n\t\t\tassert.Equal(t, cocoa.StatusStopped, stat.Status)\n\n\t\t\tc.DeregisterTaskDefinitionError = nil\n\n\t\t\trequire.NoError(t, p.Delete(ctx))\n\n\t\t\tcheckPodDeleted(ctx, t, p, c, smc, *opts)\n\t\t},\n\t\t\"DeleteIsIdempotentWhenDeletingSecretsFails\": func(ctx context.Context, t *testing.T, pc cocoa.ECSPodCreator, c *ECSClient, smc *SecretsManagerClient) {\n\t\t\topts := makePodCreationOpts(t).AddContainerDefinitions(\n\t\t\t\t*makeContainerDef(t).AddEnvironmentVariables(\n\t\t\t\t\t*makeSecretEnvVar(t),\n\t\t\t\t),\n\t\t\t)\n\t\t\tp, err := pc.CreatePod(ctx, opts)\n\t\t\trequire.NoError(t, 
err)\n\n\t\t\tsmc.DeleteSecretError = errors.New(\"fake error\")\n\n\t\t\trequire.Error(t, p.Delete(ctx))\n\n\t\t\tstat := p.StatusInfo()\n\t\t\tassert.Equal(t, cocoa.StatusStopped, stat.Status)\n\n\t\t\tsmc.DeleteSecretError = nil\n\n\t\t\trequire.NoError(t, p.Delete(ctx))\n\n\t\t\tcheckPodDeleted(ctx, t, p, c, smc, *opts)\n\t\t},\n\t}\n}", "func CreatePods(f *framework.Framework, appName string, ns string, labels map[string]string, spec v1.PodSpec, maxCount int, tuning *TuningSetType) {\n\tfor i := 0; i < maxCount; i++ {\n\t\tframework.Logf(\"%v/%v : Creating pod\", i+1, maxCount)\n\t\t// Retry on pod creation failure\n\t\tfor retryCount := 0; retryCount < maxRetries; retryCount++ {\n\t\t\t_, err := f.ClientSet.Core().Pods(ns).Create(&v1.Pod{\n\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\tName: fmt.Sprintf(appName+\"-pod-%v\", i),\n\t\t\t\t\tNamespace: ns,\n\t\t\t\t\tLabels: labels,\n\t\t\t\t},\n\t\t\t\tSpec: spec,\n\t\t\t})\n\t\t\tif err == nil {\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tframework.ExpectNoError(err)\n\t\t}\n\t\tif tuning != nil {\n\t\t\t// If a rate limit has been defined we wait for N ms between creation\n\t\t\tif tuning.Pods.RateLimit.Delay != 0 {\n\t\t\t\tframework.Logf(\"Sleeping %d ms between podcreation.\", tuning.Pods.RateLimit.Delay)\n\t\t\t\ttime.Sleep(tuning.Pods.RateLimit.Delay * time.Millisecond)\n\t\t\t}\n\t\t\t// If a stepping tuningset has been defined in the config, we wait for the step of pods to be created, and pause\n\t\t\tif tuning.Pods.Stepping.StepSize != 0 && (i+1)%tuning.Pods.Stepping.StepSize == 0 {\n\t\t\t\tverifyRunning := f.NewClusterVerification(\n\t\t\t\t\t&v1.Namespace{\n\t\t\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\t\t\tName: ns,\n\t\t\t\t\t\t},\n\t\t\t\t\t\tStatus: v1.NamespaceStatus{},\n\t\t\t\t\t},\n\t\t\t\t\tframework.PodStateVerification{\n\t\t\t\t\t\tSelectors: labels,\n\t\t\t\t\t\tValidPhases: []v1.PodPhase{v1.PodRunning},\n\t\t\t\t\t},\n\t\t\t\t)\n\n\t\t\t\tpods, err := verifyRunning.WaitFor(i+1, 
tuning.Pods.Stepping.Timeout*time.Second)\n\t\t\t\tif err != nil {\n\t\t\t\t\tframework.Failf(\"Error in wait... %v\", err)\n\t\t\t\t} else if len(pods) < i+1 {\n\t\t\t\t\tframework.Failf(\"Only got %v out of %v\", len(pods), i+1)\n\t\t\t\t}\n\n\t\t\t\tframework.Logf(\"We have created %d pods and are now sleeping for %d seconds\", i+1, tuning.Pods.Stepping.Pause)\n\t\t\t\ttime.Sleep(tuning.Pods.Stepping.Pause * time.Second)\n\t\t\t}\n\t\t}\n\t}\n}", "func (m *MockDeploymentControllerFactory) Build(mgr mc_manager.AsyncManager, clusterName string) (controller0.DeploymentController, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"Build\", mgr, clusterName)\n\tret0, _ := ret[0].(controller0.DeploymentController)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func TestTriggerController(t *testing.T) {\n\tconfig, stopFn := framework.RunControlPlane(t)\n\tdefer stopFn()\n\n\tctx, cancel := context.WithTimeout(context.TODO(), time.Second*20)\n\tdefer cancel()\n\n\tfakeClock := &fakeclock.FakeClock{}\n\t// Build, instantiate and run the trigger controller.\n\tkubeClient, factory, cmCl, cmFactory := framework.NewClients(t, config)\n\n\tnamespace := \"testns\"\n\n\t// Create Namespace\n\tns := &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: namespace}}\n\t_, err := kubeClient.CoreV1().Namespaces().Create(context.TODO(), ns, metav1.CreateOptions{})\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tctrl, queue, mustSync := trigger.NewController(logf.Log, cmCl, factory, cmFactory, framework.NewEventRecorder(t), fakeClock, policies.NewTriggerPolicyChain(fakeClock))\n\tc := controllerpkg.NewController(\n\t\tcontext.Background(),\n\t\t\"trigger_test\",\n\t\tmetrics.New(logf.Log),\n\t\tctrl.ProcessItem,\n\t\tmustSync,\n\t\tnil,\n\t\tqueue,\n\t)\n\tstopController := framework.StartInformersAndController(t, factory, cmFactory, c)\n\tdefer stopController()\n\n\t// Create a Certificate resource and wait for it to have the 'Issuing' condition.\n\tcert, err := 
cmCl.CertmanagerV1().Certificates(namespace).Create(ctx, &cmapi.Certificate{\n\t\tObjectMeta: metav1.ObjectMeta{Name: \"testcrt\", Namespace: \"testns\"},\n\t\tSpec: cmapi.CertificateSpec{\n\t\t\tSecretName: \"example\",\n\t\t\tCommonName: \"example.com\",\n\t\t\tIssuerRef: cmmeta.ObjectReference{Name: \"testissuer\"}, // doesn't need to exist\n\t\t},\n\t}, metav1.CreateOptions{})\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\terr = wait.Poll(time.Millisecond*100, time.Second*5, func() (done bool, err error) {\n\t\tc, err := cmCl.CertmanagerV1().Certificates(cert.Namespace).Get(ctx, cert.Name, metav1.GetOptions{})\n\t\tif err != nil {\n\t\t\tt.Logf(\"Failed to fetch Certificate resource, retrying: %v\", err)\n\t\t\treturn false, nil\n\t\t}\n\t\tif !apiutil.CertificateHasCondition(c, cmapi.CertificateCondition{\n\t\t\tType: cmapi.CertificateConditionIssuing,\n\t\t\tStatus: cmmeta.ConditionTrue,\n\t\t}) {\n\t\t\tt.Logf(\"Certificate does not have expected condition, got=%#v\", apiutil.GetCertificateCondition(c, cmapi.CertificateConditionIssuing))\n\t\t\treturn false, nil\n\t\t}\n\t\treturn true, nil\n\t})\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n}", "func TestBatch(t *testing.T) {\n\tpre := config.Presubmit{\n\t\tName: \"pr-some-job\",\n\t\tAgent: \"jenkins\",\n\t\tContext: \"Some Job Context\",\n\t}\n\tfc := &fkc{\n\t\tprowjobs: []kube.ProwJob{pjutil.NewProwJob(pjutil.BatchSpec(pre, kube.Refs{\n\t\t\tOrg: \"o\",\n\t\t\tRepo: \"r\",\n\t\t\tBaseRef: \"master\",\n\t\t\tBaseSHA: \"123\",\n\t\t\tPulls: []kube.Pull{\n\t\t\t\t{\n\t\t\t\t\tNumber: 1,\n\t\t\t\t\tSHA: \"abc\",\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tNumber: 2,\n\t\t\t\t\tSHA: \"qwe\",\n\t\t\t\t},\n\t\t\t},\n\t\t}))},\n\t}\n\tjc := &fjc{}\n\tc := Controller{\n\t\tkc: fc,\n\t\tjc: jc,\n\t\tca: newFakeConfigAgent(t),\n\t\tpendingJobs: make(map[string]int),\n\t\tlock: sync.RWMutex{},\n\t}\n\n\tif err := c.Sync(); err != nil {\n\t\tt.Fatalf(\"Error on first sync: %v\", err)\n\t}\n\tif fc.prowjobs[0].Status.State != 
kube.PendingState {\n\t\tt.Fatalf(\"Wrong state: %v\", fc.prowjobs[0].Status.State)\n\t}\n\tif !fc.prowjobs[0].Status.JenkinsEnqueued {\n\t\tt.Fatal(\"Wrong enqueued.\")\n\t}\n\tjc.enqueued = true\n\tif err := c.Sync(); err != nil {\n\t\tt.Fatalf(\"Error on second sync: %v\", err)\n\t}\n\tif !fc.prowjobs[0].Status.JenkinsEnqueued {\n\t\tt.Fatal(\"Wrong enqueued steady state.\")\n\t}\n\tjc.enqueued = false\n\tif err := c.Sync(); err != nil {\n\t\tt.Fatalf(\"Error on third sync: %v\", err)\n\t}\n\tif fc.prowjobs[0].Status.JenkinsEnqueued {\n\t\tt.Fatal(\"Wrong enqueued after leaving queue.\")\n\t}\n\tjc.status = Status{Building: true}\n\tif err := c.Sync(); err != nil {\n\t\tt.Fatalf(\"Error on fourth sync: %v\", err)\n\t}\n\tif fc.prowjobs[0].Status.State != kube.PendingState {\n\t\tt.Fatalf(\"Wrong state: %v\", fc.prowjobs[0].Status.State)\n\t}\n\tjc.status = Status{\n\t\tBuilding: false,\n\t\tNumber: 42,\n\t}\n\tif err := c.Sync(); err != nil {\n\t\tt.Fatalf(\"Error on fifth sync: %v\", err)\n\t}\n\tif fc.prowjobs[0].Status.PodName != \"pr-some-job-42\" {\n\t\tt.Fatalf(\"Wrong PodName: %s\", fc.prowjobs[0].Status.PodName)\n\t}\n\tif fc.prowjobs[0].Status.State != kube.FailureState {\n\t\tt.Fatalf(\"Wrong state: %v\", fc.prowjobs[0].Status.State)\n\t}\n\n\t// This is what the SQ reads.\n\tif fc.prowjobs[0].Spec.Context != \"Some Job Context\" {\n\t\tt.Fatalf(\"Wrong context: %v\", fc.prowjobs[0].Spec.Context)\n\t}\n}", "func Test_UniformRegistration_RegistrationOfKeptnIntegrationMultiplePods(t *testing.T) {\n\tdefer func(t *testing.T) {\n\t\tPrintLogsOfPods(t, []string{\"shipyard-controller\"})\n\t}(t)\n\n\t// make sure the echo-service uses the same distributor as Keptn core\n\timageName, err := GetImageOfDeploymentContainer(\"lighthouse-service\", \"lighthouse-service\")\n\trequire.Nil(t, err)\n\tdistributorImage := strings.Replace(imageName, \"lighthouse-service\", \"distributor\", 1)\n\n\techoServiceManifestContent := strings.ReplaceAll(echoServiceK8sManifest, 
\"${distributor-image}\", distributorImage)\n\techoServiceManifestContent = strings.ReplaceAll(echoServiceManifestContent, \"replicas: 1\", \"replicas: 3\")\n\techoServiceManifestContent = strings.ReplaceAll(echoServiceManifestContent, \"${queue-group}\", \"echo-service\")\n\techoServiceManifestContent = strings.ReplaceAll(echoServiceManifestContent, \"${api-endpoint}\", \"\")\n\techoServiceManifestContent = strings.ReplaceAll(echoServiceManifestContent, \"${api-token}\", \"\")\n\n\ttmpFile, err := CreateTmpFile(\"echo-service-*.yaml\", echoServiceManifestContent)\n\tdefer func() {\n\t\tif err := os.Remove(tmpFile); err != nil {\n\t\t\tt.Logf(\"Could not delete file: %v\", err)\n\t\t}\n\t}()\n\ttestUniformIntegration(t, func() {\n\t\t// install echo integration\n\t\t_, err = KubeCtlApplyFromURL(tmpFile)\n\t\trequire.Nil(t, err)\n\n\t\terr = waitForDeploymentToBeRolledOut(false, echoServiceName, GetKeptnNameSpaceFromEnv())\n\t\trequire.Nil(t, err)\n\n\t}, func() {\n\t\terr := KubeCtlDeleteFromURL(tmpFile)\n\t\trequire.Nil(t, err)\n\t}, true)\n}", "func TestServiceCreateWithMultipleContainers(t *testing.T) {\n\tif test.ServingFlags.DisableOptionalAPI {\n\t\tt.Skip(\"Multiple containers support is not required by Knative Serving API Specification\")\n\t}\n\tif !test.ServingFlags.EnableBetaFeatures {\n\t\tt.Skip()\n\t}\n\tt.Parallel()\n\tclients := test.Setup(t)\n\n\tnames := test.ResourceNames{\n\t\tService: test.ObjectNameForTest(t),\n\t\tImage: test.ServingContainer,\n\t\tSidecars: []string{\n\t\t\ttest.SidecarContainer,\n\t\t},\n\t}\n\n\t// Clean up on test failure or interrupt\n\ttest.EnsureTearDown(t, clients, &names)\n\tcontainers := []corev1.Container{{\n\t\tImage: pkgtest.ImagePath(names.Image),\n\t\tPorts: []corev1.ContainerPort{{\n\t\t\tContainerPort: 8881,\n\t\t}},\n\t}, {\n\t\tImage: pkgtest.ImagePath(names.Sidecars[0]),\n\t}}\n\n\t// Setup initial Service\n\tif _, err := v1test.CreateServiceReady(t, clients, &names, func(svc *v1.Service) 
{\n\t\tsvc.Spec.Template.Spec.Containers = containers\n\t}); err != nil {\n\t\tt.Fatalf(\"Failed to create initial Service %v: %v\", names.Service, err)\n\t}\n\n\t// Validate State after Creation\n\tif err := validateControlPlane(t, clients, names, \"1\" /*1 is the expected generation value*/); err != nil {\n\t\tt.Error(err)\n\t}\n\n\tif err := validateDataPlane(t, clients, names, test.MultiContainerResponse); err != nil {\n\t\tt.Error(err)\n\t}\n}", "func (m *MockServiceControllerFactory) Build(mgr mc_manager.AsyncManager, clusterName string) (controller1.ServiceController, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"Build\", mgr, clusterName)\n\tret0, _ := ret[0].(controller1.ServiceController)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func TestNewController(t *testing.T) {\n\tmessagingClientSet, err := clientset.NewForConfig(&rest.Config{})\n\tif err != nil {\n\t\tt.Fail()\n\t}\n\n\tmessagingInformerFactory := informers.NewSharedInformerFactory(messagingClientSet, 0)\n\tnatssChannelInformer := messagingInformerFactory.Messaging().V1alpha1().NatssChannels()\n\n\tc := NewController(reconciler.Options{\n\t\tKubeClientSet: fakekubeclientset.NewSimpleClientset(),\n\t\tDynamicClientSet: nil,\n\t\tNatssClientSet: nil,\n\t\tRecorder: nil,\n\t\tStatsReporter: nil,\n\t\tConfigMapWatcher: nil,\n\t\tLogger: logtesting.TestLogger(t),\n\t\tResyncPeriod: 0,\n\t\tStopChannel: nil,\n\t}, dispatchertesting.NewDispatcherDoNothing(), natssChannelInformer)\n\tif c == nil {\n\t\tt.Errorf(\"unable to create dispatcher controller\")\n\t}\n}", "func TestCancelManyJobs(t *testing.T) {\n\tif testing.Short() {\n\t\tt.Skip(\"Skipping integration tests in short mode\")\n\t}\n\n\tt.Parallel()\n\tc, _ := minikubetestenv.AcquireCluster(t)\n\n\t// Create an input repo\n\trepo := tu.UniqueString(\"TestCancelManyJobs\")\n\trequire.NoError(t, c.CreateRepo(pfs.DefaultProjectName, repo))\n\n\t// Create sleep pipeline\n\tpipeline := 
tu.UniqueString(\"pipeline\")\n\trequire.NoError(t, c.CreatePipeline(pfs.DefaultProjectName,\n\t\tpipeline,\n\t\t\"\",\n\t\t[]string{\"sleep\", \"600\"},\n\t\tnil,\n\t\t&pps.ParallelismSpec{\n\t\t\tConstant: 1,\n\t\t},\n\t\tclient.NewPFSInput(pfs.DefaultProjectName, repo, \"/*\"),\n\t\t\"\",\n\t\tfalse,\n\t))\n\n\t// Create 10 input commits, to spawn 10 jobs\n\tvar commits []*pfs.Commit\n\tfor i := 0; i < 10; i++ {\n\t\tcommit, err := c.StartCommit(pfs.DefaultProjectName, repo, \"master\")\n\t\trequire.NoError(t, c.PutFile(commit, \"file\", strings.NewReader(\"foo\")))\n\t\trequire.NoError(t, err)\n\t\trequire.NoError(t, c.FinishCommit(pfs.DefaultProjectName, repo, commit.Branch.Name, commit.Id))\n\t\tcommits = append(commits, commit)\n\t}\n\n\t// For each expected job: watch to make sure the input job comes up, make\n\t// sure that it's the only job running, then cancel it\n\tfor _, commit := range commits {\n\t\t// Wait until PPS has started processing commit\n\t\tvar jobInfo *pps.JobInfo\n\t\trequire.NoErrorWithinT(t, 30*time.Second, func() error {\n\t\t\treturn backoff.Retry(func() error {\n\t\t\t\tjobInfos, err := c.ListJob(pfs.DefaultProjectName, pipeline, []*pfs.Commit{commit}, -1, true)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tif len(jobInfos) != 1 {\n\t\t\t\t\treturn errors.Errorf(\"Expected one job, but got %d: %v\", len(jobInfos), jobInfos)\n\t\t\t\t}\n\t\t\t\tjobInfo = jobInfos[0]\n\t\t\t\treturn nil\n\t\t\t}, backoff.NewTestingBackOff())\n\t\t})\n\n\t\t// Stop the job\n\t\trequire.NoError(t, c.StopJob(pfs.DefaultProjectName, jobInfo.Job.Pipeline.Name, jobInfo.Job.Id))\n\n\t\t// Check that the job is now killed\n\t\trequire.NoErrorWithinT(t, 30*time.Second, func() error {\n\t\t\treturn backoff.Retry(func() error {\n\t\t\t\t// TODO(msteffen): once github.com/pachyderm/pachyderm/v2/pull/2642 is\n\t\t\t\t// submitted, change ListJob here to filter on commit1 as the input commit,\n\t\t\t\t// rather than inspecting the input in the 
test\n\t\t\t\tupdatedJobInfo, err := c.InspectJob(pfs.DefaultProjectName, jobInfo.Job.Pipeline.Name, jobInfo.Job.Id, false)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tif updatedJobInfo.State != pps.JobState_JOB_KILLED {\n\t\t\t\t\treturn errors.Errorf(\"job %s is still running, but should be KILLED\", jobInfo.Job.Id)\n\t\t\t\t}\n\t\t\t\treturn nil\n\t\t\t}, backoff.NewTestingBackOff())\n\t\t})\n\t}\n}", "func TestConfigController(t *testing.T) {\n\tvar (\n\t\tname = \"common-service\"\n\t\tnamespace = \"ibm-common-service\"\n\t)\n\n\treq := getReconcileRequest(name, namespace)\n\tr := getReconciler(name, namespace)\n\n\tinitReconcile(t, r, req)\n\n}", "func CreatePods(c kclientset.Interface, appName string, ns string, labels map[string]string, spec kapiv1.PodSpec, maxCount int, tuning *TuningSetType) {\n\tfor i := 0; i < maxCount; i++ {\n\t\tframework.Logf(\"%v/%v : Creating pod\", i+1, maxCount)\n\t\t// Retry on pod creation failure\n\t\tfor retryCount := 0; retryCount < maxRetries; retryCount++ {\n\t\t\t_, err := c.CoreV1().Pods(ns).Create(&kapiv1.Pod{\n\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\tName: fmt.Sprintf(appName+\"-pod-%v\", i),\n\t\t\t\t\tNamespace: ns,\n\t\t\t\t\tLabels: labels,\n\t\t\t\t},\n\t\t\t\tSpec: spec,\n\t\t\t})\n\t\t\tif err == nil {\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tframework.ExpectNoError(err)\n\t\t}\n\t\tif tuning != nil {\n\t\t\t// If a rate limit has been defined we wait for N ms between creation\n\t\t\tif tuning.Pods.RateLimit.Delay != 0 {\n\t\t\t\tframework.Logf(\"Sleeping %d ms between podcreation.\", tuning.Pods.RateLimit.Delay)\n\t\t\t\ttime.Sleep(tuning.Pods.RateLimit.Delay * time.Millisecond)\n\t\t\t}\n\t\t\t// If a stepping tuningset has been defined in the config, we wait for the step of pods to be created, and pause\n\t\t\tif tuning.Pods.Stepping.StepSize != 0 && (i+1)%tuning.Pods.Stepping.StepSize == 0 {\n\t\t\t\tframework.Logf(\"Waiting for pods created this step to be running\")\n\t\t\t\tpods, err 
:= exutil.WaitForPods(c.CoreV1().Pods(ns), exutil.ParseLabelsOrDie(mapToString(labels)), exutil.CheckPodIsRunningFn, i+1, tuning.Pods.Stepping.Timeout*time.Second)\n\t\t\t\tif err != nil {\n\t\t\t\t\tframework.Failf(\"Error in wait... %v\", err)\n\t\t\t\t} else if len(pods) < i+1 {\n\t\t\t\t\tframework.Failf(\"Only got %v out of %v\", len(pods), i+1)\n\t\t\t\t}\n\n\t\t\t\tframework.Logf(\"We have created %d pods and are now sleeping for %d seconds\", i+1, tuning.Pods.Stepping.Pause)\n\t\t\t\ttime.Sleep(tuning.Pods.Stepping.Pause * time.Second)\n\t\t\t}\n\t\t}\n\t}\n}", "func TestLoad(t *testing.T) {\n\tclientset, err := k8sutils.MustGetClientset()\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tctx, cancel := context.WithTimeout(context.Background(), 30*time.Minute)\n\tdefer cancel()\n\n\t// Create namespace if it doesn't exist\n\tnamespaceExists, err := k8sutils.NamespaceExists(ctx, clientset, namespace)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tif !namespaceExists {\n\t\terr = k8sutils.MustCreateNamespace(ctx, clientset, namespace)\n\t\tif err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t}\n\n\tdeployment, err := k8sutils.MustParseDeployment(noopDeploymentMap[*osType])\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tdeploymentsClient := clientset.AppsV1().Deployments(namespace)\n\terr = k8sutils.MustCreateDeployment(ctx, deploymentsClient, deployment)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tt.Log(\"Checking pods are running\")\n\terr = k8sutils.WaitForPodsRunning(ctx, clientset, namespace, podLabelSelector)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tt.Log(\"Repeating the scale up/down cycle\")\n\tfor i := 0; i < *iterations; i++ {\n\t\tt.Log(\"Iteration \", i)\n\t\tt.Log(\"Scale down deployment\")\n\t\terr = k8sutils.MustScaleDeployment(ctx, deploymentsClient, deployment, clientset, namespace, podLabelSelector, *scaleDownReplicas, *skipWait)\n\t\tif err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t\tt.Log(\"Scale up deployment\")\n\t\terr = 
k8sutils.MustScaleDeployment(ctx, deploymentsClient, deployment, clientset, namespace, podLabelSelector, *scaleUpReplicas, *skipWait)\n\t\tif err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t}\n\tt.Log(\"Checking pods are running and IP assigned\")\n\terr = k8sutils.WaitForPodsRunning(ctx, clientset, \"\", \"\")\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tif *validateStateFile {\n\t\tt.Run(\"Validate state file\", TestValidateState)\n\t}\n\n\tif *validateDualStack {\n\t\tt.Run(\"Validate dualstack overlay\", TestDualStackProperties)\n\t}\n}", "func (b *Botanist) WaitForControllersToBeActive(ctx context.Context) error {\n\ttype controllerInfo struct {\n\t\tname string\n\t\tlabels map[string]string\n\t}\n\n\ttype checkOutput struct {\n\t\tcontrollerName string\n\t\tready bool\n\t\terr error\n\t}\n\n\tvar (\n\t\tcontrollers = []controllerInfo{}\n\t\tpollInterval = 5 * time.Second\n\t)\n\n\t// Check whether the kube-controller-manager deployment exists\n\tif err := b.K8sSeedClient.Client().Get(ctx, kutil.Key(b.Shoot.SeedNamespace, v1beta1constants.DeploymentNameKubeControllerManager), &appsv1.Deployment{}); err == nil {\n\t\tcontrollers = append(controllers, controllerInfo{\n\t\t\tname: v1beta1constants.DeploymentNameKubeControllerManager,\n\t\t\tlabels: map[string]string{\n\t\t\t\t\"app\": \"kubernetes\",\n\t\t\t\t\"role\": \"controller-manager\",\n\t\t\t},\n\t\t})\n\t} else if client.IgnoreNotFound(err) != nil {\n\t\treturn err\n\t}\n\n\treturn retry.UntilTimeout(context.TODO(), pollInterval, 90*time.Second, func(ctx context.Context) (done bool, err error) {\n\t\tvar (\n\t\t\twg sync.WaitGroup\n\t\t\tout = make(chan *checkOutput)\n\t\t)\n\n\t\tfor _, controller := range controllers {\n\t\t\twg.Add(1)\n\n\t\t\tgo func(controller controllerInfo) {\n\t\t\t\tdefer wg.Done()\n\n\t\t\t\tpodList := &corev1.PodList{}\n\t\t\t\terr := b.K8sSeedClient.Client().List(ctx, 
podList,\n\t\t\t\t\tclient.InNamespace(b.Shoot.SeedNamespace),\n\t\t\t\t\tclient.MatchingLabels(controller.labels))\n\t\t\t\tif err != nil {\n\t\t\t\t\tout <- &checkOutput{controllerName: controller.name, err: err}\n\t\t\t\t\treturn\n\t\t\t\t}\n\n\t\t\t\t// Check that only one replica of the controller exists.\n\t\t\t\tif len(podList.Items) != 1 {\n\t\t\t\t\tb.Logger.Infof(\"Waiting for %s to have exactly one replica\", controller.name)\n\t\t\t\t\tout <- &checkOutput{controllerName: controller.name}\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\t// Check that the existing replica is not in getting deleted.\n\t\t\t\tif podList.Items[0].DeletionTimestamp != nil {\n\t\t\t\t\tb.Logger.Infof(\"Waiting for a new replica of %s\", controller.name)\n\t\t\t\t\tout <- &checkOutput{controllerName: controller.name}\n\t\t\t\t\treturn\n\t\t\t\t}\n\n\t\t\t\t// Check if the controller is active by reading its leader election record.\n\t\t\t\tleaderElectionRecord, err := common.ReadLeaderElectionRecord(b.K8sShootClient, resourcelock.EndpointsResourceLock, metav1.NamespaceSystem, controller.name)\n\t\t\t\tif err != nil {\n\t\t\t\t\tout <- &checkOutput{controllerName: controller.name, err: err}\n\t\t\t\t\treturn\n\t\t\t\t}\n\n\t\t\t\tif delta := metav1.Now().UTC().Sub(leaderElectionRecord.RenewTime.Time.UTC()); delta <= pollInterval-time.Second {\n\t\t\t\t\tout <- &checkOutput{controllerName: controller.name, ready: true}\n\t\t\t\t\treturn\n\t\t\t\t}\n\n\t\t\t\tb.Logger.Infof(\"Waiting for %s to be active\", controller.name)\n\t\t\t\tout <- &checkOutput{controllerName: controller.name}\n\t\t\t}(controller)\n\t\t}\n\n\t\tgo func() {\n\t\t\twg.Wait()\n\t\t\tclose(out)\n\t\t}()\n\n\t\tfor result := range out {\n\t\t\tif result.err != nil {\n\t\t\t\treturn retry.SevereError(fmt.Errorf(\"could not check whether controller %s is active: %+v\", result.controllerName, result.err))\n\t\t\t}\n\t\t\tif !result.ready {\n\t\t\t\treturn retry.MinorError(fmt.Errorf(\"controller %s is not active\", 
result.controllerName))\n\t\t\t}\n\t\t}\n\n\t\treturn retry.Ok()\n\t})\n}", "func TestTriggerController(t *testing.T) {\n\tctx, cancel := context.WithTimeout(context.Background(), time.Second*40)\n\tdefer cancel()\n\n\tconfig, stopFn := framework.RunControlPlane(t, ctx)\n\tdefer stopFn()\n\n\tfakeClock := &fakeclock.FakeClock{}\n\t// Build, instantiate and run the trigger controller.\n\tkubeClient, factory, cmCl, cmFactory := framework.NewClients(t, config)\n\n\tnamespace := \"testns\"\n\n\t// Create Namespace\n\tns := &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: namespace}}\n\t_, err := kubeClient.CoreV1().Namespaces().Create(ctx, ns, metav1.CreateOptions{})\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tshouldReissue := policies.NewTriggerPolicyChain(fakeClock).Evaluate\n\tctrl, queue, mustSync := trigger.NewController(logf.Log, cmCl, factory, cmFactory, framework.NewEventRecorder(t), fakeClock, shouldReissue)\n\tc := controllerpkg.NewController(\n\t\tctx,\n\t\t\"trigger_test\",\n\t\tmetrics.New(logf.Log, clock.RealClock{}),\n\t\tctrl.ProcessItem,\n\t\tmustSync,\n\t\tnil,\n\t\tqueue,\n\t)\n\tstopController := framework.StartInformersAndController(t, factory, cmFactory, c)\n\tdefer stopController()\n\n\t// Create a Certificate resource and wait for it to have the 'Issuing' condition.\n\tcert, err := cmCl.CertmanagerV1().Certificates(namespace).Create(ctx, &cmapi.Certificate{\n\t\tObjectMeta: metav1.ObjectMeta{Name: \"testcrt\", Namespace: \"testns\"},\n\t\tSpec: cmapi.CertificateSpec{\n\t\t\tSecretName: \"example\",\n\t\t\tCommonName: \"example.com\",\n\t\t\tIssuerRef: cmmeta.ObjectReference{Name: \"testissuer\"}, // doesn't need to exist\n\t\t},\n\t}, metav1.CreateOptions{})\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\terr = wait.PollImmediateUntil(time.Millisecond*100, func() (done bool, err error) {\n\t\tc, err := cmCl.CertmanagerV1().Certificates(cert.Namespace).Get(ctx, cert.Name, metav1.GetOptions{})\n\t\tif err != nil {\n\t\t\tt.Logf(\"Failed to 
fetch Certificate resource, retrying: %v\", err)\n\t\t\treturn false, nil\n\t\t}\n\t\tif !apiutil.CertificateHasCondition(c, cmapi.CertificateCondition{\n\t\t\tType: cmapi.CertificateConditionIssuing,\n\t\t\tStatus: cmmeta.ConditionTrue,\n\t\t}) {\n\t\t\tt.Logf(\"Certificate does not have expected condition, got=%#v\", apiutil.GetCertificateCondition(c, cmapi.CertificateConditionIssuing))\n\t\t\treturn false, nil\n\t\t}\n\t\treturn true, nil\n\t}, ctx.Done())\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n}", "func controllerSubtest(name string, tc *sessionTestCase) func(t *testing.T) {\n\treturn func(t *testing.T) {\n\t\tt.Parallel()\n\n\t\t// This test uses the same controller to manage two sessions that are communicating with\n\t\t// each other (basically, both the \"local\" and \"remote\" session are on the same system).\n\t\t// This is possible because the local discriminators are chosen such that they are\n\t\t// different.\n\t\t//\n\t\t// While this is something that rarely (if ever) occurs in practice, it makes test setup\n\t\t// much simpler here. 
In the real world, BFD would configured between two systems and each\n\t\t// system would have its own controller which is in charge only of sessions on that system.\n\t\tcontroller := &bfd.Controller{\n\t\t\tSessions: []*bfd.Session{tc.sessionA, tc.sessionB},\n\t\t\tReceiveQueueSize: 10,\n\t\t}\n\n\t\t// both sessions send their messages through the same controller\n\t\tmessageQueue := &redirectSender{Destination: controller.Messages()}\n\t\ttc.sessionA.Sender = messageQueue\n\t\ttc.sessionB.Sender = messageQueue\n\t\ttc.sessionA.Logger = testlog.NewLogger(t).New(\"session\", \"a\")\n\t\ttc.sessionB.Logger = testlog.NewLogger(t).New(\"session\", \"b\")\n\n\t\t// the wait group is not used for synchronization, but rather to check that the controller\n\t\t// returns\n\t\tvar wg sync.WaitGroup\n\t\twg.Add(1)\n\n\t\tgo func() {\n\t\t\terr := controller.Run()\n\t\t\trequire.NoError(t, err)\n\t\t\twg.Done()\n\t\t}()\n\n\t\t// second argument is not used because we have a single queue\n\t\ttc.testBehavior(messageQueue, nil)\n\n\t\tassert.Equal(t, tc.expectedUpA, controller.IsUp(tc.sessionA.LocalDiscriminator))\n\t\tassert.Equal(t, tc.expectedUpB, controller.IsUp(tc.sessionB.LocalDiscriminator))\n\n\t\tmessageQueue.Close()\n\n\t\tfor i := 0; i < 2; i++ {\n\t\t\terr := <-controller.Errors()\n\t\t\tassert.NoError(t, err)\n\t\t}\n\t\twg.Wait()\n\t}\n}", "func TestGetCanGetAlldata(t *testing.T) {\n\ts1, closeFn1, clientset1, configFilename1, s2, closeFn2, clientset2, configFilename2 := setUpTwoApiservers(t)\n\tdefer deleteSinglePartitionConfigFile(t, configFilename1)\n\tdefer deleteSinglePartitionConfigFile(t, configFilename2)\n\tdefer closeFn1()\n\tdefer closeFn2()\n\n\t// create pods via 2 different api servers\n\tpod1 := createPod(t, clientset1, tenant1, \"te\", \"pod1\")\n\tdefer framework.DeleteTestingTenant(tenant1, s1, t)\n\tpod2 := createPod(t, clientset2, tenant2, \"te\", \"pod1\")\n\tdefer framework.DeleteTestingTenant(tenant2, s2, t)\n\tassert.NotNil(t, 
pod1)\n\tassert.NotNil(t, pod2)\n\tassert.NotEqual(t, pod1.UID, pod2.UID)\n\n\t// verify get pod with same api server\n\treadPod1, err := clientset1.CoreV1().PodsWithMultiTenancy(pod1.Namespace, pod1.Tenant).Get(pod1.Name, metav1.GetOptions{})\n\tassert.Nil(t, err, \"Failed to get pod 1 from same clientset\")\n\tassert.NotNil(t, readPod1)\n\treadPod2, err := clientset2.CoreV1().PodsWithMultiTenancy(pod2.Namespace, pod2.Tenant).Get(pod2.Name, metav1.GetOptions{})\n\tassert.Nil(t, err, \"Failed to get pod 2 from same clientset\")\n\tassert.NotNil(t, readPod2)\n\n\t// verify get pod through different api server\n\treadPod1, err = clientset2.CoreV1().PodsWithMultiTenancy(pod1.Namespace, pod1.Tenant).Get(pod1.Name, metav1.GetOptions{})\n\tassert.Nil(t, err, \"Failed to get pod 1 from different clientset\")\n\tif err == nil {\n\t\tcheckPodEquality(t, pod1, readPod1)\n\t}\n\treadPod2, err = clientset1.CoreV1().PodsWithMultiTenancy(pod2.Namespace, pod2.Tenant).Get(pod2.Name, metav1.GetOptions{})\n\tassert.Nil(t, err, \"Failed to get pod 2 from different clientset\")\n\tif err == nil {\n\t\tcheckPodEquality(t, pod2, readPod2)\n\t}\n\n\t// create replicaset via 2 different api servers\n\trs1 := createRS(t, clientset1, tenant1, \"rs1\", \"default\", 1)\n\trs2 := createRS(t, clientset2, tenant2, \"rs2\", \"default\", 1)\n\tassert.NotNil(t, rs1)\n\tassert.NotNil(t, rs2)\n\tassert.NotEqual(t, rs1.UID, rs2.UID)\n\n\t// verify get rs through different api server\n\treadRs1, err := clientset2.AppsV1().ReplicaSetsWithMultiTenancy(rs1.Namespace, rs1.Tenant).Get(rs1.Name, metav1.GetOptions{})\n\tassert.Nil(t, err, \"Failed to get rs 1 from different clientset\")\n\tif err == nil {\n\t\tcheckRSEquality(t, rs1, readRs1)\n\t}\n\treadRs2, err := clientset1.AppsV1().ReplicaSetsWithMultiTenancy(rs2.Namespace, rs2.Tenant).Get(rs2.Name, metav1.GetOptions{})\n\tassert.Nil(t, err, \"Failed to get rs 2 from different clientset\")\n\tif err == nil {\n\t\tcheckRSEquality(t, rs2, 
readRs2)\n\t}\n\n\t// tear down\n\tdeletePod(t, clientset1, pod1)\n\tdeletePod(t, clientset1, pod2)\n\tdeleteRS(t, clientset2, rs1)\n\tdeleteRS(t, clientset2, rs2)\n}", "func setUpTestApps(envoyMaxTLSVersion string, clientName string, serverName string, ns []string) (*v1.Pod, *v1.Service) {\n\t// Install OSM\n\tinstallOpts := Td.GetOSMInstallOpts()\n\tinstallOpts.EnablePermissiveMode = true\n\tExpect(Td.InstallOSM(installOpts)).To(Succeed())\n\tExpect(Td.WaitForPodsRunningReady(Td.OsmNamespace, 60*time.Second, 3 /* 1 controller, 1 injector, 1 bootstrap */, nil)).To(Succeed())\n\n\t// Get the meshConfig CRD\n\tmeshConfig, err := Td.GetMeshConfig(Td.OsmNamespace)\n\tExpect(err).NotTo(HaveOccurred())\n\n\t// Update envoy maxTLSVersion\n\tBy(fmt.Sprintf(\"Patching envoy maxTLSVersion to be %s\", envoyMaxTLSVersion))\n\tmeshConfig.Spec.Sidecar.TLSMaxProtocolVersion = envoyMaxTLSVersion\n\t_, err = Td.UpdateOSMConfig(meshConfig)\n\tExpect(err).NotTo(HaveOccurred())\n\n\t// Create Meshed Test NS\n\tfor _, n := range ns {\n\t\tExpect(Td.CreateNs(n, nil)).To(Succeed())\n\t\tExpect(Td.AddNsToMesh(true, n)).To(Succeed())\n\t}\n\n\t// Get simple pod definitions for the HTTP server\n\tsvcAccDef, podDef, svcDef, err := Td.GetOSSpecificHTTPBinPod(serverName, serverName, PodCommandDefault...)\n\tExpect(err).NotTo(HaveOccurred())\n\n\t// Create Server Pod\n\t_, err = Td.CreateServiceAccount(serverName, &svcAccDef)\n\tExpect(err).NotTo(HaveOccurred())\n\t_, err = Td.CreatePod(serverName, podDef)\n\tExpect(err).NotTo(HaveOccurred())\n\n\t// Create Server Service\n\tdstSvc, err := Td.CreateService(serverName, svcDef)\n\tExpect(err).NotTo(HaveOccurred())\n\t// Expect it to be up and running in it's receiver namespace\n\tExpect(Td.WaitForPodsRunningReady(serverName, 90*time.Second, 1, nil)).To(Succeed())\n\n\t// Create Client Pod\n\twithSourceKubernetesService := true\n\t// setupSource sets up a curl source service and returns the pod object\n\tclientPod := setupSource(clientName, 
withSourceKubernetesService)\n\treturn clientPod, dstSvc\n}", "func waitForPods(cs *framework.ClientSet, expectedTotal, min, max int32) error {\n\terr := wait.PollImmediate(1*time.Second, 5*time.Minute, func() (bool, error) {\n\t\td, err := cs.AppsV1Interface.Deployments(\"openshift-machine-config-operator\").Get(context.TODO(), \"etcd-quorum-guard\", metav1.GetOptions{})\n\t\tif err != nil {\n\t\t\t// By this point the deployment should exist.\n\t\t\tfmt.Printf(\" error waiting for etcd-quorum-guard deployment to exist: %v\\n\", err)\n\t\t\treturn true, err\n\t\t}\n\t\tif d.Status.Replicas < 1 {\n\t\t\tfmt.Println(\"operator deployment has no replicas\")\n\t\t\treturn false, nil\n\t\t}\n\t\tif d.Status.Replicas == expectedTotal &&\n\t\t\td.Status.AvailableReplicas >= min &&\n\t\t\td.Status.AvailableReplicas <= max {\n\t\t\tfmt.Printf(\" Deployment is ready! %d %d\\n\", d.Status.Replicas, d.Status.AvailableReplicas)\n\t\t\treturn true, nil\n\t\t}\n\t\treturn false, nil\n\t})\n\tif err != nil {\n\t\treturn err\n\t}\n\tfor pod, info := range pods {\n\t\tif info.status == \"Running\" {\n\t\t\tnode := info.node\n\t\t\tif node == \"\" {\n\t\t\t\treturn fmt.Errorf(\"Pod %s not associated with a node\", pod)\n\t\t\t}\n\t\t\tif _, ok := nodes[node]; !ok {\n\t\t\t\treturn fmt.Errorf(\"pod %s running on %s, not a master\", pod, node)\n\t\t\t}\n\t\t}\n\t}\n\treturn nil\n}", "func checkPods(podClient v1.CoreV1Interface, logger *log.Logger, filters ...PodPredicate) ([]kubev1.Pod, error) {\n\tlogger = logging.CreateNewStdLoggerOrUseExistingLogger(logger)\n\n\tlogger.Print(\"Checking that all Pods are running or completed...\")\n\n\tlist, err := podClient.Pods(metav1.NamespaceAll).List(context.TODO(), metav1.ListOptions{})\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"error getting pod list: %v\", err)\n\t}\n\n\tif len(list.Items) == 0 {\n\t\treturn nil, fmt.Errorf(\"pod list is empty. 
this should NOT happen\")\n\t}\n\n\tpods := filterPods(list, filters...)\n\n\t// Keep track of all pending pods that are not associated with a job\n\t// and store all pods associated with a job for further analysis\n\tpendingPods := []kubev1.Pod{}\n\tjobPods := []kubev1.Pod{}\n\tfor _, pod := range pods.Items {\n\t\tif IsNotControlledByJob(pod) {\n\t\t\t// Completed pod not associated with a job, e.g. a standalone pod\n\t\t\tif pod.Status.Phase == kubev1.PodSucceeded {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tif pod.Status.Phase != kubev1.PodPending {\n\t\t\t\treturn nil, fmt.Errorf(\"pod %s/%s in unexpected phase %s: reason: %s message: %s\", pod.Namespace, pod.Name, pod.Status.Phase, pod.Status.Reason, pod.Status.Message)\n\t\t\t}\n\t\t\tlogger.Printf(\"pod %s/%s is not ready. Phase: %s, Reason: %s, Message: %s\", pod.Namespace, pod.Name, pod.Status.Phase, pod.Status.Reason, pod.Status.Message)\n\t\t\tpendingPods = append(pendingPods, pod)\n\t\t} else {\n\t\t\tjobPods = append(jobPods, pod)\n\t\t}\n\t}\n\n\tpendingJobPods, err := checkJobPods(jobPods, logger)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tlogger.Printf(\"%v pods are currently not running or complete:\", len(pendingPods)+len(pendingJobPods))\n\n\treturn append(pendingPods, pendingJobPods...), nil\n}", "func (k *Kubernetes) Test(ctx context.Context) error {\n\tk.l.Lock()\n\tdefer k.l.Unlock()\n\n\treturn k.updatePods(ctx)\n}", "func newPodClients(kubeClient kubernetes.Interface, namespace string) v1lister.PodLister {\n\t// We are interested in pods which are Running or Unknown or Pending\n\t// We don't want succeeded and failed pods because they don't generate any usage anymore.\n\tselector := fields.ParseSelectorOrDie(\"status.phase!=\" + string(v1.PodSucceeded) +\n\t\t\",status.phase!=\" + string(v1.PodFailed))\n\tpodListWatch := cache.NewListWatchFromClient(kubeClient.CoreV1().RESTClient(), \"pods\", namespace, selector)\n\tindexer, controller := 
cache.NewIndexerInformer(\n\t\tpodListWatch,\n\t\t&v1.Pod{},\n\t\ttime.Hour,\n\t\t&cache.ResourceEventHandlerFuncs{},\n\t\tcache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc},\n\t)\n\tpodLister := v1lister.NewPodLister(indexer)\n\tstopCh := make(chan struct{})\n\tgo controller.Run(stopCh)\n\tif !cache.WaitForCacheSync(make(<-chan struct{}), controller.HasSynced) {\n\t\tlogrus.Fatalf(\"Failed to sync pod cache during initialization\")\n\t} else {\n\t\tlogrus.Info(\"Initial pod synced successfully\")\n\t}\n\treturn podLister\n}", "func (a *AppBuilder) Controllers(ctors ...Controller) *AppBuilder {\n\ta.init()\n\n\tvar configFile string\n\tflag.StringVar(&configFile, \"config\", \"\",\n\t\t\"The controller will load its initial configuration from this file. \"+\n\t\t\t\"Omit this flag to use the default configuration values. \"+\n\t\t\t\"Command-line flags override configuration from this file.\")\n\n\tflag.Parse()\n\n\tvar err error\n\toptions := ctrl.Options{Scheme: a.scheme}\n\tif configFile != \"\" {\n\t\toptions, err = options.AndFrom(ctrl.ConfigFile().AtPath(configFile))\n\t\tif err != nil {\n\t\t\ta.Logger.Fatalw(\"unable to load the config file\", \"err\", err)\n\t\t}\n\t}\n\toptions.Scheme = a.scheme\n\n\ta.Manager, err = ctrl.NewManager(a.Config, options)\n\tif err != nil {\n\t\ta.Logger.Fatalw(\"unable to start manager\", \"err\", err)\n\t}\n\tif err := a.Manager.AddHealthzCheck(\"healthz\", healthz.Ping); err != nil {\n\t\ta.Logger.Fatalw(\"unable to set up health check\", \"err\", err)\n\t}\n\tif err := a.Manager.AddReadyzCheck(\"readyz\", healthz.Ping); err != nil {\n\t\ta.Logger.Fatalw(\"unable to set up ready check\", \"err\", err)\n\t}\n\n\ta.Context = kmanager.WithManager(a.Context, a.Manager)\n\t// a manager implements all cluster.Cluster methods\n\ta.Context = kclient.WithCluster(a.Context, a.Manager)\n\ta.initClient(a.Manager.GetClient())\n\n\tfor _, controller := range ctors {\n\t\tname := 
controller.Name()\n\t\tcontrollerAtomicLevel := a.LevelManager.Get(name)\n\t\tcontrollerLogger := a.Logger.Desugar().WithOptions(zap.UpdateCore(controllerAtomicLevel, *a.ZapConfig)).Named(name).Sugar()\n\t\tif err := controller.Setup(a.Context, a.Manager, controllerLogger); err != nil {\n\t\t\ta.Logger.Fatalw(\"controller setup error\", \"ctrl\", name, \"err\", err)\n\t\t}\n\t}\n\n\ta.startFunc = append(a.startFunc, func(ctx context.Context) error {\n\t\treturn a.Manager.Start(ctx)\n\t})\n\n\treturn a\n}", "func (o *ControllerBuildOptions) Run() error {\n\tjxClient, devNs, err := o.JXClientAndDevNamespace()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tkubeClient, err := o.KubeClient()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\ttektonClient, _, err := o.TektonClient()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif !o.GitReporting {\n\t\tif strings.ToLower(os.Getenv(\"GIT_REPORTING\")) == \"true\" {\n\t\t\to.GitReporting = true\n\t\t}\n\t}\n\tif o.GitReporting {\n\t\t_, err = kubeClient.AppsV1().Deployments(devNs).Get(foghornDeploymentName, metav1.GetOptions{})\n\t\tif err != nil {\n\t\t\tif !k8sErrors.IsNotFound(err) {\n\t\t\t\tlog.Logger().Warnf(\"failed to look up deployment %s in namespace %s: %s\", foghornDeploymentName, devNs, err)\n\t\t\t}\n\t\t} else {\n\t\t\to.foghornPresent = true\n\t\t}\n\t\tif o.TargetURLTemplate == \"\" {\n\t\t\to.TargetURLTemplate = os.Getenv(\"TARGET_URL_TEMPLATE\")\n\t\t}\n\t\tif o.TargetURLTemplate == \"\" {\n\t\t\to.TargetURLTemplate = defaultTargetURLTemplate\n\t\t}\n\t}\n\n\tns := o.Namespace\n\tif ns == \"\" {\n\t\tns = devNs\n\t}\n\n\to.EnvironmentCache = kube.CreateEnvironmentCache(jxClient, ns)\n\n\tif o.InitGitCredentials {\n\t\terr = o.InitGitConfigAndUser()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\terr = o.ensureSourceRepositoryHasLabels(jxClient, ns)\n\tif err != nil {\n\t\tlog.Logger().Warnf(\"failed to label the legacy SourceRepository resources: %s\", err)\n\t}\n\n\terr = 
o.ensurePipelineActivityHasLabels(jxClient, ns)\n\tif err != nil {\n\t\tlog.Logger().Warnf(\"failed to label the legacy PipelineActivity resources: %s\", err)\n\t}\n\n\tpod := &corev1.Pod{}\n\tlog.Logger().Infof(\"Watching for Pods in namespace %s\", util.ColorInfo(ns))\n\tlistWatch := cache.NewListWatchFromClient(kubeClient.CoreV1().RESTClient(), \"pods\", ns, fields.Everything())\n\tkube.SortListWatchByName(listWatch)\n\t_, controller := cache.NewInformer(\n\t\tlistWatch,\n\t\tpod,\n\t\ttime.Minute*10,\n\t\tcache.ResourceEventHandlerFuncs{\n\t\t\tAddFunc: func(obj interface{}) {\n\t\t\t\to.onPipelinePod(obj, kubeClient, jxClient, tektonClient, ns)\n\t\t\t},\n\t\t\tUpdateFunc: func(oldObj, newObj interface{}) {\n\t\t\t\to.onPipelinePod(newObj, kubeClient, jxClient, tektonClient, ns)\n\t\t\t},\n\t\t\tDeleteFunc: func(obj interface{}) {\n\t\t\t},\n\t\t},\n\t)\n\n\tstop := make(chan struct{})\n\tgo controller.Run(stop)\n\n\t// Wait forever\n\tselect {}\n}", "func runAllControllers(controllers []controllerHelper, controllerThreadiness int, stopCh <-chan struct{}) {\n\n\t// Start the informer factories to begin populating the informer caches\n\tlog.Info(\"Starting controllers\")\n\n\t// for all our controllers\n\tfor _, c := range controllers {\n\t\tgo func(ch controllerHelper) {\n\t\t\terr := ch.Run(controllerThreadiness, stopCh)\n\t\t\tif err != nil {\n\t\t\t\tlog.Errorf(\"Cannot run controller %s\", reflect.TypeOf(ch))\n\t\t\t}\n\t\t}(c)\n\t}\n\n\t<-stopCh\n\tlog.Info(\"Controllers stopped\")\n}", "func testDebug(t *testing.T, c *client.APIClient, projectName, repoName string) {\n\tt.Helper()\n\t// Get all the authenticated clients at the beginning of the test.\n\t// GetAuthenticatedPachClient will always re-activate auth, which\n\t// causes PPS to rotate all the pipeline tokens. 
This makes the RCs\n\t// change and recreates all the pods, which used to race with collecting logs.\n\talice := tu.Robot(tu.UniqueString(\"alice\"))\n\taliceClient, adminClient := tu.AuthenticateClient(t, c, alice), tu.AuthenticateClient(t, c, auth.RootUser)\n\tif projectName != \"default\" {\n\t\trequire.NoError(t, aliceClient.CreateProject(projectName))\n\t}\n\n\trequire.NoError(t, aliceClient.CreateRepo(projectName, repoName))\n\n\texpectedFiles, pipelines := tu.DebugFiles(t, projectName, repoName)\n\n\tfor _, p := range pipelines {\n\t\trequire.NoError(t, aliceClient.CreatePipeline(projectName,\n\t\t\tp,\n\t\t\t\"\",\n\t\t\t[]string{\"bash\"},\n\t\t\t[]string{\n\t\t\t\tfmt.Sprintf(\"cp /pfs/%s/* /pfs/out/\", repoName),\n\t\t\t\t\"sleep 45\",\n\t\t\t},\n\t\t\t&pps.ParallelismSpec{\n\t\t\t\tConstant: 1,\n\t\t\t},\n\t\t\tclient.NewPFSInput(projectName, repoName, \"/*\"),\n\t\t\t\"\",\n\t\t\tfalse,\n\t\t))\n\t}\n\n\tcommit1, err := aliceClient.StartCommit(projectName, repoName, \"master\")\n\trequire.NoError(t, err)\n\terr = aliceClient.PutFile(commit1, \"file\", strings.NewReader(\"foo\"))\n\trequire.NoError(t, err)\n\trequire.NoError(t, aliceClient.FinishCommit(projectName, repoName, commit1.Branch.Name, commit1.Id))\n\n\tjobInfos, err := aliceClient.WaitJobSetAll(commit1.Id, false)\n\trequire.NoError(t, err)\n\trequire.Equal(t, 3, len(jobInfos))\n\n\trequire.YesError(t, aliceClient.Dump(nil, 0, &bytes.Buffer{}))\n\n\trequire.NoErrorWithinT(t, time.Minute, func() error {\n\t\t// Only admins can collect a debug dump.\n\t\tbuf := &bytes.Buffer{}\n\t\trequire.NoError(t, adminClient.Dump(nil, 0, buf))\n\t\tgr, err := gzip.NewReader(buf)\n\t\tif err != nil {\n\t\t\treturn err //nolint:wrapcheck\n\t\t}\n\t\tdefer func() {\n\t\t\trequire.NoError(t, gr.Close())\n\t\t}()\n\t\t// Check that all of the expected files were returned.\n\t\ttr := tar.NewReader(gr)\n\t\tfor {\n\t\t\thdr, err := tr.Next()\n\t\t\tif err != nil {\n\t\t\t\tif err == io.EOF 
{\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t\treturn err //nolint:wrapcheck\n\t\t\t}\n\t\t\tfor pattern, g := range expectedFiles {\n\t\t\t\tif g.Match(hdr.Name) {\n\t\t\t\t\tdelete(expectedFiles, pattern)\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\tif len(expectedFiles) > 0 {\n\t\t\treturn errors.Errorf(\"Debug dump hasn't produced the exepcted files: %v\", expectedFiles)\n\t\t}\n\t\treturn nil\n\t})\n}", "func (t DefaultBuildManager) PodWatcher() {\n\n\tt.logger.Printf(\"Starting pod watcher\")\n\n\tdeleted := make(map[string]struct{})\n\n\tfor {\n\t\twatched, err := t.kubernetesClient.Pods(\"decap\").Watch(k8sapi.ListOptions{\n\t\t\tLabelSelector: \"type=decap-build\",\n\t\t})\n\t\tif err != nil {\n\t\t\tt.logger.Printf(\"Error watching cluster: %v\\n\", err)\n\t\t\tcontinue\n\t\t}\n\n\t\tevents := watched.ResultChan()\n\n\t\tfor event := range events {\n\t\t\tpod, ok := event.Object.(*k8sapi.Pod)\n\t\t\tif !ok {\n\t\t\t\t// we selected pods, so this will be a pod, but be conservative.\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tdeletePod := false\n\t\t\tfor _, v := range pod.Status.ContainerStatuses {\n\t\t\t\tif v.Name == \"build-server\" && v.State.Terminated != nil && v.State.Terminated.ContainerID != \"\" {\n\t\t\t\t\tdeletePod = true\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\n\t\t\t// Try to elete the build pod if it has not already been deleted.\n\t\t\tif _, present := deleted[pod.Name]; !present && deletePod {\n\t\t\t\tif err := t.kubernetesClient.Pods(\"decap\").Delete(pod.Name, nil); err != nil {\n\t\t\t\t\tt.logger.Printf(\"Error deleting build-server pod: %v\\n\", err)\n\t\t\t\t} else {\n\t\t\t\t\tt.logger.Printf(\"Deleted pod %s\\n\", pod.Name)\n\t\t\t\t}\n\t\t\t\tdeleted[pod.Name] = struct{}{}\n\t\t\t}\n\t\t}\n\t}\n}", "func TestKnativeServingDeploymentRecreationReady(t *testing.T) {\n\tcancel := logstream.Start(t)\n\tdefer cancel()\n\tclients := Setup(t)\n\n\tdpList, err := 
clients.KubeClient.Kube.AppsV1().Deployments(test.ServingOperatorNamespace).List(metav1.ListOptions{})\n\tif err != nil {\n\t\tt.Fatalf(\"Failed to get any deployment under the namespace %q: %v\",\n\t\t\ttest.ServingOperatorNamespace, err)\n\t}\n\t// Delete the deployments one by one to see if they will be recreated.\n\tfor _, deployment := range dpList.Items {\n\t\tif err := clients.KubeClient.Kube.AppsV1().Deployments(deployment.Namespace).Delete(deployment.Name,\n\t\t\t&metav1.DeleteOptions{}); err != nil {\n\t\t\tt.Fatalf(\"Failed to delete deployment %s/%s: %v\", deployment.Namespace, deployment.Name, err)\n\t\t}\n\t\tif _, err = resources.WaitForDeploymentAvailable(clients, deployment.Name, deployment.Namespace,\n\t\t\tresources.IsDeploymentAvailable); err != nil {\n\t\t\tt.Fatalf(\"The deployment %s/%s failed to reach the desired state: %v\",\n\t\t\t\tdeployment.Namespace, deployment.Name, err)\n\t\t}\n\t\tif _, err := resources.WaitForKnativeServingState(clients.KnativeServingAlphaClient, test.ServingOperatorName,\n\t\t\tresources.IsKnativeServingReady); err != nil {\n\t\t\tt.Fatalf(\"KnativeService %q failed to reach the desired state: %v\", test.ServingOperatorName, err)\n\t\t}\n\t\tt.Logf(\"The deployment %s/%s reached the desired state.\", deployment.Namespace, deployment.Name)\n\t}\n}", "func (mock *PodSecurityPolicyTemplateInterfaceMock) ControllerCalls() []struct {\n} {\n\tvar calls []struct {\n\t}\n\tlockPodSecurityPolicyTemplateInterfaceMockController.RLock()\n\tcalls = mock.calls.Controller\n\tlockPodSecurityPolicyTemplateInterfaceMockController.RUnlock()\n\treturn calls\n}", "func (h *KubernetesHelper) CheckPods(ctx context.Context, namespace string, deploymentName string, replicas int) error {\n\tvar checkedPods []corev1.Pod\n\n\terr := h.retryFor(60*time.Minute, func() error {\n\t\tcheckedPods = []corev1.Pod{}\n\t\tpods, err := h.GetPodsForDeployment(ctx, namespace, deploymentName)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tvar 
deploymentReplicas int\n\t\tfor _, pod := range pods {\n\t\t\tcheckedPods = append(checkedPods, pod)\n\n\t\t\tdeploymentReplicas++\n\t\t\tif pod.Status.Phase != \"Running\" {\n\t\t\t\treturn fmt.Errorf(\"Pod [%s] in namespace [%s] is not running\",\n\t\t\t\t\tpod.Name, pod.Namespace)\n\t\t\t}\n\t\t\tfor _, container := range pod.Status.ContainerStatuses {\n\t\t\t\tif !container.Ready {\n\t\t\t\t\treturn fmt.Errorf(\"Container [%s] in pod [%s] in namespace [%s] is not running\",\n\t\t\t\t\t\tcontainer.Name, pod.Name, pod.Namespace)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\tif deploymentReplicas != replicas {\n\t\t\treturn fmt.Errorf(\"Expected there to be [%d] pods in deployment [%s] in namespace [%s], but found [%d]\",\n\t\t\t\treplicas, deploymentName, namespace, deploymentReplicas)\n\t\t}\n\n\t\treturn nil\n\t})\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfor _, pod := range checkedPods {\n\t\tfor _, status := range append(pod.Status.ContainerStatuses, pod.Status.InitContainerStatuses...) {\n\t\t\terrStr := fmt.Sprintf(\"Container [%s] in pod [%s] in namespace [%s] has restart count [%d]\",\n\t\t\t\tstatus.Name, pod.Name, pod.Namespace, status.RestartCount)\n\t\t\tif status.RestartCount == 1 {\n\t\t\t\treturn &RestartCountError{errStr}\n\t\t\t}\n\t\t\tif status.RestartCount > 1 {\n\t\t\t\treturn errors.New(errStr)\n\t\t\t}\n\t\t}\n\t}\n\n\treturn nil\n}", "func runSyncTests(t *testing.T, tests []controllerTest, storageClasses []*storage.StorageClass) {\n\tfor _, test := range tests {\n\t\tglog.V(4).Infof(\"starting test %q\", test.name)\n\n\t\t// Initialize the controller\n\t\tclient := &fake.Clientset{}\n\t\tctrl, err := newTestController(client, nil, true)\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"Test %q construct persistent volume failed: %v\", test.name, err)\n\t\t}\n\t\treactor := newVolumeReactor(client, ctrl, nil, nil, test.errors)\n\t\tfor _, claim := range test.initialClaims {\n\t\t\tctrl.claims.Add(claim)\n\t\t\treactor.claims[claim.Name] = 
claim\n\t\t}\n\t\tfor _, volume := range test.initialVolumes {\n\t\t\tctrl.volumes.store.Add(volume)\n\t\t\treactor.volumes[volume.Name] = volume\n\t\t}\n\n\t\t// Inject classes into controller via a custom lister.\n\t\tindexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{})\n\t\tfor _, class := range storageClasses {\n\t\t\tindexer.Add(class)\n\t\t}\n\t\tctrl.classLister = storagelisters.NewStorageClassLister(indexer)\n\n\t\t// Run the tested functions\n\t\terr = test.test(ctrl, reactor, test)\n\t\tif err != nil {\n\t\t\tt.Errorf(\"Test %q failed: %v\", test.name, err)\n\t\t}\n\n\t\t// Wait for the target state\n\t\terr = reactor.waitTest(test)\n\t\tif err != nil {\n\t\t\tt.Errorf(\"Test %q failed: %v\", test.name, err)\n\t\t}\n\n\t\tevaluateTestResults(ctrl, reactor, test, t)\n\t}\n}", "func verifyInternalIngressController(t *testing.T, name types.NamespacedName, hostname, address, image string) {\n\tkubeConfig, err := config.GetConfig()\n\tif err != nil {\n\t\tt.Fatalf(\"failed to get kube config: %v\", err)\n\t}\n\tclient, err := kubernetes.NewForConfig(kubeConfig)\n\tif err != nil {\n\t\tt.Fatalf(\"failed to create kube client: %v\", err)\n\t}\n\n\techoPod := buildEchoPod(name.Name, name.Namespace)\n\tif err := kclient.Create(context.TODO(), echoPod); err != nil {\n\t\tt.Fatalf(\"failed to create pod %s/%s: %v\", echoPod.Namespace, echoPod.Name, err)\n\t}\n\tdefer func() {\n\t\tif err := kclient.Delete(context.TODO(), echoPod); err != nil {\n\t\t\tt.Fatalf(\"failed to delete pod %s/%s: %v\", echoPod.Namespace, echoPod.Name, err)\n\t\t}\n\t}()\n\n\techoService := buildEchoService(echoPod.Name, echoPod.Namespace, echoPod.ObjectMeta.Labels)\n\tif err := kclient.Create(context.TODO(), echoService); err != nil {\n\t\tt.Fatalf(\"failed to create service %s/%s: %v\", echoService.Namespace, echoService.Name, err)\n\t}\n\tdefer func() {\n\t\tif err := kclient.Delete(context.TODO(), echoService); err != nil {\n\t\t\tt.Fatalf(\"failed to delete service 
%s/%s: %v\", echoService.Namespace, echoService.Name, err)\n\t\t}\n\t}()\n\n\techoRoute := buildRouteWithHost(echoPod.Name, echoPod.Namespace, echoService.Name, hostname)\n\tif err := kclient.Create(context.TODO(), echoRoute); err != nil {\n\t\tt.Fatalf(\"failed to create route %s/%s: %v\", echoRoute.Namespace, echoRoute.Name, err)\n\t}\n\tdefer func() {\n\t\tif err := kclient.Delete(context.TODO(), echoRoute); err != nil {\n\t\t\tt.Fatalf(\"failed to delete route %s/%s: %v\", echoRoute.Namespace, echoRoute.Name, err)\n\t\t}\n\t}()\n\n\textraArgs := []string{\n\t\t\"--header\", \"HOST:\" + echoRoute.Spec.Host,\n\t\t\"-v\",\n\t\t\"--retry-delay\", \"20\",\n\t\t\"--max-time\", \"10\",\n\t}\n\tclientPodName := types.NamespacedName{Namespace: name.Namespace, Name: \"curl-\" + name.Name}\n\tclientPodSpec := buildCurlPod(clientPodName.Name, clientPodName.Namespace, image, address, echoRoute.Spec.Host, extraArgs...)\n\tclientPod := clientPodSpec.DeepCopy()\n\tif err := kclient.Create(context.TODO(), clientPod); err != nil {\n\t\tt.Fatalf(\"failed to create pod %q: %v\", clientPodName, err)\n\t}\n\tdefer func() {\n\t\tif err := kclient.Delete(context.TODO(), clientPod); err != nil {\n\t\t\tif errors.IsNotFound(err) {\n\t\t\t\treturn\n\t\t\t}\n\t\t\tt.Fatalf(\"failed to delete pod %q: %v\", clientPodName, err)\n\t\t}\n\t}()\n\n\tvar curlPodLogs string\n\terr = wait.PollImmediate(10*time.Second, 10*time.Minute, func() (bool, error) {\n\t\tif err := kclient.Get(context.TODO(), clientPodName, clientPod); err != nil {\n\t\t\tt.Logf(\"error getting client pod %q: %v, retrying...\", clientPodName, err)\n\t\t\treturn false, nil\n\t\t}\n\t\t// First check if client curl pod is still starting or not running.\n\t\tif clientPod.Status.Phase == corev1.PodPending {\n\t\t\tt.Logf(\"waiting for client pod %q to start\", clientPodName)\n\t\t\treturn false, nil\n\t\t}\n\t\treadCloser, err := client.CoreV1().Pods(clientPod.Namespace).GetLogs(clientPod.Name, 
&corev1.PodLogOptions{\n\t\t\tContainer: \"curl\",\n\t\t\tFollow: false,\n\t\t}).Stream(context.TODO())\n\t\tif err != nil {\n\t\t\tt.Logf(\"failed to read output from pod %s: %v\", clientPod.Name, err)\n\t\t\treturn false, nil\n\t\t}\n\t\tscanner := bufio.NewScanner(readCloser)\n\t\tdefer func() {\n\t\t\tif err := readCloser.Close(); err != nil {\n\t\t\t\tt.Errorf(\"failed to close reader for pod %s: %v\", clientPod.Name, err)\n\t\t\t}\n\t\t}()\n\t\tcurlPodLogs = \"\"\n\t\tfor scanner.Scan() {\n\t\t\tline := scanner.Text()\n\t\t\tcurlPodLogs += line + \"\\n\"\n\t\t\tif strings.Contains(line, \"HTTP/1.0 200 OK\") {\n\t\t\t\tt.Logf(\"verified connectivity with workload with address: %s with response %s\", address, line)\n\t\t\t\treturn true, nil\n\t\t\t}\n\t\t}\n\t\t// If failed or succeeded, the pod is stopped, but didn't provide us 200 response, let's try again.\n\t\tif clientPod.Status.Phase == corev1.PodFailed || clientPod.Status.Phase == corev1.PodSucceeded {\n\t\t\tt.Logf(\"client pod %q has stopped...restarting. Curl Pod Logs:\\n%s\", clientPodName, curlPodLogs)\n\t\t\tif err := kclient.Delete(context.TODO(), clientPod); err != nil && errors.IsNotFound(err) {\n\t\t\t\tt.Fatalf(\"failed to delete pod %q: %v\", clientPodName, err)\n\t\t\t}\n\t\t\t// Wait for deletion to prevent a race condition. 
Use PollInfinite since we are already in a Poll.\n\t\t\twait.PollInfinite(5*time.Second, func() (bool, error) {\n\t\t\t\terr = kclient.Get(context.TODO(), clientPodName, clientPod)\n\t\t\t\tif !errors.IsNotFound(err) {\n\t\t\t\t\tt.Logf(\"waiting for %q: to be deleted\", clientPodName)\n\t\t\t\t\treturn false, nil\n\t\t\t\t}\n\t\t\t\treturn true, nil\n\t\t\t})\n\t\t\tclientPod = clientPodSpec.DeepCopy()\n\t\t\tif err := kclient.Create(context.TODO(), clientPod); err != nil {\n\t\t\t\tt.Fatalf(\"failed to create pod %q: %v\", clientPodName, err)\n\t\t\t}\n\t\t\treturn false, nil\n\t\t}\n\t\treturn false, nil\n\t})\n\tif err != nil {\n\t\tt.Fatalf(\"failed to verify connectivity with workload with address: %s using internal curl client. Curl Pod Logs:\\n%s\", address, curlPodLogs)\n\t}\n}", "func TestIssue351MultipleJobRun(t *testing.T) {\n\tctx, err := NewContext(t)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tdefer ctx.Cleanup()\n\n\t// Create initial CR to generate an initial job and create the initial k8s resources\n\n\tgitops := &gitopsv1alpha1.GitOpsConfig{\n\t\tTypeMeta: metav1.TypeMeta{\n\t\t\tKind: \"GitOpsConfig\",\n\t\t\tAPIVersion: \"eunomia.kohls.io/v1alpha1\",\n\t\t},\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tName: \"gitops-issue351\",\n\t\t\tNamespace: ctx.namespace,\n\t\t},\n\t\tSpec: gitopsv1alpha1.GitOpsConfigSpec{\n\t\t\tTemplateSource: gitopsv1alpha1.GitConfig{\n\t\t\t\tURI: ctx.eunomiaURI,\n\t\t\t\tRef: ctx.eunomiaRef,\n\t\t\t\tContextDir: \"test/e2e/testdata/hello-a\",\n\t\t\t},\n\t\t\tParameterSource: gitopsv1alpha1.GitConfig{\n\t\t\t\tURI: ctx.eunomiaURI,\n\t\t\t\tRef: ctx.eunomiaRef,\n\t\t\t\tContextDir: \"test/e2e/testdata/empty-yaml\",\n\t\t\t},\n\t\t\tTriggers: []gitopsv1alpha1.GitOpsTrigger{\n\t\t\t\t{Type: \"Change\"},\n\t\t\t},\n\t\t\tTemplateProcessorImage: \"quay.io/kohlstechnology/eunomia-base:dev\",\n\t\t\tResourceHandlingMode: \"Apply\",\n\t\t\tResourceDeletionMode: \"Delete\",\n\t\t\tServiceAccountRef: 
\"eunomia-operator\",\n\t\t},\n\t}\n\n\terr = framework.Global.Client.Create(ctx, gitops, &framework.CleanupOptions{TestContext: ctx.TestCtx, Timeout: timeout, RetryInterval: retryInterval})\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t// When the initial job is created, we will use it as a template to create two additional jobs at the same time\n\terr = WaitForJobCreation(ctx.namespace, \"gitopsconfig-gitops-issue351-\", framework.Global.KubeClient)\n\tif err != nil {\n\t\tt.Error(err)\n\t}\n\n\tgitopsJob, err := GetJob(ctx.namespace, \"gitopsconfig-gitops-issue351-\", framework.Global.KubeClient)\n\n\tfirstJob := &batchv1.Job{\n\t\tTypeMeta: metav1.TypeMeta{\n\t\t\tKind: \"GitOpsConfig\",\n\t\t\tAPIVersion: \"eunomia.kohls.io/v1alpha1\",\n\t\t},\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tName: \"first-job\",\n\t\t\tNamespace: ctx.namespace,\n\t\t},\n\t\tSpec: gitopsJob.Spec,\n\t}\n\t// The deep copy of the job keeps the selector and selector label that has to be generated by k8s.\n\t// Trying to create a job with those set will fail.\n\tfirstJob.Spec.Template.SetLabels(map[string]string{})\n\tfirstJob.Spec.Selector.Reset()\n\n\terr = framework.Global.Client.Create(ctx, firstJob, &framework.CleanupOptions{TestContext: ctx.TestCtx, Timeout: timeout, RetryInterval: retryInterval})\n\tif err != nil {\n\t\tt.Error(err)\n\t}\n\n\tsecondJob := &batchv1.Job{\n\t\tTypeMeta: metav1.TypeMeta{\n\t\t\tKind: \"GitOpsConfig\",\n\t\t\tAPIVersion: \"eunomia.kohls.io/v1alpha1\",\n\t\t},\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tName: \"second-job\",\n\t\t\tNamespace: ctx.namespace,\n\t\t},\n\t\tSpec: gitopsJob.Spec,\n\t}\n\tsecondJob.Spec.Template.SetLabels(map[string]string{})\n\tsecondJob.Spec.Selector.Reset()\n\n\terr = framework.Global.Client.Create(ctx, secondJob, &framework.CleanupOptions{TestContext: ctx.TestCtx, Timeout: timeout, RetryInterval: retryInterval})\n\tif err != nil {\n\t\tt.Error(err)\n\t}\n\n\t// Wait to make sure both of the jobs finish running\n\terr = 
wait.Poll(retryInterval, 60*time.Second, func() (done bool, err error) {\n\t\tjobOne, _ := GetJob(ctx.namespace, \"first-job\", framework.Global.KubeClient)\n\t\tjobTwo, _ := GetJob(ctx.namespace, \"second-job\", framework.Global.KubeClient)\n\n\t\tswitch {\n\t\tcase jobOne.Status.Succeeded == 1 && jobTwo.Status.Succeeded == 1:\n\t\t\tt.Logf(\"Both jobs are done\")\n\t\t\treturn true, nil\n\t\tdefault:\n\t\t\tt.Logf(\"Both jobs are not done\")\n\t\t\treturn false, nil\n\t\t}\n\t})\n\tif err != nil {\n\t\tt.Error(err)\n\t}\n\n\tdeploymentList, err := framework.Global.KubeClient.AppsV1().Deployments(ctx.namespace).List(metav1.ListOptions{})\n\tif err != nil {\n\t\tt.Error(err)\n\t}\n\tif len(deploymentList.Items) != 1 {\n\t\tt.Errorf(\"There was only %d deployments when we were expecting 1\", len(deploymentList.Items))\n\t}\n\tif deploymentList.Items[0].GetDeletionTimestamp() != nil {\n\t\tt.Errorf(\"The deployment has been marked for deletion\")\n\t}\n}", "func Test_Pod_Checker(t *testing.T) {\n\tworkflow := func(name string) string {\n\t\treturn workflowPath(\"pod\", name)\n\t}\n\tconst (\n\t\tadded = \"added\"\n\t\tcontainerTerminatedError = \"containerTerminatedError\"\n\t\tcontainerTerminatedSuccess = \"containerTerminatedSuccess\"\n\t\tcontainerTerminatedSuccessRestartNever = \"containerTerminatedSuccessRestartNever\"\n\t\tcreateSuccess = \"createSuccess\"\n\t\timagePullError = \"imagePullError\"\n\t\timagePullErrorResolved = \"imagePullErrorResolved\"\n\t\tscheduled = \"scheduled\"\n\t\tunready = \"unready\"\n\t\tunscheduled = \"unscheduled\"\n\t)\n\n\ttests := []struct {\n\t\tname string\n\t\trecordingPaths []string\n\t\t// TODO: optional message validator function to check returned messages\n\t\texpectReady bool\n\t}{\n\t\t{\n\t\t\tname: \"Pod added but not ready\",\n\t\t\trecordingPaths: []string{workflow(added)},\n\t\t\texpectReady: false,\n\t\t},\n\t\t{\n\t\t\tname: \"Pod scheduled but not ready\",\n\t\t\trecordingPaths: 
[]string{workflow(scheduled)},\n\t\t\texpectReady: false,\n\t\t},\n\t\t{\n\t\t\tname: \"Pod create success\",\n\t\t\trecordingPaths: []string{workflow(createSuccess)},\n\t\t\texpectReady: true,\n\t\t},\n\t\t{\n\t\t\tname: \"Pod image pull error\",\n\t\t\trecordingPaths: []string{workflow(imagePullError)},\n\t\t\texpectReady: false,\n\t\t},\n\t\t{\n\t\t\tname: \"Pod create success after image pull failure resolved\",\n\t\t\trecordingPaths: []string{workflow(imagePullError), workflow(imagePullErrorResolved)},\n\t\t\texpectReady: true,\n\t\t},\n\t\t{\n\t\t\tname: \"Pod unscheduled\",\n\t\t\trecordingPaths: []string{workflow(unscheduled)},\n\t\t\texpectReady: false,\n\t\t},\n\t\t{\n\t\t\tname: \"Pod unready\",\n\t\t\trecordingPaths: []string{workflow(unready)},\n\t\t\texpectReady: false,\n\t\t},\n\t\t{\n\t\t\tname: \"Pod container terminated with error\",\n\t\t\trecordingPaths: []string{workflow(containerTerminatedError)},\n\t\t\texpectReady: false,\n\t\t},\n\t\t{\n\t\t\tname: \"Pod container terminated successfully\",\n\t\t\trecordingPaths: []string{workflow(containerTerminatedSuccess)},\n\t\t\texpectReady: false,\n\t\t},\n\t\t{\n\t\t\tname: \"Pod container terminated successfully with restartPolicy: Never\",\n\t\t\trecordingPaths: []string{workflow(containerTerminatedSuccessRestartNever)},\n\t\t\texpectReady: true,\n\t\t},\n\t}\n\tfor _, tt := range tests {\n\t\tt.Run(tt.name, func(t *testing.T) {\n\t\t\tchecker := NewPodChecker()\n\n\t\t\tready, messages := mustCheckIfRecordingsReady(tt.recordingPaths, checker)\n\t\t\tif ready != tt.expectReady {\n\t\t\t\tt.Errorf(\"Ready() = %t, want %t\\nMessages: %s\", ready, tt.expectReady, messages)\n\t\t\t}\n\t\t})\n\t}\n}", "func newPodClients(kubeClient kube_client.Interface, resourceEventHandler cache.ResourceEventHandler, namespace string) v1lister.PodLister {\n\t// We are interested in pods which are Running or Unknown (in case the pod is\n\t// running but there are some transient errors we don't want to delete it 
from\n\t// our model).\n\t// We don't want to watch Pending pods because they didn't generate any usage\n\t// yet.\n\t// Succeeded and Failed failed pods don't generate any usage anymore but we\n\t// don't necessarily want to immediately delete them.\n\tselector := fields.ParseSelectorOrDie(\"status.phase!=\" + string(apiv1.PodPending))\n\tpodListWatch := cache.NewListWatchFromClient(kubeClient.CoreV1().RESTClient(), \"pods\", namespace, selector)\n\tindexer, controller := cache.NewIndexerInformer(\n\t\tpodListWatch,\n\t\t&apiv1.Pod{},\n\t\ttime.Hour,\n\t\tresourceEventHandler,\n\t\tcache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc},\n\t)\n\tpodLister := v1lister.NewPodLister(indexer)\n\tstopCh := make(chan struct{})\n\tgo controller.Run(stopCh)\n\treturn podLister\n}", "func add(mgr manager.Manager, r reconcile.Reconciler) error {\n\t// Create a new controller\n\tc, err := controller.New(\"jenkinsinstance-controller\", mgr, controller.Options{Reconciler: r})\n\tif err != nil {\n\t\treturn err\n\t}\n\n\twatchPredicate := util.NewPredicate(viper.GetString(\"namespace\"))\n\n\t// Watch for changes to JenkinsInstance\n\terr = c.Watch(&source.Kind{Type: &jenkinsv1alpha1.JenkinsInstance{}}, &handler.EnqueueRequestForObject{}, watchPredicate)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// Watch a Deployment created by JenkinsInstance\n\terr = c.Watch(&source.Kind{Type: &appsv1.Deployment{}}, &handler.EnqueueRequestForOwner{\n\t\tIsController: true,\n\t\tOwnerType: &jenkinsv1alpha1.JenkinsInstance{},\n\t}, watchPredicate)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// Watch a PVC created by JenkinsInstance\n\terr = c.Watch(&source.Kind{Type: &corev1.PersistentVolumeClaim{}}, &handler.EnqueueRequestForOwner{\n\t\tIsController: true,\n\t\tOwnerType: &jenkinsv1alpha1.JenkinsInstance{},\n\t}, watchPredicate)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// Watch a Secret created by JenkinsInstance\n\terr = c.Watch(&source.Kind{Type: &corev1.Secret{}}, 
&handler.EnqueueRequestForOwner{\n\t\tIsController: true,\n\t\tOwnerType: &jenkinsv1alpha1.JenkinsInstance{},\n\t}, watchPredicate)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// Watch a Service created by JenkinsInstance\n\terr = c.Watch(&source.Kind{Type: &corev1.Service{}}, &handler.EnqueueRequestForOwner{\n\t\tIsController: true,\n\t\tOwnerType: &jenkinsv1alpha1.JenkinsInstance{},\n\t}, watchPredicate)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// Watch an Ingress created by JenkinsInstance\n\terr = c.Watch(&source.Kind{Type: &v1beta1.Ingress{}}, &handler.EnqueueRequestForOwner{\n\t\tIsController: true,\n\t\tOwnerType: &jenkinsv1alpha1.JenkinsInstance{},\n\t}, watchPredicate)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// Watch a Networkpolicy created by JenkinsInstance\n\terr = c.Watch(&source.Kind{Type: &netv1.NetworkPolicy{}}, &handler.EnqueueRequestForOwner{\n\t\tIsController: true,\n\t\tOwnerType: &jenkinsv1alpha1.JenkinsInstance{},\n\t}, watchPredicate)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// Watch Secret resources not owned by the JenkinsInstance change\n\t// This is needed for re-loading login information from the pre-provided secret\n\t// When admin login secret changes, Watch will list all JenkinsInstances\n\t// and re-enqueue the keys for the ones that refer to that admin login secret via their spec.\n\terr = c.Watch(\n\t\t&source.Kind{Type: &corev1.Secret{}},\n\t\t&handler.EnqueueRequestsFromMapFunc{\n\t\t\tToRequests: handler.ToRequestsFunc(func(a handler.MapObject) []reconcile.Request {\n\n\t\t\t\tjenkinsInstances := &jenkinsv1alpha1.JenkinsInstanceList{}\n\t\t\t\terr = mgr.GetClient().List(\n\t\t\t\t\tcontext.TODO(),\n\t\t\t\t\t&client.ListOptions{LabelSelector: labels.Everything()},\n\t\t\t\t\tjenkinsInstances)\n\t\t\t\tif err != nil {\n\t\t\t\t\tglog.Errorf(\"Could not list JenkinsInstances\")\n\t\t\t\t\treturn []reconcile.Request{}\n\t\t\t\t}\n\n\t\t\t\tvar keys []reconcile.Request\n\t\t\t\tfor _, inst := range 
jenkinsInstances.Items {\n\t\t\t\t\tif inst.Spec.AdminSecret == a.Meta.GetName() {\n\t\t\t\t\t\tkeys = append(keys, reconcile.Request{\n\t\t\t\t\t\t\tNamespacedName: types.NewNamespacedNameFromString(\n\t\t\t\t\t\t\t\tfmt.Sprintf(\"%s%c%s\", inst.GetNamespace(), types.Separator, inst.GetName())),\n\t\t\t\t\t\t})\n\t\t\t\t\t} else if inst.Spec.PluginConfig != nil {\n\t\t\t\t\t\tif inst.Spec.PluginConfig.ConfigSecret == a.Meta.GetName() {\n\t\t\t\t\t\t\tkeys = append(keys, reconcile.Request{\n\t\t\t\t\t\t\t\tNamespacedName: types.NewNamespacedNameFromString(\n\t\t\t\t\t\t\t\t\tfmt.Sprintf(\"%s%c%s\", inst.GetNamespace(), types.Separator, inst.GetName())),\n\t\t\t\t\t\t\t})\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\t// return found keys\n\t\t\t\treturn keys\n\t\t\t}),\n\t\t}, watchPredicate)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}", "func TestDashboardHandler_Deployments(t *testing.T) {\n\tfor _, test := range tests {\n\t\tt.Logf(\"Running test: %s\", test.Name)\n\t\tsubTest(t, test)\n\t}\n}", "func Test_Reconcile(t *testing.T) {\n\t// Fake client is buggy, and it looks like it more or less works for very basic and simple scenarios\n\t// https://github.com/kubernetes-sigs/controller-runtime/issues/348\n\tfakeClientBuilder := fake.NewClientBuilder()\n\n\t//mock registry\n\tmockRegistry := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tfmt.Fprintln(w, \"Fake registry\")\n\t}))\n\tdefer mockRegistry.Close()\n\n\tu,_ := url.Parse(mockRegistry.URL)\n\n\treconc := reconciler{\n\t\tclient: nil,\n\t\tignoredNamespaces: map[string]struct{}{\"kube-system\": {}},\n\t\tbackupRegistry: u.Host+\"/namespace/backup\",\n\t}\n\n\ttests := []struct {\n\t\t// test case short title\n\t\ttitle string\n\t\tobjects []client.Object\n\t\texpetedImage string\n\t\texpectError bool\n\t}{\n\t\t{\n\t\t\ttitle: \"reconcile deployment\",\n\t\t\texpetedImage: reconc.getTargetImage(u.Host+\"/nginx:latest\"),\n\t\t\tobjects: 
[]client.Object{\n\t\t\t\t&appsv1.Deployment{\n\t\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\t\tName: \"server\",\n\t\t\t\t\t\tNamespace: \"test\",\n\t\t\t\t\t},\n\t\t\t\t\tSpec: appsv1.DeploymentSpec{\n\t\t\t\t\t\tSelector: &metav1.LabelSelector{\n\t\t\t\t\t\t\tMatchLabels: map[string]string{\"deployment\": \"test\" + \"-deployment\"},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tTemplate: corev1.PodTemplateSpec{\n\t\t\t\t\t\t\tObjectMeta: metav1.ObjectMeta{Labels: map[string]string{\"deployment\": \"test\" + \"-deployment\"}},\n\t\t\t\t\t\t\tSpec: corev1.PodSpec{\n\t\t\t\t\t\t\t\tContainers: []corev1.Container{\n\t\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\t\tName: \"nginx\",\n\t\t\t\t\t\t\t\t\t\tImage: u.Host+\"/nginx:latest\",\n\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\ttitle: \"reconcile daemonset\",\n\t\t\texpetedImage: reconc.getTargetImage(u.Host+\"/nginx:latest\"),\n\t\t\tobjects: []client.Object{\n\t\t\t\t&appsv1.DaemonSet{\n\t\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\t\tName: \"server\",\n\t\t\t\t\t\tNamespace: \"test\",\n\t\t\t\t\t},\n\t\t\t\t\tSpec: appsv1.DaemonSetSpec{\n\t\t\t\t\t\tSelector: &metav1.LabelSelector{\n\t\t\t\t\t\t\tMatchLabels: map[string]string{\"deployment\": \"test\" + \"-deployment\"},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tTemplate: corev1.PodTemplateSpec{\n\t\t\t\t\t\t\tObjectMeta: metav1.ObjectMeta{Labels: map[string]string{\"deployment\": \"test\" + \"-deployment\"}},\n\t\t\t\t\t\t\tSpec: corev1.PodSpec{\n\t\t\t\t\t\t\t\tContainers: []corev1.Container{\n\t\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\t\tName: \"nginx\",\n\t\t\t\t\t\t\t\t\t\tImage: u.Host+\"/nginx:latest\",\n\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\tfor _, test := range tests {\n\t\tt.Run(test.title, func(t *testing.T) {\n\t\t\t//Put mock objects to fake 
client\n\t\t\tfakeClientBuilder.WithObjects(test.objects...)\n\t\t\t//Set fake client to reconciler\n\t\t\treconc.client = fakeClientBuilder.Build()\n\n\t\t\tfor _, o := range test.objects {\n\t\t\t\tkind := \"\"\n\n\t\t\t\tif _, isDeployment := o.(*appsv1.Deployment); isDeployment {\n\t\t\t\t\tkind = \"Deployment\"\n\t\t\t\t}else {\n\t\t\t\t\tkind = \"DaemonSet\"\n\t\t\t\t}\n\n\t\t\t\tr := reconcile.Request{NamespacedName: types.NamespacedName{\n\t\t\t\t\tNamespace: o.GetNamespace(),\n\t\t\t\t\tName: fmt.Sprintf(\"%s:%s\", kind, o.GetName()),\n\t\t\t\t}}\n\t\t\t\t_, e := reconc.Reconcile(context.Background(), r)\n\t\t\t\trequire.Nil(t, e)\n\n\t\t\t\t//Checking if reconciled object has the right image\n\t\t\t\tkey := types.NamespacedName{\n\t\t\t\t\tName: o.GetName(),\n\t\t\t\t\tNamespace: o.GetNamespace(),\n\t\t\t\t}\n\t\t\t\tswitch kind {\n\t\t\t\tcase \"Deployment\":\n\t\t\t\t\tdp := appsv1.Deployment{}\n\t\t\t\t\treconc.client.Get(context.Background(),key,&dp)\n\t\t\t\t\trequire.Equal(t,dp.Spec.Template.Spec.Containers[0].Image,test.expetedImage)\n\n\t\t\t\tcase \"DaemonSet\":\n\t\t\t\t\tds := appsv1.DaemonSet{}\n\t\t\t\t\treconc.client.Get(context.Background(),key,&ds)\n\t\t\t\t\trequire.Equal(t,ds.Spec.Template.Spec.Containers[0].Image,test.expetedImage)\n\t\t\t\t}\n\t\t\t}\n\t\t})\n\t}\n}", "func (mock *MultiClusterAppInterfaceMock) ControllerCalls() []struct {\n} {\n\tvar calls []struct {\n\t}\n\tlockMultiClusterAppInterfaceMockController.RLock()\n\tcalls = mock.calls.Controller\n\tlockMultiClusterAppInterfaceMockController.RUnlock()\n\treturn calls\n}", "func (m *kubeGenericRuntimeManager) computePodActions(ctx context.Context, pod *v1.Pod, podStatus *kubecontainer.PodStatus) podActions {\n\tklog.V(5).InfoS(\"Syncing Pod\", \"pod\", klog.KObj(pod))\n\n\tcreatePodSandbox, attempt, sandboxID := runtimeutil.PodSandboxChanged(pod, podStatus)\n\tchanges := podActions{\n\t\tKillPod: createPodSandbox,\n\t\tCreateSandbox: createPodSandbox,\n\t\tSandboxID: 
sandboxID,\n\t\tAttempt: attempt,\n\t\tContainersToStart: []int{},\n\t\tContainersToKill: make(map[kubecontainer.ContainerID]containerToKillInfo),\n\t}\n\n\t// If we need to (re-)create the pod sandbox, everything will need to be\n\t// killed and recreated, and init containers should be purged.\n\tif createPodSandbox {\n\t\tif !shouldRestartOnFailure(pod) && attempt != 0 && len(podStatus.ContainerStatuses) != 0 {\n\t\t\t// Should not restart the pod, just return.\n\t\t\t// we should not create a sandbox, and just kill the pod if it is already done.\n\t\t\t// if all containers are done and should not be started, there is no need to create a new sandbox.\n\t\t\t// this stops confusing logs on pods whose containers all have exit codes, but we recreate a sandbox before terminating it.\n\t\t\t//\n\t\t\t// If ContainerStatuses is empty, we assume that we've never\n\t\t\t// successfully created any containers. In this case, we should\n\t\t\t// retry creating the sandbox.\n\t\t\tchanges.CreateSandbox = false\n\t\t\treturn changes\n\t\t}\n\n\t\t// Get the containers to start, excluding the ones that succeeded if RestartPolicy is OnFailure.\n\t\tvar containersToStart []int\n\t\tfor idx, c := range pod.Spec.Containers {\n\t\t\tif pod.Spec.RestartPolicy == v1.RestartPolicyOnFailure && containerSucceeded(&c, podStatus) {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tcontainersToStart = append(containersToStart, idx)\n\t\t}\n\n\t\t// If there is any regular container, it means all init containers have\n\t\t// been initialized.\n\t\thasInitialized := hasAnyRegularContainerCreated(pod, podStatus)\n\t\t// We should not create a sandbox, and just kill the pod if initialization\n\t\t// is done and there is no container to start.\n\t\tif hasInitialized && len(containersToStart) == 0 {\n\t\t\tchanges.CreateSandbox = false\n\t\t\treturn changes\n\t\t}\n\n\t\t// If we are creating a pod sandbox, we should restart from the initial\n\t\t// state.\n\t\tif len(pod.Spec.InitContainers) != 0 {\n\t\t\t// 
Pod has init containers, return the first one.\n\t\t\tchanges.InitContainersToStart = []int{0}\n\t\t\treturn changes\n\t\t}\n\t\tchanges.ContainersToStart = containersToStart\n\t\treturn changes\n\t}\n\n\t// Ephemeral containers may be started even if initialization is not yet complete.\n\tfor i := range pod.Spec.EphemeralContainers {\n\t\tc := (*v1.Container)(&pod.Spec.EphemeralContainers[i].EphemeralContainerCommon)\n\n\t\t// Ephemeral Containers are never restarted\n\t\tif podStatus.FindContainerStatusByName(c.Name) == nil {\n\t\t\tchanges.EphemeralContainersToStart = append(changes.EphemeralContainersToStart, i)\n\t\t}\n\t}\n\n\thasInitialized := m.computeInitContainerActions(pod, podStatus, &changes)\n\tif changes.KillPod || !hasInitialized {\n\t\t// Initialization failed or still in progress. Skip inspecting non-init\n\t\t// containers.\n\t\treturn changes\n\t}\n\n\tif isInPlacePodVerticalScalingAllowed(pod) {\n\t\tchanges.ContainersToUpdate = make(map[v1.ResourceName][]containerToUpdateInfo)\n\t\tlatestPodStatus, err := m.GetPodStatus(ctx, podStatus.ID, pod.Name, pod.Namespace)\n\t\tif err == nil {\n\t\t\tpodStatus = latestPodStatus\n\t\t}\n\t}\n\n\t// Number of running containers to keep.\n\tkeepCount := 0\n\t// check the status of containers.\n\tfor idx, container := range pod.Spec.Containers {\n\t\tcontainerStatus := podStatus.FindContainerStatusByName(container.Name)\n\n\t\t// Call internal container post-stop lifecycle hook for any non-running container so that any\n\t\t// allocated cpus are released immediately. 
If the container is restarted, cpus will be re-allocated\n\t\t// to it.\n\t\tif containerStatus != nil && containerStatus.State != kubecontainer.ContainerStateRunning {\n\t\t\tif err := m.internalLifecycle.PostStopContainer(containerStatus.ID.ID); err != nil {\n\t\t\t\tklog.ErrorS(err, \"Internal container post-stop lifecycle hook failed for container in pod with error\",\n\t\t\t\t\t\"containerName\", container.Name, \"pod\", klog.KObj(pod))\n\t\t\t}\n\t\t}\n\n\t\t// If container does not exist, or is not running, check whether we\n\t\t// need to restart it.\n\t\tif containerStatus == nil || containerStatus.State != kubecontainer.ContainerStateRunning {\n\t\t\tif kubecontainer.ShouldContainerBeRestarted(&container, pod, podStatus) {\n\t\t\t\tklog.V(3).InfoS(\"Container of pod is not in the desired state and shall be started\", \"containerName\", container.Name, \"pod\", klog.KObj(pod))\n\t\t\t\tchanges.ContainersToStart = append(changes.ContainersToStart, idx)\n\t\t\t\tif containerStatus != nil && containerStatus.State == kubecontainer.ContainerStateUnknown {\n\t\t\t\t\t// If container is in unknown state, we don't know whether it\n\t\t\t\t\t// is actually running or not, always try killing it before\n\t\t\t\t\t// restart to avoid having 2 running instances of the same container.\n\t\t\t\t\tchanges.ContainersToKill[containerStatus.ID] = containerToKillInfo{\n\t\t\t\t\t\tname: containerStatus.Name,\n\t\t\t\t\t\tcontainer: &pod.Spec.Containers[idx],\n\t\t\t\t\t\tmessage: fmt.Sprintf(\"Container is in %q state, try killing it before restart\",\n\t\t\t\t\t\t\tcontainerStatus.State),\n\t\t\t\t\t\treason: reasonUnknown,\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t\tcontinue\n\t\t}\n\t\t// The container is running, but kill the container if any of the following condition is met.\n\t\tvar message string\n\t\tvar reason containerKillReason\n\t\trestart := shouldRestartOnFailure(pod)\n\t\t// Do not restart if only the Resources field has changed with InPlacePodVerticalScaling 
enabled\n\t\tif _, _, changed := containerChanged(&container, containerStatus); changed &&\n\t\t\t(!isInPlacePodVerticalScalingAllowed(pod) ||\n\t\t\t\tkubecontainer.HashContainerWithoutResources(&container) != containerStatus.HashWithoutResources) {\n\t\t\tmessage = fmt.Sprintf(\"Container %s definition changed\", container.Name)\n\t\t\t// Restart regardless of the restart policy because the container\n\t\t\t// spec changed.\n\t\t\trestart = true\n\t\t} else if liveness, found := m.livenessManager.Get(containerStatus.ID); found && liveness == proberesults.Failure {\n\t\t\t// If the container failed the liveness probe, we should kill it.\n\t\t\tmessage = fmt.Sprintf(\"Container %s failed liveness probe\", container.Name)\n\t\t\treason = reasonLivenessProbe\n\t\t} else if startup, found := m.startupManager.Get(containerStatus.ID); found && startup == proberesults.Failure {\n\t\t\t// If the container failed the startup probe, we should kill it.\n\t\t\tmessage = fmt.Sprintf(\"Container %s failed startup probe\", container.Name)\n\t\t\treason = reasonStartupProbe\n\t\t} else if isInPlacePodVerticalScalingAllowed(pod) && !m.computePodResizeAction(pod, idx, containerStatus, &changes) {\n\t\t\t// computePodResizeAction updates 'changes' if resize policy requires restarting this container\n\t\t\tcontinue\n\t\t} else {\n\t\t\t// Keep the container.\n\t\t\tkeepCount++\n\t\t\tcontinue\n\t\t}\n\n\t\t// We need to kill the container, but if we also want to restart the\n\t\t// container afterwards, make the intent clear in the message. 
Also do\n\t\t// not kill the entire pod since we expect container to be running eventually.\n\t\tif restart {\n\t\t\tmessage = fmt.Sprintf(\"%s, will be restarted\", message)\n\t\t\tchanges.ContainersToStart = append(changes.ContainersToStart, idx)\n\t\t}\n\n\t\tchanges.ContainersToKill[containerStatus.ID] = containerToKillInfo{\n\t\t\tname: containerStatus.Name,\n\t\t\tcontainer: &pod.Spec.Containers[idx],\n\t\t\tmessage: message,\n\t\t\treason: reason,\n\t\t}\n\t\tklog.V(2).InfoS(\"Message for Container of pod\", \"containerName\", container.Name, \"containerStatusID\", containerStatus.ID, \"pod\", klog.KObj(pod), \"containerMessage\", message)\n\t}\n\n\tif keepCount == 0 && len(changes.ContainersToStart) == 0 {\n\t\tchanges.KillPod = true\n\t\t// To prevent the restartable init containers to keep pod alive, we should\n\t\t// not restart them.\n\t\tchanges.InitContainersToStart = nil\n\t}\n\n\treturn changes\n}", "func (t *JobUpgradeTest) Test(f *framework.Framework, done <-chan struct{}, upgrade UpgradeType) {\n\t<-done\n\tBy(\"Ensuring active pods == parallelism\")\n\trunning, err := framework.CheckForAllJobPodsRunning(f.ClientSet, t.namespace, t.job.Name, 2)\n\tExpect(err).NotTo(HaveOccurred())\n\tExpect(running).To(BeTrue())\n}", "func checkPods(podClient v1.CoreV1Interface, logger *log.Logger, filters ...PodPredicate) (bool, error) {\n\tlogger = logging.CreateNewStdLoggerOrUseExistingLogger(logger)\n\n\tlogger.Print(\"Checking that all Pods are running or completed...\")\n\n\tlistOpts := metav1.ListOptions{}\n\tlist, err := podClient.Pods(metav1.NamespaceAll).List(context.TODO(), listOpts)\n\tif err != nil {\n\t\treturn false, fmt.Errorf(\"error getting pod list: %v\", err)\n\t}\n\n\tif len(list.Items) == 0 {\n\t\treturn false, fmt.Errorf(\"pod list is empty. 
this should NOT happen\")\n\t}\n\n\tpods := filterPods(list, filters...)\n\n\tlogger.Printf(\"%v pods are currently not running or complete:\", len(pods.Items))\n\tfor _, pod := range pods.Items {\n\t\tif pod.Status.Phase != kubev1.PodPending {\n\t\t\treturn false, fmt.Errorf(\"Pod %s errored: %s - %s\", pod.GetName(), pod.Status.Reason, pod.Status.Message)\n\t\t}\n\t\tlogger.Printf(\"%s is not ready. Phase: %s, Message: %s, Reason: %s\", pod.Name, pod.Status.Phase, pod.Status.Message, pod.Status.Reason)\n\t}\n\n\treturn len(pods.Items) > 0, nil\n}", "func (d *deploymentTester) markUpdatedPodsReady(wg *sync.WaitGroup) {\n\tdefer wg.Done()\n\n\tns := d.deployment.Namespace\n\terr := wait.PollImmediate(pollInterval, pollTimeout, func() (bool, error) {\n\t\t// We're done when the deployment is complete\n\t\tif completed, err := d.deploymentComplete(); err != nil {\n\t\t\treturn false, err\n\t\t} else if completed {\n\t\t\treturn true, nil\n\t\t}\n\t\t// Otherwise, mark remaining pods as ready\n\t\tpods, err := d.listUpdatedPods()\n\t\tif err != nil {\n\t\t\td.t.Log(err)\n\t\t\treturn false, nil\n\t\t}\n\t\td.t.Logf(\"%d/%d of deployment pods are created\", len(pods), *d.deployment.Spec.Replicas)\n\t\tfor i := range pods {\n\t\t\tpod := pods[i]\n\t\t\tif podutil.IsPodReady(&pod) {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif err = markPodReady(d.c, ns, &pod); err != nil {\n\t\t\t\td.t.Logf(\"failed to update Deployment pod %s, will retry later: %v\", pod.Name, err)\n\t\t\t}\n\t\t}\n\t\treturn false, nil\n\t})\n\tif err != nil {\n\t\td.t.Errorf(\"failed to mark updated Deployment pods to ready: %v\", err)\n\t}\n}", "func (c *InstallerController) manageInstallationPods(ctx context.Context, operatorSpec *operatorv1.StaticPodOperatorSpec, originalOperatorStatus *operatorv1.StaticPodOperatorStatus) (bool, time.Duration, error) {\n\toperatorStatus := originalOperatorStatus.DeepCopy()\n\n\tif len(operatorStatus.NodeStatuses) == 0 {\n\t\treturn false, 0, nil\n\t}\n\n\t// start with 
node which is in worst state (instead of terminating healthy pods first)\n\tstartNode, nodeChoiceReason, err := nodeToStartRevisionWith(ctx, c.getStaticPodState, operatorStatus.NodeStatuses)\n\tif err != nil {\n\t\treturn true, 0, err\n\t}\n\n\t// determine the amount of time to delay before creating the next installer pod. We delay to avoid an LB outage (see godoc on minReadySeconds)\n\trequeueAfter := c.timeToWaitBeforeInstallingNextPod(ctx, operatorStatus.NodeStatuses)\n\tif requeueAfter > 0 {\n\t\treturn true, requeueAfter, nil\n\t}\n\n\tfor l := 0; l < len(operatorStatus.NodeStatuses); l++ {\n\t\ti := (startNode + l) % len(operatorStatus.NodeStatuses)\n\n\t\tvar currNodeState *operatorv1.NodeStatus\n\t\tvar prevNodeState *operatorv1.NodeStatus\n\t\tcurrNodeState = &operatorStatus.NodeStatuses[i]\n\t\tif l > 0 {\n\t\t\tprev := (startNode + l - 1) % len(operatorStatus.NodeStatuses)\n\t\t\tprevNodeState = &operatorStatus.NodeStatuses[prev]\n\t\t\tnodeChoiceReason = fmt.Sprintf(\"node %s is the next node in the line\", currNodeState.NodeName)\n\t\t}\n\n\t\t// if we are in a transition, check to see whether our installer pod completed\n\t\tif currNodeState.TargetRevision > currNodeState.CurrentRevision {\n\t\t\tif operatorStatus.LatestAvailableRevision > currNodeState.TargetRevision {\n\t\t\t\t// no backoff if new revision is pending\n\t\t\t} else {\n\t\t\t\tif currNodeState.LastFailedRevision == currNodeState.TargetRevision && currNodeState.LastFailedTime != nil && !currNodeState.LastFailedTime.IsZero() {\n\t\t\t\t\tvar delay time.Duration\n\t\t\t\t\tif currNodeState.LastFailedReason == nodeStatusOperandFailedFallbackReason {\n\t\t\t\t\t\tdelay = c.fallbackBackOff(currNodeState.LastFallbackCount)\n\t\t\t\t\t} else {\n\t\t\t\t\t\tdelay = c.installerBackOff(currNodeState.LastFailedCount)\n\t\t\t\t\t}\n\t\t\t\t\tearliestRetry := currNodeState.LastFailedTime.Add(delay)\n\t\t\t\t\tif !c.now().After(earliestRetry) {\n\t\t\t\t\t\tklog.V(4).Infof(\"Backing off node %s 
installer retry %d until %v\", currNodeState.NodeName, currNodeState.LastFailedCount+1, earliestRetry)\n\t\t\t\t\t\treturn true, earliestRetry.Sub(c.now()), nil\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\tif err := c.ensureInstallerPod(ctx, operatorSpec, currNodeState); err != nil {\n\t\t\t\t\tc.eventRecorder.Warningf(\"InstallerPodFailed\", \"Failed to create installer pod for revision %d count %d on node %q: %v\",\n\t\t\t\t\t\tcurrNodeState.TargetRevision, currNodeState.LastFailedCount, currNodeState.NodeName, err)\n\t\t\t\t\t// if a newer revision is pending, continue, so we retry later with the latest available revision\n\t\t\t\t\tif !(operatorStatus.LatestAvailableRevision > currNodeState.TargetRevision) {\n\t\t\t\t\t\treturn true, 0, err\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tnewCurrNodeState, _, reason, err := c.newNodeStateForInstallInProgress(ctx, currNodeState, operatorStatus.LatestAvailableRevision)\n\t\t\tif err != nil {\n\t\t\t\treturn true, 0, err\n\t\t\t}\n\t\t\tif newCurrNodeState.LastFailedReason == nodeStatusInstalledFailedReason && newCurrNodeState.LastFailedCount != currNodeState.LastFailedCount {\n\t\t\t\tklog.Infof(\"Will retry %q for revision %d for the %s time because %s\", currNodeState.NodeName, currNodeState.TargetRevision, nthTimeOr1st(newCurrNodeState.LastFailedCount), reason)\n\t\t\t}\n\t\t\tif newCurrNodeState.LastFailedReason == nodeStatusOperandFailedFallbackReason && newCurrNodeState.LastFallbackCount != currNodeState.LastFallbackCount {\n\t\t\t\tklog.Infof(\"Will fallback %q for revision %d to last-known-good revision for the %s time because %s\", currNodeState.NodeName, currNodeState.TargetRevision, nthTimeOr1st(newCurrNodeState.LastFallbackCount), reason)\n\t\t\t}\n\n\t\t\t// if we make a change to this status, we want to write it out to the API before we commence work on the next node.\n\t\t\t// it's an extra write/read, but it makes the state debuggable from outside this process\n\t\t\tif 
!equality.Semantic.DeepEqual(newCurrNodeState, currNodeState) {\n\t\t\t\tklog.Infof(\"%q moving to %v because %s\", currNodeState.NodeName, spew.Sdump(*newCurrNodeState), reason)\n\t\t\t\t_, updated, updateError := v1helpers.UpdateStaticPodStatus(ctx, c.operatorClient, setNodeStatusFn(newCurrNodeState), setAvailableProgressingNodeInstallerFailingConditions)\n\t\t\t\tif updateError != nil {\n\t\t\t\t\treturn false, 0, updateError\n\t\t\t\t} else if updated && currNodeState.CurrentRevision != newCurrNodeState.CurrentRevision {\n\t\t\t\t\tc.eventRecorder.Eventf(\"NodeCurrentRevisionChanged\", \"Updated node %q from revision %d to %d because %s\", currNodeState.NodeName,\n\t\t\t\t\t\tcurrNodeState.CurrentRevision, newCurrNodeState.CurrentRevision, reason)\n\t\t\t\t}\n\n\t\t\t\treturn false, 0, nil // no requeue because UpdateStaticPodStatus triggers an external event anyway\n\t\t\t}\n\n\t\t\tklog.V(2).Infof(\"%q is in transition to %d, but has not made progress because %s\", currNodeState.NodeName, currNodeState.TargetRevision, reasonWithBlame(reason))\n\t\t\treturn false, 0, nil\n\t\t}\n\n\t\t// here we are not in transition, i.e. 
there is no install pod running\n\n\t\trevisionToStart := c.getRevisionToStart(currNodeState, prevNodeState, operatorStatus)\n\t\tif revisionToStart == 0 {\n\t\t\tklog.V(4).Infof(\"%s, but node %s does not need update\", nodeChoiceReason, currNodeState.NodeName)\n\t\t\tcontinue\n\t\t}\n\n\t\tklog.Infof(\"%s and needs new revision %d\", nodeChoiceReason, revisionToStart)\n\n\t\tnewCurrNodeState := currNodeState.DeepCopy()\n\t\tnewCurrNodeState.TargetRevision = revisionToStart\n\n\t\t// if we make a change to this status, we want to write it out to the API before we commence work on the next node.\n\t\t// it's an extra write/read, but it makes the state debuggable from outside this process\n\t\tif !equality.Semantic.DeepEqual(newCurrNodeState, currNodeState) {\n\t\t\tklog.Infof(\"%q moving to %v\", currNodeState.NodeName, spew.Sdump(*newCurrNodeState))\n\t\t\tif _, updated, updateError := v1helpers.UpdateStaticPodStatus(ctx, c.operatorClient, setNodeStatusFn(newCurrNodeState), setAvailableProgressingNodeInstallerFailingConditions); updateError != nil {\n\t\t\t\treturn false, 0, updateError\n\t\t\t} else if updated && currNodeState.TargetRevision != newCurrNodeState.TargetRevision && newCurrNodeState.TargetRevision != 0 {\n\t\t\t\tc.eventRecorder.Eventf(\"NodeTargetRevisionChanged\", \"Updating node %q from revision %d to %d because %s\", currNodeState.NodeName,\n\t\t\t\t\tcurrNodeState.CurrentRevision, newCurrNodeState.TargetRevision, nodeChoiceReason)\n\t\t\t}\n\n\t\t\treturn false, 0, nil // no requeue because UpdateStaticPodStatus triggers an external event anyway\n\t\t}\n\t\tbreak\n\t}\n\n\treturn false, 0, nil\n}", "func TestSidecarsAndLabelsCheckOneValidPod(t *testing.T) {\n\tfakePodList := []v1.Pod{\n\t\tbuildPodWith(\"myPodWithSidecar\", true, true, true),\n\t}\n\n\tchecker := PodChecker{Pods: fakePodList}\n\tvalidations := checker.Check()\n\n\tassert.Equal(t, 1, len(validations))\n\tvalidation, ok := validations[models.IstioValidationKey{\"pod\", 
\"myPodWithSidecar\"}]\n\tassert.True(t, ok)\n\tassert.True(t, validation.Valid)\n\tassert.Equal(t, 0, len(validation.Checks))\n}", "func containerGCTest(f *framework.Framework, test testRun) {\n\tvar runtime internalapi.RuntimeService\n\tginkgo.BeforeEach(func() {\n\t\tvar err error\n\t\truntime, _, err = getCRIClient()\n\t\tframework.ExpectNoError(err)\n\t})\n\tfor _, pod := range test.testPods {\n\t\t// Initialize the getContainerNames function to use CRI runtime client.\n\t\tpod.getContainerNames = func() ([]string, error) {\n\t\t\trelevantContainers := []string{}\n\t\t\tcontainers, err := runtime.ListContainers(context.Background(), &runtimeapi.ContainerFilter{\n\t\t\t\tLabelSelector: map[string]string{\n\t\t\t\t\ttypes.KubernetesPodNameLabel: pod.podName,\n\t\t\t\t\ttypes.KubernetesPodNamespaceLabel: f.Namespace.Name,\n\t\t\t\t},\n\t\t\t})\n\t\t\tif err != nil {\n\t\t\t\treturn relevantContainers, err\n\t\t\t}\n\t\t\tfor _, container := range containers {\n\t\t\t\trelevantContainers = append(relevantContainers, container.Labels[types.KubernetesContainerNameLabel])\n\t\t\t}\n\t\t\treturn relevantContainers, nil\n\t\t}\n\t}\n\n\tginkgo.Context(fmt.Sprintf(\"Garbage Collection Test: %s\", test.testName), func() {\n\t\tginkgo.BeforeEach(func(ctx context.Context) {\n\t\t\trealPods := getPods(test.testPods)\n\t\t\te2epod.NewPodClient(f).CreateBatch(ctx, realPods)\n\t\t\tginkgo.By(\"Making sure all containers restart the specified number of times\")\n\t\t\tgomega.Eventually(ctx, func(ctx context.Context) error {\n\t\t\t\tfor _, podSpec := range test.testPods {\n\t\t\t\t\terr := verifyPodRestartCount(ctx, f, podSpec.podName, podSpec.numContainers, podSpec.restartCount)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\treturn err\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\treturn nil\n\t\t\t}, setupDuration, runtimePollInterval).Should(gomega.BeNil())\n\t\t})\n\n\t\tginkgo.It(\"Should eventually garbage collect containers when we exceed the number of dead containers per container\", 
func(ctx context.Context) {\n\t\t\ttotalContainers := 0\n\t\t\tfor _, pod := range test.testPods {\n\t\t\t\ttotalContainers += pod.numContainers*2 + 1\n\t\t\t}\n\t\t\tgomega.Eventually(ctx, func() error {\n\t\t\t\ttotal := 0\n\t\t\t\tfor _, pod := range test.testPods {\n\t\t\t\t\tcontainerNames, err := pod.getContainerNames()\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\treturn err\n\t\t\t\t\t}\n\t\t\t\t\ttotal += len(containerNames)\n\t\t\t\t\t// Check maxPerPodContainer for each container in the pod\n\t\t\t\t\tfor i := 0; i < pod.numContainers; i++ {\n\t\t\t\t\t\tcontainerCount := 0\n\t\t\t\t\t\tfor _, containerName := range containerNames {\n\t\t\t\t\t\t\tif containerName == pod.getContainerName(i) {\n\t\t\t\t\t\t\t\tcontainerCount++\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\t\t\tif containerCount > maxPerPodContainer+1 {\n\t\t\t\t\t\t\treturn fmt.Errorf(\"expected number of copies of container: %s, to be <= maxPerPodContainer: %d; list of containers: %v\",\n\t\t\t\t\t\t\t\tpod.getContainerName(i), maxPerPodContainer, containerNames)\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\t//Check maxTotalContainers. 
Currently, the default is -1, so this will never happen until we can configure maxTotalContainers\n\t\t\t\tif maxTotalContainers > 0 && totalContainers <= maxTotalContainers && total > maxTotalContainers {\n\t\t\t\t\treturn fmt.Errorf(\"expected total number of containers: %v, to be <= maxTotalContainers: %v\", total, maxTotalContainers)\n\t\t\t\t}\n\t\t\t\treturn nil\n\t\t\t}, garbageCollectDuration, runtimePollInterval).Should(gomega.BeNil())\n\n\t\t\tif maxPerPodContainer >= 2 && maxTotalContainers < 0 { // make sure constraints wouldn't make us gc old containers\n\t\t\t\tginkgo.By(\"Making sure the kubelet consistently keeps around an extra copy of each container.\")\n\t\t\t\tgomega.Consistently(ctx, func() error {\n\t\t\t\t\tfor _, pod := range test.testPods {\n\t\t\t\t\t\tcontainerNames, err := pod.getContainerNames()\n\t\t\t\t\t\tif err != nil {\n\t\t\t\t\t\t\treturn err\n\t\t\t\t\t\t}\n\t\t\t\t\t\tfor i := 0; i < pod.numContainers; i++ {\n\t\t\t\t\t\t\tcontainerCount := 0\n\t\t\t\t\t\t\tfor _, containerName := range containerNames {\n\t\t\t\t\t\t\t\tif containerName == pod.getContainerName(i) {\n\t\t\t\t\t\t\t\t\tcontainerCount++\n\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\tif pod.restartCount > 0 && containerCount < maxPerPodContainer+1 {\n\t\t\t\t\t\t\t\treturn fmt.Errorf(\"expected pod %v to have extra copies of old containers\", pod.podName)\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t\treturn nil\n\t\t\t\t}, garbageCollectDuration, runtimePollInterval).Should(gomega.BeNil())\n\t\t\t}\n\t\t})\n\n\t\tginkgo.AfterEach(func(ctx context.Context) {\n\t\t\tfor _, pod := range test.testPods {\n\t\t\t\tginkgo.By(fmt.Sprintf(\"Deleting Pod %v\", pod.podName))\n\t\t\t\te2epod.NewPodClient(f).DeleteSync(ctx, pod.podName, metav1.DeleteOptions{}, e2epod.DefaultPodDeletionTimeout)\n\t\t\t}\n\n\t\t\tginkgo.By(\"Making sure all containers get cleaned up\")\n\t\t\tgomega.Eventually(ctx, func() error {\n\t\t\t\tfor _, pod := range test.testPods 
{\n\t\t\t\t\tcontainerNames, err := pod.getContainerNames()\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\treturn err\n\t\t\t\t\t}\n\t\t\t\t\tif len(containerNames) > 0 {\n\t\t\t\t\t\treturn fmt.Errorf(\"%v containers still remain\", containerNames)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\treturn nil\n\t\t\t}, garbageCollectDuration, runtimePollInterval).Should(gomega.BeNil())\n\n\t\t\tif ginkgo.CurrentSpecReport().Failed() && framework.TestContext.DumpLogsOnFailure {\n\t\t\t\tlogNodeEvents(ctx, f)\n\t\t\t\tlogPodEvents(ctx, f)\n\t\t\t}\n\t\t})\n\t})\n}", "func (mock *PersistentVolumeClaimInterfaceMock) ControllerCalls() []struct {\n} {\n\tvar calls []struct {\n\t}\n\tlockPersistentVolumeClaimInterfaceMockController.RLock()\n\tcalls = mock.calls.Controller\n\tlockPersistentVolumeClaimInterfaceMockController.RUnlock()\n\treturn calls\n}", "func TestGetConcurrentAPIEndpoints(t *testing.T) {\n\tfor _, tc := range []struct {\n\t\tname string\n\t\tddURL, eventsDDURL, apiKey string\n\t\tadditionalEndpoints map[string][]string\n\t\tadditionalEventsEndpoints map[string][]string\n\t\texpectedEndpoints []apicfg.Endpoint\n\t\texpectedEventsEndpoints []apicfg.Endpoint\n\t}{\n\t\t{\n\t\t\tname: \"default\",\n\t\t\tapiKey: \"test\",\n\t\t\texpectedEndpoints: []apicfg.Endpoint{\n\t\t\t\t{\n\t\t\t\t\tAPIKey: \"test\",\n\t\t\t\t\tEndpoint: mkurl(config.DefaultProcessEndpoint),\n\t\t\t\t},\n\t\t\t},\n\t\t\texpectedEventsEndpoints: []apicfg.Endpoint{\n\t\t\t\t{\n\t\t\t\t\tAPIKey: \"test\",\n\t\t\t\t\tEndpoint: mkurl(config.DefaultProcessEventsEndpoint),\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"set only process endpoint\",\n\t\t\tddURL: \"https://process.datadoghq.eu\",\n\t\t\tapiKey: \"test\",\n\t\t\texpectedEndpoints: []apicfg.Endpoint{\n\t\t\t\t{\n\t\t\t\t\tAPIKey: \"test\",\n\t\t\t\t\tEndpoint: mkurl(\"https://process.datadoghq.eu\"),\n\t\t\t\t},\n\t\t\t},\n\t\t\texpectedEventsEndpoints: []apicfg.Endpoint{\n\t\t\t\t{\n\t\t\t\t\tAPIKey: \"test\",\n\t\t\t\t\tEndpoint: 
mkurl(config.DefaultProcessEventsEndpoint),\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"set only process-events endpoint\",\n\t\t\teventsDDURL: \"https://process-events.datadoghq.eu\",\n\t\t\tapiKey: \"test\",\n\t\t\texpectedEndpoints: []apicfg.Endpoint{\n\t\t\t\t{\n\t\t\t\t\tAPIKey: \"test\",\n\t\t\t\t\tEndpoint: mkurl(config.DefaultProcessEndpoint),\n\t\t\t\t},\n\t\t\t},\n\t\t\texpectedEventsEndpoints: []apicfg.Endpoint{\n\t\t\t\t{\n\t\t\t\t\tAPIKey: \"test\",\n\t\t\t\t\tEndpoint: mkurl(\"https://process-events.datadoghq.eu\"),\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"multiple eps\",\n\t\t\tapiKey: \"test\",\n\t\t\tadditionalEndpoints: map[string][]string{\n\t\t\t\t\"https://mock.datadoghq.com\": {\n\t\t\t\t\t\"key1\",\n\t\t\t\t\t\"key2\",\n\t\t\t\t},\n\t\t\t\t\"https://mock2.datadoghq.com\": {\n\t\t\t\t\t\"key3\",\n\t\t\t\t},\n\t\t\t},\n\t\t\tadditionalEventsEndpoints: map[string][]string{\n\t\t\t\t\"https://mock-events.datadoghq.com\": {\n\t\t\t\t\t\"key2\",\n\t\t\t\t},\n\t\t\t\t\"https://mock2-events.datadoghq.com\": {\n\t\t\t\t\t\"key3\",\n\t\t\t\t},\n\t\t\t},\n\t\t\texpectedEndpoints: []apicfg.Endpoint{\n\t\t\t\t{\n\t\t\t\t\tEndpoint: mkurl(config.DefaultProcessEndpoint),\n\t\t\t\t\tAPIKey: \"test\",\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tEndpoint: mkurl(\"https://mock.datadoghq.com\"),\n\t\t\t\t\tAPIKey: \"key1\",\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tEndpoint: mkurl(\"https://mock.datadoghq.com\"),\n\t\t\t\t\tAPIKey: \"key2\",\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tEndpoint: mkurl(\"https://mock2.datadoghq.com\"),\n\t\t\t\t\tAPIKey: \"key3\",\n\t\t\t\t},\n\t\t\t},\n\t\t\texpectedEventsEndpoints: []apicfg.Endpoint{\n\t\t\t\t{\n\t\t\t\t\tEndpoint: mkurl(config.DefaultProcessEventsEndpoint),\n\t\t\t\t\tAPIKey: \"test\",\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tEndpoint: mkurl(\"https://mock-events.datadoghq.com\"),\n\t\t\t\t\tAPIKey: \"key2\",\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tEndpoint: mkurl(\"https://mock2-events.datadoghq.com\"),\n\t\t\t\t\tAPIKey: 
\"key3\",\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t} {\n\t\tt.Run(tc.name, func(t *testing.T) {\n\t\t\tcfg := config.Mock(t)\n\t\t\tcfg.Set(\"api_key\", tc.apiKey)\n\t\t\tif tc.ddURL != \"\" {\n\t\t\t\tcfg.Set(\"process_config.process_dd_url\", tc.ddURL)\n\t\t\t}\n\n\t\t\tif tc.eventsDDURL != \"\" {\n\t\t\t\tcfg.Set(\"process_config.events_dd_url\", tc.eventsDDURL)\n\t\t\t}\n\n\t\t\tif tc.additionalEndpoints != nil {\n\t\t\t\tcfg.Set(\"process_config.additional_endpoints\", tc.additionalEndpoints)\n\t\t\t}\n\n\t\t\tif tc.additionalEventsEndpoints != nil {\n\t\t\t\tcfg.Set(\"process_config.events_additional_endpoints\", tc.additionalEventsEndpoints)\n\t\t\t}\n\n\t\t\teps, err := endpoint.GetAPIEndpoints(cfg)\n\t\t\tassert.NoError(t, err)\n\t\t\tassert.ElementsMatch(t, tc.expectedEndpoints, eps)\n\n\t\t\teventsEps, err := endpoint.GetEventsAPIEndpoints(cfg)\n\t\t\tassert.NoError(t, err)\n\t\t\tassert.ElementsMatch(t, tc.expectedEventsEndpoints, eventsEps)\n\t\t})\n\t}\n}", "func TestHandle_updateOk(t *testing.T) {\n\tvar (\n\t\tconfig *deployapi.DeploymentConfig\n\t\tdeployed *kapi.ReplicationController\n\t\texistingDeployments *kapi.ReplicationControllerList\n\t)\n\n\tcontroller := &DeploymentConfigController{\n\t\tmakeDeployment: func(config *deployapi.DeploymentConfig) (*kapi.ReplicationController, error) {\n\t\t\treturn deployutil.MakeDeployment(config, api.Codec)\n\t\t},\n\t\tdeploymentClient: &deploymentClientImpl{\n\t\t\tcreateDeploymentFunc: func(namespace string, deployment *kapi.ReplicationController) (*kapi.ReplicationController, error) {\n\t\t\t\tdeployed = deployment\n\t\t\t\treturn deployment, nil\n\t\t\t},\n\t\t\tlistDeploymentsForConfigFunc: func(namespace, configName string) (*kapi.ReplicationControllerList, error) {\n\t\t\t\treturn existingDeployments, nil\n\t\t\t},\n\t\t\tupdateDeploymentFunc: func(namespace string, deployment *kapi.ReplicationController) (*kapi.ReplicationController, error) {\n\t\t\t\tt.Fatalf(\"unexpected update call with deployment 
%v\", deployment)\n\t\t\t\treturn nil, nil\n\t\t\t},\n\t\t},\n\t\trecorder: &record.FakeRecorder{},\n\t}\n\n\ttype existing struct {\n\t\tversion int\n\t\treplicas int\n\t\tstatus deployapi.DeploymentStatus\n\t}\n\n\ttype scenario struct {\n\t\tversion int\n\t\texpectedReplicas int\n\t\texisting []existing\n\t}\n\n\tscenarios := []scenario{\n\t\t{1, 1, []existing{}},\n\t\t{2, 1, []existing{\n\t\t\t{1, 1, deployapi.DeploymentStatusComplete},\n\t\t}},\n\t\t{3, 4, []existing{\n\t\t\t{1, 0, deployapi.DeploymentStatusComplete},\n\t\t\t{2, 4, deployapi.DeploymentStatusComplete},\n\t\t}},\n\t\t{3, 4, []existing{\n\t\t\t{1, 4, deployapi.DeploymentStatusComplete},\n\t\t\t{2, 1, deployapi.DeploymentStatusFailed},\n\t\t}},\n\t\t{4, 2, []existing{\n\t\t\t{1, 0, deployapi.DeploymentStatusComplete},\n\t\t\t{2, 0, deployapi.DeploymentStatusFailed},\n\t\t\t{3, 2, deployapi.DeploymentStatusComplete},\n\t\t}},\n\t\t// Scramble the order of the previous to ensure we still get it right.\n\t\t{4, 2, []existing{\n\t\t\t{2, 0, deployapi.DeploymentStatusFailed},\n\t\t\t{3, 2, deployapi.DeploymentStatusComplete},\n\t\t\t{1, 0, deployapi.DeploymentStatusComplete},\n\t\t}},\n\t}\n\n\tfor _, scenario := range scenarios {\n\t\tdeployed = nil\n\t\tconfig = deploytest.OkDeploymentConfig(scenario.version)\n\t\texistingDeployments = &kapi.ReplicationControllerList{}\n\t\tfor _, e := range scenario.existing {\n\t\t\td, _ := deployutil.MakeDeployment(deploytest.OkDeploymentConfig(e.version), api.Codec)\n\t\t\td.Spec.Replicas = e.replicas\n\t\t\td.Annotations[deployapi.DeploymentStatusAnnotation] = string(e.status)\n\t\t\texistingDeployments.Items = append(existingDeployments.Items, *d)\n\t\t}\n\t\terr := controller.Handle(config)\n\n\t\tif deployed == nil {\n\t\t\tt.Fatalf(\"expected a deployment\")\n\t\t}\n\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"unexpected error: %v\", err)\n\t\t}\n\n\t\tdesired, hasDesired := deployutil.DeploymentDesiredReplicas(deployed)\n\t\tif !hasDesired 
{\n\t\t\tt.Fatalf(\"expected desired replicas\")\n\t\t}\n\t\tif e, a := scenario.expectedReplicas, desired; e != a {\n\t\t\tt.Errorf(\"expected desired replicas %d, got %d\", e, a)\n\t\t}\n\t}\n}", "func (suite *PouchRestartSuite) TestPouchRestartMultiContainers(c *check.C) {\n\tcontainernames := []string{\"TestPouchRestartMultiContainer-1\", \"TestPouchRestartMultiContainer-2\"}\n\tfor _, name := range containernames {\n\t\tres := command.PouchRun(\"run\", \"-d\", \"--name\", name, busyboxImage, \"top\")\n\t\tdefer DelContainerForceMultyTime(c, name)\n\t\tres.Assert(c, icmd.Success)\n\t}\n\n\tres := command.PouchRun(\"restart\", \"-t\", \"1\", containernames[0], containernames[1])\n\tres.Assert(c, icmd.Success)\n\n\tif out := res.Combined(); !strings.Contains(out, containernames[0]) || !strings.Contains(out, containernames[1]) {\n\t\tc.Fatalf(\"unexpected output: %s, expected: %s\\n%s\", out, containernames[0], containernames[1])\n\t}\n}", "func (suite *PouchRestartSuite) TestPouchRestartMultiContainers(c *check.C) {\n\tcontainernames := []string{\"TestPouchRestartMultiContainer-1\", \"TestPouchRestartMultiContainer-2\"}\n\tfor _, name := range containernames {\n\t\tres := command.PouchRun(\"run\", \"-d\", \"--name\", name, busyboxImage, \"top\")\n\t\tdefer DelContainerForceMultyTime(c, name)\n\t\tres.Assert(c, icmd.Success)\n\t}\n\n\tres := command.PouchRun(\"restart\", \"-t\", \"1\", containernames[0], containernames[1])\n\tres.Assert(c, icmd.Success)\n\n\tif out := res.Combined(); !strings.Contains(out, containernames[0]) || !strings.Contains(out, containernames[1]) {\n\t\tc.Fatalf(\"unexpected output: %s, expected: %s\\n%s\", out, containernames[0], containernames[1])\n\t}\n}", "func TestCreate(t *testing.T) {\n\t// set up fake web server\n\tr := gin.Default()\n\tbuilds.Mount(r)\n\n\t// test artifacts send to callback URL\n\tr.POST(\"/callback\", func(c *gin.Context) {\n\t\tr, _, err := c.Request.FormFile(\"file\")\n\t\tif err != nil 
{\n\t\t\tt.Error(err)\n\t\t}\n\t\tres, err := testhelper.ShouldIncludeFileInTar(r, \"app\")\n\t\tif err != nil {\n\t\t\tt.Error(err)\n\t\t}\n\t\tif !res {\n\t\t\tt.Error(\"artifact should be found\")\n\t\t}\n\t})\n\n\t// run web server\n\ts := httptest.NewServer(r)\n\tdefer s.Close()\n\n\t// prepare jobqueue\n\tgo jobqueue.Wait()\n\tdefer jobqueue.Close()\n\n\t// send request\n\tbuild, err := controller_helper.Create(s.URL, \"./example/app.tar\", s.URL+\"/callback\")\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t// wait for finishing build\n\texitCode := make(chan int, 1)\n\tgo func() {\n\t\tfor {\n\t\t\tif res, err := controller_helper.Show(s.URL, build.Id); err != nil {\n\t\t\t\tt.Fatal(err)\n\t\t\t} else {\n\t\t\t\tif res.Job.Finished {\n\t\t\t\t\texitCode <- res.Job.ExitCode\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\t\t\ttime.Sleep(200 * time.Millisecond)\n\t\t}\n\t}()\n\n\t// timeout after three seconds\n\tselect {\n\tcase c := <-exitCode:\n\t\tif c != 0 {\n\t\t\tt.Fatal(c)\n\t\t}\n\tcase <-time.After(3 * time.Second):\n\t\tt.Fatal(\"the build should be finished in a few second\")\n\t}\n\n\treq, err := testhelper.Get(s.URL+\"/builds/\"+build.Id+\"/log.txt\", map[string]string{})\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tc := http.Client{}\n\tres, err := c.Do(req)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tdefer res.Body.Close()\n\n\tif b, err := ioutil.ReadAll(res.Body); err != nil {\n\t\tt.Fatal(err)\n\t} else if !strings.Contains(string(b), \"make\") {\n\t\tt.Fatal(\"example build shuold start with make command\")\n\t}\n}", "func TestSidecarsCheckNoPods(t *testing.T) {\n\tchecker := PodChecker{Pods: []v1.Pod{}}\n\tresult := checker.Check()\n\n\tassert.Equal(t, 0, len(result))\n}", "func ChaosPodStatus(experimentsDetails *types.ExperimentDetails, clients environment.ClientSets) error {\n\n\tfor count := 0; count < (experimentsDetails.Duration / experimentsDetails.Delay); count++ {\n\n\t\tchaosEngine, err := 
clients.LitmusClient.ChaosEngines(experimentsDetails.ChaosNamespace).Get(experimentsDetails.EngineName, metav1.GetOptions{})\n\t\tif err != nil {\n\t\t\treturn errors.Errorf(\"fail to get the chaosengine %v err: %v\", experimentsDetails.EngineName, err)\n\t\t}\n\t\tif len(chaosEngine.Status.Experiments) == 0 {\n\t\t\ttime.Sleep(time.Duration(experimentsDetails.Delay) * time.Second)\n\t\t\tlog.Info(\"[Status]: Experiment initializing\")\n\t\t\tif count == ((experimentsDetails.Duration / experimentsDetails.Delay) - 1) {\n\t\t\t\treturn errors.Errorf(\"Experiment pod fail to initialise, due to %v\", err)\n\t\t\t}\n\n\t\t} else if len(chaosEngine.Status.Experiments[0].ExpPod) == 0 {\n\t\t\ttime.Sleep(time.Duration(experimentsDetails.Delay) * time.Second)\n\t\t\tif count == ((experimentsDetails.Duration / experimentsDetails.Delay) - 1) {\n\t\t\t\treturn errors.Errorf(\"Experiment pod fails to create, due to %v\", err)\n\t\t\t}\n\t\t} else if chaosEngine.Status.Experiments[0].Status != \"Running\" {\n\t\t\ttime.Sleep(time.Duration(experimentsDetails.Delay) * time.Second)\n\t\t\tlog.Infof(\"[Status]: Currently, the Chaos Pod is in %v state, Please Wait...\", chaosEngine.Status.Experiments[0].Status)\n\t\t\tif count == ((experimentsDetails.Duration / experimentsDetails.Delay) - 1) {\n\t\t\t\treturn errors.Errorf(\"Experiment pod fails to get in running state, due to %v\", err)\n\t\t\t}\n\t\t} else {\n\t\t\tbreak\n\t\t}\n\t}\n\tlog.Info(\"[Status]: Chaos pod initiated successfully\")\n\treturn nil\n}", "func (mock *NamespacedCertificateInterfaceMock) ControllerCalls() []struct {\n} {\n\tvar calls []struct {\n\t}\n\tlockNamespacedCertificateInterfaceMockController.RLock()\n\tcalls = mock.calls.Controller\n\tlockNamespacedCertificateInterfaceMockController.RUnlock()\n\treturn calls\n}", "func runMultisyncTests(t *testing.T, ctx context.Context, tests []controllerTest, storageClasses []*storage.StorageClass, defaultStorageClass string) {\n\tlogger := 
klog.FromContext(ctx)\n\trun := func(t *testing.T, test controllerTest) {\n\t\tlogger.V(4).Info(\"Starting multisync test\", \"testName\", test.name)\n\n\t\t// Initialize the controller\n\t\tclient := &fake.Clientset{}\n\t\tctrl, err := newTestController(ctx, client, nil, true)\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"Test %q construct persistent volume failed: %v\", test.name, err)\n\t\t}\n\n\t\t// Inject classes into controller via a custom lister.\n\t\tindexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{})\n\t\tfor _, class := range storageClasses {\n\t\t\tindexer.Add(class)\n\t\t}\n\t\tctrl.classLister = storagelisters.NewStorageClassLister(indexer)\n\n\t\treactor := newVolumeReactor(ctx, client, ctrl, nil, nil, test.errors)\n\t\tfor _, claim := range test.initialClaims {\n\t\t\tctrl.claims.Add(claim)\n\t\t}\n\t\tfor _, volume := range test.initialVolumes {\n\t\t\tctrl.volumes.store.Add(volume)\n\t\t}\n\t\treactor.AddClaims(test.initialClaims)\n\t\treactor.AddVolumes(test.initialVolumes)\n\n\t\t// Run the tested function\n\t\terr = test.test(ctrl, reactor.VolumeReactor, test)\n\t\tif err != nil {\n\t\t\tt.Errorf(\"Test %q failed: %v\", test.name, err)\n\t\t}\n\n\t\t// Simulate any \"changed\" events and \"periodical sync\" until we reach a\n\t\t// stable state.\n\t\tfirstSync := true\n\t\tcounter := 0\n\t\tfor {\n\t\t\tcounter++\n\t\t\tlogger.V(4).Info(\"Test\", \"testName\", test.name, \"iteration\", counter)\n\n\t\t\tif counter > 100 {\n\t\t\t\tt.Errorf(\"Test %q failed: too many iterations\", test.name)\n\t\t\t\tbreak\n\t\t\t}\n\n\t\t\t// Wait for all goroutines to finish\n\t\t\treactor.waitForIdle()\n\n\t\t\tobj := reactor.PopChange(ctx)\n\t\t\tif obj == nil {\n\t\t\t\t// Nothing was changed, should we exit?\n\t\t\t\tif firstSync || reactor.GetChangeCount() > 0 {\n\t\t\t\t\t// There were some changes after the last \"periodic sync\".\n\t\t\t\t\t// Simulate \"periodic sync\" of everything (until it produces\n\t\t\t\t\t// no 
changes).\n\t\t\t\t\tfirstSync = false\n\t\t\t\t\tlogger.V(4).Info(\"Test simulating periodical sync of all claims and volumes\", \"testName\", test.name)\n\t\t\t\t\treactor.SyncAll()\n\t\t\t\t} else {\n\t\t\t\t\t// Last sync did not produce any updates, the test reached\n\t\t\t\t\t// stable state -> finish.\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\t\t\t// waiting here cools down exponential backoff\n\t\t\ttime.Sleep(600 * time.Millisecond)\n\n\t\t\t// There were some changes, process them\n\t\t\tswitch obj.(type) {\n\t\t\tcase *v1.PersistentVolumeClaim:\n\t\t\t\tclaim := obj.(*v1.PersistentVolumeClaim)\n\t\t\t\t// Simulate \"claim updated\" event\n\t\t\t\tctrl.claims.Update(claim)\n\t\t\t\terr = ctrl.syncClaim(context.TODO(), claim)\n\t\t\t\tif err != nil {\n\t\t\t\t\tif err == pvtesting.ErrVersionConflict {\n\t\t\t\t\t\t// Ignore version errors\n\t\t\t\t\t\tlogger.V(4).Info(\"Test intentionally ignores version error\")\n\t\t\t\t\t} else {\n\t\t\t\t\t\tt.Errorf(\"Error calling syncClaim: %v\", err)\n\t\t\t\t\t\t// Finish the loop on the first error\n\t\t\t\t\t\tbreak\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\t// Process generated changes\n\t\t\t\tcontinue\n\t\t\tcase *v1.PersistentVolume:\n\t\t\t\tvolume := obj.(*v1.PersistentVolume)\n\t\t\t\t// Simulate \"volume updated\" event\n\t\t\t\tctrl.volumes.store.Update(volume)\n\t\t\t\terr = ctrl.syncVolume(context.TODO(), volume)\n\t\t\t\tif err != nil {\n\t\t\t\t\tif err == pvtesting.ErrVersionConflict {\n\t\t\t\t\t\t// Ignore version errors\n\t\t\t\t\t\tlogger.V(4).Info(\"Test intentionally ignores version error\")\n\t\t\t\t\t} else {\n\t\t\t\t\t\tt.Errorf(\"Error calling syncVolume: %v\", err)\n\t\t\t\t\t\t// Finish the loop on the first error\n\t\t\t\t\t\tbreak\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\t// Process generated changes\n\t\t\t\tcontinue\n\t\t\t}\n\t\t}\n\t\tevaluateTestResults(ctx, ctrl, reactor.VolumeReactor, test, t)\n\t\tlogger.V(4).Info(\"Test finished after iterations\", \"testName\", test.name, \"iterations\", 
counter)\n\t}\n\n\tfor _, test := range tests {\n\t\ttest := test\n\t\tt.Run(test.name, func(t *testing.T) {\n\t\t\tt.Parallel()\n\t\t\trun(t, test)\n\t\t})\n\t}\n}", "func TestRunConsulDeploymentsPackageTests(t *testing.T) {\n\tcfg := testutil.SetupTestConfig(t)\n\tsrv, client := testutil.NewTestConsulInstance(t, &cfg)\n\tdefer func() {\n\t\tsrv.Stop()\n\t\tos.RemoveAll(cfg.WorkingDirectory)\n\t}()\n\n\tt.Run(\"groupDeployments\", func(t *testing.T) {\n\t\tt.Run(\"testArtifacts\", func(t *testing.T) {\n\t\t\ttestArtifacts(t, srv)\n\t\t})\n\t\tt.Run(\"testCapabilities\", func(t *testing.T) {\n\t\t\ttestCapabilities(t, srv)\n\t\t})\n\t\tt.Run(\"testDefinitionStore\", func(t *testing.T) {\n\t\t\ttestDefinitionStore(t)\n\t\t})\n\t\tt.Run(\"testDeploymentNodes\", func(t *testing.T) {\n\t\t\ttestDeploymentNodes(t, srv)\n\t\t})\n\t\tt.Run(\"testRequirements\", func(t *testing.T) {\n\t\t\ttestRequirements(t, srv)\n\t\t})\n\t\tt.Run(\"testResolver\", func(t *testing.T) {\n\t\t\ttestResolver(t)\n\t\t})\n\t\tt.Run(\"testGetTypePropertyDataType\", func(t *testing.T) {\n\t\t\ttestGetTypePropertyDataType(t)\n\t\t})\n\t\tt.Run(\"testGetNestedDataType\", func(t *testing.T) {\n\t\t\ttestGetNestedDataType(t)\n\t\t})\n\t\tt.Run(\"testReadComplexVA\", func(t *testing.T) {\n\t\t\ttestReadComplexVA(t)\n\t\t})\n\t\tt.Run(\"testIssueGetEmptyPropRel\", func(t *testing.T) {\n\t\t\ttestIssueGetEmptyPropRel(t)\n\t\t})\n\t\tt.Run(\"testRelationshipWorkflow\", func(t *testing.T) {\n\t\t\ttestRelationshipWorkflow(t)\n\t\t})\n\t\tt.Run(\"testGlobalInputs\", func(t *testing.T) {\n\t\t\ttestGlobalInputs(t)\n\t\t})\n\t\tt.Run(\"testInlineWorkflow\", func(t *testing.T) {\n\t\t\ttestInlineWorkflow(t)\n\t\t})\n\t\tt.Run(\"testDeleteWorkflow\", func(t *testing.T) {\n\t\t\ttestDeleteWorkflow(t)\n\t\t})\n\t\tt.Run(\"testCheckCycleInNestedWorkflows\", func(t *testing.T) {\n\t\t\ttestCheckCycleInNestedWorkflows(t)\n\t\t})\n\t\tt.Run(\"testGetCapabilityProperties\", func(t *testing.T) 
{\n\t\t\ttestGetCapabilityProperties(t)\n\t\t})\n\t\tt.Run(\"testSubstitutionServiceCapabilityMappings\", func(t *testing.T) {\n\t\t\ttestSubstitutionServiceCapabilityMappings(t)\n\t\t})\n\t\tt.Run(\"testSubstitutionServiceRequirementMappings\", func(t *testing.T) {\n\t\t\ttestSubstitutionServiceRequirementMappings(t)\n\t\t})\n\t\tt.Run(\"testSubstitutionClientDirective\", func(t *testing.T) {\n\t\t\ttestSubstitutionClientDirective(t)\n\t\t})\n\t\tt.Run(\"testSubstitutionClientServiceInstance\", func(t *testing.T) {\n\t\t\ttestSubstitutionClientServiceInstance(t)\n\t\t})\n\t\tt.Run(\"TestOperationImplementationArtifact\", func(t *testing.T) {\n\t\t\ttestOperationImplementationArtifact(t)\n\t\t})\n\t\tt.Run(\"TestOperationHost\", func(t *testing.T) {\n\t\t\ttestOperationHost(t)\n\t\t})\n\t\tt.Run(\"testIssueGetEmptyPropOnRelationship\", func(t *testing.T) {\n\t\t\ttestIssueGetEmptyPropOnRelationship(t)\n\t\t})\n\n\t\tt.Run(\"testTopologyUpdate\", func(t *testing.T) {\n\t\t\ttestTopologyUpdate(t)\n\t\t})\n\t\tt.Run(\"testTopologyBadUpdate\", func(t *testing.T) {\n\t\t\ttestTopologyBadUpdate(t)\n\t\t})\n\t\tt.Run(\"testRepositories\", func(t *testing.T) {\n\t\t\ttestRepositories(t)\n\t\t})\n\t\tt.Run(\"testPurgedDeployments\", func(t *testing.T) {\n\t\t\ttestPurgedDeployments(t, client)\n\t\t})\n\t\tt.Run(\"testDeleteDeployment\", func(t *testing.T) {\n\t\t\ttestDeleteDeployment(t)\n\t\t})\n\t\tt.Run(\"testDeleteInstance\", func(t *testing.T) {\n\t\t\ttestDeleteInstance(t)\n\t\t})\n\t\tt.Run(\"testDeleteAllInstances\", func(t *testing.T) {\n\t\t\ttestDeleteAllInstances(t)\n\t\t})\n\t\tt.Run(\"testDeleteRelationshipInstance\", func(t *testing.T) {\n\t\t\ttestDeleteRelationshipInstance(t)\n\t\t})\n\n\t\tt.Run(\"testResolveAttributeMapping\", func(t *testing.T) {\n\t\t\ttestResolveAttributeMapping(t)\n\t\t})\n\n\t})\n\n\tt.Run(\"CommonsTestsOn_test_topology.yml\", func(t *testing.T) {\n\t\tdeploymentID := testutil.BuildDeploymentID(t)\n\t\terr := 
StoreDeploymentDefinition(context.Background(), deploymentID, \"testdata/test_topology.yml\")\n\t\trequire.NoError(t, err)\n\n\t\tt.Run(\"TestNodeHasAttribute\", func(t *testing.T) {\n\t\t\ttestNodeHasAttribute(t, deploymentID)\n\t\t})\n\t\tt.Run(\"TestNodeHasProperty\", func(t *testing.T) {\n\t\t\ttestNodeHasProperty(t, deploymentID)\n\t\t})\n\t\tt.Run(\"TestTopologyTemplateMetadata\", func(t *testing.T) {\n\t\t\ttestTopologyTemplateMetadata(t, deploymentID)\n\t\t})\n\t\tt.Run(\"TestAttributeNotifications\", func(t *testing.T) {\n\t\t\ttestAttributeNotifications(t, deploymentID)\n\t\t})\n\t\tt.Run(\"TestNotifyAttributeOnValueChange\", func(t *testing.T) {\n\t\t\ttestNotifyAttributeOnValueChange(t, deploymentID)\n\t\t})\n\t\tt.Run(\"TestImportTopologyTemplate\", func(t *testing.T) {\n\t\t\ttestImportTopologyTemplateNodeMetadata(t, deploymentID)\n\t\t})\n\t\tt.Run(\"TestTopologyTemplateMetadata\", func(t *testing.T) {\n\t\t\ttestTopologyTemplateMetadata(t, deploymentID)\n\t\t})\n\t})\n\n\tt.Run(\"CommonsTestsOn_test_topology_substitution.yml\", func(t *testing.T) {\n\t\tdeploymentID := testutil.BuildDeploymentID(t)\n\t\terr := StoreDeploymentDefinition(context.Background(), deploymentID, \"testdata/test_topology_substitution.yml\")\n\t\trequire.NoError(t, err)\n\n\t\tt.Run(\"TestAddSubstitutionMappingAttributeHostNotification\", func(t *testing.T) {\n\t\t\ttestAddSubstitutionMappingAttributeHostNotification(t, deploymentID)\n\t\t})\n\t})\n}", "func TestHandle_existingDeployments(t *testing.T) {\n\tvar updatedDeployments []kapi.ReplicationController\n\tvar (\n\t\tconfig *deployapi.DeploymentConfig\n\t\tdeployed *kapi.ReplicationController\n\t\texistingDeployments *kapi.ReplicationControllerList\n\t)\n\n\tcontroller := &DeploymentConfigController{\n\t\tmakeDeployment: func(config *deployapi.DeploymentConfig) (*kapi.ReplicationController, error) {\n\t\t\treturn deployutil.MakeDeployment(config, api.Codec)\n\t\t},\n\t\tdeploymentClient: 
&deploymentClientImpl{\n\t\t\tcreateDeploymentFunc: func(namespace string, deployment *kapi.ReplicationController) (*kapi.ReplicationController, error) {\n\t\t\t\tdeployed = deployment\n\t\t\t\treturn deployment, nil\n\t\t\t},\n\t\t\tlistDeploymentsForConfigFunc: func(namespace, configName string) (*kapi.ReplicationControllerList, error) {\n\t\t\t\treturn existingDeployments, nil\n\t\t\t},\n\t\t\tupdateDeploymentFunc: func(namespace string, deployment *kapi.ReplicationController) (*kapi.ReplicationController, error) {\n\t\t\t\tupdatedDeployments = append(updatedDeployments, *deployment)\n\t\t\t\t//t.Fatalf(\"unexpected update call with deployment %v\", deployment)\n\t\t\t\treturn deployment, nil\n\t\t\t},\n\t\t},\n\t\trecorder: &record.FakeRecorder{},\n\t}\n\n\ttype existing struct {\n\t\tversion int\n\t\tstatus deployapi.DeploymentStatus\n\t\tshouldCancel bool\n\t}\n\n\ttype scenario struct {\n\t\tversion int\n\t\texisting []existing\n\t\terrorType reflect.Type\n\t\texpectDeployment bool\n\t}\n\n\ttransientErrorType := reflect.TypeOf(transientError(\"\"))\n\tscenarios := []scenario{\n\t\t// No existing deployments\n\t\t{1, []existing{}, nil, true},\n\t\t// A single existing completed deployment\n\t\t{2, []existing{{1, deployapi.DeploymentStatusComplete, false}}, nil, true},\n\t\t// A single existing failed deployment\n\t\t{2, []existing{{1, deployapi.DeploymentStatusFailed, false}}, nil, true},\n\t\t// Multiple existing completed/failed deployments\n\t\t{3, []existing{{2, deployapi.DeploymentStatusFailed, false}, {1, deployapi.DeploymentStatusComplete, false}}, nil, true},\n\n\t\t// A single existing deployment in the default state\n\t\t{2, []existing{{1, \"\", false}}, transientErrorType, false},\n\t\t// A single existing new deployment\n\t\t{2, []existing{{1, deployapi.DeploymentStatusNew, false}}, transientErrorType, false},\n\t\t// A single existing pending deployment\n\t\t{2, []existing{{1, deployapi.DeploymentStatusPending, false}}, transientErrorType, 
false},\n\t\t// A single existing running deployment\n\t\t{2, []existing{{1, deployapi.DeploymentStatusRunning, false}}, transientErrorType, false},\n\t\t// Multiple existing deployments with one in new/pending/running\n\t\t{4, []existing{{3, deployapi.DeploymentStatusRunning, false}, {2, deployapi.DeploymentStatusComplete, false}, {1, deployapi.DeploymentStatusFailed, false}}, transientErrorType, false},\n\n\t\t// Latest deployment exists and has already failed/completed\n\t\t{2, []existing{{2, deployapi.DeploymentStatusFailed, false}, {1, deployapi.DeploymentStatusComplete, false}}, nil, false},\n\t\t// Latest deployment exists and is in new/pending/running state\n\t\t{2, []existing{{2, deployapi.DeploymentStatusRunning, false}, {1, deployapi.DeploymentStatusComplete, false}}, nil, false},\n\n\t\t// Multiple existing deployments with more than one in new/pending/running\n\t\t{4, []existing{{3, deployapi.DeploymentStatusNew, false}, {2, deployapi.DeploymentStatusRunning, true}, {1, deployapi.DeploymentStatusFailed, false}}, transientErrorType, false},\n\t\t// Multiple existing deployments with more than one in new/pending/running\n\t\t// Latest deployment has already failed\n\t\t{6, []existing{{5, deployapi.DeploymentStatusFailed, false}, {4, deployapi.DeploymentStatusRunning, false}, {3, deployapi.DeploymentStatusNew, true}, {2, deployapi.DeploymentStatusComplete, false}, {1, deployapi.DeploymentStatusNew, true}}, transientErrorType, false},\n\t}\n\n\tfor _, scenario := range scenarios {\n\t\tupdatedDeployments = []kapi.ReplicationController{}\n\t\tdeployed = nil\n\t\tconfig = deploytest.OkDeploymentConfig(scenario.version)\n\t\texistingDeployments = &kapi.ReplicationControllerList{}\n\t\tfor _, e := range scenario.existing {\n\t\t\td, _ := deployutil.MakeDeployment(deploytest.OkDeploymentConfig(e.version), api.Codec)\n\t\t\tif e.status != \"\" {\n\t\t\t\td.Annotations[deployapi.DeploymentStatusAnnotation] = 
string(e.status)\n\t\t\t}\n\t\t\texistingDeployments.Items = append(existingDeployments.Items, *d)\n\t\t}\n\t\terr := controller.Handle(config)\n\n\t\tif scenario.expectDeployment && deployed == nil {\n\t\t\tt.Fatalf(\"expected a deployment\")\n\t\t}\n\n\t\tif scenario.errorType == nil {\n\t\t\tif err != nil {\n\t\t\t\tt.Fatalf(\"unexpected error: %v\", err)\n\t\t\t}\n\t\t} else {\n\t\t\tif err == nil {\n\t\t\t\tt.Fatalf(\"expected error\")\n\t\t\t}\n\t\t\tif reflect.TypeOf(err) != scenario.errorType {\n\t\t\t\tt.Fatalf(\"error expected: %s, got: %s\", scenario.errorType, reflect.TypeOf(err))\n\t\t\t}\n\t\t}\n\n\t\texpectedCancellations := []int{}\n\t\tactualCancellations := []int{}\n\t\tfor _, e := range scenario.existing {\n\t\t\tif e.shouldCancel {\n\t\t\t\texpectedCancellations = append(expectedCancellations, e.version)\n\t\t\t}\n\t\t}\n\t\tfor _, d := range updatedDeployments {\n\t\t\tactualCancellations = append(actualCancellations, deployutil.DeploymentVersionFor(&d))\n\t\t}\n\n\t\tsort.Ints(actualCancellations)\n\t\tsort.Ints(expectedCancellations)\n\t\tif !reflect.DeepEqual(actualCancellations, expectedCancellations) {\n\t\t\tt.Fatalf(\"expected cancellations: %v, actual: %v\", expectedCancellations, actualCancellations)\n\t\t}\n\t}\n}", "func TestDeviceController(t *testing.T) {\n\n\t// Set the logger to development mode for verbose logs.\n\tlogf.SetLogger(zap.New(zap.UseDevMode(true)))\n\n\t// Create a fake client to mock API calls.\n\tcl, s := CreateFakeClient(t)\n\n\t// Create a ReconcileBlockDevice object with the scheme and fake client.\n\tr := &BlockDeviceReconciler{Client: cl, Scheme: s, Recorder: fakeRecorder}\n\n\t// Mock request to simulate Reconcile() being called on an event for a\n\t// watched resource .\n\treq := reconcile.Request{\n\t\tNamespacedName: types.NamespacedName{\n\t\t\tName: deviceName,\n\t\t\tNamespace: namespace,\n\t\t},\n\t}\n\n\tres, err := r.Reconcile(context.TODO(), req)\n\tif err != nil {\n\t\tt.Fatalf(\"reconcile: (%v)\", 
err)\n\t}\n\n\t// Check the result of reconciliation to make sure it has the desired state.\n\tif !res.Requeue {\n\t\tt.Log(\"reconcile did not requeue request as expected\")\n\t}\n\n\tdeviceInstance := &openebsv1alpha1.BlockDevice{}\n\terr = r.Client.Get(context.TODO(), req.NamespacedName, deviceInstance)\n\tif err != nil {\n\t\tt.Errorf(\"get deviceInstance : (%v)\", err)\n\t}\n\n\t// Disk Status state should be Active as expected.\n\tif deviceInstance.Status.State == ndm.NDMActive {\n\t\tt.Logf(\"BlockDevice Object state:%v match expected state:%v\", deviceInstance.Status.State, ndm.NDMActive)\n\t} else {\n\t\tt.Fatalf(\"BlockDevice Object state:%v did not match expected state:%v\", deviceInstance.Status.State, ndm.NDMActive)\n\t}\n}", "func getOSMControllerPods(clientSet kubernetes.Interface, ns string) *corev1.PodList {\n\tlabelSelector := metav1.LabelSelector{MatchLabels: map[string]string{constants.AppLabel: constants.OSMControllerName}}\n\tlistOptions := metav1.ListOptions{\n\t\tLabelSelector: labels.Set(labelSelector.MatchLabels).String(),\n\t}\n\tpodList, _ := clientSet.CoreV1().Pods(ns).List(context.TODO(), listOptions)\n\treturn podList\n}", "func TestController(t *testing.T) {\n\tsuite.Run(t, new(ControllerTestSuite))\n}", "func TestBucketController(t *testing.T) {\n\t// Set the logger to development mode for verbose logs.\n\tlogf.SetLogger(logf.ZapLogger(true))\n\n\t// setup fake GCS storage backend\n\tserver := setupFakeStorage(t)\n\tstorageClient := server.Client()\n\n\tvar (\n\t\tname = \"bucket-operator\"\n\t\tnamespace = \"bucket\"\n\t\tbucketName string = \"my-new-bucket\"\n\t)\n\n\t// A Bucket resource with metadata and spec.\n\tbucket := &cachev1alpha1.Bucket{\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tName: name,\n\t\t\tNamespace: namespace,\n\t\t},\n\t\tSpec: cachev1alpha1.BucketSpec{\n\t\t\tName: bucketName,\n\t\t},\n\t}\n\t// Objects to track in the fake client.\n\tobjs := []runtime.Object{\n\t\tbucket,\n\t}\n\n\t// Register operator types 
with the runtime scheme.\n\ts := scheme.Scheme\n\ts.AddKnownTypes(cachev1alpha1.SchemeGroupVersion, bucket)\n\t// Create a fake client to mock API calls.\n\tcl := fake.NewFakeClient(objs...)\n\t// Create a ReconcileBucket object with the scheme and fake client.\n\tr := &ReconcileBucket{storageClient: storageClient, client: cl, scheme: s}\n\n\t// Mock request to simulate Reconcile() being called on an event for a\n\t// watched resource .\n\treq := reconcile.Request{\n\t\tNamespacedName: types.NamespacedName{\n\t\t\tName: name,\n\t\t\tNamespace: namespace,\n\t\t},\n\t}\n\tres, err := r.Reconcile(req)\n\tif err != nil {\n\t\tt.Fatalf(\"reconcile: (%v)\", err)\n\t}\n\t// Check the result of reconciliation to make sure it has the desired state.\n\tif !res.Requeue {\n\t\tt.Error(\"reconcile did not requeue request as expected\")\n\t}\n\n\tbh := storageClient.Bucket(bucketName)\n\t// Next check if the bucket exists\n\tif _, err := bh.Attrs(context.TODO()); err != nil {\n\t\tt.Errorf(\"expected bucket %s to exist\", bucketName)\n\t}\n\n\tres, err = r.Reconcile(req)\n\tif err != nil {\n\t\tt.Fatalf(\"reconcile: (%v)\", err)\n\t}\n\t// Check the result of reconciliation to make sure it has the desired state.\n\tif res.Requeue {\n\t\tt.Error(\"reconcile requeue which is not expected\")\n\t}\n\n\tif err = os.Remove(\"./testdata/my-new-bucket\"); err != nil {\n\t\tt.Fatalf(\"couldn't delete bucket %s\", bucketName)\n\t}\n\n\t// Get the updated Bucket object.\n\tbucket = &cachev1alpha1.Bucket{}\n\terr = r.client.Get(context.TODO(), req.NamespacedName, bucket)\n\tif err != nil {\n\t\tt.Errorf(\"get bucket: (%v)\", err)\n\t}\n\n\t// Ensure Reconcile() updated the Bucket's Status as expected.\n\t// nodes := bucket.Status.Nodes\n\t// if !reflect.DeepEqual(podNames, nodes) {\n\t// \tt.Errorf(\"pod names %v did not match expected %v\", nodes, podNames)\n\t// }\n\n\tserver.Stop()\n}", "func (m *kubeGenericRuntimeManager) computePodActions(pod *v1.Pod, podStatus 
*kubecontainer.PodStatus) podActions {\n\tklog.V(5).Infof(\"Syncing Pod %q: %+v\", format.Pod(pod), pod)\n\tklog.V(5).Infof(\"podstatus %v\", podStatus)\n\tif podStatus.SandboxStatuses != nil {\n\t\tklog.V(5).Infof(\"pod sandbox length %v\", len(podStatus.SandboxStatuses))\n\t\tfor _, sb := range podStatus.SandboxStatuses {\n\t\t\tklog.V(5).Infof(\"pod sandbox status %v\", sb)\n\t\t}\n\t}\n\n\tcreatePodSandbox, attempt, sandboxID := m.podSandboxChanged(pod, podStatus)\n\tchanges := podActions{\n\t\tKillPod: createPodSandbox,\n\t\tCreateSandbox: createPodSandbox,\n\t\tSandboxID: sandboxID,\n\t\tAttempt: attempt,\n\t\tContainersToStart: []int{},\n\t\tContainersToKill: make(map[kubecontainer.ContainerID]containerToKillInfo),\n\t\tContainersToUpdate: make(map[string][]containerToUpdateInfo),\n\t\tContainersToRestart: []int{},\n\t}\n\n\t// If we need to (re-)create the pod sandbox, everything will need to be\n\t// killed and recreated, and init containers should be purged.\n\tif createPodSandbox {\n\t\tif !shouldRestartOnFailure(pod) && attempt != 0 {\n\t\t\t// Should not restart the pod, just return.\n\t\t\t// we should not create a sandbox for a pod if it is already done.\n\t\t\t// if all containers are done and should not be started, there is no need to create a new sandbox.\n\t\t\t// this stops confusing logs on pods whose containers all have exit codes, but we recreate a sandbox before terminating it.\n\t\t\tchanges.CreateSandbox = false\n\t\t\treturn changes\n\t\t}\n\t\tif len(pod.Spec.InitContainers) != 0 {\n\t\t\t// Pod has init containers, return the first one.\n\t\t\tchanges.NextInitContainerToStart = &pod.Spec.InitContainers[0]\n\t\t\treturn changes\n\t\t}\n\t\t// Start all containers by default but exclude the ones that succeeded if\n\t\t// RestartPolicy is OnFailure.\n\t\tfor idx, c := range pod.Spec.Containers {\n\t\t\tif containerSucceeded(&c, podStatus) && pod.Spec.RestartPolicy == v1.RestartPolicyOnFailure 
{\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tchanges.ContainersToStart = append(changes.ContainersToStart, idx)\n\t\t}\n\t\treturn changes\n\t}\n\n\t// Check initialization progress.\n\tinitLastStatus, next, done := findNextInitContainerToRun(pod, podStatus)\n\tif !done {\n\t\tif next != nil {\n\t\t\tinitFailed := initLastStatus != nil && isInitContainerFailed(initLastStatus)\n\t\t\tif initFailed && !shouldRestartOnFailure(pod) {\n\t\t\t\tchanges.KillPod = true\n\t\t\t} else {\n\t\t\t\t// Always try to stop containers in unknown state first.\n\t\t\t\tif initLastStatus != nil && initLastStatus.State == kubecontainer.ContainerStateUnknown {\n\t\t\t\t\tchanges.ContainersToKill[initLastStatus.ID] = containerToKillInfo{\n\t\t\t\t\t\tname: next.Name,\n\t\t\t\t\t\tcontainer: next,\n\t\t\t\t\t\tmessage: fmt.Sprintf(\"Init container is in %q state, try killing it before restart\",\n\t\t\t\t\t\t\tinitLastStatus.State),\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tchanges.NextInitContainerToStart = next\n\t\t\t}\n\t\t}\n\t\t// Initialization failed or still in progress. Skip inspecting non-init\n\t\t// containers.\n\t\treturn changes\n\t}\n\n\t// Number of running containers to keep.\n\tkeepCount := 0\n\n\t// check the status of containers.\n\tfor idx, container := range pod.Spec.Containers {\n\t\tcontainerStatus := podStatus.FindContainerStatusByName(container.Name)\n\n\t\t// Call internal container post-stop lifecycle hook for any non-running container so that any\n\t\t// allocated cpus are released immediately. 
If the container is restarted, cpus will be re-allocated\n\t\t// to it.\n\t\tif containerStatus != nil && containerStatus.State != kubecontainer.ContainerStateRunning {\n\t\t\tif err := m.internalLifecycle.PostStopContainer(containerStatus.ID.ID); err != nil {\n\t\t\t\tklog.Errorf(\"internal container post-stop lifecycle hook failed for container %v in pod %v with error %v\",\n\t\t\t\t\tcontainer.Name, pod.Name, err)\n\t\t\t}\n\t\t}\n\n\t\t// If container does not exist, or is not running, check whether we\n\t\t// need to restart it.\n\t\tif containerStatus == nil || containerStatus.State != kubecontainer.ContainerStateRunning {\n\t\t\tif kubecontainer.ShouldContainerBeRestarted(&container, pod, podStatus) {\n\t\t\t\tmessage := fmt.Sprintf(\"Container %+v is dead, but RestartPolicy says that we should restart it.\", container)\n\t\t\t\tklog.V(3).Infof(message)\n\t\t\t\tchanges.ContainersToStart = append(changes.ContainersToStart, idx)\n\t\t\t\tif containerStatus != nil && containerStatus.State == kubecontainer.ContainerStateUnknown {\n\t\t\t\t\t// If container is in unknown state, we don't know whether it\n\t\t\t\t\t// is actually running or not, always try killing it before\n\t\t\t\t\t// restart to avoid having 2 running instances of the same container.\n\t\t\t\t\tchanges.ContainersToKill[containerStatus.ID] = containerToKillInfo{\n\t\t\t\t\t\tname: containerStatus.Name,\n\t\t\t\t\t\tcontainer: &pod.Spec.Containers[idx],\n\t\t\t\t\t\tmessage: fmt.Sprintf(\"Container is in %q state, try killing it before restart\", containerStatus.State),\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t\tcontinue\n\t\t}\n\t\t// The container is running, but kill the container if any of the following condition is met.\n\t\tvar message string\n\t\trestart := shouldRestartOnFailure(pod)\n\t\tif _, _, changed := containerChanged(&container, containerStatus); changed {\n\t\t\tmessage = fmt.Sprintf(\"Container %s definition changed\", container.Name)\n\t\t\t// Restart regardless of the restart 
policy because the container\n\t\t\t// spec changed.\n\t\t\trestart = true\n\t\t} else if liveness, found := m.livenessManager.Get(containerStatus.ID); found && liveness == proberesults.Failure {\n\t\t\t// If the container failed the liveness probe, we should kill it.\n\t\t\tmessage = fmt.Sprintf(\"Container %s failed liveness probe\", container.Name)\n\t\t} else {\n\t\t\tif utilfeature.DefaultFeatureGate.Enabled(features.InPlacePodVerticalScaling) {\n\t\t\t\tkeepCount++\n\t\t\t\tapiContainerStatuses := pod.Status.ContainerStatuses\n\t\t\t\tif pod.Spec.VirtualMachine != nil && pod.Status.VirtualMachineStatus != nil {\n\t\t\t\t\tvar vmContainerState v1.ContainerState\n\t\t\t\t\tif pod.Status.VirtualMachineStatus.State == v1.VmActive {\n\t\t\t\t\t\tvmContainerState = v1.ContainerState{Running: &v1.ContainerStateRunning{StartedAt: *pod.Status.StartTime}}\n\t\t\t\t\t}\n\t\t\t\t\tvmContainerId := kubecontainer.BuildContainerID(containerStatus.ID.Type, pod.Status.VirtualMachineStatus.VirtualMachineId)\n\t\t\t\t\tapiContainerStatuses = []v1.ContainerStatus{\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tName: pod.Status.VirtualMachineStatus.Name,\n\t\t\t\t\t\t\tContainerID: vmContainerId.String(),\n\t\t\t\t\t\t\tState: vmContainerState,\n\t\t\t\t\t\t\tReady: pod.Status.VirtualMachineStatus.Ready,\n\t\t\t\t\t\t\tRestartCount: pod.Status.VirtualMachineStatus.RestartCount,\n\t\t\t\t\t\t\tImage: pod.Status.VirtualMachineStatus.Image,\n\t\t\t\t\t\t\tImageID: pod.Status.VirtualMachineStatus.ImageId,\n\t\t\t\t\t\t\tResources: pod.Status.VirtualMachineStatus.Resources,\n\t\t\t\t\t\t},\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tif container.Resources.Limits == nil || len(apiContainerStatuses) == 0 {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tapiContainerStatus, exists := podutil.GetContainerStatus(apiContainerStatuses, container.Name)\n\t\t\t\tif !exists || apiContainerStatus.State.Running == nil ||\n\t\t\t\t\tcontainerStatus.State != kubecontainer.ContainerStateRunning 
||\n\t\t\t\t\tcontainerStatus.ID.String() != apiContainerStatus.ContainerID ||\n\t\t\t\t\tlen(diff.ObjectDiff(container.Resources.Requests, container.ResourcesAllocated)) != 0 ||\n\t\t\t\t\tlen(diff.ObjectDiff(apiContainerStatus.Resources, container.Resources)) == 0 {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\t// If runtime status resources is available from CRI or previous update, compare with it.\n\t\t\t\tif len(diff.ObjectDiff(containerStatus.Resources, container.Resources)) == 0 {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tresizePolicy := make(map[v1.ResourceName]v1.ContainerResizePolicy)\n\t\t\t\tfor _, pol := range container.ResizePolicy {\n\t\t\t\t\tresizePolicy[pol.ResourceName] = pol.Policy\n\t\t\t\t}\n\t\t\t\tdetermineContainerResize := func(rName v1.ResourceName, specValue, statusValue int64) (bool, bool) {\n\t\t\t\t\tif specValue == statusValue {\n\t\t\t\t\t\treturn false, false\n\t\t\t\t\t}\n\t\t\t\t\tif resizePolicy[rName] == v1.RestartContainer {\n\t\t\t\t\t\treturn true, true\n\t\t\t\t\t}\n\t\t\t\t\treturn true, false\n\t\t\t\t}\n\t\t\t\tmarkContainerForUpdate := func(rName string, specValue, statusValue int64) {\n\t\t\t\t\tcUpdateInfo := containerToUpdateInfo{\n\t\t\t\t\t\tapiContainer: &pod.Spec.Containers[idx],\n\t\t\t\t\t\tapiContainerStatus: &apiContainerStatus,\n\t\t\t\t\t\tkubeContainerStatus: containerStatus,\n\t\t\t\t\t}\n\t\t\t\t\t// Container updates are ordered so that resource decreases are applied before increases\n\t\t\t\t\tswitch {\n\t\t\t\t\tcase specValue > statusValue: // append\n\t\t\t\t\t\tchanges.ContainersToUpdate[rName] = append(changes.ContainersToUpdate[rName], cUpdateInfo)\n\t\t\t\t\tcase specValue < statusValue: // prepend\n\t\t\t\t\t\tchanges.ContainersToUpdate[rName] = append(changes.ContainersToUpdate[rName], containerToUpdateInfo{})\n\t\t\t\t\t\tcopy(changes.ContainersToUpdate[rName][1:], changes.ContainersToUpdate[rName])\n\t\t\t\t\t\tchanges.ContainersToUpdate[rName][0] = 
cUpdateInfo\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tspecLim := container.Resources.Limits\n\t\t\t\tspecReq := container.Resources.Requests\n\t\t\t\tstatusLim := apiContainerStatus.Resources.Limits\n\t\t\t\tstatusReq := apiContainerStatus.Resources.Requests\n\t\t\t\t// Runtime container status resources, if set, takes precedence.\n\t\t\t\tif containerStatus.Resources.Limits != nil {\n\t\t\t\t\tstatusLim = containerStatus.Resources.Limits\n\t\t\t\t}\n\t\t\t\tif containerStatus.Resources.Requests != nil {\n\t\t\t\t\tstatusReq = containerStatus.Resources.Requests\n\t\t\t\t}\n\t\t\t\tresizeMemLim, restartMemLim := determineContainerResize(v1.ResourceMemory, specLim.Memory().Value(), statusLim.Memory().Value())\n\t\t\t\tresizeCPUReq, restartCPUReq := determineContainerResize(v1.ResourceCPU, specReq.Cpu().MilliValue(), statusReq.Cpu().MilliValue())\n\t\t\t\tresizeCPULim, restartCPULim := determineContainerResize(v1.ResourceCPU, specLim.Cpu().MilliValue(), statusLim.Cpu().MilliValue())\n\t\t\t\tif restartMemLim || restartCPULim || restartCPUReq {\n\t\t\t\t\t// resize policy requires this container to restart\n\t\t\t\t\tchanges.ContainersToKill[containerStatus.ID] = containerToKillInfo{\n\t\t\t\t\t\tname: containerStatus.Name,\n\t\t\t\t\t\tcontainer: &pod.Spec.Containers[idx],\n\t\t\t\t\t\tmessage: fmt.Sprintf(\"Container %s resize requires restart\", container.Name),\n\t\t\t\t\t}\n\t\t\t\t\tchanges.ContainersToRestart = append(changes.ContainersToRestart, idx)\n\t\t\t\t\tkeepCount--\n\t\t\t\t} else {\n\t\t\t\t\tif resizeMemLim {\n\t\t\t\t\t\tmarkContainerForUpdate(memLimit, specLim.Memory().Value(), statusLim.Memory().Value())\n\t\t\t\t\t}\n\t\t\t\t\tif resizeCPUReq {\n\t\t\t\t\t\tmarkContainerForUpdate(cpuRequest, specReq.Cpu().MilliValue(), statusReq.Cpu().MilliValue())\n\t\t\t\t\t}\n\t\t\t\t\tif resizeCPULim {\n\t\t\t\t\t\tmarkContainerForUpdate(cpuLimit, specLim.Cpu().MilliValue(), statusLim.Cpu().MilliValue())\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\t// Keep 
the container.\n\t\t\tkeepCount++\n\t\t\tcontinue\n\t\t}\n\n\t\t// We need to kill the container, but if we also want to restart the\n\t\t// container afterwards, make the intent clear in the message. Also do\n\t\t// not kill the entire pod since we expect container to be running eventually.\n\t\tif restart {\n\t\t\tmessage = fmt.Sprintf(\"%s, will be restarted\", message)\n\t\t\tchanges.ContainersToStart = append(changes.ContainersToStart, idx)\n\t\t}\n\n\t\tchanges.ContainersToKill[containerStatus.ID] = containerToKillInfo{\n\t\t\tname: containerStatus.Name,\n\t\t\tcontainer: &pod.Spec.Containers[idx],\n\t\t\tmessage: message,\n\t\t}\n\t\tklog.V(2).Infof(\"Container %q (%q) of pod %s: %s\", container.Name, containerStatus.ID, format.Pod(pod), message)\n\t}\n\n\tif keepCount == 0 && len(changes.ContainersToStart) == 0 {\n\t\tchanges.KillPod = true\n\t\tif utilfeature.DefaultFeatureGate.Enabled(features.InPlacePodVerticalScaling) {\n\t\t\tif len(changes.ContainersToRestart) != 0 {\n\t\t\t\tchanges.KillPod = false\n\t\t\t}\n\t\t}\n\t}\n\n\t// always attempts to identify hotplug nic based on pod spec & pod status (got from runtime)\n\tif m.canHotplugNIC(pod, podStatus) {\n\t\tif len(podStatus.SandboxStatuses) > 0 && podStatus.SandboxStatuses[0].GetNetwork() != nil {\n\t\t\tnicsToAttach, nicsToDetach := computeNICHotplugs(pod.Spec.Nics, podStatus.SandboxStatuses[0].GetNetwork().GetNics())\n\t\t\tif len(nicsToAttach) > 0 {\n\t\t\t\tchanges.Hotplugs.NICsToAttach = nicsToAttach\n\t\t\t}\n\t\t\tif len(nicsToDetach) > 0 {\n\t\t\t\tchanges.Hotplugs.NICsToDetach = nicsToDetach\n\t\t\t}\n\t\t}\n\t}\n\n\treturn changes\n}", "func main() {\n\tkubeconfig := filepath.Join(\"/Users/julz\", \".kube\", \"config\")\n\tccApi := flag.String(\"cc_api\", \"internal_user\", \"\")\n\tccUser := flag.String(\"cc_user\", \"internal_user\", \"\")\n\tccPass := flag.String(\"cc_pass\", \"\", \"\")\n\tflag.Parse()\n\n\tconfig, err := clientcmd.BuildConfigFromFlags(\"\", kubeconfig)\n\tif err != 
nil {\n\t\tpanic(err.Error())\n\t}\n\n\tclientset, err := kubernetes.NewForConfig(config)\n\tif err != nil {\n\t\tpanic(err.Error())\n\t}\n\n\tbatchSize := 50\n\n\tlog := lager.NewLogger(\"sink\")\n\tlog.RegisterSink(lager.NewWriterSink(os.Stdout, lager.DEBUG))\n\n\tcancel := make(chan struct{})\n\tclient := &http.Client{Transport: &http.Transport{\n\t\tTLSClientConfig: &tls.Config{InsecureSkipVerify: true},\n\t}}\n\n\tfetcher := &bulk.CCFetcher{\n\t\tBaseURI: *ccApi,\n\t\tBatchSize: batchSize,\n\t\tUsername: *ccUser,\n\t\tPassword: *ccPass,\n\t}\n\n\tticker := time.NewTicker(15 * time.Second).C\n\tfor range ticker {\n\t\tlog.Info(\"tick\", nil)\n\n\t\texisting, err := clientset.AppsV1beta1().Deployments(\"default\").List(av1.ListOptions{\n\t\t\tLabelSelector: \"cube\",\n\t\t})\n\t\tif err != nil {\n\t\t\tlog.Error(\"fetch-from-kube\", err, nil)\n\t\t\tbreak\n\t\t}\n\n\t\texistingByGuid := make(map[string]string)\n\t\tfor _, e := range existing.Items {\n\t\t\texistingByGuid[e.Name] = e.Labels[\"etag\"]\n\t\t}\n\n\t\tlog.Info(\"got-existing\", lager.Data{\"existing\": existingByGuid})\n\n\t\tfingerprints, fingerprintErr := fetcher.FetchFingerprints(log, cancel, client)\n\t\tdesired, desiredErr := fetcher.FetchDesiredApps(log, cancel, client, fingerprints)\n\t\tdeployments := convert(log, cancel, desired)\n\n\t\tfor d := range deployments {\n\t\t\tif _, ok := existingByGuid[d.Name]; !ok {\n\t\t\t\t_, err = clientset.AppsV1beta1().Deployments(\"default\").Create(d)\n\t\t\t\tif err != nil {\n\t\t\t\t\tlog.Error(\"created-deployment-failed\", err, nil)\n\t\t\t\t}\n\n\t\t\t\tlog.Info(\"created\", lager.Data{\"d\": d, \"e\": err})\n\t\t\t} else if existingByGuid[d.Name] != d.Labels[\"etag\"] {\n\t\t\t\t_, err = clientset.AppsV1beta1().Deployments(\"default\").Update(d)\n\t\t\t\tif err != nil {\n\t\t\t\t\tlog.Error(\"created-deployment-failed\", err, nil)\n\t\t\t\t}\n\n\t\t\t\tlog.Info(\"updated\", lager.Data{\"d\": d, \"e\": err})\n\t\t\t} else 
{\n\t\t\t\tlog.Info(\"skipped\", lager.Data{\"name\": d.Name})\n\t\t\t}\n\t\t}\n\n\t\twait(log, \"fetch-fingerprints-error\", fingerprintErr)\n\t\twait(log, \"fetch-desired-error\", desiredErr)\n\t}\n}", "func TestSimplePipelineRun(t *testing.T) {\n\tt.Parallel()\n\tctx, cancel := context.WithCancel(context.Background())\n\tdefer cancel()\n\tc, namespace := setup(ctx, t)\n\n\tknativetest.CleanupOnInterrupt(func() { tearDown(context.Background(), t, c, namespace) }, t.Logf)\n\tdefer tearDown(context.Background(), t, c, namespace)\n\n\tt.Logf(\"Creating Task in namespace %s\", namespace)\n\ttask := parse.MustParseV1Task(t, fmt.Sprintf(simpleTaskYaml, task1Name, namespace))\n\tif _, err := c.V1TaskClient.Create(ctx, task, metav1.CreateOptions{}); err != nil {\n\t\tt.Fatalf(\"Failed to create Task `%s`: %s\", task.Name, err)\n\t}\n\n\tpipeline := parse.MustParseV1Pipeline(t, fmt.Sprintf(simplePipelineYaml, helpers.ObjectNameForTest(t), namespace, task1Name, task.Name))\n\tpipelineName := pipeline.Name\n\tpipelineRun := parse.MustParseV1PipelineRun(t, fmt.Sprintf(simplePipelineRunYaml, helpers.ObjectNameForTest(t), namespace, pipeline.Name))\n\tpipelineRunName := pipelineRun.Name\n\n\tif _, err := c.V1PipelineClient.Create(ctx, pipeline, metav1.CreateOptions{}); err != nil {\n\t\tt.Fatalf(\"Failed to create Pipeline `%s`: %s\", pipeline.Name, err)\n\t}\n\tif _, err := c.V1PipelineRunClient.Create(ctx, pipelineRun, metav1.CreateOptions{}); err != nil {\n\t\tt.Fatalf(\"Failed to create PipelineRun `%s`: %s\", pipelineRunName, err)\n\t}\n\n\tt.Logf(\"Waiting for PipelineRun %s in namespace %s to complete\", pipelineRunName, namespace)\n\tif err := WaitForPipelineRunState(ctx, c, pipelineRunName, timeout, PipelineRunSucceed(pipelineRunName), \"PipelineRunSuccess\", v1Version); err != nil {\n\t\tt.Fatalf(\"Error waiting for PipelineRun %s to finish: %s\", pipelineRunName, err)\n\t}\n\n\ttaskRunName := strings.Join([]string{pipelineRunName, task1Name}, \"-\")\n\n\tpr, err := 
c.V1PipelineRunClient.Get(ctx, pipelineRunName, metav1.GetOptions{})\n\tif err != nil {\n\t\tt.Fatalf(\"Couldn't get expected PipelineRun %s: %s\", pipelineRunName, err)\n\t}\n\n\texpectedPipelineRun := parse.MustParseV1PipelineRun(t, fmt.Sprintf(expectedSimplePipelineRunYaml, pipelineRunName, namespace, pipelineName, taskRunName))\n\tif d := cmp.Diff(expectedPipelineRun, pr, append([]cmp.Option{filterV1PipelineRunStatus, filterV1PipelineRunSA, filterPipelineRunStatusFields}, filterV1PipelineRunFields...)...); d != \"\" {\n\t\tt.Errorf(\"Cannot get expected PipelineRun, -want, +got: %v\", d)\n\t}\n}", "func waitToObservePods(t *testing.T, podInformer cache.SharedIndexInformer, podNum int) {\n\tif err := wait.Poll(10*time.Second, 60*time.Second, func() (bool, error) {\n\t\tobjects := podInformer.GetIndexer().List()\n\t\tif len(objects) == podNum {\n\t\t\treturn true, nil\n\t\t} else {\n\t\t\treturn false, nil\n\t\t}\n\t}); err != nil {\n\t\tt.Fatal(err)\n\t}\n}", "func (o *ControllerCommitStatusOptions) Run() error {\n\t// Always run in batch mode as a controller is never run interactively\n\to.BatchMode = true\n\n\tjxClient, ns, err := o.JXClientAndDevNamespace()\n\tif err != nil {\n\t\treturn err\n\t}\n\tkubeClient, _, err := o.KubeClientAndDevNamespace()\n\tif err != nil {\n\t\treturn err\n\t}\n\tapisClient, err := o.ApiExtensionsClient()\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = kube.RegisterCommitStatusCRD(apisClient)\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = kube.RegisterPipelineActivityCRD(apisClient)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tcommitstatusListWatch := cache.NewListWatchFromClient(jxClient.JenkinsV1().RESTClient(), \"commitstatuses\", ns, fields.Everything())\n\tkube.SortListWatchByName(commitstatusListWatch)\n\t_, commitstatusController := cache.NewInformer(\n\t\tcommitstatusListWatch,\n\t\t&jenkinsv1.CommitStatus{},\n\t\ttime.Minute*10,\n\t\tcache.ResourceEventHandlerFuncs{\n\t\t\tAddFunc: func(obj interface{}) 
{\n\t\t\t\to.onCommitStatusObj(obj, jxClient, ns)\n\t\t\t},\n\t\t\tUpdateFunc: func(oldObj, newObj interface{}) {\n\t\t\t\to.onCommitStatusObj(newObj, jxClient, ns)\n\t\t\t},\n\t\t\tDeleteFunc: func(obj interface{}) {\n\n\t\t\t},\n\t\t},\n\t)\n\tstop := make(chan struct{})\n\tgo commitstatusController.Run(stop)\n\n\tpodListWatch := cache.NewListWatchFromClient(kubeClient.CoreV1().RESTClient(), \"pods\", ns, fields.Everything())\n\tkube.SortListWatchByName(podListWatch)\n\t_, podWatch := cache.NewInformer(\n\t\tpodListWatch,\n\t\t&corev1.Pod{},\n\t\ttime.Minute*10,\n\t\tcache.ResourceEventHandlerFuncs{\n\t\t\tAddFunc: func(obj interface{}) {\n\t\t\t\to.onPodObj(obj, jxClient, kubeClient, ns)\n\t\t\t},\n\t\t\tUpdateFunc: func(oldObj, newObj interface{}) {\n\t\t\t\to.onPodObj(newObj, jxClient, kubeClient, ns)\n\t\t\t},\n\t\t\tDeleteFunc: func(obj interface{}) {\n\n\t\t\t},\n\t\t},\n\t)\n\tstop = make(chan struct{})\n\tpodWatch.Run(stop)\n\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}", "func (c *TestClient) createPodInformer() error {\n\tklog.Infof(\"Creating PodWatcher for namespace %q and labelSelector %q\", c.TargetConfig.TargetNamespace, c.TargetConfig.TargetLabelSelector)\n\n\tlistWatch := &cache.ListWatch{\n\t\tListFunc: func(options metav1.ListOptions) (runtime.Object, error) {\n\t\t\toptions.LabelSelector = c.TargetConfig.TargetLabelSelector\n\t\t\treturn c.K8sClient.CoreV1().Pods(c.TargetConfig.TargetNamespace).List(context.TODO(), options)\n\t\t},\n\t\tWatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {\n\t\t\toptions.LabelSelector = c.TargetConfig.TargetLabelSelector\n\t\t\treturn c.K8sClient.CoreV1().Pods(c.TargetConfig.TargetNamespace).Watch(context.TODO(), options)\n\t\t},\n\t}\n\n\thandlePodEvent := func(obj interface{}, isAddEvent bool) {\n\t\tpod, ok := obj.(*corev1.Pod)\n\t\tif !ok {\n\t\t\tklog.Warningf(\"handlePodEvent() failed to convert newObj (%T) to *corev1.Pod\", obj)\n\t\t\treturn\n\t\t}\n\n\t\tpodEvent := 
utils.PodEvent{PodName: pod.GetName(), IsAddEvent: isAddEvent}\n\t\tc.podCreationWorkQueue.Add(podEvent)\n\t}\n\n\tinformer := cache.NewSharedIndexInformer(listWatch, nil, 0, cache.Indexers{utils.NameIndex: utils.MetaNameIndexFunc})\n\t_, err := informer.AddEventHandler(cache.ResourceEventHandlerFuncs{\n\t\tAddFunc: func(obj interface{}) {\n\t\t\thandlePodEvent(obj, true)\n\t\t},\n\t\tUpdateFunc: func(oldObj, newObj interface{}) {\n\t\t\thandlePodEvent(newObj, false)\n\t\t},\n\t})\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tc.podInformer = informer\n\tgo informer.Run(c.informerStopChan)\n\terr = utils.Retry(10, 500*time.Millisecond, func() error {\n\t\treturn utils.InformerSynced(informer.HasSynced, \"pod informer\")\n\t})\n\n\treturn err\n}", "func runMultisyncTests(t *testing.T, tests []controllerTest, storageClasses []*storage.StorageClass, defaultStorageClass string) {\n\tfor _, test := range tests {\n\t\tglog.V(4).Infof(\"starting multisync test %q\", test.name)\n\n\t\t// Initialize the controller\n\t\tclient := &fake.Clientset{}\n\t\tctrl, err := newTestController(client, nil, true)\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"Test %q construct persistent volume failed: %v\", test.name, err)\n\t\t}\n\n\t\t// Inject classes into controller via a custom lister.\n\t\tindexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{})\n\t\tfor _, class := range storageClasses {\n\t\t\tindexer.Add(class)\n\t\t}\n\t\tctrl.classLister = storagelisters.NewStorageClassLister(indexer)\n\n\t\treactor := newVolumeReactor(client, ctrl, nil, nil, test.errors)\n\t\tfor _, claim := range test.initialClaims {\n\t\t\tctrl.claims.Add(claim)\n\t\t\treactor.claims[claim.Name] = claim\n\t\t}\n\t\tfor _, volume := range test.initialVolumes {\n\t\t\tctrl.volumes.store.Add(volume)\n\t\t\treactor.volumes[volume.Name] = volume\n\t\t}\n\n\t\t// Run the tested function\n\t\terr = test.test(ctrl, reactor, test)\n\t\tif err != nil {\n\t\t\tt.Errorf(\"Test %q failed: %v\", test.name, 
err)\n\t\t}\n\n\t\t// Simulate any \"changed\" events and \"periodical sync\" until we reach a\n\t\t// stable state.\n\t\tfirstSync := true\n\t\tcounter := 0\n\t\tfor {\n\t\t\tcounter++\n\t\t\tglog.V(4).Infof(\"test %q: iteration %d\", test.name, counter)\n\n\t\t\tif counter > 100 {\n\t\t\t\tt.Errorf(\"Test %q failed: too many iterations\", test.name)\n\t\t\t\tbreak\n\t\t\t}\n\n\t\t\t// Wait for all goroutines to finish\n\t\t\treactor.waitForIdle()\n\n\t\t\tobj := reactor.popChange()\n\t\t\tif obj == nil {\n\t\t\t\t// Nothing was changed, should we exit?\n\t\t\t\tif firstSync || reactor.changedSinceLastSync > 0 {\n\t\t\t\t\t// There were some changes after the last \"periodic sync\".\n\t\t\t\t\t// Simulate \"periodic sync\" of everything (until it produces\n\t\t\t\t\t// no changes).\n\t\t\t\t\tfirstSync = false\n\t\t\t\t\tglog.V(4).Infof(\"test %q: simulating periodical sync of all claims and volumes\", test.name)\n\t\t\t\t\treactor.syncAll()\n\t\t\t\t} else {\n\t\t\t\t\t// Last sync did not produce any updates, the test reached\n\t\t\t\t\t// stable state -> finish.\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\t\t\t// waiting here cools down exponential backoff\n\t\t\ttime.Sleep(600 * time.Millisecond)\n\n\t\t\t// There were some changes, process them\n\t\t\tswitch obj.(type) {\n\t\t\tcase *v1.PersistentVolumeClaim:\n\t\t\t\tclaim := obj.(*v1.PersistentVolumeClaim)\n\t\t\t\t// Simulate \"claim updated\" event\n\t\t\t\tctrl.claims.Update(claim)\n\t\t\t\terr = ctrl.syncClaim(claim)\n\t\t\t\tif err != nil {\n\t\t\t\t\tif err == versionConflictError {\n\t\t\t\t\t\t// Ignore version errors\n\t\t\t\t\t\tglog.V(4).Infof(\"test intentionaly ignores version error.\")\n\t\t\t\t\t} else {\n\t\t\t\t\t\tt.Errorf(\"Error calling syncClaim: %v\", err)\n\t\t\t\t\t\t// Finish the loop on the first error\n\t\t\t\t\t\tbreak\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\t// Process generated changes\n\t\t\t\tcontinue\n\t\t\tcase *v1.PersistentVolume:\n\t\t\t\tvolume := 
obj.(*v1.PersistentVolume)\n\t\t\t\t// Simulate \"volume updated\" event\n\t\t\t\tctrl.volumes.store.Update(volume)\n\t\t\t\terr = ctrl.syncVolume(volume)\n\t\t\t\tif err != nil {\n\t\t\t\t\tif err == versionConflictError {\n\t\t\t\t\t\t// Ignore version errors\n\t\t\t\t\t\tglog.V(4).Infof(\"test intentionaly ignores version error.\")\n\t\t\t\t\t} else {\n\t\t\t\t\t\tt.Errorf(\"Error calling syncVolume: %v\", err)\n\t\t\t\t\t\t// Finish the loop on the first error\n\t\t\t\t\t\tbreak\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\t// Process generated changes\n\t\t\t\tcontinue\n\t\t\t}\n\t\t}\n\t\tevaluateTestResults(ctrl, reactor, test, t)\n\t\tglog.V(4).Infof(\"test %q finished after %d iterations\", test.name, counter)\n\t}\n}", "func (m *MockMeshServiceControllerFactory) Build(mgr mc_manager.AsyncManager, clusterName string) (controller.MeshServiceController, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"Build\", mgr, clusterName)\n\tret0, _ := ret[0].(controller.MeshServiceController)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func TestAdmissionLifecycle(t *testing.T) {\n\tnamespaceObj := &kapi.Namespace{\n\t\tObjectMeta: kapi.ObjectMeta{\n\t\t\tName: \"test\",\n\t\t\tNamespace: \"\",\n\t\t},\n\t\tStatus: kapi.NamespaceStatus{\n\t\t\tPhase: kapi.NamespaceActive,\n\t\t},\n\t}\n\tstore := cache.NewStore(cache.IndexFuncToKeyFuncAdapter(cache.MetaNamespaceIndexFunc))\n\tstore.Add(namespaceObj)\n\tmockClient := &testclient.Fake{}\n\tprojectcache.FakeProjectCache(mockClient, store, \"\")\n\thandler := &lifecycle{client: mockClient}\n\tbuild := &buildapi.Build{\n\t\tObjectMeta: kapi.ObjectMeta{Name: \"buildid\", Namespace: \"other\"},\n\t\tSpec: buildapi.BuildSpec{\n\t\t\tSource: buildapi.BuildSource{\n\t\t\t\tGit: &buildapi.GitBuildSource{\n\t\t\t\t\tURI: \"http://github.com/my/repository\",\n\t\t\t\t},\n\t\t\t\tContextDir: \"context\",\n\t\t\t},\n\t\t\tStrategy: buildapi.BuildStrategy{\n\t\t\t\tDockerStrategy: 
&buildapi.DockerBuildStrategy{},\n\t\t\t},\n\t\t\tOutput: buildapi.BuildOutput{\n\t\t\t\tTo: &kapi.ObjectReference{\n\t\t\t\t\tKind: \"DockerImage\",\n\t\t\t\t\tName: \"repository/data\",\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\tStatus: buildapi.BuildStatus{\n\t\t\tPhase: buildapi.BuildPhaseNew,\n\t\t},\n\t}\n\terr := handler.Admit(admission.NewAttributesRecord(build, \"Build\", build.Namespace, \"name\", \"builds\", \"\", \"CREATE\", nil))\n\tif err != nil {\n\t\tt.Errorf(\"Unexpected error returned from admission handler: %v\", err)\n\t}\n\n\t// change namespace state to terminating\n\tnamespaceObj.Status.Phase = kapi.NamespaceTerminating\n\tstore.Add(namespaceObj)\n\n\t// verify create operations in the namespace cause an error\n\terr = handler.Admit(admission.NewAttributesRecord(build, \"Build\", build.Namespace, \"name\", \"builds\", \"\", \"CREATE\", nil))\n\tif err == nil {\n\t\tt.Errorf(\"Expected error rejecting creates in a namespace when it is terminating\")\n\t}\n\n\t// verify update operations in the namespace can proceed\n\terr = handler.Admit(admission.NewAttributesRecord(build, \"Build\", build.Namespace, \"name\", \"builds\", \"\", \"UPDATE\", nil))\n\tif err != nil {\n\t\tt.Errorf(\"Unexpected error returned from admission handler: %v\", err)\n\t}\n\n\t// verify delete operations in the namespace can proceed\n\terr = handler.Admit(admission.NewAttributesRecord(nil, \"Build\", build.Namespace, \"name\", \"builds\", \"\", \"DELETE\", nil))\n\tif err != nil {\n\t\tt.Errorf(\"Unexpected error returned from admission handler: %v\", err)\n\t}\n\n}", "func (rm *ReplicationManager) watchControllers(resourceVersion *string) {\n\twatching, err := rm.kubeClient.ReplicationControllers(api.NamespaceAll).Watch(\n\t\tlabels.Everything(),\n\t\tlabels.Everything(),\n\t\t*resourceVersion,\n\t)\n\tif err != nil {\n\t\tutil.HandleError(fmt.Errorf(\"unable to watch: %v\", err))\n\t\ttime.Sleep(5 * time.Second)\n\t\treturn\n\t}\n\n\tfor {\n\t\tselect {\n\t\tcase 
<-rm.syncTime:\n\t\t\trm.synchronize()\n\t\tcase event, open := <-watching.ResultChan():\n\t\t\tif !open {\n\t\t\t\t// watchChannel has been closed, or something else went\n\t\t\t\t// wrong with our etcd watch call. Let the util.Forever()\n\t\t\t\t// that called us call us again.\n\t\t\t\treturn\n\t\t\t}\n\t\t\tif event.Type == watch.Error {\n\t\t\t\tutil.HandleError(fmt.Errorf(\"error from watch during sync: %v\", errors.FromObject(event.Object)))\n\t\t\t\t// Clear the resource version, this may cause us to skip some elements on the watch,\n\t\t\t\t// but we'll catch them on the synchronize() call, so it works out.\n\t\t\t\t*resourceVersion = \"\"\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tglog.V(4).Infof(\"Got watch: %#v\", event)\n\t\t\trc, ok := event.Object.(*api.ReplicationController)\n\t\t\tif !ok {\n\t\t\t\tif status, ok := event.Object.(*api.Status); ok {\n\t\t\t\t\tif status.Status == api.StatusFailure {\n\t\t\t\t\t\tglog.Errorf(\"failed to watch: %v\", status)\n\t\t\t\t\t\t// Clear resource version here, as above, this won't hurt consistency, but we\n\t\t\t\t\t\t// should consider introspecting more carefully here. 
(or make the apiserver smarter)\n\t\t\t\t\t\t// \"why not both?\"\n\t\t\t\t\t\t*resourceVersion = \"\"\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tutil.HandleError(fmt.Errorf(\"unexpected object: %#v\", event.Object))\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\t// If we get disconnected, start where we left off.\n\t\t\t*resourceVersion = rc.ResourceVersion\n\t\t\t// Sync even if this is a deletion event, to ensure that we leave\n\t\t\t// it in the desired state.\n\t\t\tglog.V(4).Infof(\"About to sync from watch: %v\", rc.Name)\n\t\t\tif err := rm.syncHandler(*rc); err != nil {\n\t\t\t\tutil.HandleError(fmt.Errorf(\"unexpected sync error: %v\", err))\n\t\t\t}\n\t\t}\n\t}\n}", "func TestConstructGo(t *testing.T) {\n\ttests := []struct {\n\t\tcomponentDir string\n\t\texpectedResourceCount int\n\t\tenv []string\n\t}{\n\t\t{\n\t\t\tcomponentDir: \"testcomponent\",\n\t\t\texpectedResourceCount: 9,\n\t\t\t// TODO[pulumi/pulumi#5455]: Dynamic providers fail to load when used from multi-lang components.\n\t\t\t// Until we've addressed this, set PULUMI_TEST_YARN_LINK_PULUMI, which tells the integration test\n\t\t\t// module to run `yarn install && yarn link @pulumi/pulumi` in the Go program's directory, allowing\n\t\t\t// the Node.js dynamic provider plugin to load.\n\t\t\t// When the underlying issue has been fixed, the use of this environment variable inside the integration\n\t\t\t// test module should be removed.\n\t\t\tenv: []string{\"PULUMI_TEST_YARN_LINK_PULUMI=true\"},\n\t\t},\n\t\t{\n\t\t\tcomponentDir: \"testcomponent-python\",\n\t\t\texpectedResourceCount: 9,\n\t\t\tenv: []string{pulumiRuntimeVirtualEnv(t, filepath.Join(\"..\", \"..\"))},\n\t\t},\n\t\t{\n\t\t\tcomponentDir: \"testcomponent-go\",\n\t\t\texpectedResourceCount: 8, // One less because no dynamic provider.\n\t\t},\n\t}\n\n\tfor _, test := range tests {\n\t\tt.Run(test.componentDir, func(t *testing.T) {\n\t\t\tpathEnv := pathEnv(t, filepath.Join(\"construct_component\", 
test.componentDir))\n\t\t\tintegration.ProgramTest(t, optsForConstructGo(t, test.expectedResourceCount, append(test.env, pathEnv)...))\n\t\t})\n\t}\n}", "func TestConcurrent(t *testing.T) {\n\tt.Parallel()\n\tctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)\n\tt.Cleanup(cancel)\n\n\tconfig := Config{MaxConcurrency: 4}\n\tcountdown := NewCountdown(config.MaxConcurrency)\n\tprocess := NewMockEventsProcess(ctx, t, config, func(ctx context.Context, event types.Event) error {\n\t\tdefer countdown.Decrement()\n\t\ttime.Sleep(time.Second)\n\t\treturn trace.Wrap(ctx.Err())\n\t})\n\n\ttimeBefore := time.Now()\n\tfor i := 0; i < config.MaxConcurrency; i++ {\n\t\tresource, err := types.NewAccessRequest(fmt.Sprintf(\"REQ-%v\", i+1), \"foo\", \"admin\")\n\t\trequire.NoError(t, err)\n\t\tprocess.Events.Fire(types.Event{Type: types.OpPut, Resource: resource})\n\t}\n\trequire.NoError(t, countdown.Wait(ctx))\n\n\ttimeAfter := time.Now()\n\tassert.InDelta(t, time.Second, timeAfter.Sub(timeBefore), float64(500*time.Millisecond))\n}", "func (mock *ComposeConfigInterfaceMock) ControllerCalls() []struct {\n} {\n\tvar calls []struct {\n\t}\n\tlockComposeConfigInterfaceMockController.RLock()\n\tcalls = mock.calls.Controller\n\tlockComposeConfigInterfaceMockController.RUnlock()\n\treturn calls\n}", "func TestGetStatus(t *testing.T) {\n\t_, ip := fakeInstanceProvider()\n\tpod := &v1.Pod{}\n\ttestCases := []struct {\n\t\tmilpaPodPhase api.PodPhase\n\t\tk8sPodPhase v1.PodPhase\n\t\tmodPod func(*api.Pod)\n\t}{\n\t\t{\n\t\t\tmilpaPodPhase: api.PodDispatching,\n\t\t\tk8sPodPhase: v1.PodPending,\n\t\t},\n\t\t{\n\t\t\tmilpaPodPhase: api.PodFailed,\n\t\t\tk8sPodPhase: v1.PodPending,\n\t\t},\n\t\t{\n\t\t\tmilpaPodPhase: api.PodFailed,\n\t\t\tk8sPodPhase: v1.PodFailed,\n\t\t\tmodPod: func(p *api.Pod) {\n\t\t\t\tp.Spec.RestartPolicy = api.RestartPolicyNever\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tmilpaPodPhase: api.PodFailed,\n\t\t\tk8sPodPhase: v1.PodFailed,\n\t\t\tmodPod: func(p 
*api.Pod) {\n\t\t\t\tp.Status.StartFailures = allowedStartFailures + 1\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tmilpaPodPhase: api.PodRunning,\n\t\t\tk8sPodPhase: v1.PodRunning,\n\t\t},\n\t\t{\n\t\t\tmilpaPodPhase: api.PodSucceeded,\n\t\t\tk8sPodPhase: v1.PodSucceeded,\n\t\t},\n\t\t{\n\t\t\tmilpaPodPhase: api.PodTerminated,\n\t\t\tk8sPodPhase: v1.PodFailed,\n\t\t},\n\t\t{\n\t\t\tmilpaPodPhase: api.PodWaiting,\n\t\t\tk8sPodPhase: v1.PodPending,\n\t\t},\n\t}\n\tfor _, tc := range testCases {\n\t\tmilpaPod := api.GetFakePod()\n\t\tmilpaPod.Status.Phase = tc.milpaPodPhase\n\t\tif tc.modPod != nil {\n\t\t\ttc.modPod(milpaPod)\n\t\t}\n\t\tpodStatus := getStatus(ip, milpaPod, pod)\n\t\tassert.Equal(t, podStatus.Phase, tc.k8sPodPhase)\n\t}\n}", "func getControllerDeployments(clientSet kubernetes.Interface) (*appsv1.DeploymentList, error) {\n\tdeploymentsClient := clientSet.AppsV1().Deployments(\"\") // Get deployments from all namespaces\n\tlabelSelector := metav1.LabelSelector{MatchLabels: map[string]string{constants.AppLabel: constants.OSMControllerName}}\n\tlistOptions := metav1.ListOptions{\n\t\tLabelSelector: labels.Set(labelSelector.MatchLabels).String(),\n\t}\n\treturn deploymentsClient.List(context.TODO(), listOptions)\n}", "func (t *TestSpec) RunTest(kub *helpers.Kubectl) {\n\tdefer func() { go t.Destroy(destroyDelay, kub.BasePath()) }()\n\n\tt.Kub = kub\n\terr := t.CreateManifests()\n\tgomega.Expect(err).To(gomega.BeNil(), \"cannot create pods manifest for %s\", t.Prefix)\n\n\tmanifest, err := t.ApplyManifest(kub.BasePath())\n\tgomega.Expect(err).To(gomega.BeNil(), \"cannot apply pods manifest for %s\", t.Prefix)\n\tlog.WithField(\"prefix\", t.Prefix).Infof(\"Manifest '%s' is created correctly\", manifest)\n\n\terr = t.Destination.CreateApplyManifest(t, kub.BasePath())\n\tgomega.Expect(err).To(gomega.BeNil(), \"cannot apply destination for %s\", t.Prefix)\n\n\tif t.IsPolicyInvalid() {\n\t\t// Some policies cannot be applied correctly because of different\n\t\t// rules. 
This code makes sure that the status of the policy has a error\n\t\t// in the status.\n\t\tcnp, err := t.InvalidNetworkPolicyApply(kub.BasePath())\n\t\tkub.Exec(fmt.Sprintf(\"%s delete cnp %s\", helpers.KubectlCmd, t.Prefix))\n\t\tgomega.Expect(err).To(gomega.BeNil(), \"Cannot apply network policy\")\n\t\tgomega.Expect(cnp).NotTo(gomega.BeNil(), \"CNP is not a valid struct\")\n\t\tgomega.Expect(cnp.Status.Nodes).NotTo(gomega.BeEmpty(), \"CNP Status is empty\")\n\n\t\tfor node, status := range cnp.Status.Nodes {\n\t\t\tgomega.Expect(status.Error).NotTo(gomega.BeEmpty(),\n\t\t\t\t\"Node %q applied invalid policy and do not raise an error\", node)\n\t\t}\n\t\treturn\n\t}\n\n\terr = t.NetworkPolicyApply(kub.BasePath())\n\tgomega.Expect(err).To(gomega.BeNil(), \"cannot apply network policy for %s\", t.Prefix)\n\n\terr = kub.CiliumEndpointWaitReady()\n\tgomega.Expect(err).To(gomega.BeNil(), \"Endpoints are not ready after timeout\")\n\n\terr = t.ExecTest()\n\tgomega.Expect(err).To(gomega.BeNil(), \"cannot execute test for %s\", t.Prefix)\n}", "func ECSPodCreatorTests() map[string]ECSPodCreatorTestCase {\n\treturn map[string]ECSPodCreatorTestCase{\n\t\t\"CreatePodSucceedsWithNonSecretSettings\": func(ctx context.Context, t *testing.T, c cocoa.ECSPodCreator) {\n\t\t\tenvVar := cocoa.NewEnvironmentVariable().SetName(\"name\").SetValue(\"value\")\n\t\t\tcontainerDef := cocoa.NewECSContainerDefinition().\n\t\t\t\tSetImage(\"image\").\n\t\t\t\tSetWorkingDir(\"working_dir\").\n\t\t\t\tAddEnvironmentVariables(*envVar).\n\t\t\t\tSetMemoryMB(128).\n\t\t\t\tSetCPU(128).\n\t\t\t\tAddPortMappings(*cocoa.NewPortMapping().SetContainerPort(1337)).\n\t\t\t\tSetName(\"container\")\n\n\t\t\texecOpts := cocoa.NewECSPodExecutionOptions().SetCluster(testutil.ECSClusterName())\n\n\t\t\topts := 
cocoa.NewECSPodCreationOptions().\n\t\t\t\tSetName(testutil.NewTaskDefinitionFamily(t)).\n\t\t\t\tAddContainerDefinitions(*containerDef).\n\t\t\t\tSetMemoryMB(128).\n\t\t\t\tSetCPU(128).\n\t\t\t\tSetNetworkMode(cocoa.NetworkModeBridge).\n\t\t\t\tSetExecutionOptions(*execOpts)\n\t\t\tassert.NoError(t, opts.Validate())\n\n\t\t\tp, err := c.CreatePod(ctx, *opts)\n\t\t\trequire.NoError(t, err)\n\t\t\trequire.NotNil(t, p)\n\n\t\t\tdefer func() {\n\t\t\t\trequire.NoError(t, p.Delete(ctx))\n\t\t\t}()\n\n\t\t\tps := p.StatusInfo()\n\t\t\tassert.Equal(t, cocoa.StatusStarting, ps.Status)\n\t\t},\n\t\t\"CreatePodFailsWithInvalidCreationOpts\": func(ctx context.Context, t *testing.T, c cocoa.ECSPodCreator) {\n\t\t\topts := cocoa.NewECSPodCreationOptions()\n\n\t\t\tp, err := c.CreatePod(ctx, *opts)\n\t\t\trequire.Error(t, err)\n\t\t\trequire.Zero(t, p)\n\t\t},\n\t\t\"CreatePodFailsWithSecretsButNoVault\": func(ctx context.Context, t *testing.T, c cocoa.ECSPodCreator) {\n\t\t\tenvVar := cocoa.NewEnvironmentVariable().\n\t\t\t\tSetName(\"envVar\").\n\t\t\t\tSetSecretOptions(*cocoa.NewSecretOptions().\n\t\t\t\t\tSetName(testutil.NewSecretName(t)).\n\t\t\t\t\tSetNewValue(\"value\"))\n\t\t\tcontainerDef := cocoa.NewECSContainerDefinition().\n\t\t\t\tSetImage(\"image\").\n\t\t\t\tAddEnvironmentVariables(*envVar).\n\t\t\t\tSetName(\"container\")\n\n\t\t\texecOpts := cocoa.NewECSPodExecutionOptions().SetCluster(testutil.ECSClusterName())\n\n\t\t\topts := cocoa.NewECSPodCreationOptions().\n\t\t\t\tSetName(testutil.NewTaskDefinitionFamily(t)).\n\t\t\t\tAddContainerDefinitions(*containerDef).\n\t\t\t\tSetMemoryMB(128).\n\t\t\t\tSetCPU(128).\n\t\t\t\tSetTaskRole(testutil.ECSTaskRole()).\n\t\t\t\tSetExecutionRole(testutil.ECSExecutionRole()).\n\t\t\t\tSetExecutionOptions(*execOpts)\n\t\t\tassert.NoError(t, opts.Validate())\n\n\t\t\tp, err := c.CreatePod(ctx, *opts)\n\t\t\trequire.Error(t, err)\n\t\t\trequire.Zero(t, p)\n\t\t},\n\t\t\"CreatePodFailsWithRepoCredsButNoVault\": func(ctx 
context.Context, t *testing.T, c cocoa.ECSPodCreator) {\n\t\t\tstoredCreds := cocoa.NewStoredRepositoryCredentials().\n\t\t\t\tSetUsername(\"username\").\n\t\t\t\tSetPassword(\"password\")\n\t\t\tcreds := cocoa.NewRepositoryCredentials().\n\t\t\t\tSetName(testutil.NewSecretName(t)).\n\t\t\t\tSetNewCredentials(*storedCreds)\n\t\t\tcontainerDef := cocoa.NewECSContainerDefinition().\n\t\t\t\tSetImage(\"image\").\n\t\t\t\tSetRepositoryCredentials(*creds).\n\t\t\t\tSetName(\"container\")\n\n\t\t\texecOpts := cocoa.NewECSPodExecutionOptions().SetCluster(testutil.ECSClusterName())\n\n\t\t\topts := cocoa.NewECSPodCreationOptions().\n\t\t\t\tSetName(testutil.NewTaskDefinitionFamily(t)).\n\t\t\t\tAddContainerDefinitions(*containerDef).\n\t\t\t\tSetMemoryMB(128).\n\t\t\t\tSetCPU(128).\n\t\t\t\tSetTaskRole(testutil.ECSTaskRole()).\n\t\t\t\tSetExecutionRole(testutil.ECSExecutionRole()).\n\t\t\t\tSetExecutionOptions(*execOpts)\n\t\t\tassert.NoError(t, opts.Validate())\n\n\t\t\tp, err := c.CreatePod(ctx, *opts)\n\t\t\trequire.Error(t, err)\n\t\t\trequire.Zero(t, p)\n\t\t},\n\t}\n}" ]
[ "0.84638107", "0.8257509", "0.8186716", "0.60766035", "0.6004586", "0.59796965", "0.59095436", "0.5886478", "0.58263564", "0.5762557", "0.56721723", "0.558922", "0.5555907", "0.552705", "0.54982966", "0.5487726", "0.5478104", "0.54566073", "0.54116815", "0.5399274", "0.5397287", "0.53831124", "0.5361766", "0.53452146", "0.53281623", "0.53267515", "0.52472734", "0.52287954", "0.52143574", "0.52141976", "0.51844275", "0.5173044", "0.5171113", "0.51710427", "0.51660305", "0.516224", "0.515601", "0.51433486", "0.5142413", "0.51164305", "0.5103288", "0.50923485", "0.50708205", "0.5052067", "0.505137", "0.5045441", "0.50349444", "0.50343037", "0.503374", "0.503335", "0.50230014", "0.50012356", "0.49980873", "0.49966413", "0.49963886", "0.49950498", "0.4983873", "0.497605", "0.49729398", "0.49639863", "0.49630603", "0.49536446", "0.4952177", "0.4940275", "0.49355957", "0.4934006", "0.49330196", "0.49272802", "0.49231806", "0.4921366", "0.4907492", "0.4907492", "0.49047804", "0.4900442", "0.48930517", "0.4878781", "0.48750737", "0.48745224", "0.4865608", "0.4859887", "0.4851759", "0.48477086", "0.4831941", "0.48176813", "0.48141533", "0.4811102", "0.4808589", "0.4804731", "0.4799627", "0.47935545", "0.4792832", "0.47924808", "0.4787456", "0.47651082", "0.47648245", "0.47590473", "0.474897", "0.47489592", "0.47480914", "0.47465536" ]
0.88891226
0
RegisterTx is just like Register but marks the migration to be executed inside a transaction.
RegisterTx похож на Register, но помечает миграцию, которая должна быть выполнена внутри транзакции.
func RegisterTx(fns ...func(DB) error) error { return DefaultCollection.RegisterTx(fns...) }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (pg *Postgres) Tx(ctx context.Context, txFunc store.TxFunc) (err error) {\n\ttx := shared.GetTx(ctx)\n\n\tif tx != nil {\n\t\treturn txFunc(ctx)\n\t}\n\n\ttx, err = pg.BeginTx(ctx, nil)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"begin tx failed\")\n\t}\n\n\tctx = shared.WithTx(ctx, tx)\n\n\t//nolint:gocritic\n\tdefer func() {\n\t\tif r := recover(); r != nil {\n\t\t\tif err := tx.Rollback(); err != nil {\n\t\t\t\tlog.Warn(ctx, \"tx rollback failed\", \"err\", err)\n\t\t\t}\n\t\t\tpanic(r)\n\t\t} else if err != nil {\n\t\t\tif err := tx.Rollback(); err != nil {\n\t\t\t\tlog.Warn(ctx, \"tx rollback failed\", \"err\", err)\n\t\t\t}\n\t\t} else {\n\t\t\terr = tx.Commit()\n\t\t}\n\t}()\n\n\terr = txFunc(ctx)\n\n\treturn err\n}", "func (p *Postgres) Tx(ctx context.Context, txFunc store.TxFunc) (err error) {\n\ttx := shared.GetTx(ctx)\n\n\tif tx != nil {\n\t\treturn txFunc(ctx)\n\t}\n\n\ttx, err = p.BeginTx(ctx, nil)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"begin tx failed\")\n\t}\n\n\tctx = shared.WithTx(ctx, tx)\n\n\t//nolint:gocritic\n\tdefer func() {\n\t\tif r := recover(); r != nil {\n\t\t\tif err := tx.Rollback(); err != nil {\n\t\t\t\tlog.Warn(ctx, \"tx rollback failed\", \"err\", err)\n\t\t\t}\n\t\t\tpanic(r)\n\t\t} else if err != nil {\n\t\t\tif err := tx.Rollback(); err != nil {\n\t\t\t\tlog.Warn(ctx, \"tx rollback failed\", \"err\", err)\n\t\t\t}\n\t\t} else {\n\t\t\terr = tx.Commit()\n\t\t}\n\t}()\n\n\terr = txFunc(ctx)\n\n\treturn err\n}", "func RegisterTransaction(data models.TransactionCache) (string, error) {\n\n\tgenKey, err := shortid.Generate()\n\tif err != nil {\n\t\tlog.Println(err.Error())\n\t\treturn \"\", err\n\t}\n\tstoreKey := fmt.Sprintf(transactionKeyFmt, data.UserID, genKey)\n\n\tdataJSON, err := json.Marshal(data)\n\tif err != nil {\n\t\tlog.Println(err.Error())\n\t\treturn \"\", err\n\t}\n\n\tif err := redisClient.Set(ctx, storeKey, dataJSON, 180*time.Second).Err(); err != nil 
{\n\t\tlog.Println(err.Error())\n\t\treturn \"\", err\n\t}\n\n\treturn storeKey, nil\n\n}", "func registerMigration(dir string) {\n\tmigrations.MustRegisterTx(func(db migrations.DB) error {\n\t\tpath := dir + \"/up.sql\"\n\t\tsql, err := loadMigrationFile(path)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t_, err = db.Exec(sql)\n\t\treturn err\n\t}, func(db migrations.DB) error {\n\t\tpath := dir + \"/down.sql\"\n\t\tsql, err := loadMigrationFile(path)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t_, err = db.Exec(sql)\n\t\treturn err\n\t})\n}", "func (db *DB) Transaction(fc func(db *DB) error) (err error) {\n\tpanicked := true\n\ttx := &DB{db.DB.Begin()}\n\n\tdefer func() {\n\t\t// Make sure to rollback when panic, Block error or Commit error\n\t\tif panicked || err != nil {\n\t\t\ttx.Rollback()\n\t\t}\n\t}()\n\n\terr = fc(tx)\n\tif err == nil {\n\t\terr = tx.DB.Commit().Error\n\t}\n\tpanicked = false\n\treturn\n}", "func (m Middleware) Tx(db *sql.DB) TxFunc {\n\treturn func(f func(tx daos.Transaction, w http.ResponseWriter, r *http.Request) error) http.HandlerFunc {\n\t\treturn func(w http.ResponseWriter, r *http.Request) {\n\n\t\t\tt, err := db.Begin()\n\t\t\tif err != nil {\n\t\t\t\tpanic(err)\n\t\t\t}\n\n\t\t\tdefer func() {\n\t\t\t\tl := m.log.WithRequest(r)\n\t\t\t\tif p := recover(); p != nil {\n\t\t\t\t\tt.Rollback()\n\t\t\t\t\tl.Info(\"transaction rollbacked\")\n\t\t\t\t\tpanic(p)\n\t\t\t\t} else if err != nil {\n\t\t\t\t\tt.Rollback()\n\t\t\t\t\tl.Info(\"transaction rollbacked\")\n\t\t\t\t\tpanic(err)\n\t\t\t\t} else {\n\t\t\t\t\terr = t.Commit()\n\t\t\t\t\tl.Info(\"transaction commited\")\n\t\t\t\t}\n\t\t\t}()\n\n\t\t\terr = f(t, w, r)\n\t\t}\n\t}\n}", "func (m CarregisterMutation) Tx() (*Tx, error) {\n\tif _, ok := m.driver.(*txDriver); !ok {\n\t\treturn nil, fmt.Errorf(\"ent: mutation is not running in a transaction\")\n\t}\n\ttx := &Tx{config: m.config}\n\ttx.init()\n\treturn tx, nil\n}", "func (tx *txDriver) Tx(context.Context) 
(dialect.Tx, error) { return tx, nil }", "func (tx *txDriver) Tx(context.Context) (dialect.Tx, error) { return tx, nil }", "func (d *Driver) Tx(ctx context.Context) (dialect.Tx, error) {\n\treturn d.BeginTx(ctx, nil)\n}", "func Tx(ctx context.Context, db *sqlx.DB, opts *sqltx.Options, fn TXFn) (err error) {\n\treturn sqltx.TxHandler(ctx, &sqlxDB{db}, opts, func(tx sqltx.TXer) error {\n\t\treturn fn(tx.(*sqlx.Tx))\n\t})\n}", "func (db *DB) Transaction(ctx context.Context, fn TxHandlerFunc) error {\n\tdb.mu.Lock()\n\tdefer db.mu.Unlock()\n\n\torigin, err := db.master.BeginTx(ctx, nil)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to begin transaction: %v\", err)\n\t}\n\ttx := &Tx{origin}\n\n\tif err := fn(ctx, tx); err != nil {\n\t\tif re := tx.parent.Rollback(); re != nil {\n\t\t\tif re.Error() != sql.ErrTxDone.Error() {\n\t\t\t\treturn fmt.Errorf(\"fialed to rollback: %v\", err)\n\t\t\t}\n\t\t}\n\t\treturn fmt.Errorf(\"failed to execcute transaction: %v\", err)\n\t}\n\treturn tx.parent.Commit()\n}", "func (_UpkeepRegistrationRequests *UpkeepRegistrationRequestsTransactor) Register(opts *bind.TransactOpts, name string, encryptedEmail []byte, upkeepContract common.Address, gasLimit uint32, adminAddress common.Address, checkData []byte, amount *big.Int, source uint8) (*types.Transaction, error) {\n\treturn _UpkeepRegistrationRequests.contract.Transact(opts, \"register\", name, encryptedEmail, upkeepContract, gasLimit, adminAddress, checkData, amount, source)\n}", "func (_KNS *KNSTransactor) Register(opts *bind.TransactOpts, prime_owner common.Address, wallet common.Address, Jid string, tel string) (*types.Transaction, error) {\n\treturn _KNS.contract.Transact(opts, \"Register\", prime_owner, wallet, Jid, tel)\n}", "func Transaction(db *sql.DB, fns ...func(DB) error) error {\n\ttx, err := db.Begin()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfor _, fn := range fns {\n\t\terr := fn(tx)\n\t\tif err != nil {\n\t\t\ttx.Rollback()\n\t\t\treturn 
err\n\t\t}\n\t}\n\n\terr = tx.Commit()\n\terr = interpretScanError(err)\n\treturn err\n}", "func Example_transactions() {\n\tdb, _ := dbx.Open(\"mysql\", \"user:pass@/example\")\n\n\tdb.Transactional(func(tx *dbx.Tx) error {\n\t\t_, err := tx.Insert(\"user\", dbx.Params{\n\t\t\t\"name\": \"user1\",\n\t\t}).Execute()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t_, err = tx.Insert(\"user\", dbx.Params{\n\t\t\t\"name\": \"user2\",\n\t\t}).Execute()\n\t\treturn err\n\t})\n}", "func (ds *MySQLDatastore) Tx(f func(*sql.Tx) error) error {\n\ttx, err := ds.db.Begin()\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = f(tx)\n\tif err != nil {\n\t\ttx.Rollback()\n\t\treturn err\n\t}\n\treturn tx.Commit()\n}", "func Transaction(db *sql.DB, f func()) {\n\n\ttx, err := db.Begin()\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tdefer func() {\n\t\tif r := recover(); r != nil {\n\t\t\terr := tx.Rollback()\n\t\t\tif err != nil {\n\t\t\t\tpanic(err)\n\t\t\t}\n\t\t\tpanic(r)\n\t\t} else {\n\t\t\terr = tx.Commit()\n\t\t\tif err != nil {\n\t\t\t\tpanic(err)\n\t\t\t}\n\t\t}\n\t}()\n\tf()\n}", "func sendRegisterTx(cdc *wire.Codec) client.CommandTxCallback {\n\treturn func(cmd *cobra.Command, args []string) error {\n\t\tctx := client.NewCoreContextFromViper()\n\t\tname := viper.GetString(client.FlagUser)\n\t\treferrer := viper.GetString(client.FlagReferrer)\n\t\tamount := viper.GetString(client.FlagAmount)\n\n\t\tresetPriv := secp256k1.GenPrivKey()\n\t\ttransactionPriv := secp256k1.GenPrivKey()\n\t\tappPriv := secp256k1.GenPrivKey()\n\n\t\tfmt.Println(\"reset private key is:\", strings.ToUpper(hex.EncodeToString(resetPriv.Bytes())))\n\t\tfmt.Println(\"transaction private key is:\", strings.ToUpper(hex.EncodeToString(transactionPriv.Bytes())))\n\t\tfmt.Println(\"app private key is:\", strings.ToUpper(hex.EncodeToString(appPriv.Bytes())))\n\n\t\t// // create the message\n\t\tmsg := acc.NewRegisterMsg(\n\t\t\treferrer, name, types.LNO(amount),\n\t\t\tresetPriv.PubKey(), 
transactionPriv.PubKey(), appPriv.PubKey())\n\n\t\t// build and sign the transaction, then broadcast to Tendermint\n\t\tres, err := ctx.SignBuildBroadcast([]sdk.Msg{msg}, cdc)\n\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tfmt.Printf(\"Committed at block %d. Hash: %s\\n\", res.Height, res.Hash.String())\n\t\treturn nil\n\t}\n}", "func Transaction(ctx context.Context, db *sql.DB, f func(tx *sql.Tx) error) (err error) {\n\tfinish := func(tx *sql.Tx) {\n\t\tif err != nil {\n\t\t\tif err2 := tx.Rollback(); err2 != nil {\n\t\t\t\terr = multierror.Append(err, err2)\n\t\t\t}\n\t\t\treturn\n\t\t}\n\t\terr = tx.Commit()\n\t}\n\n\tspan, ctx := opentracing.StartSpanFromContext(ctx, \"Transaction\")\n\tdefer func() {\n\t\tif err != nil {\n\t\t\text.Error.Set(span, true)\n\t\t\tspan.SetTag(\"err\", err.Error())\n\t\t}\n\t\tspan.Finish()\n\t}()\n\n\ttx, err := db.BeginTx(ctx, nil)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer finish(tx)\n\treturn f(tx)\n}", "func (dao *PagesDao) Transaction(ctx context.Context, f func(ctx context.Context, tx *gdb.TX) error) (err error) {\n\treturn dao.Ctx(ctx).Transaction(ctx, f)\n}", "func Transact(db *sql.DB, txFunc func(*sql.Tx) error) (err error) {\n\ttx, err := db.Begin()\n\tif err != nil {\n\t\treturn\n\t}\n\tdefer func() {\n\t\tif p := recover(); p != nil {\n\t\t\ttx.Rollback()\n\t\t\tpanic(p)\n\t\t} else if err != nil {\n\t\t\ttx.Rollback()\n\t\t} else {\n\t\t\terr = tx.Commit()\n\t\t}\n\t}()\n\terr = txFunc(tx)\n\treturn err\n}", "func (dao *SysConfigDao) Transaction(ctx context.Context, f func(ctx context.Context, tx gdb.TX) error) (err error) {\n\treturn dao.Ctx(ctx).Transaction(ctx, f)\n}", "func (dp *dataProvider) Tx(fn func(*pg.Tx) error) error {\n\treturn wrapError(dp.db.RunInTransaction(func(tx *pg.Tx) error {\n\t\tdefer func(t *pg.Tx) {\n\t\t\tif err := recover(); err != nil {\n\t\t\t\tt.Rollback()\n\t\t\t\t// rethrow the panic once the database is safe\n\t\t\t\tpanic(err)\n\t\t\t}\n\t\t}(tx)\n\t\treturn 
fn(tx)\n\t}))\n}", "func (c *Conn) Transaction(fn func(*Conn) error) error {\r\n\tvar (\r\n\t\ttx = c.Begin()\r\n\t\tconn = &Conn{}\r\n\t)\r\n\tcopier.Copy(conn, c)\r\n\tconn.DB = tx\r\n\tif err := fn(conn); err != nil {\r\n\t\ttx.Rollback()\r\n\t\treturn err\r\n\t}\r\n\ttx.Commit()\r\n\treturn nil\r\n}", "func (m PeopleMutation) Tx() (*Tx, error) {\n\tif _, ok := m.driver.(*txDriver); !ok {\n\t\treturn nil, fmt.Errorf(\"ent: mutation is not running in a transaction\")\n\t}\n\ttx := &Tx{config: m.config}\n\ttx.init()\n\treturn tx, nil\n}", "func Register(up, down func(DB) error) error {\n\t_, file, _, _ := runtime.Caller(1)\n\tversion, err := extractVersion(file)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tallMigrations = append(allMigrations, Migration{\n\t\tVersion: version,\n\t\tUp: up,\n\t\tDown: down,\n\t})\n\treturn nil\n}", "func register(transaction string, cfg *Config) error {\n\treturn circuit.Register(transaction, cfg.commandName(), cfg.circuitBreaker(),\n\t\tfunc(transaction, commandName string, state string) {\n\t\t\tif state == circuit.Open {\n\t\t\t\tReConnect(transaction, cfg)\n\t\t\t} else if state == circuit.Close {\n\t\t\t\tConnected(transaction, cfg)\n\t\t\t}\n\t\t})\n}", "func Transact(db *gorm.DB, tf func(tx *gorm.DB) error) (err error) {\n\tif commonDB, ok := db.CommonDB().(sqlTx); ok && commonDB != nil {\n\t\t// If the db is already in a transaction, just execute tf\n\t\t// and let the outer transaction handle Rollback and Commit.\n\t\treturn tf(db)\n\t}\n\n\ttx := db.Begin()\n\tif tx.Error != nil {\n\t\treturn fmt.Errorf(\"could not start transaction. 
%s\", err)\n\t}\n\tdefer func() {\n\t\tif p := recover(); p != nil {\n\t\t\ttx.Rollback()\n\t\t\tpanic(p)\n\t\t}\n\t\tif err != nil {\n\t\t\ttx.Rollback()\n\t\t} else {\n\t\t\terr = tx.Commit().Error\n\t\t}\n\t}()\n\treturn tf(tx)\n}", "func (_Ethdkg *EthdkgTransactor) Register(opts *bind.TransactOpts, public_key [2]*big.Int) (*types.Transaction, error) {\n\treturn _Ethdkg.contract.Transact(opts, \"register\", public_key)\n}", "func (_UpkeepRegistrationRequests *UpkeepRegistrationRequestsTransactorSession) Register(name string, encryptedEmail []byte, upkeepContract common.Address, gasLimit uint32, adminAddress common.Address, checkData []byte, amount *big.Int, source uint8) (*types.Transaction, error) {\n\treturn _UpkeepRegistrationRequests.Contract.Register(&_UpkeepRegistrationRequests.TransactOpts, name, encryptedEmail, upkeepContract, gasLimit, adminAddress, checkData, amount, source)\n}", "func (s *PersonStore) Transaction(callback func(*PersonStore) error) error {\n\tif callback == nil {\n\t\treturn kallax.ErrInvalidTxCallback\n\t}\n\n\treturn s.Store.Transaction(func(store *kallax.Store) error {\n\t\treturn callback(&PersonStore{store})\n\t})\n}", "func (db *DB) TransactionTx(ctx context.Context, fn TxHandlerFunc, opts *sql.TxOptions) error {\n\tdb.mu.Lock()\n\tdefer db.mu.Unlock()\n\n\torigin, err := db.master.BeginTx(ctx, opts)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to begin transaction: %v\", err)\n\t}\n\ttx := &Tx{origin}\n\n\tif err := fn(ctx, tx); err != nil {\n\t\tif re := tx.parent.Rollback(); re != nil {\n\t\t\tif re.Error() != sql.ErrTxDone.Error() {\n\t\t\t\treturn fmt.Errorf(\"fialed to rollback: %v\", err)\n\t\t\t}\n\t\t}\n\t\treturn fmt.Errorf(\"failed to execcute transaction: %v\", err)\n\t}\n\treturn tx.parent.Commit()\n}", "func (m EventRSVPMutation) Tx() (*Tx, error) {\n\tif _, ok := m.driver.(*txDriver); !ok {\n\t\treturn nil, fmt.Errorf(\"ent: mutation is not running in a transaction\")\n\t}\n\ttx := &Tx{config: 
m.config}\n\ttx.init()\n\treturn tx, nil\n}", "func (ingest *Ingestion) Transaction(\n\tid int64,\n\ttx *core.Transaction,\n\tfee *core.TransactionFee,\n) error {\n\n\tsql := ingest.transactionInsertBuilder(id, tx, fee)\n\t_, err := ingest.DB.Exec(sql)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}", "func (dao *InfoDao) Transaction(ctx context.Context, f func(ctx context.Context, tx *gdb.TX) error) (err error) {\n\treturn dao.Ctx(ctx).Transaction(ctx, f)\n}", "func (m CleanernameMutation) Tx() (*Tx, error) {\n\tif _, ok := m.driver.(*txDriver); !ok {\n\t\treturn nil, fmt.Errorf(\"ent: mutation is not running in a transaction\")\n\t}\n\ttx := &Tx{config: m.config}\n\ttx.init()\n\treturn tx, nil\n}", "func (m MedicalrecordstaffMutation) Tx() (*Tx, error) {\n\tif _, ok := m.driver.(*txDriver); !ok {\n\t\treturn nil, fmt.Errorf(\"ent: mutation is not running in a transaction\")\n\t}\n\ttx := &Tx{config: m.config}\n\ttx.init()\n\treturn tx, nil\n}", "func (_KNS *KNSTransactorSession) Register(prime_owner common.Address, wallet common.Address, Jid string, tel string) (*types.Transaction, error) {\n\treturn _KNS.Contract.Register(&_KNS.TransactOpts, prime_owner, wallet, Jid, tel)\n}", "func (db *database) Transact(txHandler func(tx *sqlx.Tx) error) (err error) {\n\ttx, err := db.Beginx()\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"failed to begin transaction\")\n\t}\n\n\tdefer func() {\n\t\tif r := recover(); r != nil {\n\t\t\t_ = tx.Rollback()\n\t\t\tpanic(r)\n\t\t}\n\n\t\tif err != nil {\n\t\t\t_ = tx.Rollback()\n\t\t\treturn\n\t\t}\n\n\t\ttx.Commit()\n\t}()\n\n\terr = txHandler(tx)\n\treturn\n}", "func WithTransaction(db *sqlx.DB, fn TxFn) (err error) {\n\ttx, err := db.Beginx()\n\tif err != nil {\n\t\treturn\n\t}\n\n\tdefer func() {\n\t\tif p := recover(); p != nil { // recover from panic\n\t\t\ttx.Rollback()\n\t\t\tpanic(p)\n\t\t} else if err != nil { // rollback because error happens\n\t\t\ttx.Rollback()\n\t\t} else { // no error, 
commit\n\t\t\terr = tx.Commit()\n\t\t}\n\t}()\n\n\terr = fn(tx)\n\treturn err\n}", "func Transact(db DB, fn func(tx *sql.Tx) error) (err error) {\n\ttx, err := db.Begin()\n\tif err != nil {\n\t\treturn\n\t}\n\tdefer func() {\n\t\tif p := recover(); p != nil {\n\t\t\ttx.Rollback()\n\t\t\tpanic(p)\n\t\t} else if err != nil {\n\t\t\ttx.Rollback()\n\t\t} else {\n\t\t\terr = tx.Commit()\n\t\t}\n\t}()\n\terr = fn(tx)\n\treturn err\n}", "func (exec *StormPerf) runInTx(fn func(tx storm.Node) error) error {\n\ttx, err := exec.db.Begin(true)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer tx.Rollback()\n\n\tif err := fn(tx); err != nil {\n\t\treturn err\n\t}\n\n\treturn tx.Commit()\n}", "func (s *UserStore) Transaction(callback func(*UserStore) error) error {\n\tif callback == nil {\n\t\treturn kallax.ErrInvalidTxCallback\n\t}\n\n\treturn s.Store.Transaction(func(store *kallax.Store) error {\n\t\treturn callback(&UserStore{store})\n\t})\n}", "func (store *Store) execTx(ctx context.Context, fn func(*Queries) error) error {\n\ttx, err := store.db.BeginTx(ctx, nil)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tq := New(tx)\n\terr = fn(q)\n\tif err != nil {\n\t\tif rbErr := tx.Rollback(); rbErr != nil {\n\t\t\treturn fmt.Errorf(\"tx err: %v, rb err: %v\", err, rbErr)\n\t\t}\n\t\treturn err\n\t}\n\n\treturn tx.Commit()\n}", "func upsertMigrationStateTx(tx *sql.Tx, state *MigrationState, incrementNextMigration bool) (err error) {\n\tif incrementNextMigration {\n\t\tstate.NextMigration++\n\t}\n\tmigrationStateJSON := encoding.EncodeJSON(state)\n\t_, err = tx.Exec(setMetastateUpsert, migrationMetastateKey, migrationStateJSON)\n\n\treturn err\n}", "func (db *DataBase) Register(user *models.UserPrivateInfo) (userID int, err error) {\n\n\tvar (\n\t\ttx *sql.Tx\n\t)\n\n\tif tx, err = db.Db.Begin(); err != nil {\n\t\treturn\n\t}\n\tdefer tx.Rollback()\n\n\tif userID, err = db.createPlayer(tx, user); err != nil {\n\t\treturn\n\t}\n\n\tif err = db.createRecords(tx, userID); err != nil 
{\n\t\treturn\n\t}\n\n\terr = tx.Commit()\n\treturn\n}", "func NewTx(sqlTx *sql.Tx) (sqlbuilder.Tx, error) {\n\treturn registeredAdapter.NewTx(sqlTx)\n}", "func (m ZoneMutation) Tx() (*Tx, error) {\n\tif _, ok := m.driver.(*txDriver); !ok {\n\t\treturn nil, fmt.Errorf(\"ent: mutation is not running in a transaction\")\n\t}\n\ttx := &Tx{config: m.config}\n\ttx.init()\n\treturn tx, nil\n}", "func (t *Transaction) RegisterExecute(fun func() error) {\n\tt.mu.Lock()\n\tdefer t.mu.Unlock()\n\n\tt.execute = append(t.execute, fun)\n}", "func setupTX(t testing.TB) (*reform.DB, *reform.TX) {\n\tt.Helper()\n\n\tdb := setupDB(t)\n\n\ttx, err := db.Begin()\n\trequire.NoError(t, err)\n\treturn db, tx\n}", "func (_KNS *KNSSession) Register(prime_owner common.Address, wallet common.Address, Jid string, tel string) (*types.Transaction, error) {\n\treturn _KNS.Contract.Register(&_KNS.TransactOpts, prime_owner, wallet, Jid, tel)\n}", "func (m PatientrightstypeMutation) Tx() (*Tx, error) {\n\tif _, ok := m.driver.(*txDriver); !ok {\n\t\treturn nil, fmt.Errorf(\"ent: mutation is not running in a transaction\")\n\t}\n\ttx := &Tx{config: m.config}\n\ttx.init()\n\treturn tx, nil\n}", "func (m StartWorkMutation) Tx() (*Tx, error) {\n\tif _, ok := m.driver.(*txDriver); !ok {\n\t\treturn nil, fmt.Errorf(\"ent: mutation is not running in a transaction\")\n\t}\n\ttx := &Tx{config: m.config}\n\ttx.init()\n\treturn tx, nil\n}", "func (b *Store) CreateTx(ctx context.Context, tx *sql.Tx, userID, username, password string) error {\n\terr := permission.LimitCheckAny(ctx, permission.System, permission.Admin, permission.MatchUser(userID))\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = validate.Many(\n\t\tvalidate.UUID(\"UserID\", userID),\n\t\tvalidate.UserName(\"UserName\", username),\n\t\tvalidate.Text(\"Password\", password, 8, 200),\n\t)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\thashedPassword, err := bcrypt.GenerateFromPassword([]byte(password), passCost)\n\tif err != nil {\n\t\treturn 
err\n\t}\n\t_, err = tx.StmtContext(ctx, b.insert).ExecContext(ctx, userID, username, string(hashedPassword))\n\treturn err\n}", "func (r *OrdersRepository) Tx() (tx *dbr.Tx, err error) {\n\tdb := r.PG.PostgresTrade()\n\n\ttx, err = db.Begin()\n\tif err != nil {\n\t\tlog.WithFields(log.Fields{\n\t\t\t\"event\": \"error when begin transaction in postgres\",\n\t\t}).Error(err)\n\t}\n\n\treturn\n}", "func Transaction(next http.Handler) http.Handler {\n\tfn := func(w http.ResponseWriter, r *http.Request) {\n\t\tt, ctx := orm.NewTransaction(r.Context())\n\t\tdefer func() {\n\t\t\tif rec := recover(); rec != nil {\n\t\t\t\tt.Rollback()\n\t\t\t\t// Panic to let recoverer handle 500\n\t\t\t\tpanic(rec)\n\t\t\t} else {\n\t\t\t\terr := t.Commit()\n\t\t\t\tif err != nil {\n\t\t\t\t\tpanic(err)\n\t\t\t\t}\n\t\t\t}\n\t\t}()\n\t\tnext.ServeHTTP(w, r.WithContext(ctx))\n\t}\n\treturn http.HandlerFunc(fn)\n}", "func (m ResourceMutation) Tx() (*Tx, error) {\n\tif _, ok := m.driver.(*txDriver); !ok {\n\t\treturn nil, fmt.Errorf(\"ent: mutation is not running in a transaction\")\n\t}\n\ttx := &Tx{config: m.config}\n\ttx.init()\n\treturn tx, nil\n}", "func (d DB) Transaction(f func(DB) error) error {\n\tif _, ok := d.dbProxy.(*sql.Tx); ok {\n\t\t// Already in a nested transaction\n\t\treturn f(d)\n\t}\n\n\ttx, err := d.dbProxy.(*sql.DB).Begin()\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = f(DB{\n\t\tdbProxy: tx,\n\t\tStatementBuilderType: statementBuilder(tx),\n\t})\n\tif err != nil {\n\t\t// Rollback error is ignored as we already have one in progress\n\t\tif err2 := tx.Rollback(); err2 != nil {\n\t\t\tlevel.Warn(util_log.Logger).Log(\"msg\", \"transaction rollback error (ignored)\", \"err\", err2)\n\t\t}\n\t\treturn err\n\t}\n\treturn tx.Commit()\n}", "func (_UpkeepRegistrationRequests *UpkeepRegistrationRequestsSession) Register(name string, encryptedEmail []byte, upkeepContract common.Address, gasLimit uint32, adminAddress common.Address, checkData []byte, amount *big.Int, 
source uint8) (*types.Transaction, error) {\n\treturn _UpkeepRegistrationRequests.Contract.Register(&_UpkeepRegistrationRequests.TransactOpts, name, encryptedEmail, upkeepContract, gasLimit, adminAddress, checkData, amount, source)\n}", "func (acn *Account) registryTransaction(tsn *Transaction) {\n\tacn.transactions = append(acn.transactions, tsn)\n}", "func (m NametitleMutation) Tx() (*Tx, error) {\n\tif _, ok := m.driver.(*txDriver); !ok {\n\t\treturn nil, fmt.Errorf(\"ent: mutation is not running in a transaction\")\n\t}\n\ttx := &Tx{config: m.config}\n\ttx.init()\n\treturn tx, nil\n}", "func (m CarbrandMutation) Tx() (*Tx, error) {\n\tif _, ok := m.driver.(*txDriver); !ok {\n\t\treturn nil, fmt.Errorf(\"ent: mutation is not running in a transaction\")\n\t}\n\ttx := &Tx{config: m.config}\n\ttx.init()\n\treturn tx, nil\n}", "func (dao *ConfigAuditProcessDao) Transaction(ctx context.Context, f func(ctx context.Context, tx *gdb.TX) error) (err error) {\n\treturn dao.Ctx(ctx).Transaction(ctx, f)\n}", "func (pge *PgEngine) StartTransaction(ctx context.Context) (pgx.Tx, error) {\n\treturn pge.ConfigDb.Begin(ctx)\n}", "func (m PlaceMutation) Tx() (*Tx, error) {\n\tif _, ok := m.driver.(*txDriver); !ok {\n\t\treturn nil, fmt.Errorf(\"ent: mutation is not running in a transaction\")\n\t}\n\ttx := &Tx{config: m.config}\n\ttx.init()\n\treturn tx, nil\n}", "func (m CarserviceMutation) Tx() (*Tx, error) {\n\tif _, ok := m.driver.(*txDriver); !ok {\n\t\treturn nil, fmt.Errorf(\"ent: mutation is not running in a transaction\")\n\t}\n\ttx := &Tx{config: m.config}\n\ttx.init()\n\treturn tx, nil\n}", "func (m BillingstatusMutation) Tx() (*Tx, error) {\n\tif _, ok := m.driver.(*txDriver); !ok {\n\t\treturn nil, fmt.Errorf(\"ent: mutation is not running in a transaction\")\n\t}\n\ttx := &Tx{config: m.config}\n\ttx.init()\n\treturn tx, nil\n}", "func (m TenantMutation) Tx() (*Tx, error) {\n\tif _, ok := m.driver.(*txDriver); !ok {\n\t\treturn nil, fmt.Errorf(\"ent: mutation is not 
running in a transaction\")\n\t}\n\ttx := &Tx{config: m.config}\n\ttx.init()\n\treturn tx, nil\n}", "func (m TenantMutation) Tx() (*Tx, error) {\n\tif _, ok := m.driver.(*txDriver); !ok {\n\t\treturn nil, fmt.Errorf(\"ent: mutation is not running in a transaction\")\n\t}\n\ttx := &Tx{config: m.config}\n\ttx.init()\n\treturn tx, nil\n}", "func (m BookcourseMutation) Tx() (*Tx, error) {\n\tif _, ok := m.driver.(*txDriver); !ok {\n\t\treturn nil, fmt.Errorf(\"ent: mutation is not running in a transaction\")\n\t}\n\ttx := &Tx{config: m.config}\n\ttx.init()\n\treturn tx, nil\n}", "func (store *Store) execTx(ctx context.Context, fn func(*Queries) error) error {\n\ttx, err := store.db.BeginTx(ctx, nil)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tq := New(tx) // create a new Queries object but instead of passing in sql.DB, we are passing in a sql.Tx Object for transactions\n\terr = fn(q) // call the input function as queries\n\t// If there was an error with the transaction\n\tif err != nil {\n\t\tif rollbackErr := tx.Rollback(); rollbackErr != nil {\n\t\t\treturn fmt.Errorf(\"tx error: %v, rollback error: %v\", err, rollbackErr)\n\t\t}\n\t\treturn err\n\t}\n\treturn tx.Commit()\n}", "func (m PatientrightsMutation) Tx() (*Tx, error) {\n\tif _, ok := m.driver.(*txDriver); !ok {\n\t\treturn nil, fmt.Errorf(\"ent: mutation is not running in a transaction\")\n\t}\n\ttx := &Tx{config: m.config}\n\ttx.init()\n\treturn tx, nil\n}", "func (m BarTimeRangeMutation) Tx() (*Tx, error) {\n\tif _, ok := m.driver.(*txDriver); !ok {\n\t\treturn nil, errors.New(\"ent: mutation is not running in a transaction\")\n\t}\n\ttx := &Tx{config: m.config}\n\ttx.init()\n\treturn tx, nil\n}", "func ExecuteInTxAndRollback(t *testing.T, db *sql.DB, f func(tx *sql.Tx)) {\n\ttx, err := db.Begin()\n\trequire.NoError(t, err)\n\tdefer func() {\n\t\terr := tx.Rollback()\n\t\trequire.NoError(t, err)\n\t}()\n\n\tf(tx)\n}", "func (m CarRepairrecordMutation) Tx() (*Tx, error) {\n\tif _, ok := 
m.driver.(*txDriver); !ok {\n\t\treturn nil, fmt.Errorf(\"ent: mutation is not running in a transaction\")\n\t}\n\ttx := &Tx{config: m.config}\n\ttx.init()\n\treturn tx, nil\n}", "func (db *DB) WriteTx() *WriteTx {\n\treturn &WriteTx{\n\t\tdb: db,\n\t}\n}", "func AddTx(m *Tx) (err error) {\n\td := db.Create(m)\n\treturn d.Error\n}", "func (s *Service) CreateTx(tx *sql.Tx, headerID uint, ms Models) error {\n\treturn s.storage.CreateTx(tx, headerID, ms)\n}", "func (store *SQLStore) execTx(ctx context.Context, fn func(*Queries) error) error {\n\ttx, err := store.db.BeginTx(ctx, nil)\n\tif err != nil {\n\t\treturn err\n\t}\n\t// get back new query object \n\tq := New(tx)\n\terr = fn(q)\n\t// rollback on error - if rollback error return both errors \n\tif err != nil {\n\t\tif rbErr := tx.Rollback(); rbErr != nil {\n\t\t\treturn fmt.Errorf(\"tx err: %v, rb err: %v\", err, rbErr)\n\t\t}\n\t\treturn err\n\t}\n\treturn tx.Commit()\n}", "func (m EndWorkMutation) Tx() (*Tx, error) {\n\tif _, ok := m.driver.(*txDriver); !ok {\n\t\treturn nil, fmt.Errorf(\"ent: mutation is not running in a transaction\")\n\t}\n\ttx := &Tx{config: m.config}\n\ttx.init()\n\treturn tx, nil\n}", "func (m ZoneproductMutation) Tx() (*Tx, error) {\n\tif _, ok := m.driver.(*txDriver); !ok {\n\t\treturn nil, fmt.Errorf(\"ent: mutation is not running in a transaction\")\n\t}\n\ttx := &Tx{config: m.config}\n\ttx.init()\n\treturn tx, nil\n}", "func StartTransaction(w http.ResponseWriter, DB *sql.DB) (*sql.Tx, error) {\n\tvar err error\n\ttx, err := DB.Begin()\n\tif err != nil {\n\t\ttx.Rollback()\n\t\trequestUtils.RespondWithError(w, http.StatusInternalServerError, err.Error())\n\t\treturn nil, err\n\t}\n\treturn tx, err\n}", "func (m CompanyMutation) Tx() (*Tx, error) {\n\tif _, ok := m.driver.(*txDriver); !ok {\n\t\treturn nil, fmt.Errorf(\"ent: mutation is not running in a transaction\")\n\t}\n\ttx := &Tx{config: m.config}\n\ttx.init()\n\treturn tx, nil\n}", "func Transaction(ctx context.Context, driver 
Driver, opts *TxOptions,\n\thandler func(driver Driver) error) error {\n\n\tif driver == nil {\n\t\treturn errors.Wrap(ErrInvalidDriver, \"makroud: cannot create a transaction\")\n\t}\n\n\ttx, err := driver.Begin(ctx, opts)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = handler(tx)\n\tif err != nil {\n\n\t\tthr := tx.Rollback()\n\t\tif thr != nil && driver.HasObserver() {\n\t\t\tthr = errors.Wrap(thr, \"makroud: trying to rollback transaction\")\n\t\t\tdriver.Observer().OnRollback(thr, nil)\n\t\t}\n\n\t\treturn err\n\t}\n\n\terr = tx.Commit()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}", "func (s *PetStore) Transaction(callback func(*PetStore) error) error {\n\tif callback == nil {\n\t\treturn kallax.ErrInvalidTxCallback\n\t}\n\n\treturn s.Store.Transaction(func(store *kallax.Store) error {\n\t\treturn callback(&PetStore{store})\n\t})\n}", "func (_PlasmaFramework *PlasmaFrameworkTransactor) RegisterVault(opts *bind.TransactOpts, _vaultId *big.Int, _vaultAddress common.Address) (*types.Transaction, error) {\n\treturn _PlasmaFramework.contract.Transact(opts, \"registerVault\", _vaultId, _vaultAddress)\n}", "func WithTx(db *sql.DB, query func(*sql.Tx) error) error {\n\ttx, err := db.Begin()\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer tx.Rollback()\n\terr = query(tx)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn tx.Commit()\n}", "func (m AgeMutation) Tx() (*Tx, error) {\n\tif _, ok := m.driver.(*txDriver); !ok {\n\t\treturn nil, fmt.Errorf(\"ent: mutation is not running in a transaction\")\n\t}\n\ttx := &Tx{config: m.config}\n\ttx.init()\n\treturn tx, nil\n}", "func withTransaction(db *sql.DB, fn func(txn *sql.Tx) error) (err error) {\n\ttxn, err := db.Begin()\n\tif err != nil {\n\t\treturn\n\t}\n\tdefer func() {\n\t\tif r := recover(); r != nil {\n\t\t\ttxn.Rollback()\n\t\t\tpanic(r)\n\t\t} else if err != nil {\n\t\t\ttxn.Rollback()\n\t\t} else {\n\t\t\terr = txn.Commit()\n\t\t}\n\t}()\n\terr = fn(txn)\n\treturn\n}", "func Register(ext 
*sqlite.ExtensionApi, opt *options.Options) (_ sqlite.ErrorCode, err error) {\n\t// register virtual table modules\n\tvar modules = map[string]sqlite.Module{\n\t\t\"commits\": &LogModule{Locator: opt.Locator, Context: opt.Context},\n\t\t\"refs\": &RefModule{Locator: opt.Locator, Context: opt.Context},\n\t\t\"stats\": native.NewStatsModule(opt.Locator, opt.Context),\n\t\t\"files\": native.NewFilesModule(opt.Locator, opt.Context),\n\t\t\"blame\": native.NewBlameModule(opt.Locator, opt.Context),\n\t}\n\n\tfor name, mod := range modules {\n\t\tif err = ext.CreateModule(name, mod); err != nil {\n\t\t\treturn sqlite.SQLITE_ERROR, errors.Wrapf(err, \"failed to register %q module\", name)\n\t\t}\n\t}\n\n\tvar fns = map[string]sqlite.Function{\n\t\t\"commit_from_tag\": &CommitFromTagFn{},\n\t}\n\n\tfor name, fn := range fns {\n\t\tif err = ext.CreateFunction(name, fn); err != nil {\n\t\t\treturn sqlite.SQLITE_ERROR, errors.Wrapf(err, \"failed to register %q function\", name)\n\t\t}\n\t}\n\n\treturn sqlite.SQLITE_OK, nil\n}", "func (m RobberMutation) Tx() (*Tx, error) {\n\tif _, ok := m.driver.(*txDriver); !ok {\n\t\treturn nil, fmt.Errorf(\"ent: mutation is not running in a transaction\")\n\t}\n\ttx := &Tx{config: m.config}\n\ttx.init()\n\treturn tx, nil\n}", "func (m IntervalMutation) Tx() (*Tx, error) {\n\tif _, ok := m.driver.(*txDriver); !ok {\n\t\treturn nil, errors.New(\"ent: mutation is not running in a transaction\")\n\t}\n\ttx := &Tx{config: m.config}\n\ttx.init()\n\treturn tx, nil\n}", "func (m RestaurantMutation) Tx() (*Tx, error) {\n\tif _, ok := m.driver.(*txDriver); !ok {\n\t\treturn nil, fmt.Errorf(\"ent: mutation is not running in a transaction\")\n\t}\n\ttx := &Tx{config: m.config}\n\ttx.init()\n\treturn tx, nil\n}", "func (m RestaurantMutation) Tx() (*Tx, error) {\n\tif _, ok := m.driver.(*txDriver); !ok {\n\t\treturn nil, fmt.Errorf(\"ent: mutation is not running in a transaction\")\n\t}\n\ttx := &Tx{config: m.config}\n\ttx.init()\n\treturn tx, nil\n}", 
"func (m HexMutation) Tx() (*Tx, error) {\n\tif _, ok := m.driver.(*txDriver); !ok {\n\t\treturn nil, fmt.Errorf(\"ent: mutation is not running in a transaction\")\n\t}\n\ttx := &Tx{config: m.config}\n\ttx.init()\n\treturn tx, nil\n}", "func (m StreetMutation) Tx() (*Tx, error) {\n\tif _, ok := m.driver.(*txDriver); !ok {\n\t\treturn nil, fmt.Errorf(\"ent: mutation is not running in a transaction\")\n\t}\n\ttx := &Tx{config: m.config}\n\ttx.init()\n\treturn tx, nil\n}", "func (m BloodtypeMutation) Tx() (*Tx, error) {\n\tif _, ok := m.driver.(*txDriver); !ok {\n\t\treturn nil, fmt.Errorf(\"ent: mutation is not running in a transaction\")\n\t}\n\ttx := &Tx{config: m.config}\n\ttx.init()\n\treturn tx, nil\n}", "func RegisterTxHandlers(mux *http.ServeMux, gateway *daemon.Gateway) {\n\t// get set of pending transactions\n\tmux.HandleFunc(\"/pendingTxs\", getPendingTxs(gateway))\n\t// get latest confirmed transactions\n\tmux.HandleFunc(\"/lastTxs\", getLastTxs(gateway))\n\t// get txn by txid\n\tmux.HandleFunc(\"/transaction\", getTransactionByID(gateway))\n\t//inject a transaction into network\n\tmux.HandleFunc(\"/injectTransaction\", injectTransaction(gateway))\n\tmux.HandleFunc(\"/resendUnconfirmedTxns\", resendUnconfirmedTxns(gateway))\n\t// get raw tx by txid.\n\tmux.HandleFunc(\"/rawtx\", getRawTx(gateway))\n}", "func (m BarGroupMutation) Tx() (*Tx, error) {\n\tif _, ok := m.driver.(*txDriver); !ok {\n\t\treturn nil, errors.New(\"ent: mutation is not running in a transaction\")\n\t}\n\ttx := &Tx{config: m.config}\n\ttx.init()\n\treturn tx, nil\n}" ]
[ "0.6274189", "0.6211087", "0.6139333", "0.61136764", "0.61018455", "0.6091702", "0.6074261", "0.6053163", "0.6053163", "0.6019075", "0.6001216", "0.5968726", "0.59503096", "0.59449434", "0.5939692", "0.5934833", "0.5924858", "0.5907003", "0.59001046", "0.5847809", "0.5808601", "0.58057183", "0.57678246", "0.57521385", "0.57298934", "0.5723418", "0.56890404", "0.5686981", "0.5682871", "0.56789476", "0.56754917", "0.5669826", "0.5666833", "0.56366825", "0.5630492", "0.5623383", "0.56181926", "0.5609718", "0.56075746", "0.55959666", "0.5594622", "0.5593809", "0.558488", "0.55811334", "0.5580892", "0.5573819", "0.5570477", "0.5565422", "0.55588746", "0.5558231", "0.5556222", "0.5548939", "0.5548236", "0.55463326", "0.55455434", "0.5545094", "0.5538741", "0.5538221", "0.55315137", "0.552274", "0.5512343", "0.5487771", "0.5486829", "0.54835147", "0.5483118", "0.5481372", "0.5478997", "0.54788065", "0.54774946", "0.54774946", "0.54640216", "0.54635143", "0.5459946", "0.5458405", "0.54575187", "0.54568124", "0.545679", "0.5454698", "0.5443165", "0.5442465", "0.5431117", "0.54172456", "0.54128116", "0.54123956", "0.54115444", "0.5410089", "0.54045856", "0.5403393", "0.540323", "0.540245", "0.540213", "0.539376", "0.5391048", "0.5390724", "0.5390724", "0.5388489", "0.53811234", "0.5376541", "0.53723437", "0.53618026" ]
0.7387559
0