query
stringlengths
7
3.85k
document
stringlengths
11
430k
metadata
dict
negatives
listlengths
0
101
negative_scores
listlengths
0
101
document_score
stringlengths
3
10
document_rank
stringclasses
102 values
Returns the value of the 'go_package' option of the first .proto file found in the same directory as projectFile
func detectGoPackageForProject(projectFile string) (string, error) { var goPkg string projectDir := filepath.Dir(projectFile) if err := filepath.Walk(projectDir, func(protoFile string, info os.FileInfo, err error) error { // already set if goPkg != "" { return nil } if !strings.HasSuffix(protoFile, ".proto") { return nil } // search for go_package on protos in the same dir as the project.json if projectDir != filepath.Dir(protoFile) { return nil } content, err := ioutil.ReadFile(protoFile) if err != nil { return err } lines := strings.Split(string(content), "\n") for _, line := range lines { goPackage := goPackageStatementRegex.FindStringSubmatch(line) if len(goPackage) == 0 { continue } if len(goPackage) != 2 { return errors.Errorf("parsing go_package error: from %v found %v", line, goPackage) } goPkg = goPackage[1] break } return nil }); err != nil { return "", err } if goPkg == "" { return "", errors.Errorf("no go_package statement found in root dir of project %v", projectFile) } return goPkg, nil }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (g *Generator) GoFilePackage(depfile *fdep.DepFile) string {\n\treturn fproto_wrap.BaseName(g.GoWrapPackage(depfile))\n}", "func (g *Generator) GoPackage(depfile *fdep.DepFile) string {\n\tfor _, o := range depfile.ProtoFile.Options {\n\t\tif o.Name == \"go_package\" {\n\t\t\treturn o.Value.String()\n\t\t}\...
[ "0.6514599", "0.6418956", "0.62568474", "0.6100058", "0.6059389", "0.6033528", "0.5760218", "0.571393", "0.56766814", "0.56747204", "0.55886865", "0.55810106", "0.55710924", "0.5528127", "0.5502348", "0.55021805", "0.5456921", "0.5454185", "0.54367715", "0.54307157", "0.54025...
0.7273096
0
NewQueueManager instantiates a new QueueManager object This constructor will assign default values to properties that have it defined, and makes sure properties required by API are set, but the set of arguments will change when the set of required properties is changed
func NewQueueManager(name string, clusters []string, aliasQueues []AliasQueue, remoteQueues []RemoteQueue, clusterQueues []ClusterQueue, ) *QueueManager { this := QueueManager{} this.Name = name this.Clusters = clusters this.AliasQueues = aliasQueues this.RemoteQueues = remoteQueues this.ClusterQueues = clusterQueues return &this }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func NewQueueManager(logger log.Logger, cfg config.QueueConfig, externalLabels labels.Labels, relabelConfigs []*relabel.Config, client StorageClient, flushDeadline time.Duration) *QueueManager {\n\tif logger == nil {\n\t\tlogger = log.NewNopLogger()\n\t} else {\n\t\tlogger = log.With(logger, \"queue\", client.Name...
[ "0.6894487", "0.6460588", "0.6222042", "0.61513555", "0.6091006", "0.606205", "0.60386145", "0.6006943", "0.59639966", "0.594565", "0.5942067", "0.59363455", "0.5910915", "0.5891163", "0.5883823", "0.5868089", "0.5867437", "0.58120775", "0.579116", "0.57845575", "0.5739835", ...
0.6490378
1
NewQueueManagerWithDefaults instantiates a new QueueManager object This constructor will only assign default values to properties that have it defined, but it doesn't guarantee that properties required by API are set
func NewQueueManagerWithDefaults() *QueueManager { this := QueueManager{} return &this }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func NewQueueManager(logger log.Logger, cfg config.QueueConfig, externalLabels labels.Labels, relabelConfigs []*relabel.Config, client StorageClient, flushDeadline time.Duration) *QueueManager {\n\tif logger == nil {\n\t\tlogger = log.NewNopLogger()\n\t} else {\n\t\tlogger = log.With(logger, \"queue\", client.Name...
[ "0.6368561", "0.5929313", "0.5928396", "0.59103316", "0.5819523", "0.58130354", "0.5753316", "0.5739753", "0.568538", "0.5661701", "0.5606484", "0.5487868", "0.54643965", "0.5463153", "0.54630005", "0.5449085", "0.544472", "0.54301214", "0.540289", "0.5370558", "0.535047", ...
0.7690877
0
GetName returns the Name field value
func (o *QueueManager) GetName() string { if o == nil { var ret string return ret } return o.Name }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (o *SingleSelectFieldField) GetName() string {\n\tif o == nil {\n\t\tvar ret string\n\t\treturn ret\n\t}\n\n\treturn o.Name\n}", "func (f *Field) GetName() string {\n\treturn formatGoName(f.Name)\n}", "func (e *Entry) GetName() string {\n\tif len(e.NameRaw) > 0 {\n\t\treturn string(e.NameRaw)\n\t}\n\tretu...
[ "0.8270152", "0.79907626", "0.7765679", "0.76368356", "0.7604266", "0.7600852", "0.75710016", "0.7564642", "0.7548394", "0.75191027", "0.7517443", "0.7511258", "0.7505484", "0.74717504", "0.7460151", "0.7448915", "0.7438394", "0.7437716", "0.7437274", "0.74279296", "0.7417602...
0.0
-1
GetNameOk returns a tuple with the Name field value and a boolean to check if the value has been set.
func (o *QueueManager) GetNameOk() (*string, bool) { if o == nil { return nil, false } return &o.Name, true }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (o *SingleSelectFieldField) GetNameOk() (*string, bool) {\n\tif o == nil {\n\t\treturn nil, false\n\t}\n\treturn &o.Name, true\n}", "func (o *FormField) GetNameOk() (*string, bool) {\n\tif o == nil || o.Name == nil {\n\t\treturn nil, false\n\t}\n\treturn o.Name, true\n}", "func (o *Credit1099Payer) GetNam...
[ "0.78737646", "0.7743277", "0.76780003", "0.76587296", "0.7625746", "0.7576448", "0.7566445", "0.754901", "0.7547995", "0.7547995", "0.7546563", "0.7531728", "0.74989444", "0.7477683", "0.7444606", "0.74311113", "0.74224156", "0.7415727", "0.73950857", "0.73914826", "0.737795...
0.0
-1
SetName sets field value
// SetName sets the Name field to v.
func (o *QueueManager) SetName(v string) {
	o.Name = v
}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (cli *SetWrapper) SetName(name string) error {\n\treturn cli.set.SetValue(fieldSetName, name)\n}", "func (m *ModelStructRecord) SetField(name string, value reflect.Value) {\n\tif name == \"\" {\n\t\treturn\n\t}\n\tfieldValue := m.FieldValues[name]\n\t//if value.Kind() == reflect.Ptr {\n\t//\tpanic(\"RecordF...
[ "0.6978644", "0.67127097", "0.6554158", "0.6535701", "0.648949", "0.6488116", "0.64872724", "0.64611167", "0.641035", "0.63914317", "0.6386742", "0.6377272", "0.63651955", "0.6351144", "0.6337588", "0.6311778", "0.6311778", "0.6304745", "0.62829113", "0.6272751", "0.6268671",...
0.0
-1
GetClusters returns the Clusters field value
func (o *QueueManager) GetClusters() []string { if o == nil { var ret []string return ret } return o.Clusters }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (c *Config) GetClusters(ctx context.Context, quiet bool, filterMap map[string]string, clustersName ...string) (string, error) {\n\tc.Logger.Debugf(\"Sending parameters to server to get the clusters %q\", strings.Join(clustersName, \", \"))\n\n\tfilter := MapToSlice(filterMap)\n\n\treturn c.RunGRPCnRESTFunc(\"...
[ "0.7150111", "0.70848936", "0.7050372", "0.69830257", "0.69812804", "0.68225586", "0.6785897", "0.67075115", "0.67008924", "0.66717374", "0.6653548", "0.6628853", "0.65589386", "0.6548702", "0.6535888", "0.6530604", "0.6489139", "0.6475645", "0.6451163", "0.6437961", "0.64292...
0.76226926
0
GetClustersOk returns a tuple with the Clusters field value and a boolean to check if the value has been set.
func (o *QueueManager) GetClustersOk() (*[]string, bool) { if o == nil { return nil, false } return &o.Clusters, true }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func NewGetClustersOK() *GetClustersOK {\n\treturn &GetClustersOK{}\n}", "func (o *ResourceLimits) GetK8sClustersProvisionedOk() (*int32, bool) {\n\tif o == nil {\n\t\treturn nil, false\n\t}\n\treturn o.K8sClustersProvisioned, true\n}", "func (o *VirtualizationVmwareVirtualMachineAllOf) GetClusterOk() (*Virtua...
[ "0.6719827", "0.65017885", "0.6465093", "0.646219", "0.6398727", "0.63660103", "0.63552487", "0.63049424", "0.627333", "0.62654936", "0.62586135", "0.6258082", "0.6212132", "0.6196596", "0.6170194", "0.61492944", "0.611987", "0.60981774", "0.60886943", "0.6081035", "0.6077892...
0.8224033
0
SetClusters sets field value
// SetClusters sets the Clusters field to v.
func (o *QueueManager) SetClusters(v []string) {
	o.Clusters = v
}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (s *RaftDatabase) SetClusters(clusters int) {\n\ts.clusters = clusters\n}", "func (s *RaftDatabase) Clusters() int {\n\treturn GetArg(s.name, \"clusters\").Int(s.clusters)\n}", "func (store *CenterStore) SetCenters(clust core.Clust) {\n\tstore.centers[len(clust)] = clust\n}", "func (s *ListClustersOutpu...
[ "0.623222", "0.5735698", "0.55403745", "0.5447475", "0.5413994", "0.5402886", "0.53507334", "0.5320656", "0.5263007", "0.5236094", "0.52265227", "0.52218485", "0.52067995", "0.51981896", "0.51796174", "0.5176415", "0.5163213", "0.5159419", "0.5066361", "0.5066361", "0.5045236...
0.680183
0
GetAliasQueues returns the AliasQueues field value
func (o *QueueManager) GetAliasQueues() []AliasQueue { if o == nil { var ret []AliasQueue return ret } return o.AliasQueues }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (o *QueueManager) GetAliasQueuesOk() (*[]AliasQueue, bool) {\n\tif o == nil {\n\t\treturn nil, false\n\t}\n\treturn &o.AliasQueues, true\n}", "func (o *QueueManager) SetAliasQueues(v []AliasQueue) {\n\to.AliasQueues = v\n}", "func (b *backend) Queues(ctx context.Context, qq *entroq.QueuesQuery) (map[stri...
[ "0.69151425", "0.6435573", "0.6247431", "0.6227677", "0.6098082", "0.59897524", "0.58581173", "0.5692911", "0.56441885", "0.5639317", "0.5635707", "0.54951936", "0.54639095", "0.5432769", "0.53856283", "0.53819096", "0.53484887", "0.5290492", "0.5287443", "0.5241385", "0.5220...
0.7781025
0
GetAliasQueuesOk returns a tuple with the AliasQueues field value and a boolean to check if the value has been set.
func (o *QueueManager) GetAliasQueuesOk() (*[]AliasQueue, bool) { if o == nil { return nil, false } return &o.AliasQueues, true }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (o *QueueManager) GetAliasQueues() []AliasQueue {\n\tif o == nil {\n\t\tvar ret []AliasQueue\n\t\treturn ret\n\t}\n\n\treturn o.AliasQueues\n}", "func (o *QueueManager) SetAliasQueues(v []AliasQueue) {\n\to.AliasQueues = v\n}", "func (o *QueueManager) GetClusterQueuesOk() (*[]ClusterQueue, bool) {\n\tif ...
[ "0.62684894", "0.6182187", "0.6036427", "0.5732854", "0.55811864", "0.5505796", "0.5485279", "0.54428786", "0.535559", "0.5352854", "0.52992433", "0.5274927", "0.5233722", "0.5213937", "0.5182979", "0.51760995", "0.51753753", "0.51179737", "0.510792", "0.5093728", "0.50886714...
0.8438502
0
SetAliasQueues sets field value
// SetAliasQueues sets the AliasQueues field to v.
func (o *QueueManager) SetAliasQueues(v []AliasQueue) {
	o.AliasQueues = v
}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (p *Process) CmdSetQueue(pac teoapi.Packet) (err error) {\n\tdata := pac.RemoveTrailingZero(pac.Data())\n\trequest := cdb.KeyValue{Cmd: pac.Cmd()}\n\tif err = request.UnmarshalText(data); err != nil {\n\t\treturn\n\t} else if err = p.tcdb.SetQueue(request.Key, request.Value); err != nil {\n\t\treturn\n\t}\n\t...
[ "0.58699656", "0.5864439", "0.5774504", "0.57054865", "0.56945664", "0.5664903", "0.5560596", "0.5487568", "0.54635847", "0.54438704", "0.54421866", "0.54008865", "0.5364136", "0.53472435", "0.53410256", "0.53387004", "0.52517086", "0.5212178", "0.5209478", "0.51821357", "0.5...
0.6917118
0
GetRemoteQueues returns the RemoteQueues field value
func (o *QueueManager) GetRemoteQueues() []RemoteQueue { if o == nil { var ret []RemoteQueue return ret } return o.RemoteQueues }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (o *QueueManager) GetRemoteQueuesOk() (*[]RemoteQueue, bool) {\n\tif o == nil {\n\t\treturn nil, false\n\t}\n\treturn &o.RemoteQueues, true\n}", "func (o *QueueManager) SetRemoteQueues(v []RemoteQueue) {\n\to.RemoteQueues = v\n}", "func GetQueues(c *gin.Context) {\n\t//TODO: create a while both back and ...
[ "0.71619385", "0.61695373", "0.6127869", "0.6031343", "0.5995589", "0.5916818", "0.58918214", "0.5793508", "0.57635283", "0.57260025", "0.57182837", "0.56593096", "0.5565563", "0.5556938", "0.5451114", "0.54059637", "0.5343811", "0.53087986", "0.53000456", "0.52932256", "0.52...
0.76292956
0
GetRemoteQueuesOk returns a tuple with the RemoteQueues field value and a boolean to check if the value has been set.
func (o *QueueManager) GetRemoteQueuesOk() (*[]RemoteQueue, bool) { if o == nil { return nil, false } return &o.RemoteQueues, true }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (o *QueueManager) GetRemoteQueues() []RemoteQueue {\n\tif o == nil {\n\t\tvar ret []RemoteQueue\n\t\treturn ret\n\t}\n\n\treturn o.RemoteQueues\n}", "func (o *QueueManager) GetAliasQueuesOk() (*[]AliasQueue, bool) {\n\tif o == nil {\n\t\treturn nil, false\n\t}\n\treturn &o.AliasQueues, true\n}", "func (...
[ "0.63053876", "0.61726433", "0.5913533", "0.5784514", "0.57285035", "0.533683", "0.53098166", "0.52802896", "0.5174174", "0.5162988", "0.5104646", "0.50820893", "0.5044645", "0.5033634", "0.50302416", "0.5001202", "0.49617392", "0.49253264", "0.49231443", "0.48999754", "0.488...
0.8458737
0
SetRemoteQueues sets field value
// SetRemoteQueues sets the RemoteQueues field to v.
func (o *QueueManager) SetRemoteQueues(v []RemoteQueue) {
	o.RemoteQueues = v
}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (s *Filters) SetQueues(v []*string) *Filters {\n\ts.Queues = v\n\treturn s\n}", "func (p *Process) CmdSetQueue(pac teoapi.Packet) (err error) {\n\tdata := pac.RemoveTrailingZero(pac.Data())\n\trequest := cdb.KeyValue{Cmd: pac.Cmd()}\n\tif err = request.UnmarshalText(data); err != nil {\n\t\treturn\n\t} else...
[ "0.6111192", "0.59353924", "0.5917593", "0.59074235", "0.584018", "0.5630969", "0.56206805", "0.5559726", "0.547905", "0.53623676", "0.53450924", "0.5335673", "0.5274789", "0.52499294", "0.52425617", "0.51923627", "0.5152956", "0.5142641", "0.50583076", "0.5016923", "0.499975...
0.68890077
0
GetClusterQueues returns the ClusterQueues field value
func (o *QueueManager) GetClusterQueues() []ClusterQueue { if o == nil { var ret []ClusterQueue return ret } return o.ClusterQueues }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (o *QueueManager) GetClusterQueuesOk() (*[]ClusterQueue, bool) {\n\tif o == nil {\n\t\treturn nil, false\n\t}\n\treturn &o.ClusterQueues, true\n}", "func (o *QueueManager) GetClusters() []string {\n\tif o == nil {\n\t\tvar ret []string\n\t\treturn ret\n\t}\n\n\treturn o.Clusters\n}", "func (client *Clie...
[ "0.6691833", "0.6425696", "0.6164038", "0.6133957", "0.6117693", "0.6031181", "0.60217136", "0.5825728", "0.57695156", "0.57689", "0.5742274", "0.56948394", "0.55601054", "0.55268496", "0.5474696", "0.5450112", "0.5429199", "0.539163", "0.5356741", "0.52030075", "0.51988304",...
0.741813
0
GetClusterQueuesOk returns a tuple with the ClusterQueues field value and a boolean to check if the value has been set.
func (o *QueueManager) GetClusterQueuesOk() (*[]ClusterQueue, bool) { if o == nil { return nil, false } return &o.ClusterQueues, true }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (o *QueueManager) GetAliasQueuesOk() (*[]AliasQueue, bool) {\n\tif o == nil {\n\t\treturn nil, false\n\t}\n\treturn &o.AliasQueues, true\n}", "func (o *QueueManager) GetClustersOk() (*[]string, bool) {\n\tif o == nil {\n\t\treturn nil, false\n\t}\n\treturn &o.Clusters, true\n}", "func (o *QueueManager) ...
[ "0.64735645", "0.64523965", "0.6035547", "0.58038056", "0.53999776", "0.53208125", "0.5234418", "0.5219659", "0.5205979", "0.51352835", "0.5088557", "0.5061583", "0.5047421", "0.50225717", "0.50075495", "0.49929222", "0.49726918", "0.49626175", "0.49571374", "0.49505204", "0....
0.832443
0
SetClusterQueues sets field value
// SetClusterQueues sets the ClusterQueues field to v.
func (o *QueueManager) SetClusterQueues(v []ClusterQueue) {
	o.ClusterQueues = v
}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (tcdb *Teocdb) SetQueue(key string, value []byte) (err error) {\n\treturn tcdb.session.Query(`UPDATE queue SET lock = '', data = ? WHERE key = ? AND time = toTimestamp(now()) AND random = UUID()`,\n\t\tvalue, key).Exec()\n}", "func SetQueueSettings(ctx *context.Context) {\n\tqid := ctx.ParamsInt64(\"qid\")\...
[ "0.60655606", "0.60586584", "0.5939442", "0.58560574", "0.57875776", "0.5729049", "0.5456519", "0.54093945", "0.52865255", "0.5281743", "0.5238613", "0.520454", "0.5202154", "0.51686084", "0.51261485", "0.5113021", "0.5069352", "0.5063116", "0.5051894", "0.50452304", "0.50420...
0.6035805
2
Is the tiploc one for this station
func (bf *boardFilter) atStation(tpl string) bool { for _, s := range bf.res.Station { if s == tpl { return true } } return false }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (me TxsdType) IsLocator() bool { return me == \"locator\" }", "func (me TxsdType) IsLocator() bool { return me.String() == \"locator\" }", "func (l Location) IsStation() bool {\n\treturn l.Station != nil\n}", "func (t *Tangle) HasTip(h hash.Hash) bool {\n\treturn t.tips[h]\n}", "func (d *LDB) GetStati...
[ "0.67006546", "0.66539073", "0.63916194", "0.59491193", "0.56473047", "0.5592531", "0.55731654", "0.55731654", "0.54852706", "0.54088384", "0.54081464", "0.5377661", "0.5375327", "0.5370738", "0.53356165", "0.53268975", "0.5317167", "0.53046083", "0.52147436", "0.51936233", "...
0.5378452
11
Does the service call at a specific station
func (bf *boardFilter) callsAt(callingPoints []darwind3.CallingPoint, tpls []string) bool { for _, cp := range callingPoints { for _, tpl := range tpls { if tpl == cp.Tiploc { return true } } } return false }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (s *service) GetByStation(stationID int) (*Status, error) {\n\treturn s.adapter.GetByStation(stationID)\n}", "func isStationInJourney(st string, journey timetableRouteJourney) bool {\n\tfor _, call := range journey.Calls.Call {\n\t\tif call.ScheduledStopPointRef.Ref == st {\n\t\t\treturn true\n\t\t}\n\t}\n\...
[ "0.59751016", "0.59400904", "0.5820535", "0.5803581", "0.57665825", "0.56083244", "0.5489674", "0.5329984", "0.5267891", "0.5153535", "0.5140189", "0.510179", "0.50758654", "0.50346726", "0.5016249", "0.4981579", "0.4963392", "0.4916773", "0.4902284", "0.48805675", "0.4861543...
0.0
-1
Add a tiploc to the result so that it will be included in the tiploc map
// addTiploc records a tiploc so that it will be included in the tiploc map
// of the result. Empty strings are ignored.
func (bf *boardFilter) addTiploc(tiploc string) {
	if tiploc == "" {
		return
	}
	bf.tiplocs[tiploc] = nil
}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (r *LocationMap) Add(t *Location) {\n\tif _, ok := r.m[t.Tiploc]; !ok {\n\t\tr.m[t.Tiploc] = t\n\t}\n}", "func (bd *BlockDAG) updateTips(b *Block) {\n\tif bd.tips == nil {\n\t\tbd.tips = NewHashSet()\n\t\tbd.tips.AddPair(b.GetHash(), b)\n\t\treturn\n\t}\n\tfor k := range bd.tips.GetMap() {\n\t\tblock := bd....
[ "0.6330537", "0.5318772", "0.49911645", "0.49783835", "0.48557892", "0.48480755", "0.48384988", "0.48337668", "0.47259057", "0.47020352", "0.4641511", "0.4637678", "0.46198624", "0.45897534", "0.45555168", "0.45366836", "0.4514625", "0.4468566", "0.44587126", "0.44583687", "0...
0.66976255
0
Add a ViaResolveRequest to the response
func (bf *boardFilter) addVia(rid, dest string) *darwinref.ViaResolveRequest { viaRequest := &darwinref.ViaResolveRequest{ Crs: bf.station.Crs, Destination: dest, } bf.vias[rid] = viaRequest return viaRequest }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (dr *DarwinRefService) ViaResolveHandler(r *rest.Rest) error {\n\n\t// The query\n\tqueries := make(map[string]*darwinref.ViaResolveRequest)\n\n\t// The response\n\tresponse := make(map[string]*darwinref.Via)\n\n\t// Run the queries\n\tif err := r.Body(&queries); err != nil {\n\n\t\t// Fail safe by returning ...
[ "0.60127324", "0.57031727", "0.5662719", "0.55577856", "0.54912895", "0.54861164", "0.5339197", "0.5312581", "0.5171849", "0.51083773", "0.5107675", "0.505698", "0.5027165", "0.49780568", "0.4963554", "0.49580607", "0.4939172", "0.4843758", "0.4825311", "0.48133907", "0.47647...
0.48631617
17
Process calling points so that we generate the appropriate via and include their tiplocs
func (bf *boardFilter) processCallingPoints(s ldb.Service) { if len(s.CallingPoints) > 0 { viaRequest := bf.addVia(s.RID, s.CallingPoints[len(s.CallingPoints)-1].Tiploc) for _, cp := range s.CallingPoints { bf.addTiploc(cp.Tiploc) viaRequest.AppendTiploc(cp.Tiploc) } } }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (bf *boardFilter) callsAt(callingPoints []darwind3.CallingPoint, tpls []string) bool {\n\tfor _, cp := range callingPoints {\n\t\tfor _, tpl := range tpls {\n\t\t\tif tpl == cp.Tiploc {\n\t\t\t\treturn true\n\t\t\t}\n\t\t}\n\t}\n\n\treturn false\n}", "func CheckpointCaller(handler interface{}, params ...int...
[ "0.5804814", "0.55561084", "0.54828817", "0.54530644", "0.5207425", "0.51024735", "0.5098401", "0.50678176", "0.5043976", "0.50209445", "0.49973452", "0.49861223", "0.49611437", "0.4915316", "0.4851949", "0.48359668", "0.48310632", "0.48305863", "0.4799254", "0.4785857", "0.4...
0.72903216
0
Process any associations, pulling in their schedules
// processAssociations processes any associations of the service, pulling in
// their schedules, tiplocs, via requests and cancellation/lateness reasons.
func (bf *boardFilter) processAssociations(s ldb.Service) {
	for _, assoc := range s.Associations {
		assoc.AddTiplocs(bf.tiplocs)
		//if assoc.IsJoin() || assoc.IsSplit() {
		// Work out the "other" RID/location index of the association: start
		// with Main and switch to Assoc when Main is this service.
		ar := assoc.Main.RID
		ai := assoc.Main.LocInd
		if ar == s.RID {
			ar = assoc.Assoc.RID
			ai = assoc.Assoc.LocInd
		}
		// Resolve the schedule if a split, join or if NP only if previous service & we are not yet running
		//if ar != s.RID {
		if assoc.Category != "NP" || (s.LastReport.Tiploc == "" && assoc.Assoc.RID == s.RID) {
			as := bf.d.ldb.GetSchedule(ar)
			if as != nil {
				assoc.Schedule = as
				as.AddTiplocs(bf.tiplocs)
				as.LastReport = as.GetLastReport()
				bf.processToc(as.Toc)
				// Only build a via request when locations remain after the
				// association point.
				if ai < (len(as.Locations) - 1) {
					// NOTE(review): this checks as.Origin but dereferences
					// as.Destination — looks like a possible bug (nil
					// Destination would panic); confirm intent.
					if as.Origin != nil {
						bf.addTiploc(as.Destination.Tiploc)
					}
					// Destination defaults to the last location, overridden
					// by an explicit Destination when present.
					destination := as.Locations[len(as.Locations)-1].Tiploc
					if as.Destination != nil {
						destination = as.Destination.Tiploc
					}
					viaRequest := bf.addVia(ar, destination)
					for _, l := range as.Locations[ai:] {
						bf.addTiploc(l.Tiploc)
						viaRequest.AppendTiploc(l.Tiploc)
					}
				}
				bf.processReason(as.CancelReason, true)
				bf.processReason(as.LateReason, false)
			}
		}
	}
}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (s *candidate) Schedule() (constructedSchedule, error) {\n\tsch := constructedSchedule{\n\t\tearliest: s.earliest,\n\t\teventsByAttendee: make(map[AttendeeID]*attendeeEvents),\n\t}\n\tfor _, event := range s.order {\n\t\tif err := sch.Add(s.reqs[event]); err != nil {\n\t\t\treturn sch, err\n\t\t}\n\t}...
[ "0.56238943", "0.52930427", "0.523047", "0.5199952", "0.5147988", "0.5058819", "0.50440824", "0.5031958", "0.50148183", "0.49584836", "0.492599", "0.48424038", "0.47939503", "0.47636837", "0.474132", "0.47204733", "0.47124", "0.4708795", "0.46959302", "0.46930283", "0.4644104...
0.6897732
0
acceptService returns true if the service is to be accepted, false if it's to be ignored
func (bf *boardFilter) acceptService(service ldb.Service) bool { // Original requirement, must have an RID if service.RID == "" { return false } // remove terminating services if bf.terminated && bf.atStation(service.Destination) { return false } if bf.callAt && !bf.callsAt(service.CallingPoints, bf.callAtTiplocs) { return false } return true }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (f *aclFilter) allowService(service string) bool {\n\tif service == \"\" {\n\t\treturn true\n\t}\n\n\tif !f.enforceVersion8 && service == structs.ConsulServiceID {\n\t\treturn true\n\t}\n\treturn f.authorizer.ServiceRead(service)\n}", "func (m *MockMessageSvc) Accept(msgType string, purpose []string) bool {...
[ "0.63734", "0.6060541", "0.59677523", "0.58842057", "0.5720938", "0.5569348", "0.5523225", "0.54433316", "0.5340439", "0.53300637", "0.5299485", "0.5284296", "0.52749", "0.52703655", "0.52669793", "0.5251331", "0.52413315", "0.52043164", "0.51288015", "0.5126478", "0.511291",...
0.75590676
0
NewSettingsDialog(parent widgets.QWidget, flags) is automatically generated
// OpenSettingsDialog lazily initialises the dialog's display on first use,
// refreshes the form fields from current settings, then opens the dialog.
func (sd *SettingsDialog) OpenSettingsDialog() {
	// Initialise the display only once.
	if !sd.isDisplayInited {
		sd.__init_display()
	}
	sd.populateFields()
	sd.Open()
}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (s *User) SettingsUI(title string, editors []string) {\n\tapp := tview.NewApplication()\n\n\tform := tview.NewForm().\n\t\tAddCheckbox(\"Update on starting katbox\", s.AutoUpdate, nil).\n\t\tAddDropDown(\"Editor\", editors, 0, nil).\n\t\tAddInputField(\"(optional) Custom editor Path\", s.Editor, 30, nil, nil)...
[ "0.6050621", "0.6031621", "0.5928538", "0.58055913", "0.57716674", "0.51237786", "0.5116521", "0.51071554", "0.5070288", "0.49982792", "0.4967534", "0.4916568", "0.4900102", "0.4772437", "0.47615722", "0.47517538", "0.47404546", "0.47228342", "0.4722345", "0.47178954", "0.471...
0.56048965
5
/ Send file to server in app dir, used for making dockercompose and nginx configuration available.
func Upload( src, dest string ) ( err error ) { cfg, err := config.Read() if err != nil { return } connString := "leo-deploy@" + cfg.ServerName dest = connString + ":apps/" + cfg.AppName + "/" + dest fmt.Printf( "Copying %s into %s\n", src, dest ) out, err := exec.Command( "scp", src, dest ).CombinedOutput() fmt.Println( string( out ) ) if err != nil { return fmt.Errorf( "Can't upload to " + connString + ". Did you install your public key there?" ) } return }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func sendFile(w http.ResponseWriter, r *http.Request, file string) {\n\tfilepath := \"./assets/\" + file\n\tlog.Printf(\"Serving file %s\", filepath)\n\thttp.ServeFile(w, r, filepath)\n}", "func FileServer(c Config) Server {\n abs, err := filepath.Abs(c.Root)\n if err != nil { panic(err) }\n c.Root = abs\n\n ...
[ "0.68540484", "0.64961594", "0.6205975", "0.6052243", "0.5979095", "0.5941416", "0.5835912", "0.58248794", "0.5780296", "0.5768345", "0.5687944", "0.5671151", "0.56280226", "0.56184274", "0.5585889", "0.55772424", "0.55736756", "0.55638856", "0.5534361", "0.5530158", "0.55291...
0.0
-1
List all the known records
func (s *sqlStore) List(opts ...store.ListOption) ([]string, error) { options := store.ListOptions{ Order: store.OrderAsc, } for _, o := range opts { o(&options) } db, queries, err := s.db(options.Database, options.Table) if err != nil { return nil, err } pattern := "%" if options.Prefix != "" { pattern = options.Prefix + pattern } if options.Suffix != "" { pattern = pattern + options.Suffix } var rows pgx.Rows if options.Limit > 0 { if options.Order == store.OrderAsc { rows, err = db.Query(s.options.Context, queries.ListAscLimit, pattern, options.Limit, options.Offset) } else { rows, err = db.Query(s.options.Context, queries.ListDescLimit, pattern, options.Limit, options.Offset) } } else { if options.Order == store.OrderAsc { rows, err = db.Query(s.options.Context, queries.ListAsc, pattern) } else { rows, err = db.Query(s.options.Context, queries.ListDesc, pattern) } } if err != nil { if err == pgx.ErrNoRows { return nil, nil } return nil, err } defer rows.Close() keys := make([]string, 0, 10) for rows.Next() { var key string err = rows.Scan(&key) if err != nil { return nil, err } keys = append(keys, key) } return keys, nil }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (sr *StoredRecording) List(filter *ari.Key) (sx []*ari.Key, err error) {\n\tvar recs []struct {\n\t\tName string `json:\"name\"`\n\t}\n\n\tif filter == nil {\n\t\tfilter = sr.client.stamp(ari.NewKey(ari.StoredRecordingKey, \"\"))\n\t}\n\n\terr = sr.client.get(\"/recordings/stored\", &recs)\n\n\tfor _, rec := ...
[ "0.6895555", "0.6680016", "0.66310436", "0.6533258", "0.6475071", "0.64534694", "0.64447963", "0.6441886", "0.64385545", "0.63736784", "0.6371333", "0.6340122", "0.6304793", "0.62884146", "0.62821186", "0.6224998", "0.62233186", "0.6219472", "0.6186004", "0.61814195", "0.6165...
0.58201265
43
rowToRecord converts from pgx.Row to a store.Record
func (s *sqlStore) rowToRecord(row pgx.Row) (*store.Record, error) { var expiry *time.Time record := &store.Record{} metadata := make(Metadata) if err := row.Scan(&record.Key, &record.Value, &metadata, &expiry); err != nil { if err == sql.ErrNoRows { return record, store.ErrNotFound } return nil, err } // set the metadata record.Metadata = toMetadata(&metadata) if expiry != nil { record.Expiry = time.Until(*expiry) } return record, nil }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (s *sqlStore) rowsToRecords(rows pgx.Rows) ([]*store.Record, error) {\n\tvar records []*store.Record\n\n\tfor rows.Next() {\n\t\tvar expiry *time.Time\n\t\trecord := &store.Record{}\n\t\tmetadata := make(Metadata)\n\n\t\tif err := rows.Scan(&record.Key, &record.Value, &metadata, &expiry); err != nil {\n\t\t\t...
[ "0.6539114", "0.64575875", "0.61635786", "0.59165096", "0.5815466", "0.576414", "0.5693766", "0.5690296", "0.5623456", "0.56130004", "0.5544808", "0.5532619", "0.5377269", "0.5357239", "0.5338372", "0.5308314", "0.5301193", "0.5299179", "0.52847064", "0.52828956", "0.52663213...
0.8461538
0
rowsToRecords converts from pgx.Rows to []store.Record
func (s *sqlStore) rowsToRecords(rows pgx.Rows) ([]*store.Record, error) { var records []*store.Record for rows.Next() { var expiry *time.Time record := &store.Record{} metadata := make(Metadata) if err := rows.Scan(&record.Key, &record.Value, &metadata, &expiry); err != nil { return records, err } // set the metadata record.Metadata = toMetadata(&metadata) if expiry != nil { record.Expiry = time.Until(*expiry) } records = append(records, record) } return records, nil }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (s *sqlStore) rowToRecord(row pgx.Row) (*store.Record, error) {\n\tvar expiry *time.Time\n\trecord := &store.Record{}\n\tmetadata := make(Metadata)\n\n\tif err := row.Scan(&record.Key, &record.Value, &metadata, &expiry); err != nil {\n\t\tif err == sql.ErrNoRows {\n\t\t\treturn record, store.ErrNotFound\n\t\t...
[ "0.6726777", "0.67042124", "0.62163883", "0.6109386", "0.6026409", "0.59923404", "0.58899695", "0.58857733", "0.5779613", "0.5778647", "0.5747893", "0.56930035", "0.56913865", "0.56230724", "0.5609854", "0.55732936", "0.5567313", "0.5530029", "0.551342", "0.5497782", "0.54577...
0.8269437
0
Read a single key
func (s *sqlStore) Read(key string, opts ...store.ReadOption) ([]*store.Record, error) { options := store.ReadOptions{ Order: store.OrderAsc, } for _, o := range opts { o(&options) } db, queries, err := s.db(options.Database, options.Table) if err != nil { return nil, err } // read one record if !options.Prefix && !options.Suffix { row := db.QueryRow(s.options.Context, queries.ReadOne, key) record, err := s.rowToRecord(row) if err != nil { return nil, err } return []*store.Record{record}, nil } // read by pattern pattern := "%" if options.Prefix { pattern = key + pattern } if options.Suffix { pattern = pattern + key } var rows pgx.Rows if options.Limit > 0 { if options.Order == store.OrderAsc { rows, err = db.Query(s.options.Context, queries.ListAscLimit, pattern, options.Limit, options.Offset) } else { rows, err = db.Query(s.options.Context, queries.ListDescLimit, pattern, options.Limit, options.Offset) } } else { if options.Order == store.OrderAsc { rows, err = db.Query(s.options.Context, queries.ListAsc, pattern) } else { rows, err = db.Query(s.options.Context, queries.ListDesc, pattern) } } if err != nil { if err == pgx.ErrNoRows { return nil, nil } return nil, err } defer rows.Close() return s.rowsToRecords(rows) }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func readKey(key string, path string) (string, error) {\n\tdata, err := ioutil.ReadFile(filepath.Join(path, key))\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn string(data), nil\n}", "func ReadKey(keypath string) string {\n\tkey, err := ioutil.ReadFile(keypath)\n\tif err != nil {\n\t\tLog(fmt.Sprintf(\...
[ "0.7725113", "0.7333868", "0.7230678", "0.7222155", "0.7193259", "0.71848077", "0.71133333", "0.7095759", "0.7083336", "0.6862008", "0.68467087", "0.68404", "0.67486924", "0.6740755", "0.6702578", "0.6700314", "0.66522616", "0.6648966", "0.663196", "0.6580056", "0.6573138", ...
0.0
-1
Delete records with keys
func (s *sqlStore) Delete(key string, opts ...store.DeleteOption) error { var options store.DeleteOptions for _, o := range opts { o(&options) } db, queries, err := s.db(options.Database, options.Table) if err != nil { return err } _, err = db.Exec(s.options.Context, queries.Delete, key) return err }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (db *FlatDatabase) Delete(key []byte) error { panic(\"not supported\") }", "func (fb *FlatBatch) Delete(key []byte) error { panic(\"not supported\") }", "func (t *tableCommon) deleteBatchKeys(ctx context.Context, lvs []*tspb.ListValue, delOpts *deleteOptions) error {\n\tfor _, lv := range lvs {\n\t\tpkeys...
[ "0.6760398", "0.66280895", "0.65578467", "0.65088505", "0.6401223", "0.63470906", "0.63015383", "0.6252232", "0.6213148", "0.6193571", "0.6180504", "0.61795783", "0.6149718", "0.61181146", "0.61107606", "0.6109994", "0.6107761", "0.6069126", "0.60688424", "0.60659015", "0.605...
0.56412256
77
NewStore returns a new micro Store backed by sql
func NewStore(opts ...store.Option) store.Store { options := store.Options{ Database: defaultDatabase, Table: defaultTable, } for _, o := range opts { o(&options) } // new store s := new(sqlStore) s.options = options s.databases = make(map[string]DB) s.re = regexp.MustCompile("[^a-zA-Z0-9]+") go s.expiryLoop() // return store return s }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func NewStore()(*Store) {\n m := &Store{\n Entity: *iadcd81124412c61e647227ecfc4449d8bba17de0380ddda76f641a29edf2b242.NewEntity(),\n }\n return m\n}", "func newStore(ts service.Service, config *Config) (*Store, error) {\n\tif config.Datastore == nil {\n\t\tdatastore, err := newDefaultDatastore(co...
[ "0.7291816", "0.6905972", "0.6899488", "0.67929655", "0.67790884", "0.67777205", "0.6675343", "0.665811", "0.6637835", "0.66238797", "0.6618824", "0.66062766", "0.6601314", "0.6590567", "0.6570274", "0.656122", "0.65415144", "0.6537513", "0.6537513", "0.6536951", "0.65154004"...
0.6265493
39
Easier getters to check whether those commands should be started or not
func (d *DB) startHourly(key string) bool { // Try to load this key, if not present then it can be started lastCall, err := d.loadTime(key) if err != nil { Log.Println("Command: ", key, " was never called") return true } return int(time.Since(lastCall).Hours()) > 0 }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func IsCommand(cmd string) bool {\n for val := range DaemonizedCommands() {\n if val == cmd {\n return true\n }\n }\n for val := range InfoCommands() {\n if val == cmd {\n return true\n }\n }\n\n return false\n}", "func isCommand(name string) bool {\n\tfor _, cmd := range []string{\"_h...
[ "0.6994663", "0.6508269", "0.6502186", "0.63603437", "0.6129595", "0.6112796", "0.6082147", "0.60248846", "0.6018903", "0.5977348", "0.5970306", "0.5944507", "0.59397984", "0.593732", "0.5927465", "0.59095526", "0.58668464", "0.5851266", "0.5814823", "0.5798538", "0.5794736",...
0.0
-1
findConflict finds the index of the conflict. It returns the first pair of conflicting entries between the existing entries and the given entries, if there are any. If there is no conflicting entries, and the existing entries contains all the given entries, zero will be returned. If there is no conflicting entries, but the given entries contains new entries, the index of the first new entry will be returned. An entry is considered to be conflicting if it has the same index but a different term. The first entry MUST have an index equal to the argument 'from'. The index of the given entries MUST be continuously increasing.
func (l *LogStore) findConflict(entries []*pb.Entry) uint64 { // TODO: 会有第0个冲突么? for _, ne := range entries { if !l.matchTerm(ne.Index, ne.Term) { if ne.Index <= l.lastIndex() { l.logger.Info("log found conflict", zap.Uint64("conflictIndex", ne.Index), zap.Uint64("conflictTerm", ne.Term), zap.Uint64("existTerm", l.termOrPanic(l.term(ne.Index)))) } return ne.Index } } return 0 }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func FindConflictsByUser(entries []*RenderedScheduleEntry) map[string][]*Conflict {\n\tentriesByUser := RenderedScheduleEntries(entries).GroupBy(func(entry *RenderedScheduleEntry) string {\n\t\treturn entry.User.ID\n\t})\n\n\tvar (\n\t\tm sync.Mutex\n\t\twg sync.WaitGroup\n\t\tresults = make(map[string]...
[ "0.46936142", "0.42734542", "0.42118675", "0.41973543", "0.41771796", "0.41696936", "0.41489935", "0.41426665", "0.41424075", "0.41103593", "0.4097609", "0.40779626", "0.40646788", "0.406086", "0.40478206", "0.40368852", "0.4034429", "0.40295273", "0.40270522", "0.40177962", ...
0.6421718
0
New creates a new crawler
func New(ctx context.Context, logger yolo.Logger, client *http.Client, filter FilterFunc, mapper Mapper) *Crawler { return &Crawler{ ctx: ctx, logger: logger, client: client, filter: filter, mapper: mapper, visitChan: make(chan visit, visitChanBuffer), toVisit: make(map[string]struct{}), } }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func New() (*SingleCrawler, error) {\n\n defer glog.Flush()\n\n var crawler SingleCrawler\n startURL := *UrlPtr\n maxp := *MaxpPtr\n maxc := *MaxcPtr\n maxt := *MaxtPtr\n Filename := *OutfilePtr\n NumWorkers := *NumwPtr\n\n // validate the user input URL and decide if it's okay to use\n ...
[ "0.73082036", "0.72865236", "0.7228419", "0.7207095", "0.719269", "0.70568573", "0.7003078", "0.6945463", "0.68530333", "0.6658365", "0.6632175", "0.6534232", "0.6392795", "0.63671196", "0.63658637", "0.6364868", "0.6352427", "0.6246249", "0.6245264", "0.5961489", "0.5958432"...
0.69358087
8
Add adds one or more previously unadded urls to crawler to visit. source can be nil to indicate root. Returns a list of errors if any occured.
func (c *Crawler) Add(source *url.URL, uri ...*url.URL) []error { var errs []error for _, u := range uri { var err error u := u u.Fragment = "" // reset fragment, we don't want it messing our visited list if source != nil { u = source.ResolveReference(u) } if u.Scheme != "http" && u.Scheme != "https" { err = ErrUnsupportedScheme } else if err == nil && c.filter != nil && !c.filter(u) { err = ErrFilteredOut } us := u.String() // For the already-visited test we need to clean up each URL a bit vkey := strings.TrimRight(us[strings.Index(us, ":")+1:], "/") // Remove scheme and trailing slash if err == nil { c.toVisitMu.RLock() if _, ok := c.toVisit[vkey]; ok { err = ErrAlreadyInList } c.toVisitMu.RUnlock() } if err == nil { c.logger.Debugf("Add(%v %v): OK", source, us) atomic.AddUint64(&c.numQueued, 1) } else if err != nil { //c.logger.Warnf("Add(%v %v): %v", source, us, err) atomic.AddUint64(&c.numEncountered, 1) errs = append(errs, errors.Wrapf(err, "Invalid URL %v", u)) continue } c.toVisitMu.Lock() c.toVisit[vkey] = struct{}{} c.toVisitMu.Unlock() { uu := *u uu.Scheme = "" if source != nil && source.Host == uu.Host { uu.Host = "" } if source == nil { c.mapper.Add("<root>", uu.String()) } else { c.mapper.Add(source.String(), uu.String()) } } v := visit{ source: source, target: u, } select { case c.visitChan <- v: case <-c.ctx.Done(): return append(errs, c.ctx.Err()) } } return errs }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (os *OriginChecker) AddRawURLs(urls []string) {\n\tos.Lock()\n\tdefer os.Unlock()\n\n\tfor _, u := range urls {\n\t\tclean, err := cleanOrigin(u)\n\t\tif err == nil {\n\t\t\tos.origins[clean] = true\n\t\t}\n\t}\n}", "func (r *RssFeedEmitter) Add(url string) {\n\tfor _, feed := range r.feeds {\n\t\tif feed.L...
[ "0.5309802", "0.52734095", "0.52264124", "0.5196098", "0.51324165", "0.5123398", "0.5056944", "0.496427", "0.49161023", "0.48829758", "0.48795125", "0.4875261", "0.48127764", "0.48127764", "0.48080775", "0.47849753", "0.4765869", "0.47221217", "0.47214398", "0.47201884", "0.4...
0.7516235
0
Run launches the worker pool and blocks until they all finish.
func (c *Crawler) Run(numWorkers int) { // Here we create a new cancellable context to control goroutines and requests. // We can't just rely on the parent context because we want to shutdown goroutines if there's no activity for some time. // Closing the channel would introduce "send on closed chan" errors since channel consumers also produce new messages. ctx, cancel := context.WithCancel(c.ctx) c.wg.Add(numWorkers) for i := 0; i < numWorkers; i++ { go c.worker(ctx) } // Check every second if we're still actively crawling pages limit := 1.5 * c.client.Timeout.Seconds() for { if len(c.visitChan) == 0 && time.Since(c.lastActivity()).Seconds() > limit { break } select { case <-c.ctx.Done(): goto endfor case <-time.After(time.Second): } } endfor: cancel() // cancel goroutines and in-flight requests (if any) c.wg.Wait() // wait for shutdown close(c.visitChan) // close after we're done (not before) to prevent send on closed channel errors }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (p *Pool) Run() {\n\t// starting n number of workers\n\tfor i := 0; i < p.maxWorkers; i++ {\n\t\tworker := NewWorker(i+1, p.workerPool, true)\n\t\tworker.Start()\n\t\tp.workers = append(p.workers, worker)\n\t}\n\n\tgo p.dispatch()\n\n\tp.status = Started\n}", "func (p *Pool) Run(w Worker) {\n p.job <- w\n}...
[ "0.8138579", "0.7590641", "0.7388474", "0.7091481", "0.70803887", "0.7080261", "0.6920038", "0.6850398", "0.67854327", "0.67674756", "0.67474246", "0.668862", "0.6676274", "0.6672302", "0.6565446", "0.65594107", "0.6554532", "0.6526274", "0.65239984", "0.6519984", "0.65196973...
0.0
-1
getSourcegraphVersion queries the Sourcegraph GraphQL API to get the current version of the Sourcegraph instance.
func (svc *Service) getSourcegraphVersion(ctx context.Context) (string, error) { var result struct { Site struct { ProductVersion string } } ok, err := svc.client.NewQuery(sourcegraphVersionQuery).Do(ctx, &result) if err != nil || !ok { return "", err } return result.Site.ProductVersion, err }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (obj *Edge) GetVersion() int {\n\treturn obj.getVersion()\n}", "func GetVersion() string {\n\treturn version\n}", "func GetVersion() string {\n\treturn version\n}", "func (c *Context) GetVersion() string { // 获取版本号\n\treturn c.GetGinCtx().Param(\"version\")\n}", "func (o VirtualDatabaseSpecBuildSource...
[ "0.5781868", "0.5636606", "0.5636606", "0.5576799", "0.55693847", "0.5562794", "0.55050653", "0.549268", "0.54771435", "0.5471268", "0.5450064", "0.5450064", "0.5440955", "0.5429904", "0.541623", "0.5398498", "0.5388633", "0.53843856", "0.53595656", "0.5346013", "0.53183573",...
0.80999446
0
DetermineFeatureFlags fetches the version of the configured Sourcegraph instance and then sets flags on the Service itself to use features available in that version, e.g. gzip compression.
func (svc *Service) DetermineFeatureFlags(ctx context.Context) error { version, err := svc.getSourcegraphVersion(ctx) if err != nil { return errors.Wrap(err, "failed to query Sourcegraph version to check for available features") } return svc.features.setFromVersion(version) }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func InitFeatureFlags(flag *pflag.FlagSet) {\n\tflag.Bool(FeatureFlagAccessCode, false, \"Flag (bool) to enable requires-access-code\")\n\tflag.Bool(FeatureFlagRoleBasedAuth, false, \"Flag (bool) to enable role-based-auth\")\n\tflag.Bool(FeatureFlagConvertPPMsToGHC, false, \"Flag (bool) to enable convert-ppms-to-g...
[ "0.6283442", "0.58581585", "0.5759627", "0.56996477", "0.56361234", "0.5633944", "0.5601203", "0.5553357", "0.5548972", "0.55474377", "0.53957814", "0.53191704", "0.5276696", "0.5251556", "0.52426904", "0.5235017", "0.5213624", "0.5191827", "0.5179796", "0.5133154", "0.512049...
0.78591985
0
Handler maps the different existing endpoints with the functions they must call
func Handler(a adding.Service, l listing.Service, d deleting.Service) *gin.Engine { router := gin.Default() router.GET("/movies", listMovies(l)) router.GET("/movies/:id", getMovie(l)) router.POST("/movies", addMovie(a)) router.DELETE("/movies/:id", deleteMovie(d)) return router }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (service *MetadataService) Endpoints() map[string]map[string]http.HandlerFunc {\n\thandlers := map[string]map[string]http.HandlerFunc{}\n\n\tfor index, value := range service.config.MetadataPrefixes {\n\t\tserver.Log.Info(\"adding Metadata prefix (\", index, \") \", value)\n\t\thandlers[value+\"/\"] = map[str...
[ "0.66714734", "0.6644675", "0.635691", "0.6305351", "0.62427664", "0.6203105", "0.61850226", "0.61681867", "0.616791", "0.6160118", "0.6154112", "0.6140186", "0.61255753", "0.6122404", "0.6095826", "0.60942966", "0.6088335", "0.60751134", "0.6050221", "0.6041102", "0.6012753"...
0.0
-1
ApplyOptions applies given opts and returns the resulting Options. This function should not be used directly by end users; it's only exposed as a side effect of Option.
func ApplyOptions(opt ...Option) Options { opts := Options{ MaxTraversalLinks: math.MaxInt64, //default: traverse all MaxAllowedHeaderSize: carv1.DefaultMaxAllowedHeaderSize, MaxAllowedSectionSize: carv1.DefaultMaxAllowedSectionSize, } for _, o := range opt { o(&opts) } // Set defaults for zero valued fields. if opts.IndexCodec == 0 { opts.IndexCodec = multicodec.CarMultihashIndexSorted } if opts.MaxIndexCidSize == 0 { opts.MaxIndexCidSize = DefaultMaxIndexCidSize } return opts }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (o *MatchOptions) ApplyOptions(opts []MatchOption) *MatchOptions {\n\tfor _, opt := range opts {\n\t\topt.ApplyToMatcher(o)\n\t}\n\treturn o\n}", "func (o *PatchOptions) ApplyOptions(opts []PatchOption) {\n\tfor _, opt := range opts {\n\t\topt.ApplyToHelper(o)\n\t}\n}", "func (uo *SubResourceUpdateOptions...
[ "0.7181345", "0.7011728", "0.66453886", "0.6608931", "0.66003937", "0.6593034", "0.6515448", "0.6360162", "0.6322618", "0.63175213", "0.6217988", "0.6186789", "0.6160557", "0.6152657", "0.6120525", "0.60960215", "0.6058859", "0.6029311", "0.5828136", "0.57501745", "0.5745696"...
0.6390833
7
ZeroLengthSectionAsEOF sets whether to allow the CARv1 decoder to treat a zerolength section as the end of the input CAR file. For example, this can be useful to allow "null padding" after a CARv1 without knowing where the padding begins.
func ZeroLengthSectionAsEOF(enable bool) Option { return func(o *Options) { o.ZeroLengthSectionAsEOF = enable } }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func TestReadEmptyAtEOF(t *testing.T) {\n\tb := new(Builder)\n\tslice := make([]byte, 0)\n\tn, err := b.Read(slice)\n\tif err != nil {\n\t\tt.Errorf(\"read error: %v\", err)\n\t}\n\tif n != 0 {\n\t\tt.Errorf(\"wrong count; got %d want 0\", n)\n\t}\n}", "func IsEOF(c rune, n int) bool {\n\treturn n == 0\n}", "f...
[ "0.52038777", "0.4886099", "0.46940255", "0.45598406", "0.45515433", "0.44806105", "0.44333175", "0.44333175", "0.44333175", "0.44333175", "0.44333175", "0.4400239", "0.43830788", "0.43444872", "0.43374717", "0.43305448", "0.43090913", "0.42828125", "0.42710194", "0.4262915", ...
0.81493616
0
UseDataPadding sets the padding to be added between CARv2 header and its data payload on Finalize.
func UseDataPadding(p uint64) Option { return func(o *Options) { o.DataPadding = p } }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func padData(rawData []byte) []byte {\n\tneedPadding := aes.BlockSize - ((len(rawData) + 2) % aes.BlockSize)\n\n\tvar dataBuf bytes.Buffer\n\tdataBuf.Grow(2 + len(rawData) + (aes.BlockSize % (len(rawData) + 2)))\n\n\tdataBuf.Write([]byte(\"|\"))\n\tdataBuf.Write(rawData)\n\tdataBuf.Write([]byte(\"|\"))\n\n\tfor i ...
[ "0.5258283", "0.4555469", "0.4480234", "0.44573864", "0.43533745", "0.4337491", "0.4326558", "0.42585558", "0.42437828", "0.42047027", "0.41544282", "0.41253066", "0.41225743", "0.41110697", "0.40878302", "0.40734228", "0.4069261", "0.40326965", "0.40245518", "0.40231052", "0...
0.7320334
0
UseIndexPadding sets the padding between data payload and its index on Finalize.
func UseIndexPadding(p uint64) Option { return func(o *Options) { o.IndexPadding = p } }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (s *BasePlSqlParserListener) ExitUsing_index_clause(ctx *Using_index_clauseContext) {}", "func UseDataPadding(p uint64) Option {\n\treturn func(o *Options) {\n\t\to.DataPadding = p\n\t}\n}", "func (vm *vrfManager) releaseIndex(vrf *VRF) {\n\tvm.byIndex[int(vrf.index)] = nil\n}", "func (dict *Dictionary)...
[ "0.496226", "0.49470598", "0.49359372", "0.4929836", "0.48813668", "0.48754716", "0.48505393", "0.47770718", "0.47507846", "0.4717412", "0.47086638", "0.47060275", "0.46133706", "0.45682114", "0.45536166", "0.45449305", "0.45405245", "0.45287335", "0.45117015", "0.45112503", ...
0.7548068
0
UseIndexCodec sets the codec used for index generation.
func UseIndexCodec(c multicodec.Code) Option { return func(o *Options) { o.IndexCodec = c } }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func UseIndex(designDocument, name string) Parameter {\n\treturn func(pa Parameterizable) {\n\t\tif name == \"\" {\n\t\t\tpa.SetParameter(\"use_index\", designDocument)\n\t\t} else {\n\t\t\tpa.SetParameter(\"use_index\", []string{designDocument, name})\n\t\t}\n\t}\n}", "func (o *BlockBasedTableOptions) SetIndexT...
[ "0.5502938", "0.52192473", "0.51957196", "0.51697326", "0.5116415", "0.5042837", "0.50319487", "0.48964846", "0.48657367", "0.48536038", "0.48389342", "0.4827276", "0.48076987", "0.4801819", "0.47965235", "0.4753826", "0.47460607", "0.4742304", "0.47193858", "0.47038877", "0....
0.8574102
0
WithoutIndex flags that no index should be included in generation.
func WithoutIndex() Option { return func(o *Options) { o.IndexCodec = index.CarIndexNone } }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func IndexOptionsNone() IndexOptions {\n\tresult := IndexOptions{}\n\n\treturn result\n}", "func (dict *Dictionary) DropIndex() {\n\tdict.shortIndex = nil\n\tdict.longIndex = nil\n}", "func (_m *DirectRepositoryWriter) DisableIndexRefresh() {\n\t_m.Called()\n}", "func (r *Search) AllowNoIndices(allownoindice...
[ "0.62528235", "0.6120058", "0.6103179", "0.5899328", "0.5636369", "0.56347483", "0.56101", "0.5556883", "0.54259133", "0.5418361", "0.54178435", "0.53887594", "0.5346785", "0.5267003", "0.52648", "0.52637637", "0.5242605", "0.5237273", "0.52297145", "0.5219901", "0.5217866", ...
0.7322742
0
StoreIdentityCIDs sets whether to persist sections that are referenced by CIDs with multihash.IDENTITY digest. When writing CAR files with this option, Characteristics.IsFullyIndexed will be set. By default, the blockstore interface will always return true for Has() called with identity CIDs, but when this option is turned on, it will defer to the index. When creating an index (or loading a CARv1 as a blockstore), when this option is on, identity CIDs will be included in the index. This option is disabled by default.
func StoreIdentityCIDs(b bool) Option { return func(o *Options) { o.StoreIdentityCIDs = b } }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (cosi *cosiAggregate) StoreIdentities(idents map[string]proto.Message) {\n\tfor k, v := range idents {\n\t\tpoint := suite.G2().Point()\n\t\terr := point.UnmarshalBinary(v.(*BdnIdentity).PublicKey)\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\n\t\tcosi.skipchain.identities[k] = point\n\t}\n}", "func Stor...
[ "0.5328413", "0.48285127", "0.46915764", "0.460706", "0.45859408", "0.45038688", "0.4317314", "0.43106538", "0.4235155", "0.42342368", "0.42137128", "0.41937977", "0.41885242", "0.41864616", "0.41690555", "0.4165227", "0.41583925", "0.41418397", "0.41260913", "0.41253287", "0...
0.7659749
0
MaxIndexCidSize specifies the maximum allowed size for indexed CIDs in bytes. Indexing a CID with larger than the allowed size results in ErrCidTooLarge error.
func MaxIndexCidSize(s uint64) Option { return func(o *Options) { o.MaxIndexCidSize = s } }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (c *Cache) MaxSize() (maxSize int64) {\n\tfor _, shard := range c.shards {\n\t\tmaxSize += shard.maxSize\n\t}\n\treturn int64(bytesToMB(int(maxSize)))\n}", "func (p *MessagePartition) calculateMaxMessageIdFromIndex(fileId uint64) (uint64, error) {\n\tstat, err := os.Stat(p.indexFilenameByMessageId(fileId))\...
[ "0.54128367", "0.5239222", "0.5225754", "0.5207175", "0.517067", "0.5167994", "0.51654756", "0.514978", "0.5139812", "0.5123632", "0.51004505", "0.5087268", "0.50867826", "0.507669", "0.5049227", "0.50329137", "0.5025627", "0.5024108", "0.49574798", "0.49260557", "0.49127924"...
0.8566187
0
WithTraversalPrototypeChooser specifies the prototype chooser that should be used when performing traversals in writes from a linksystem.
func WithTraversalPrototypeChooser(t traversal.LinkTargetNodePrototypeChooser) Option { return func(o *Options) { o.TraversalPrototypeChooser = t } }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func NewSocketsTraversalExtension() *SocketsTraversalExtension {\n\treturn &SocketsTraversalExtension{\n\t\tSocketsToken: traversalSocketsToken,\n\t}\n}", "func WithSortingByPathAscAndRevisionDesc() GetImplementationOption {\n\treturn func(options *ListImplementationRevisionsOptions) {\n\t\toptions.sortByPathAsc...
[ "0.39877933", "0.39078513", "0.38892928", "0.374556", "0.37302673", "0.36932385", "0.35834628", "0.33725303", "0.336618", "0.33358172", "0.33044896", "0.32975402", "0.32719445", "0.32489514", "0.32471502", "0.32446983", "0.32418078", "0.3223724", "0.32211462", "0.3219564", "0...
0.7862286
0
WithTrustedCAR specifies whether CIDs match the block data as they are read from the CAR files.
func WithTrustedCAR(t bool) Option { return func(o *Options) { o.TrustedCAR = t } }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func isSpecTrustedCASet(proxyConfig *configv1.ProxySpec) bool {\n\treturn len(proxyConfig.TrustedCA.Name) > 0\n}", "func WithTrusted(trusted bool) Option {\n\treturn func(linter *Linter) {\n\t\tlinter.trusted = trusted\n\t}\n}", "func (_Casper *CasperTransactor) SetTrusted(opts *bind.TransactOpts, addr common....
[ "0.5050521", "0.46469218", "0.44809136", "0.4400281", "0.43353248", "0.43324968", "0.42241368", "0.41937608", "0.41843116", "0.41048527", "0.4037473", "0.40296867", "0.4016661", "0.40072775", "0.39855403", "0.39722", "0.39688164", "0.39688164", "0.3953601", "0.39267403", "0.3...
0.7244435
0
MaxAllowedHeaderSize overrides the default maximum size (of 32 MiB) that a CARv1 decode (including within a CARv2 container) will allow a header to be without erroring.
func MaxAllowedHeaderSize(max uint64) Option { return func(o *Options) { o.MaxAllowedHeaderSize = max } }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (s *fseEncoder) maxHeaderSize() uint32 {\n\tif s.preDefined {\n\t\treturn 0\n\t}\n\tif s.useRLE {\n\t\treturn 8\n\t}\n\treturn (((uint32(s.symbolLen) * uint32(s.actualTableLog)) >> 3) + 3) * 8\n}", "func (*testObject) MaxHeaderLength() uint16 {\n\treturn 0\n}", "func (*endpoint) MaxHeaderLength() uint16 {...
[ "0.67647296", "0.63822037", "0.60273457", "0.5796491", "0.5741116", "0.5712794", "0.5712794", "0.55037737", "0.54724437", "0.54210025", "0.54210025", "0.54165447", "0.53984994", "0.5197604", "0.5164738", "0.5153342", "0.50923467", "0.50772697", "0.5059694", "0.502238", "0.501...
0.7392962
0
MaxAllowedSectionSize overrides the default maximum size (of 8 MiB) that a CARv1 decode (including within a CARv2 container) will allow a header to be without erroring. Typically IPLD blocks should be under 2 MiB (ideally under 1 MiB), so unless atypical data is expected, this should not be a large value.
func MaxAllowedSectionSize(max uint64) Option { return func(o *Options) { o.MaxAllowedSectionSize = max } }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func MaxAllowedHeaderSize(max uint64) Option {\n\treturn func(o *Options) {\n\t\to.MaxAllowedHeaderSize = max\n\t}\n}", "func (*endpoint) MaxHeaderLength() uint16 {\n\treturn header.EthernetMinimumSize\n}", "func (*testObject) MaxHeaderLength() uint16 {\n\treturn 0\n}", "func (st *Settings) MaxHeaderListSize...
[ "0.64527285", "0.56640345", "0.5559902", "0.5421008", "0.5421008", "0.5341554", "0.5331863", "0.53089076", "0.519577", "0.5166597", "0.51638806", "0.5088928", "0.5074577", "0.50688905", "0.50639635", "0.5061279", "0.50553644", "0.5040562", "0.5002771", "0.49799952", "0.497520...
0.6830897
0
WriteAsCarV1 is a write option which makes a CAR interface (blockstore or storage) write the output as a CARv1 only, with no CARv2 header or index. Indexing is used internally during write but is discarded upon finalization. Note that this option only affects the storage interfaces (blockstore or storage), and is ignored by the root gocar/v2 package.
func WriteAsCarV1(asCarV1 bool) Option { return func(o *Options) { o.WriteAsCarV1 = asCarV1 } }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (r *RAMOutputStream) WriteToV1(bytes []byte) error {\n\terr := r.flush()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tend := int(r.file.length)\n\tpos, buffer, bytesUpto := 0, 0, 0\n\n\tfor pos < end {\n\t\tlength := r.bufferSize\n\t\tnextPos := pos + length\n\t\tif nextPos > end {\n\t\t\tlength = end - pos\n...
[ "0.48664775", "0.44362253", "0.43605676", "0.43476164", "0.4318693", "0.42778337", "0.42568678", "0.4225701", "0.4217781", "0.41908678", "0.41819924", "0.41723937", "0.41719002", "0.41454837", "0.41401184", "0.4108826", "0.41082826", "0.4107811", "0.41051665", "0.40967038", "...
0.8315218
0
AllowDuplicatePuts is a write option which makes a CAR interface (blockstore or storage) not deduplicate blocks in Put and PutMany. The default is to deduplicate, which matches the current semantics of goipfsblockstore v1. Note that this option only affects the storage interfaces (blockstore or storage), and is ignored by the root gocar/v2 package.
func AllowDuplicatePuts(allow bool) Option { return func(o *Options) { o.BlockstoreAllowDuplicatePuts = allow } }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func DisallowDuplicateKey() DecodeOption {\n\treturn func(d *Decoder) error {\n\t\td.disallowDuplicateKey = true\n\t\treturn nil\n\t}\n}", "func (c *Client) PutDuplicate(oldName, newName upspin.PathName) (*upspin.DirEntry, error) {\n\tconst op errors.Op = \"client.PutDuplicate\"\n\tm, s := newMetric(op)\n\tdefer...
[ "0.5433435", "0.51779854", "0.5121265", "0.45189086", "0.44898877", "0.43579203", "0.43035212", "0.42886138", "0.4212644", "0.4183098", "0.4140767", "0.4131019", "0.40938774", "0.40888754", "0.40803897", "0.40694353", "0.40229222", "0.40060905", "0.4005224", "0.39877382", "0....
0.8344131
0
Hours returns the duration as a floating point number of hours.
func (d Duration) Hours() float64 { return time.Duration(d).Hours() }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (d duration) hours() float64 {\n\thour := d / hour\n\tnsec := d % hour\n\treturn float64(hour) + float64(nsec)*(1e-9/60/60)\n}", "func (d Duration) Hours() float64 {\n\thour := d / Hour\n\tusec := d % Hour\n\treturn float64(hour) + float64(usec)/(60*60*1e6)\n}", "func (f *Formatter) Hours() string {\n\tva...
[ "0.8240469", "0.7533591", "0.7021137", "0.6808942", "0.6702991", "0.64857084", "0.6369934", "0.6315462", "0.62868947", "0.6244098", "0.6225003", "0.61721426", "0.60986143", "0.6051746", "0.60097885", "0.60097885", "0.5976994", "0.59059703", "0.58845806", "0.5852544", "0.58464...
0.73899436
2
Minutes returns the duration as a floating point number of minutes.
func (d Duration) Minutes() float64 { return time.Duration(d).Minutes() }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (f *Formatter) Minutes() string {\n\tvar format string\n\tif f.withoutUnit {\n\t\tformat = \"%d\\n\"\n\t} else {\n\t\tformat = \"%d minutes\\n\"\n\t}\n\treturn fmt.Sprintf(format, int(f.duration.Minutes()))\n}", "func (d Duration) Minutes() float64 {\n\tmin := d / Minute\n\tusec := d % Minute\n\treturn floa...
[ "0.67800033", "0.67238533", "0.671037", "0.671037", "0.6626685", "0.64240044", "0.6328356", "0.61948246", "0.6168142", "0.6068626", "0.6063561", "0.6036192", "0.6029605", "0.60184914", "0.5966828", "0.58673483", "0.5823611", "0.581647", "0.5814458", "0.5779258", "0.57744646",...
0.6980067
0
Nanoseconds returns the duration as an integer nanosecond count.
func (d Duration) Nanoseconds() int64 { return time.Duration(d).Nanoseconds() }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (s Stopwatch) Nanoseconds() int64 {\n\treturn s.acc.Nanoseconds()\n}", "func (ft *filetime) Nanoseconds() int64 {\n\t// 100-nanosecond intervals since January 1, 1601\n\tnsec := int64(ft.HighDateTime)<<32 + int64(ft.LowDateTime)\n\t// change starting time to the Epoch (00:00:00 UTC, January 1, 1970)\n\tnsec...
[ "0.7365707", "0.71677697", "0.71562576", "0.7091952", "0.68987876", "0.68987876", "0.68987876", "0.68987876", "0.6809148", "0.66780186", "0.66759145", "0.6439207", "0.64132607", "0.62661654", "0.62491643", "0.62183124", "0.6110412", "0.61070853", "0.60488445", "0.6042173", "0...
0.81052667
0
Seconds returns the duration as a floating point number of seconds.
func (d Duration) Seconds() float64 { return time.Duration(d).Seconds() }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (d Duration) Seconds() float64 {\n\tsec := d / Second\n\tusec := d % Second\n\treturn float64(sec) + float64(usec)/1e6\n}", "func fseconds(d time.Duration) float64 { return float64(d) / float64(time.Second) }", "func (c *ClockVal) Seconds(d time.Duration) float64 {\n\treturn d.Seconds()\n}", "func (s St...
[ "0.8146225", "0.7864046", "0.7570394", "0.72681606", "0.7212462", "0.7174782", "0.7154755", "0.71513623", "0.6977348", "0.6971889", "0.6894677", "0.6776812", "0.6714895", "0.67132235", "0.66712093", "0.65651536", "0.6538294", "0.65263283", "0.65176785", "0.6466086", "0.643401...
0.81206095
1
Returns a string representation of the duration in russian language
func (d Duration) String() (result string) { var seconds, minutes, hours int seconds = int(d.Seconds()) if seconds > 60 { minutes = (seconds - seconds%60) / 60 seconds = seconds % 60 } if minutes > 59 { hours = (minutes - minutes%60) / 60 minutes = minutes - hours*60 result = numberInString(hours, false) result += " " + hoursTail(hours) } if minutes != 0 { if result != "" { result += ", " } result += strings.ToLower(numberInString(minutes, true)) result += " " + minutesTail(minutes) } if seconds != 0 { if result != "" { result += ", " } result += strings.ToLower(numberInString(seconds, true)) result += " " + secondsTail(seconds) } return }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (d Duration) String() string {}", "func (s TtlDuration) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (d Duration) String() string {\n\tvalue := int64(d)\n\tout := \"\"\n\tif value < 0 {\n\t\tout = \"-\"\n\t\tvalue = -value\n\t}\n\tdivmod := func(divisor, dividend int64) (int64, int64) {\n\t\...
[ "0.6247409", "0.61673176", "0.58863103", "0.58602095", "0.58570576", "0.5843788", "0.57781535", "0.57749504", "0.5758013", "0.5729817", "0.57266206", "0.56786615", "0.5660766", "0.5638222", "0.5626445", "0.56179184", "0.5606179", "0.5606179", "0.5599463", "0.5569892", "0.5564...
0.5642375
13
Returns a string representation of the approximate duration in russian language
func (d Duration) StringApproximate() (result string) { var seconds, minutes, hours, days, months, years int seconds = int(d.Seconds()) if seconds > 60 { minutes = int(d.Minutes()) } if minutes > 59 { hours = int(d.Hours()) minutes = minutes - hours*60 } if hours > 24 { days = (hours - hours%24) / 24 hours = hours - days*24 } if days > 365 { years = (days - days%365) / 365 days = days - years*365 } if days > 30 { months = (days - days%30) / 30 days = days - months*30 } if years > 0 { if months < 3 { result = numberInString(years, false) + " " + yearsTail(years) } else { result = "Более" if years > 1 { result = " " + strings.ToLower(numberStringInGenitiveCase(years, false)) } result += " " + strings.ToLower(numberStringInGenitiveCase(years, false)) + " " + strings.ToLower(yearsTailInGenitiveCase(years)) } } else if months > 0 { if days < 8 { result = numberInString(months, false) + " " + monthsTail(months) } else { result = "Более" if months > 1 { result = " " + strings.ToLower(numberStringInGenitiveCase(months, false)) } result += " " + strings.ToLower(numberStringInGenitiveCase(months, false)) + " " + strings.ToLower(monthsTailInGenitiveCase(months)) } } else if days > 0 { if hours < 5 { result = numberInString(days, false) + " " + daysTail(days) } else { result = "Более " if days == 1 { result += "суток" } else { result += strings.ToLower(numberStringInGenitiveCase(days, false)) + " суток" } } } else if hours > 0 { if minutes < 16 { result = numberInString(hours, false) + " " + hoursTail(hours) } else { result = "Более " if hours == 1 { result += "часа" } else { result += strings.ToLower(numberStringInGenitiveCase(hours, false)) result += " " + strings.ToLower(hoursTailInGenitiveCase(hours)) } } } else if minutes > 0 { if minutes == 1 { result = "Минуту" } else { result = numberInString(minutes, true) + " " + minutesTail(minutes) } } else { result = "Менее минуты" } result += " назад" return }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func toElapsedLabel(rfc850time string) string {\n\tcreated, err := time.Parse(time.RFC850, rfc850time)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\telapsed := time.Now().UTC().Sub(created.UTC())\n\tseconds := elapsed.Seconds()\n\tminutes := elapsed.Minutes()\n\thours := elapsed.Hours()\n\tdays := hours / 24\n\tw...
[ "0.6004345", "0.5819394", "0.5691862", "0.5682199", "0.5630542", "0.55865556", "0.5574526", "0.5537256", "0.5509831", "0.5462976", "0.5461879", "0.5460645", "0.54367495", "0.54135704", "0.5379238", "0.53715813", "0.5345213", "0.5343836", "0.5315461", "0.5292297", "0.52745247"...
0.6517362
0
AddLabel adds a label to the specified PR or issue
func (fc *fakeClient) AddLabel(owner, repo string, number int, label string) error { fc.added = append(fc.added, label) fc.labels = append(fc.labels, label) return nil }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (issue *Issue) AddLabel(labels []string) error {\n\tfor i, val := range labels {\n\t\tlabels[i] = fmt.Sprintf(`{\"add\": \"%s\"}`, val)\n\t}\n\treturn updateLabelsHelper(labels, issue.Key)\n}", "func (issue *Issue) AddLabel(labels []string) error {\n\tfor i, val := range labels {\n\t\tlabels[i] = fmt.Sprint...
[ "0.75481737", "0.75481737", "0.7350045", "0.71433645", "0.7023004", "0.6740278", "0.655589", "0.64857525", "0.6314333", "0.62937886", "0.62546927", "0.62197626", "0.6193546", "0.61563903", "0.6149052", "0.6143574", "0.61072576", "0.6095906", "0.60725856", "0.60462886", "0.603...
0.72734094
3
RemoveLabel removes the label from the specified PR or issue
func (fc *fakeClient) RemoveLabel(owner, repo string, number int, label string) error { fc.removed = append(fc.removed, label) // remove from existing labels for k, v := range fc.labels { if label == v { fc.labels = append(fc.labels[:k], fc.labels[k+1:]...) break } } return nil }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (issue *Issue) RemoveLabel(labels []string) error {\n\tfor i, val := range labels {\n\t\tlabels[i] = fmt.Sprintf(`{\"remove\": \"%s\"}`, val)\n\t}\n\n\treturn updateLabelsHelper(labels, issue.Key)\n}", "func (issue *Issue) RemoveLabel(labels []string) error {\n\tfor i, val := range labels {\n\t\tlabels[i] =...
[ "0.7710544", "0.7710544", "0.7190297", "0.7131391", "0.69650364", "0.6953825", "0.68455964", "0.66010845", "0.65678805", "0.6539643", "0.63907325", "0.63030523", "0.6289586", "0.6282687", "0.62680924", "0.6208816", "0.620009", "0.61222297", "0.6066965", "0.6064077", "0.600839...
0.7706561
2
GetIssueLabels gets the current labels on the specified PR or issue
func (fc *fakeClient) GetIssueLabels(owner, repo string, number int) ([]github.Label, error) { var la []github.Label for _, l := range fc.labels { la = append(la, github.Label{Name: l}) } return la, nil }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (c *client) GetIssueLabels(org, repo string, number int) ([]Label, error) {\n\tdurationLogger := c.log(\"GetIssueLabels\", org, repo, number)\n\tdefer durationLogger()\n\n\treturn c.getLabels(fmt.Sprintf(\"/repos/%s/%s/issues/%d/labels\", org, repo, number), org)\n}", "func (issue *Issue) GetLabels() []stri...
[ "0.8113356", "0.7227975", "0.70755315", "0.7042123", "0.70022243", "0.6956072", "0.69555527", "0.6602383", "0.65491056", "0.65308553", "0.6387276", "0.63280463", "0.63201314", "0.6253154", "0.6241437", "0.62240124", "0.62148833", "0.6124567", "0.6104237", "0.5956916", "0.5950...
0.8116904
0
CreateComment adds and tracks a comment in the client
func (fc *fakeClient) CreateComment(owner, repo string, number int, comment string) error { fc.commentsAdded[number] = append(fc.commentsAdded[number], comment) return nil }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (s *Server) CreateComment(w http.ResponseWriter, r *http.Request) {\n\tctx := r.Context()\n\n\tb, err := ioutil.ReadAll(r.Body)\n\tdefer r.Body.Close()\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), 500)\n\t\treturn\n\t}\n\n\tvar comment Comment\n\tif err = json.Unmarshal(b, &comment); err != nil {\n\t\th...
[ "0.78270876", "0.77825135", "0.7703459", "0.76506597", "0.7603772", "0.7488165", "0.74705166", "0.74413574", "0.7431552", "0.7339268", "0.71487004", "0.71214473", "0.7113855", "0.71087986", "0.71036273", "0.70275646", "0.70132875", "0.6990566", "0.6989565", "0.6970975", "0.69...
0.8281089
1
NumComments counts the number of tracked comments
func (fc *fakeClient) NumComments() int { n := 0 for _, comments := range fc.commentsAdded { n += len(comments) } return n }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (c *commentsQueryBuilder) Count() (int64, error) {\n\tif c.err != nil {\n\t\treturn 0, c.err\n\t}\n\treturn c.builder.Count()\n}", "func (o *ViewMilestone) GetCommentsCount() int32 {\n\tif o == nil || o.CommentsCount == nil {\n\t\tvar ret int32\n\t\treturn ret\n\t}\n\treturn *o.CommentsCount\n}", "func (q...
[ "0.68079066", "0.6796185", "0.6678622", "0.6601007", "0.65994287", "0.6564124", "0.65543514", "0.6539888", "0.6526384", "0.6513839", "0.64962953", "0.6486291", "0.6461657", "0.638486", "0.63353264", "0.63241607", "0.6318741", "0.63061756", "0.6243758", "0.62380064", "0.623787...
0.8412568
1
NewOutput instantiates a new output plugin instance publishing to elasticsearch.
func (f elasticsearchOutputPlugin) NewOutput( config *outputs.MothershipConfig, topologyExpire int, ) (outputs.Outputer, error) { // configure bulk size in config in case it is not set if config.BulkMaxSize == nil { bulkSize := defaultBulkSize config.BulkMaxSize = &bulkSize } output := &elasticsearchOutput{} err := output.init(*config, topologyExpire) if err != nil { return nil, err } return output, nil }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (m *Manager) NewOutput(conf loutput.Config, pipelines ...processor.PipelineConstructorFunc) (output.Streamed, error) {\n\treturn bundle.AllOutputs.Init(conf, m, pipelines...)\n}", "func NewOutput() *Output {\n\treturn &Output{}\n}", "func newOutput(node rpcClient, txHash *chainhash.Hash, vout uint32, valu...
[ "0.6398827", "0.6216201", "0.61832756", "0.6142493", "0.600292", "0.600292", "0.58812755", "0.5859912", "0.57929546", "0.57894796", "0.578846", "0.5770203", "0.5722518", "0.5554305", "0.5553186", "0.55329394", "0.55323535", "0.55209005", "0.54609996", "0.5429639", "0.54219884...
0.8399119
0
loadTemplate checks if the index mapping template should be loaded In case template loading is enabled, template is written to index
func loadTemplate(config outputs.Template, clients []mode.ProtocolClient) { // Check if template should be loaded // Not being able to load the template will output an error but will not stop execution if config.Name != "" && len(clients) > 0 { // Always takes the first client esClient := clients[0].(*Client) logp.Info("Loading template enabled. Trying to load template: %v", config.Path) exists := esClient.CheckTemplate(config.Name) // Check if template already exist or should be overwritten if !exists || config.Overwrite { if config.Overwrite { logp.Info("Existing template will be overwritten, as overwrite is enabled.") } // Load template from file content, err := ioutil.ReadFile(config.Path) if err != nil { logp.Err("Could not load template from file path: %s; Error: %s", config.Path, err) } else { reader := bytes.NewReader(content) err = esClient.LoadTemplate(config.Name, reader) if err != nil { logp.Err("Could not load template: %v", err) } } } else { logp.Info("Template already exists and will not be overwritten.") } } }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func loadTemplate() {\n\tif _, err := os.Stat(indexPath); err == nil {\n\t\tif homeTemplate, err = template.New(\"index.html\").ParseFiles(indexPath); err != nil {\n\t\t\tlog.Errorf(\"Unable to parse template %s: %+v\", indexPath, err)\n\t\t}\n\t\tlog.Infof(\"Running with production template...\")\n\t\treturn\n\t}...
[ "0.6890042", "0.6643959", "0.624839", "0.62140644", "0.61590606", "0.60328317", "0.6032525", "0.5931923", "0.58915156", "0.58405447", "0.583602", "0.57904", "0.5784774", "0.57673085", "0.5760238", "0.5710334", "0.5703832", "0.5703815", "0.56856227", "0.5679397", "0.56789005",...
0.63263446
2
New returns a new PagerDuty notifier.
func New(c *config.PagerdutyConfig, t *template.Template, l log.Logger, httpOpts ...commoncfg.HTTPClientOption) (*Notifier, error) { client, err := commoncfg.NewClientFromConfig(*c.HTTPConfig, "pagerduty", httpOpts...) if err != nil { return nil, err } n := &Notifier{conf: c, tmpl: t, logger: l, client: client} if c.ServiceKey != "" || c.ServiceKeyFile != "" { n.apiV1 = "https://events.pagerduty.com/generic/2010-04-15/create_event.json" // Retrying can solve the issue on 403 (rate limiting) and 5xx response codes. // https://v2.developer.pagerduty.com/docs/trigger-events n.retrier = &notify.Retrier{RetryCodes: []int{http.StatusForbidden}, CustomDetailsFunc: errDetails} } else { // Retrying can solve the issue on 429 (rate limiting) and 5xx response codes. // https://v2.developer.pagerduty.com/docs/events-api-v2#api-response-codes--retry-logic n.retrier = &notify.Retrier{RetryCodes: []int{http.StatusTooManyRequests}, CustomDetailsFunc: errDetails} } return n, nil }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func New(c *config.DingTalkConfig, t *template.Template, l log.Logger) (*Notifier, error) {\n\tclient, err := commoncfg.NewClientFromConfig(*c.HTTPConfig, \"dingtalk\", false, false)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &Notifier{conf: c, tmpl: t, logger: l, client: client}, nil\n}", "func Ne...
[ "0.7220423", "0.7142861", "0.6967976", "0.68429434", "0.6688869", "0.6591405", "0.65628475", "0.65613985", "0.6546102", "0.6531209", "0.6517486", "0.6479263", "0.6462606", "0.64456606", "0.64222896", "0.6400352", "0.63652164", "0.6297083", "0.62604415", "0.6237658", "0.620780...
0.77650785
0
Notify implements the Notifier interface.
func (n *Notifier) Notify(ctx context.Context, as ...*types.Alert) (bool, error) { key, err := notify.ExtractGroupKey(ctx) if err != nil { return false, err } var ( alerts = types.Alerts(as...) data = notify.GetTemplateData(ctx, n.tmpl, as, n.logger) eventType = pagerDutyEventTrigger ) if alerts.Status() == model.AlertResolved { eventType = pagerDutyEventResolve } level.Debug(n.logger).Log("incident", key, "eventType", eventType) details := make(map[string]string, len(n.conf.Details)) for k, v := range n.conf.Details { detail, err := n.tmpl.ExecuteTextString(v, data) if err != nil { return false, errors.Wrapf(err, "%q: failed to template %q", k, v) } details[k] = detail } if n.apiV1 != "" { return n.notifyV1(ctx, eventType, key, data, details, as...) } return n.notifyV2(ctx, eventType, key, data, details, as...) }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (r NopReporter) Notify(ctx context.Context, err error) {}", "func (notifier *Notifier) Notify(notification Notification) {\n\n}", "func (n *Notifier) Notify(err interface{}) error {\n\t_, sendErr := n.Client.SendNotice(NewNotice(err, nil))\n\treturn ex.New(sendErr)\n}", "func (n *IFTTTNotifier) Notify(m...
[ "0.8132382", "0.79153454", "0.73934114", "0.73212564", "0.72838044", "0.7273848", "0.72720206", "0.7210515", "0.71945655", "0.7182227", "0.7165415", "0.7159474", "0.7114332", "0.7078729", "0.7068588", "0.7026278", "0.69815", "0.696589", "0.69469804", "0.69469804", "0.69469804...
0.6415623
75
String returns a JSON representation of the model
func (o *Patchintegrationactionfields) String() string { o.RequestMappings = []Requestmapping{{}} j, _ := json.Marshal(o) str, _ := strconv.Unquote(strings.Replace(strconv.Quote(string(j)), `\\u`, `\u`, -1)) return str }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (m *Model) JSON() string {\n\tret := \"\"\n\tif m.Emm != nil {\n\t\tret += m.Emm.JSON()\n\t}\n\tif m.Snow != nil {\n\t\tret += m.Snow.JSON()\n\t}\n\treturn ret\n}", "func Stringnify(model interface{}) string {\n\tbyteModel, err := json.Marshal(model)\n\tif err != nil {\n\t\tfmt.Println(\"model Stringnify er...
[ "0.7602305", "0.6521702", "0.6492924", "0.6464296", "0.6367784", "0.6342571", "0.63273394", "0.62768203", "0.6276228", "0.6276228", "0.62744915", "0.6235233", "0.6219029", "0.61787146", "0.6173133", "0.61425954", "0.61332303", "0.6116796", "0.6115479", "0.6108615", "0.6108399...
0.0
-1
/ Outgoing connections Using an outgoing connection is a snap. A `net.Conn` satisfies the io.Reader and `io.Writer` interfaces, so we can treat a TCP connection just like any other `Reader` or `Writer`. Open connects to a TCP Address. It returns a TCP connection armed with a timeout and wrapped into a buffered ReadWriter.
func Open(addr string) (*bufio.ReadWriter, error) { // Dial the remote process. // Note that the local port is chosen on the fly. If the local port // must be a specific one, use DialTCP() instead. log.Println("Dial " + addr) conn, err := net.Dial("tcp", addr) if err != nil { return nil, errors.Wrap(err, "Dialing "+addr+" failed") } return bufio.NewReadWriter(bufio.NewReader(conn), bufio.NewWriter(conn)), nil }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func Open(addr string) (*bufio.ReadWriter, error) {\n\tlog.Println(\"Dial \" + addr)\n\tconn, err := net.Dial(\"tcp\", addr)\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"Dialing \"+addr+\" failed\")\n\t}\n\treturn bufio.NewReadWriter(bufio.NewReader(conn), bufio.NewWriter(conn)), nil\n}", "func NewConn...
[ "0.69614923", "0.6535082", "0.6439303", "0.63323253", "0.6207398", "0.6090019", "0.6024686", "0.5997635", "0.59855705", "0.5922736", "0.5920382", "0.5903573", "0.5891919", "0.5875956", "0.5862222", "0.5819222", "0.5783388", "0.5781409", "0.57801217", "0.57777804", "0.57632226...
0.6163392
5
NewEndpoint creates a new endpoint. To keep things simple, the endpoint listens on a fixed port number.
func NewEndpoint() *Endpoint { // Create a new Endpoint with an empty list of handler funcs. return &Endpoint{ handler: map[string]HandleFunc{}, } }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (*protocol) NewEndpoint(stack *stack.Stack, netProto tcpip.NetworkProtocolNumber,\n\twaiterQueue *waiter.Queue) (tcpip.Endpoint, *tcpip.Error) {\n\treturn newEndpoint(stack, netProto, waiterQueue), nil\n}", "func NewEndpoint(service health.Service) *Endpoint {\n\treturn &Endpoint{\n\t\tservice: service,\n\t...
[ "0.74292636", "0.72935945", "0.72410816", "0.7215659", "0.71920615", "0.711459", "0.70928013", "0.705746", "0.69879735", "0.6974851", "0.69721305", "0.68303615", "0.6723265", "0.6712752", "0.6605164", "0.6602713", "0.6558774", "0.65187037", "0.64022964", "0.6365016", "0.63610...
0.74718064
0
AddHandleFunc adds a new function for handling incoming data.
func (e *Endpoint) AddHandleFunc(name string, f HandleFunc) { e.mutex.Lock() e.handler[name] = f e.mutex.Unlock() }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (l Listener) AddHandler(cmd string, handleFunc func()) {\n\tl[cmd] = handleFunc\n}", "func (l *logPipe) HandleFunc(hf func(string) error) {\n\tl.handleFunc = hf\n}", "func HandleFunc(name string, handlerFunc func(Response)) {\n\thandlers[name] = toFunction(handlerFunc)\n}", "func HandleFunc(h HandlerFun...
[ "0.6896334", "0.6849562", "0.67962635", "0.6719276", "0.6719276", "0.6719276", "0.6665553", "0.66522294", "0.66437876", "0.64656377", "0.6382179", "0.63733864", "0.6351293", "0.634102", "0.6330453", "0.6318307", "0.62964743", "0.6292983", "0.6267896", "0.62676644", "0.6267022...
0.781103
0
Listen starts listening on the endpoint port on all interfaces. At least one handler function must have been added through AddHandleFunc() before.
func (e *Endpoint) Listen() error { var err error e.listener, err = net.Listen("tcp", Port) if err != nil { return errors.Wrapf(err, "Unable to listen on port %s\n", Port) } log.Println("Listen on", e.listener.Addr().String()) for { log.Println("Accept a connection request.") conn, err := e.listener.Accept() if err != nil { log.Println("Failed accepting a connection request:", err) continue } log.Println("Handle incoming messages.") go e.handleMessages(conn) } }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (handler *Handler) Listen() error {\n\treturn handler.engine.Start(g.GetConfig().ListenAddr)\n}", "func (r *EndpointRegistry) Listen(listener Listener) {\n\tif !r.OnCloseAlways(func() {\n\t\tif err := listener.Close(); err != nil {\n\t\t\tr.Log().Debugf(\"EndpointRegistry.Listen: closing listener OnClose: %...
[ "0.66203743", "0.66116935", "0.6564344", "0.6537847", "0.65230536", "0.64692837", "0.6373706", "0.6306666", "0.6273131", "0.6236138", "0.6213746", "0.6199536", "0.61531854", "0.6143046", "0.61272234", "0.6126517", "0.6125706", "0.60635346", "0.6059511", "0.60571146", "0.60484...
0.6941947
0
handleMessages reads the connection up to the first newline. Based on this string, it calls the appropriate HandleFunc.
func (e *Endpoint) handleMessages(conn net.Conn) { // Wrap the connection into a buffered reader for easier reading. rw := bufio.NewReadWriter(bufio.NewReader(conn), bufio.NewWriter(conn)) defer conn.Close() // Read from the connection until EOF. Expect a command name as the // next input. Call the handler that is registered for this command. for { log.Print("Receive command '") cmd, err := rw.ReadString('\n') switch { case err == io.EOF: log.Println("Reached EOF - close this connection.\n ---") return case err != nil: log.Println("\nError reading command. Got: '"+cmd+"'\n", err) return } // Trim the request string - ReadString does not strip any newlines. cmd = strings.Trim(cmd, "\n ") log.Println(cmd + "'") // Fetch the appropriate handler function from the 'handler' map and call it. e.mutex.RLock() handleCommand, ok := e.handler[cmd] e.mutex.RUnlock() if !ok { log.Println("Command '" + cmd + "' is not registered.") return } handleCommand(rw) } }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (r *MetricReceiver) handleMessage(addr net.Addr, msg []byte) {\n\tbuf := bytes.NewBuffer(msg)\n\tfor {\n\t\tline, readerr := buf.ReadBytes('\\n')\n\n\t\t// protocol does not require line to end in \\n, if EOF use received line if valid\n\t\tif readerr != nil && readerr != io.EOF {\n\t\t\tr.handleError(fmt.Err...
[ "0.6561378", "0.6528215", "0.64411", "0.64160395", "0.6366687", "0.6283122", "0.6273615", "0.6269291", "0.6257093", "0.6256277", "0.61980337", "0.6155845", "0.6121732", "0.61168164", "0.601344", "0.6010404", "0.59898597", "0.5966513", "0.59574014", "0.5955264", "0.58938617", ...
0.71781325
0
/ Now let's create two handler functions. The easiest case is where our adhoc protocol only sends string data. The second handler receives and processes a struct that was sent as GOB data. handleStrings handles the "STRING" request.
func handleStrings(rw *bufio.ReadWriter) { // Receive a string. log.Print("Receive STRING message:") s, err := rw.ReadString('\n') if err != nil { log.Println("Cannot read from connection.\n", err) } s = strings.Trim(s, "\n ") log.Println(s) _, err = rw.WriteString("Thank you.\n") if err != nil { log.Println("Cannot write to connection.\n", err) } err = rw.Flush() if err != nil { log.Println("Flush failed.", err) } }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (serv *Server) handleText(conn int, payload []byte) {\n\tvar (\n\t\tlogp = `handleText`\n\n\t\thandler RouteHandler\n\t\terr error\n\t\tctx context.Context\n\t\treq *Request\n\t\tres *Response\n\t\tok bool\n\t)\n\n\tres = _resPool.Get().(*Response)\n\tres.reset()\n\n\tctx, ok = serv.Clien...
[ "0.6016482", "0.5864663", "0.58399796", "0.5835402", "0.5817867", "0.57098216", "0.57005876", "0.5564361", "0.5549767", "0.5534213", "0.55311394", "0.55142814", "0.55067146", "0.55020815", "0.55018497", "0.5486916", "0.5477967", "0.5446462", "0.54205203", "0.5399795", "0.5399...
0.677268
0
handleGob handles the "GOB" request. It decodes the received GOB data into a struct.
func handleGob(rw *bufio.ReadWriter) { log.Print("Receive GOB data:") var data complexData // Create a decoder that decodes directly into a struct variable. dec := gob.NewDecoder(rw) err := dec.Decode(&data) if err != nil { log.Println("Error decoding GOB data:", err) return } // Print the complexData struct and the nested one, too, to prove // that both travelled across the wire. log.Printf("Outer complexData struct: \n%#v\n", data) log.Printf("Inner complexData struct: \n%#v\n", data.C) }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (d *Decoder) GOB(val interface{}) {\n\tgobd := gob.NewDecoder(d.buf)\n\tif err := gobd.Decode(val); err != nil {\n\t\tlog.Panicf(\"gob: failed to decode: %v\", err)\n\t}\n}", "func (z *Rat) GobDecode(buf []byte) error {}", "func GobDecode(ctx context.Context, data []byte, obj interface{}) error {\n\tretur...
[ "0.6926054", "0.66555226", "0.6592844", "0.6570786", "0.6549313", "0.64678264", "0.6391482", "0.62011355", "0.6196608", "0.61929303", "0.61607766", "0.59881604", "0.5935874", "0.5881003", "0.58700323", "0.58210737", "0.58197033", "0.57850504", "0.5690908", "0.5690442", "0.568...
0.8036416
0
/ The client and server functions With all this in place, we can now set up client and server functions. The client function connects to the server and sends STRING and GOB requests. The server starts listening for requests and triggers the appropriate handlers. client is called if the app is called with connect=`ip addr`.
func client(ip string) error { // Some test data. Note how GOB even handles maps, slices, and // recursive data structures without problems. testStruct := complexData{ N: 23, S: "string data", M: map[string]int{"one": 1, "two": 2, "three": 3}, P: []byte("abc"), C: &complexData{ N: 256, S: "Recursive structs? Piece of cake!", M: map[string]int{"01": 1, "10": 2, "11": 3}, }, } // Open a connection to the server. rw, err := Open(ip + Port) if err != nil { return errors.Wrap(err, "Client: Failed to open connection to "+ip+Port) } // Send a STRING request. // Send the request name. // Send the data. log.Println("Send the string request.") n, err := rw.WriteString("STRING\n") if err != nil { return errors.Wrap(err, "Could not send the STRING request ("+strconv.Itoa(n)+" bytes written)") } n, err = rw.WriteString("Additional data.\n") if err != nil { return errors.Wrap(err, "Could not send additional STRING data ("+strconv.Itoa(n)+" bytes written)") } log.Println("Flush the buffer.") err = rw.Flush() if err != nil { return errors.Wrap(err, "Flush failed.") } // Read the reply. log.Println("Read the reply.") response, err := rw.ReadString('\n') if err != nil { return errors.Wrap(err, "Client: Failed to read the reply: '"+response+"'") } log.Println("STRING request: got a response:", response) // Send a GOB request. // Create an encoder that directly transmits to `rw`. // Send the request name. // Send the GOB. log.Println("Send a struct as GOB:") log.Printf("Outer complexData struct: \n%#v\n", testStruct) log.Printf("Inner complexData struct: \n%#v\n", testStruct.C) enc := gob.NewEncoder(rw) n, err = rw.WriteString("GOB\n") if err != nil { return errors.Wrap(err, "Could not write GOB data ("+strconv.Itoa(n)+" bytes written)") } err = enc.Encode(testStruct) if err != nil { return errors.Wrapf(err, "Encode failed for struct: %#v", testStruct) } err = rw.Flush() if err != nil { return errors.Wrap(err, "Flush failed.") } return nil }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func client(){\n // Connect to the server through tcp/IP.\n\tconnection, err := net.Dial(\"tcp\", (\"127.0.0.1\" + \":\" + \"9090\"))\n\tupdateListener , listErr := net.Dial(\"tcp\", (\"127.0.0.1\" + \":\" + \"9091\"))\n\t// If connection failed crash.\n\tcheck(err)\n\tcheck(listErr)\n\t//Create separate thread...
[ "0.6618485", "0.65211356", "0.646171", "0.638421", "0.6304717", "0.6287283", "0.6227207", "0.6171384", "0.6169162", "0.60877883", "0.6082746", "0.6051843", "0.6039413", "0.6016558", "0.6005998", "0.599888", "0.59954786", "0.5988336", "0.5975594", "0.59596217", "0.59543747", ...
0.6478236
2
server listens for incoming requests and dispatches them to registered handler functions.
func server() error { endpoint := NewEndpoint() // Add the handle funcs. endpoint.AddHandleFunc("STRING", handleStrings) endpoint.AddHandleFunc("GOB", handleGob) // Start listening. return endpoint.Listen() }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func server() {\n\tmux := http.NewServeMux()\n\tmux.HandleFunc(\"/\", respond.HandleRequest)\n\tlog.Fatal(http.ListenAndServe(\":\"+*port, mux))\n}", "func handleRequests() {\n\taddress := \"localhost\"\n\taddress += \":\"\n\taddress += \"8000\"\n\tfmt.Printf(\"Server at address %v is up\\n\", \"http://\"+addres...
[ "0.68494654", "0.6800467", "0.67156327", "0.6608716", "0.65980685", "0.6566974", "0.6520821", "0.64973724", "0.6458118", "0.6454185", "0.64205277", "0.64116293", "0.6407764", "0.6395274", "0.6377654", "0.636549", "0.6357758", "0.63493574", "0.6325145", "0.6282549", "0.6282397...
0.6814072
1
/ Main Main starts either a client or a server, depending on whether the `connect` flag is set. Without the flag, the process starts as a server, listening for incoming requests. With the flag the process starts as a client and connects to the host specified by the flag value. Try "localhost" or "127.0.0.1" when running both processes on the same machine. main
func main() { connect := flag.String("connect", "", "IP address of process to join. If empty, go into listen mode.") flag.Parse() // If the connect flag is set, go into client mode. if *connect != "" { err := client(*connect) if err != nil { log.Println("Error:", errors.WithStack(err)) } log.Println("Client done.") return } // Else go into server mode. err := server() if err != nil { log.Println("Error:", errors.WithStack(err)) } log.Println("Server done.") }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func main() {\n\tflag.StringVar(&MODE, \"mode\", MODE, \"server/client\")\n\tflag.StringVar(&SERVER_ADDR, \"server\", SERVER_ADDR, \"mode: server => listen, mode: client => connect to\")\n\tflag.StringVar(&PayLoad, \"pl\", PayLoad, \"PayLoad\")\n\tflag.BoolVar(&PrintDump, \"d\", PrintDump, \"Print dump\")\n\tflag....
[ "0.70549303", "0.6946936", "0.670047", "0.6645153", "0.6628611", "0.66251695", "0.6601195", "0.6575071", "0.6574852", "0.6552101", "0.6540078", "0.64823025", "0.64497507", "0.64278376", "0.6411183", "0.63908523", "0.6383223", "0.6341063", "0.6328398", "0.63155854", "0.6315046...
0.80571264
0
The Lshortfile flag includes file name and line number in log messages.
func init() { log.SetFlags(log.Lshortfile) }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func StatusShort(c *Client, files []File, untracked StatusUntrackedMode, lineprefix, lineending string) (string, error) {\n\tvar lsfiles []File\n\tif len(files) == 0 {\n\t\tlsfiles = []File{File(c.WorkDir)}\n\t} else {\n\t\tlsfiles = files\n\t}\n\n\tcfiles, err := LsFiles(c, LsFilesOptions{Cached: true}, lsfiles)\...
[ "0.5979167", "0.5713366", "0.56390464", "0.5635513", "0.5625114", "0.5615503", "0.5562804", "0.55347633", "0.5523255", "0.541619", "0.54031026", "0.536245", "0.53480095", "0.53017974", "0.52917963", "0.5189681", "0.51692307", "0.50795686", "0.50761473", "0.5056755", "0.500594...
0.60543543
0
NewHealthController creates a health controller.
func NewHealthController(service *goa.Service) *HealthController { return &HealthController{Controller: service.NewController("HealthController")} }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func NewHealthController(router *mux.Router, r *render.Render) *HealthController {\n\tctrl := &HealthController{router, r}\n\tctrl.Register()\n\treturn ctrl\n}", "func NewHealthController() *HealthController {\n\treturn &HealthController{}\n}", "func NewHealthController() *HealthController {\n\treturn new(Heal...
[ "0.81887347", "0.80589396", "0.7891792", "0.77446157", "0.68483025", "0.677226", "0.67417073", "0.6698271", "0.6693059", "0.66468036", "0.6549667", "0.6535941", "0.6533674", "0.6522994", "0.64922637", "0.6490057", "0.64858145", "0.64842665", "0.642976", "0.64031225", "0.63901...
0.80731326
1
Health runs the health action.
func (c *HealthController) Health(ctx *app.HealthHealthContext) error { // HealthController_Health: start_implement ver := "unknown" semVer, err := semver.Make(MajorMinorPatch + "-" + ReleaseType + "+git.sha." + GitCommit) if err == nil { ver = semVer.String() } return ctx.OK([]byte("Health OK: " + time.Now().String() + ", semVer: " + ver + "\n")) // HealthController_Health: end_implement }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (cmd *HealthHealthCommand) Run(c *client.Client, args []string) error {\n\tvar path string\n\tif len(args) > 0 {\n\t\tpath = args[0]\n\t} else {\n\t\tpath = \"/api/_ah/health\"\n\t}\n\tlogger := goa.NewLogger(log.New(os.Stderr, \"\", log.LstdFlags))\n\tctx := goa.WithLogger(context.Background(), logger)\n\tre...
[ "0.6713238", "0.6543138", "0.6531063", "0.64671564", "0.638338", "0.6375378", "0.63475406", "0.63455915", "0.6333624", "0.6295893", "0.6284236", "0.62841773", "0.628139", "0.6255509", "0.6253855", "0.62408435", "0.6238117", "0.6238117", "0.6195274", "0.61200297", "0.61177963"...
0.67453545
0
NewGenerator starts foreground goroutine which generates sequence of unsigned ints and puts them in input channel, also it returnes stop channel which need to be triggered when generator need to be stopped
func NewGenerator(input chan<- uint) chan<- bool { stop := make(chan bool) go func() { var current uint = 1 for { select { case input <- current: current++ case <-stop: close(input) return } } }() return stop }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func Generator(done chan struct{}) <-chan int {\n\tout := make(chan int)\n\n\tgo func() {\n\t\tdefer close(out)\n\n\t\tfor i, j := 0, 1; ; i, j = j, i+j {\n\t\t\tselect {\n\t\t\tcase out <- i:\n\t\t\tcase <-done:\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}()\n\n\treturn out\n}", "func generate() chan int {\n\tch := mak...
[ "0.6641966", "0.6627279", "0.65606636", "0.6544673", "0.6534481", "0.646908", "0.63416237", "0.6290985", "0.62610847", "0.6223949", "0.6180005", "0.6167735", "0.6167735", "0.616628", "0.6149821", "0.61059165", "0.61059165", "0.61059165", "0.6101464", "0.6064095", "0.6044233",...
0.7869876
0
Asset loads and returns the asset for the given name. It returns an error if the asset could not be found or could not be loaded.
func Asset(name string) ([]byte, error) { cannonicalName := strings.Replace(name, "\\", "/", -1) if f, ok := _bindata[cannonicalName]; ok { a, err := f() if err != nil { return nil, fmt.Errorf("Asset %s can't read by error: %v", name, err) } return a.bytes, nil } return nil, fmt.Errorf("Asset %s not found", name) }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func Asset(name string) ([]byte, error) {\n cannonicalName := strings.Replace(name, \"\\\\\", \"/\", -1)\n if f, ok := _bindata[cannonicalName]; ok {\n a, err := f()\n if err != nil {\n return nil, fmt.Errorf(\"Asset %s can't read by error: %v\", name, err)\n }\n return a.bytes, nil\n }\n retu...
[ "0.7347324", "0.7267579", "0.710425", "0.710425", "0.710425", "0.710425", "0.710425", "0.710425", "0.710425", "0.710425", "0.710425", "0.710425", "0.710425", "0.710425", "0.710425", "0.710425", "0.710425", "0.710425", "0.710425", "0.710425", "0.70949763", "0.70949763", "0...
0.0
-1
MustAsset is like Asset but panics when Asset would return an error. It simplifies safe initialization of global variables.
func MustAsset(name string) []byte { a, err := Asset(name) if err != nil { panic("asset: Asset(" + name + "): " + err.Error()) } return a }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func MustAsset(name string) []byte {\n\ta, err := Asset(name)\n\tif (err != nil) {\n\t\tpanic(\"asset: Asset(\" + name + \"): \" + err.Error())\n\t}\n\n\treturn a\n}", "func MustAsset(name string) []byte {\n\ta, err := Asset(name)\n\tif (err != nil) {\n\t\tpanic(\"asset: Asset(\" + name + \"): \" + err.Error())\...
[ "0.7499818", "0.7499818", "0.7499818", "0.7499818", "0.7499818", "0.7499818", "0.7499818", "0.7499818", "0.7499818", "0.7499818", "0.7499818", "0.7499818", "0.7499818", "0.7499818", "0.7499818", "0.7499818", "0.7499818", "0.7499818", "0.7499818", "0.7499818", "0.7499818", "...
0.0
-1
AssetInfo loads and returns the asset info for the given name. It returns an error if the asset could not be found or could not be loaded.
func AssetInfo(name string) (os.FileInfo, error) { cannonicalName := strings.Replace(name, "\\", "/", -1) if f, ok := _bindata[cannonicalName]; ok { a, err := f() if err != nil { return nil, fmt.Errorf("AssetInfo %s can't read by error: %v", name, err) } return a.info, nil } return nil, fmt.Errorf("AssetInfo %s not found", name) }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func AssetInfo(name string) (os.FileInfo, error) {\n\tcanonicalName := strings.Replace(name, \"\\\\\", \"/\", -1)\n\tif f, ok := _bindata[canonicalName]; ok {\n\t\ta, err := f()\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"AssetInfo %s can't read by error: %v\", name, err)\n\t\t}\n\t\treturn a.info, nil\n\...
[ "0.81356746", "0.81356746", "0.81356746", "0.81356746", "0.81356746", "0.81356746", "0.81356746", "0.81356746", "0.81356746", "0.81356746", "0.81356746", "0.81356746", "0.81356746", "0.81356746", "0.81356746", "0.81356746", "0.81356746", "0.81356746", "0.81356746", "0.81356746"...
0.0
-1
AssetNames returns the names of the assets.
func AssetNames() []string { names := make([]string, 0, len(_bindata)) for name := range _bindata { names = append(names, name) } return names }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[]
[]
0.0
-1
RestoreAsset restores an asset under the given directory
func RestoreAsset(dir, name string) error { data, err := Asset(name) if err != nil { return err } info, err := AssetInfo(name) if err != nil { return err } err = os.MkdirAll(_filePath(dir, filepath.Dir(name)), os.FileMode(0755)) if err != nil { return err } err = ioutil.WriteFile(_filePath(dir, name), data, info.Mode()) if err != nil { return err } err = os.Chtimes(_filePath(dir, name), info.ModTime(), info.ModTime()) if err != nil { return err } return nil }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func RestoreAsset(dir, name string) error {\n\tdata, err := Asset(name)\n\tif err != nil {\n\t\treturn err\n\t}\n\tinfo, err := AssetInfo(name)\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = os.MkdirAll(_filePath(dir, path.Dir(name)), os.FileMode(0755))\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = ioutil.Wri...
[ "0.80596304", "0.80596304", "0.80596304", "0.80596304" ]
0.0
-1
RestoreAssets restores an asset under the given directory recursively
func RestoreAssets(dir, name string) error { children, err := AssetDir(name) // File if err != nil { return RestoreAsset(dir, name) } // Dir for _, child := range children { err = RestoreAssets(dir, filepath.Join(name, child)) if err != nil { return err } } return nil }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func RestoreAssets(dir, name string) error {\n children, err := AssetDir(name)\n // File\n if err != nil {\n return RestoreAsset(dir, name)\n }\n // Dir\n for _, child := range children {\n err = RestoreAssets(dir, path.Join(name, child))\n ...
[ "0.798857", "0.798857", "0.798857", "0.798857", "0.798857", "0.7979806", "0.7979806", "0.7979806", "0.7979806", "0.7979806", "0.7979806", "0.7979806", "0.7979806", "0.7979806", "0.7979806", "0.7979806", "0.7979806", "0.7979806", "0.7973515", "0.7973515", "0.7973515", "0.797...
0.0
-1
Copyright 2018 Information Trust Institute Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. import "syscall"
func (s *service) Start() error { log.Debug("Starting Proc") s.pipe, _ = s.Proc.StdoutPipe() e := s.Proc.Start() if e != nil { return e } s.isRunning = true log.Debug("Waiting") e = s.Proc.Wait() return e }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func SYSCALL() { ctx.SYSCALL() }", "func PrintSyscall(sc int32) string {\n\tvar syscalls = map[int32]string{\n\t\t0: \"read\",\n\t\t1: \"write\",\n\t\t2: \"open\",\n\t\t3: \"close\",\n\t\t4: \"newstat\",\n\t\t5: \"fstat\",\n\t\t6: \"newlstat\",\n\t\t7: \"poll\",\n\t\t8: \"lseek\",\n\t\t9: \"m...
[ "0.7211902", "0.6758328", "0.65226424", "0.6358648", "0.59837586", "0.5968245", "0.57826364", "0.5782253", "0.5769097", "0.56659955", "0.5599327", "0.5580922", "0.55571395", "0.55484253", "0.55367595", "0.54918444", "0.54208976", "0.5402904", "0.5364842", "0.53416246", "0.532...
0.0
-1
Titleizes given name to match
func toFieldName(name string) string { name = strings.Title(name) // NOTE: golint prefers method names use "ID" instead of "Id". re := regexp.MustCompile("Id([A-Z]|$)") return re.ReplaceAllString(name, "ID${1}") }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func Title(name string) string {\n\t//name = strings.Replace(name, \"_\", \" \", -1)\n\t//name = strings.Replace(name, \".\", \" \", -1)\n\tname = strings.TrimSpace(name)\n\treturn strings.ToUpper(name)\n}", "func Title(name string) string {\n\tname = strings.Replace(name, \"_\", \" \", -1)\n\tname = strings.Rep...
[ "0.7358443", "0.7263768", "0.6653588", "0.66427946", "0.6610177", "0.65596616", "0.6526799", "0.6518762", "0.64714485", "0.64706373", "0.6443669", "0.6421691", "0.638007", "0.6326133", "0.62860125", "0.6278684", "0.6265051", "0.6237156", "0.62015903", "0.6178959", "0.61777234...
0.0
-1
genFields generates fields config for given AST
func genFields(fs []*ast.FieldDefinition) *jen.Statement { // // Generate config for fields // // == Example input SDL // // type Dog { // name(style: NameComponentsStyle = SHORT): String! // givenName: String @deprecated(reason: "No longer supported; please use name field.") // } // // == Example output // // graphql.Fields{ // "name": graphql.Field{ ... }, // "givenName": graphql.Field{ ... }, // } // return jen.Qual(defsPkg, "Fields").Values(jen.DictFunc(func(d jen.Dict) { for _, f := range fs { d[jen.Lit(f.Name.Value)] = genField(f) } })) }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (g *Generator) generate(typeName string) {\n\tfields := make([]Field, 0, 100)\n\tfor _, file := range g.pkg.files {\n\t\t// Set the state for this run of the walker.\n\t\tfile.typeName = typeName\n\t\tif file.file != nil {\n\t\t\tast.Inspect(file.file, file.genDecl)\n\t\t\tfields = append(fields, file.fields....
[ "0.62704337", "0.6208788", "0.61161774", "0.588618", "0.5744872", "0.5581054", "0.556311", "0.55467135", "0.5487712", "0.5455513", "0.54416054", "0.54179704", "0.5410756", "0.54015136", "0.53891087", "0.5364398", "0.5253189", "0.5208028", "0.52043945", "0.5097442", "0.5096631...
0.7535413
0
genField generates field config for given AST
func genField(field *ast.FieldDefinition) *jen.Statement { // // Generate config for field // // == Example input SDL // // interface Pet { // "name of the pet" // name(style: NameComponentsStyle = SHORT): String! // """ // givenName of the pet ★ // """ // givenName: String @deprecated(reason: "No longer supported; please use name field.") // } // // == Example output // // &graphql.Field{ // Name: "name", // Type: graphql.NonNull(graphql.String), // Description: "name of the pet", // DeprecationReason: "", // Args: FieldConfigArgument{ ... }, // } // // &graphql.Field{ // Name: "givenName", // Type: graphql.String, // Description: "givenName of the pet", // DeprecationReason: "No longer supported; please use name field.", // Args: FieldConfigArgument{ ... }, // } // return jen.Op("&").Qual(defsPkg, "Field").Values(jen.Dict{ jen.Id("Args"): genArguments(field.Arguments), jen.Id("DeprecationReason"): genDeprecationReason(field.Directives), jen.Id("Description"): genDescription(field), jen.Id("Name"): jen.Lit(field.Name.Value), jen.Id("Type"): genOutputTypeReference(field.Type), }) }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func genFields(fs []*ast.FieldDefinition) *jen.Statement {\n\t//\n\t// Generate config for fields\n\t//\n\t// == Example input SDL\n\t//\n\t// type Dog {\n\t// name(style: NameComponentsStyle = SHORT): String!\n\t// givenName: String @deprecated(reason: \"No longer supported; please use name field.\"...
[ "0.6892996", "0.59916204", "0.57207865", "0.57188165", "0.56753683", "0.5636733", "0.5596574", "0.55853677", "0.5520538", "0.5467922", "0.5415466", "0.53430325", "0.53384626", "0.5321866", "0.52896065", "0.5278876", "0.5278473", "0.521161", "0.5197192", "0.5160622", "0.515671...
0.757106
0
genArguments generates argument field config for given AST
func genArguments(args []*ast.InputValueDefinition) *jen.Statement { // // Generate config for arguments // // == Example input SDL // // type Dog { // name( // "style is stylish" // style: NameComponentsStyle = SHORT, // ): String! // } // // == Example output // // FieldConfigArgument{ // "style": &ArgumentConfig{ ... } // }, // return jen.Qual(defsPkg, "FieldConfigArgument").Values( jen.DictFunc(func(d jen.Dict) { for _, arg := range args { d[jen.Lit(arg.Name.Value)] = genArgument(arg) } }), ) }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func genArgument(arg *ast.InputValueDefinition) *jen.Statement {\n\t//\n\t// Generate config for argument\n\t//\n\t// == Example input SDL\n\t//\n\t// type Dog {\n\t// name(\n\t// \"style is stylish\"\n\t// style: NameComponentsStyle = SHORT,\n\t// ): String!\n\t// }\n\t//\n\t// == Ex...
[ "0.6473212", "0.5893512", "0.52816784", "0.5240693", "0.5222666", "0.51920587", "0.50385666", "0.5037734", "0.50329053", "0.5010324", "0.50005144", "0.49702114", "0.4968135", "0.47936794", "0.47646612", "0.47604808", "0.47278732", "0.47187448", "0.4692279", "0.4692153", "0.46...
0.7882007
0
genArgument generates argument config for given AST
func genArgument(arg *ast.InputValueDefinition) *jen.Statement { // // Generate config for argument // // == Example input SDL // // type Dog { // name( // "style is stylish" // style: NameComponentsStyle = SHORT, // ): String! // } // // == Example output // // &ArgumentConfig{ // Type: graphql.NonNull(graphql.String), // DefaultValue: "SHORT", // TODO: ??? // Description: "style is stylish", // } // return jen.Op("&").Qual(defsPkg, "ArgumentConfig").Values(jen.Dict{ jen.Id("DefaultValue"): genValue(arg.DefaultValue), jen.Id("Description"): genDescription(arg), jen.Id("Type"): genInputTypeReference(arg.Type), }) }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func genArguments(args []*ast.InputValueDefinition) *jen.Statement {\n\t//\n\t// Generate config for arguments\n\t//\n\t// == Example input SDL\n\t//\n\t// type Dog {\n\t// name(\n\t// \"style is stylish\"\n\t// style: NameComponentsStyle = SHORT,\n\t// ): String!\n\t// }\n\t//\n\t// ...
[ "0.73820037", "0.57308537", "0.55290264", "0.5522804", "0.5506649", "0.5157852", "0.5125404", "0.5091101", "0.504007", "0.5006817", "0.49592155", "0.49067444", "0.49022472", "0.48643097", "0.48459315", "0.484178", "0.48397017", "0.48209807", "0.4767527", "0.47625056", "0.4760...
0.793481
0
AddReceipt adds receipt for user.
func (client Client) AddReceipt(userId string, text string) error { addReceiptUrl := client.backendUrl + "/internal/receipt" request := addReceiptRequest{ReceiptString: text, UserId: userId} reader, err := getReader(request) if err != nil { return err } response, err := http.Post(addReceiptUrl, "text/javascript", reader) if err != nil { return err } switch response.StatusCode { case http.StatusOK: return nil default: return errors.New(response.Status) } return nil }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (puo *ProductUpdateOne) AddReceipt(r ...*Receipt) *ProductUpdateOne {\n\tids := make([]int, len(r))\n\tfor i := range r {\n\t\tids[i] = r[i].ID\n\t}\n\treturn puo.AddReceiptIDs(ids...)\n}", "func (pu *ProductUpdate) AddReceipt(r ...*Receipt) *ProductUpdate {\n\tids := make([]int, len(r))\n\tfor i := range r...
[ "0.65677184", "0.6518654", "0.54841864", "0.54648083", "0.5241363", "0.5195453", "0.51696557", "0.51665854", "0.5155161", "0.514535", "0.5085661", "0.50395", "0.5033886", "0.50278527", "0.50236046", "0.49896088", "0.49473062", "0.49466264", "0.4934199", "0.4933858", "0.492777...
0.7855252
0
Contain Returns true if slice contains string.
func Contain(list []string, str string) bool { for _, s := range list { if s == str { return true } } return false }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (slice StringSlice) Contains(str string) bool {\n\tfor _, iStr := range slice {\n\t\tif iStr == str {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func (t *StringSlice) Contains(s string) bool {\n\treturn t.Index(s) > -1\n}", "func Contains(slice []string, s string) bool {\n\tfor _, elem := range ...
[ "0.783324", "0.76756775", "0.76235545", "0.74297565", "0.7393863", "0.73772204", "0.7371275", "0.7368948", "0.72871715", "0.72871715", "0.72871715", "0.72871715", "0.72871715", "0.72871715", "0.72871715", "0.72871715", "0.72871715", "0.72871715", "0.72871715", "0.72871715", "...
0.0
-1
MergeUnique Merges `source` string slice into `dest` and returns result. Inserts from `source` only when `dest` does not `Contain` given string.
func MergeUnique(dest, source []string) []string { for _, str := range source { if !Contain(dest, str) { dest = append(dest, str) } } return dest }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func MergeAndDeduplicateSlice(src []string, target []string) []string {\n\tm := make(map[string]bool)\n\tfor i := range src {\n\t\tm[src[i]] = true\n\t}\n\n\tfor i := range target {\n\t\tif _, ok := m[target[i]]; !ok {\n\t\t\tsrc = append(src, target[i])\n\t\t}\n\t}\n\n\treturn src\n}", "func StringUniqueAppend(...
[ "0.64953166", "0.5654552", "0.5652756", "0.55148536", "0.53828245", "0.53584385", "0.5342289", "0.5150319", "0.513586", "0.5078584", "0.50460446", "0.50121546", "0.5000155", "0.49774796", "0.49146158", "0.49021077", "0.48866537", "0.48661357", "0.48562434", "0.47965133", "0.4...
0.7760827
0