_id
stringlengths
2
7
title
stringlengths
1
118
partition
stringclasses
3 values
text
stringlengths
52
85.5k
language
stringclasses
1 value
meta_information
dict
q12300
SetDestination
train
func (p *Pipeline) SetDestination(d Destination) { p.mutex.Lock() defer p.mutex.Unlock() p.dst = d }
go
{ "resource": "" }
q12301
Run
train
func (p *Pipeline) Run(ctx context.Context) (err error) { if pdebug.Enabled { g := pdebug.Marker("Pipeline.Run (%s)", ctx.Value("query")).BindError(&err) defer g.End() } p.mutex.Lock() defer p.mutex.Unlock() defer close(p.done) if p.src == nil { return errors.New("source must be non-nil") } if p.dst == nil { return errors.New("destination must be non-nil") } // Reset is called on the source/destination to effectively reset // any state changes that may have happened in the end of // the previous call to Run() p.src.Reset() p.dst.Reset() // Setup the Acceptors, effectively chaining all nodes // starting from the destination, working all the way // up to the Source var prevCh ChanOutput = ChanOutput(make(chan interface{})) go p.dst.Accept(ctx, prevCh, nil) for i := len(p.nodes) - 1; i >= 0; i-- { cur := p.nodes[i] ch := make(chan interface{}) // go cur.Accept(ctx, ch, prevCh) prevCh = ChanOutput(ch) } // And now tell the Source to send the values so data chugs // through the pipeline go p.src.Start(ctx, prevCh) // Wait till we're done <-p.dst.Done() return nil }
go
{ "resource": "" }
q12302
Add
train
func (s *Selection) Add(l line.Line) { s.mutex.Lock() defer s.mutex.Unlock() s.tree.ReplaceOrInsert(l) }
go
{ "resource": "" }
q12303
Remove
train
func (s *Selection) Remove(l line.Line) { s.mutex.Lock() defer s.mutex.Unlock() s.tree.Delete(l) }
go
{ "resource": "" }
q12304
NewAnchorSettings
train
func NewAnchorSettings(screen Screen, anchor VerticalAnchor, offset int) *AnchorSettings { if !IsValidVerticalAnchor(anchor) { panic("Invalid vertical anchor specified") } return &AnchorSettings{ anchor: anchor, anchorOffset: offset, screen: screen, } }
go
{ "resource": "" }
q12305
AnchorPosition
train
func (as AnchorSettings) AnchorPosition() int { var pos int switch as.anchor { case AnchorTop: pos = as.anchorOffset case AnchorBottom: _, h := as.screen.Size() pos = int(h) - as.anchorOffset - 1 // -1 is required because y is 0 base, but h is 1 base default: panic("Unknown anchor type!") } return pos }
go
{ "resource": "" }
q12306
NewUserPrompt
train
func NewUserPrompt(screen Screen, anchor VerticalAnchor, anchorOffset int, prompt string, styles *StyleSet) *UserPrompt { if len(prompt) <= 0 { // default prompt = "QUERY>" } promptLen := runewidth.StringWidth(prompt) return &UserPrompt{ AnchorSettings: NewAnchorSettings(screen, anchor, anchorOffset), prompt: prompt, promptLen: int(promptLen), styles: styles, } }
go
{ "resource": "" }
q12307
NewStatusBar
train
// NewStatusBar creates a StatusBar anchored as specified. The clear timer
// starts out unset; it is armed when a transient message is shown.
func NewStatusBar(screen Screen, anchor VerticalAnchor, anchorOffset int, styles *StyleSet) *StatusBar {
	bar := &StatusBar{
		AnchorSettings: NewAnchorSettings(screen, anchor, anchorOffset),
		clearTimer:     nil,
		styles:         styles,
	}
	return bar
}
go
{ "resource": "" }
q12308
NewListArea
train
// NewListArea creates the list widget. sortTopDown selects whether lines
// are rendered top-to-bottom (true) or bottom-up (false).
func NewListArea(screen Screen, anchor VerticalAnchor, anchorOffset int, sortTopDown bool, styles *StyleSet) *ListArea {
	area := &ListArea{
		AnchorSettings: NewAnchorSettings(screen, anchor, anchorOffset),
		displayCache:   []line.Line{},
		dirty:          false,
		sortTopDown:    sortTopDown,
		styles:         styles,
	}
	return area
}
go
{ "resource": "" }
q12309
NewBottomUpLayout
train
func NewBottomUpLayout(state *Peco) *BasicLayout { return &BasicLayout{ StatusBar: NewStatusBar(state.Screen(), AnchorBottom, 0+extraOffset, state.Styles()), // The prompt is at the bottom, above the status bar prompt: NewUserPrompt(state.Screen(), AnchorBottom, 1+extraOffset, state.Prompt(), state.Styles()), // The list area is at the bottom, above the prompt // It's displayed in bottom-to-top order list: NewListArea(state.Screen(), AnchorBottom, 2+extraOffset, false, state.Styles()), } }
go
{ "resource": "" }
q12310
CalculatePage
train
func (l *BasicLayout) CalculatePage(state *Peco, perPage int) error { if pdebug.Enabled { g := pdebug.Marker("BasicLayout.Calculate %d", perPage) defer g.End() } buf := state.CurrentLineBuffer() loc := state.Location() loc.SetPage((loc.LineNumber() / perPage) + 1) loc.SetOffset((loc.Page() - 1) * perPage) loc.SetPerPage(perPage) loc.SetTotal(buf.Size()) if loc.Total() == 0 { loc.SetMaxPage(1) } else { loc.SetMaxPage((loc.Total() + perPage - 1) / perPage) } if loc.MaxPage() < loc.Page() { if buf.Size() == 0 { // wait for targets return errors.New("no targets or query. nothing to do") } loc.SetLineNumber(loc.Offset()) } return nil }
go
{ "resource": "" }
q12311
DrawScreen
train
func (l *BasicLayout) DrawScreen(state *Peco, options *DrawOptions) { if pdebug.Enabled { g := pdebug.Marker("BasicLayout.DrawScreen") defer g.End() } perPage := l.linesPerPage() if err := l.CalculatePage(state, perPage); err != nil { return } l.DrawPrompt(state) l.list.Draw(state, l, perPage, options) if err := l.screen.Flush(); err != nil { return } }
go
{ "resource": "" }
q12312
MovePage
train
func (l *BasicLayout) MovePage(state *Peco, p PagingRequest) (moved bool) { switch p.Type() { case ToScrollLeft, ToScrollRight: moved = horizontalScroll(state, l, p) default: moved = verticalScroll(state, l, p) } return }
go
{ "resource": "" }
q12313
horizontalScroll
train
func horizontalScroll(state *Peco, l *BasicLayout, p PagingRequest) bool { width, _ := state.screen.Size() loc := state.Location() if p.Type() == ToScrollRight { loc.SetColumn(loc.Column() + width/2) } else if loc.Column() > 0 { loc.SetColumn(loc.Column() - width/2) if loc.Column() < 0 { loc.SetColumn(0) } } else { return false } l.list.SetDirty(true) return true }
go
{ "resource": "" }
q12314
TtyReady
train
func TtyReady() error { var err error _stdin, err := os.Open("CONIN$") if err != nil { return err } stdin = os.Stdin os.Stdin = _stdin syscall.Stdin = syscall.Handle(os.Stdin.Fd()) return errors.Wrap(setStdHandle(syscall.STD_INPUT_HANDLE, syscall.Stdin), "failed to check for TtyReady") }
go
{ "resource": "" }
q12315
TtyTerm
train
func TtyTerm() { os.Stdin = stdin syscall.Stdin = syscall.Handle(os.Stdin.Fd()) setStdHandle(syscall.STD_INPUT_HANDLE, syscall.Stdin) }
go
{ "resource": "" }
q12316
NewRegexp
train
// NewRegexp returns a Regexp filter with the default flag set, quotemeta
// disabled, and a compiled-query cache that expires after one minute.
func NewRegexp() *Regexp {
	factory := &regexpQueryFactory{
		compiled:  make(map[string]regexpQuery),
		threshold: time.Minute,
	}
	return &Regexp{
		factory:   factory,
		flags:     regexpFlagList(defaultFlags),
		quotemeta: false,
		name:      "Regexp",
		outCh:     pipeline.ChanOutput(make(chan interface{})),
	}
}
go
{ "resource": "" }
q12317
NewSmartCase
train
func NewSmartCase() *Regexp { rf := NewRegexp() rf.quotemeta = true rf.name = "SmartCase" rf.flags = regexpFlagFunc(func(q string) []string { if util.ContainsUpper(q) { return defaultFlags } return []string{"i"} }) return rf }
go
{ "resource": "" }
q12318
NewRaw
train
func NewRaw(id uint64, v string, enableSep bool) *Raw { rl := &Raw{ id: id, buf: v, sepLoc: -1, displayString: "", dirty: false, } if !enableSep { return rl } if i := strings.IndexByte(rl.buf, '\000'); i != -1 { rl.sepLoc = i } return rl }
go
{ "resource": "" }
q12319
Less
train
// Less orders Raw lines by ID for btree storage. b must implement Line; the
// type assertion panics otherwise.
func (rl *Raw) Less(b btree.Item) bool {
	other := b.(Line)
	return rl.id < other.ID()
}
go
{ "resource": "" }
q12320
DisplayString
train
// DisplayString returns the line's display text: the buffer up to the
// separator (when one was recorded) with ANSI escape sequences stripped.
//
// NOTE(review): the receiver is a value, so the assignments to
// rl.displayString below mutate a copy — the "cache" never persists across
// calls and the strip is recomputed every time. Switching to a pointer
// receiver would fix this but changes the type's method set; confirm against
// the Line interface before doing so.
func (rl Raw) DisplayString() string {
	if rl.displayString != "" {
		return rl.displayString
	}
	if i := rl.sepLoc; i > -1 {
		rl.displayString = util.StripANSISequence(rl.buf[:i])
	} else {
		rl.displayString = util.StripANSISequence(rl.buf)
	}
	return rl.displayString
}
go
{ "resource": "" }
q12321
Init
train
func (c *Config) Init() error { c.Keymap = make(map[string]string) c.InitialMatcher = IgnoreCaseMatch c.Style.Init() c.Prompt = "QUERY>" c.Layout = LayoutTypeTopDown return nil }
go
{ "resource": "" }
q12322
ReadFilename
train
func (c *Config) ReadFilename(filename string) error { f, err := os.Open(filename) if err != nil { return errors.Wrapf(err, "failed to open file %s", filename) } defer f.Close() err = json.NewDecoder(f).Decode(c) if err != nil { return errors.Wrap(err, "failed to decode JSON") } if !IsValidLayoutType(LayoutType(c.Layout)) { return errors.Errorf("invalid layout type: %s", c.Layout) } if len(c.CustomMatcher) > 0 { fmt.Fprintf(os.Stderr, "'CustomMatcher' is deprecated. Use CustomFilter instead\n") for n, cfg := range c.CustomMatcher { if _, ok := c.CustomFilter[n]; ok { return errors.Errorf("failed to create CustomFilter: '%s' already exists. Refusing to overwrite with deprecated CustomMatcher config", n) } c.CustomFilter[n] = CustomFilterConfig{ Cmd: cfg[0], Args: cfg[1:], BufferThreshold: filter.DefaultCustomFilterBufferThreshold, } } } return nil }
go
{ "resource": "" }
q12323
UnmarshalJSON
train
func (s *Style) UnmarshalJSON(buf []byte) error { raw := []string{} if err := json.Unmarshal(buf, &raw); err != nil { return errors.Wrapf(err, "failed to unmarshal Style") } return stringsToStyle(s, raw) }
go
{ "resource": "" }
q12324
LocateRcfile
train
func LocateRcfile(locater configLocateFunc) (string, error) { // http://standards.freedesktop.org/basedir-spec/basedir-spec-latest.html // // Try in this order: // $XDG_CONFIG_HOME/peco/config.json // $XDG_CONFIG_DIR/peco/config.json (where XDG_CONFIG_DIR is listed in $XDG_CONFIG_DIRS) // ~/.peco/config.json home, uErr := homedirFunc() // Try dir supplied via env var if dir := os.Getenv("XDG_CONFIG_HOME"); dir != "" { if file, err := locater(filepath.Join(dir, "peco")); err == nil { return file, nil } } else if uErr == nil { // silently ignore failure for homedir() // Try "default" XDG location, is user is available if file, err := locater(filepath.Join(home, ".config", "peco")); err == nil { return file, nil } } // this standard does not take into consideration windows (duh) // while the spec says use ":" as the separator, Go provides us // with filepath.ListSeparator, so use it if dirs := os.Getenv("XDG_CONFIG_DIRS"); dirs != "" { for _, dir := range strings.Split(dirs, fmt.Sprintf("%c", filepath.ListSeparator)) { if file, err := locater(filepath.Join(dir, "peco")); err == nil { return file, nil } } } if uErr == nil { // silently ignore failure for homedir() if file, err := locater(filepath.Join(home, ".peco")); err == nil { return file, nil } } return "", errors.New("config file not found") }
go
{ "resource": "" }
q12325
flusher
train
func flusher(ctx context.Context, f filter.Filter, incoming chan []line.Line, done chan struct{}, out pipeline.ChanOutput) { if pdebug.Enabled { g := pdebug.Marker("flusher goroutine") defer g.End() } defer close(done) defer out.SendEndMark("end of filter") for { select { case <-ctx.Done(): return case buf, ok := <-incoming: if !ok { return } pdebug.Printf("flusher: %#v", buf) f.Apply(ctx, buf, out) buffer.ReleaseLineListBuf(buf) } } }
go
{ "resource": "" }
q12326
Loop
train
func (f *Filter) Loop(ctx context.Context, cancel func()) error { defer cancel() // previous holds the function that can cancel the previous // query. This is used when multiple queries come in succession // and the previous query is discarded anyway var mutex sync.Mutex var previous func() for { select { case <-ctx.Done(): return nil case q := <-f.state.Hub().QueryCh(): workctx, workcancel := context.WithCancel(ctx) mutex.Lock() if previous != nil { if pdebug.Enabled { pdebug.Printf("Canceling previous query") } previous() } previous = workcancel mutex.Unlock() f.state.Hub().SendStatusMsg("Running query...") go f.Work(workctx, q) } } }
go
{ "resource": "" }
q12327
Crop
train
// Crop returns a FilteredBuffer view of in restricted to the receiver's
// current page and page size.
func (pf PageCrop) Crop(in Buffer) *FilteredBuffer {
	return NewFilteredBuffer(in, pf.currentPage, pf.perPage)
}
go
{ "resource": "" }
q12328
Setup
train
// Setup reads every line from the source's input into the buffer, exactly
// once (guarded by setupOnce). While reading it periodically asks the hub to
// redraw, signals readiness after the first line arrives, and closes
// setupDone when all input has been consumed or ctx is canceled.
func (s *Source) Setup(ctx context.Context, state *Peco) {
	s.setupOnce.Do(func() {
		done := make(chan struct{})
		// NOTE(review): refresh is created and closed but never otherwise
		// used in this function — possibly vestigial; confirm.
		refresh := make(chan struct{}, 1)
		defer close(done)
		defer close(refresh)

		// And also, close the done channel so we can tell the consumers
		// we have finished reading everything
		defer close(s.setupDone)

		draw := func(state *Peco) {
			state.Hub().SendDraw(nil)
		}

		// Periodic redraw while input is being consumed; one final draw on
		// completion.
		go func() {
			ticker := time.NewTicker(100 * time.Millisecond)
			defer ticker.Stop()
			for {
				select {
				case <-done:
					draw(state)
					return
				case <-ticker.C:
					draw(state)
				}
			}
		}()

		// This sync.Once var is used to receive the notification
		// that there was at least 1 line read from the source
		// This is wrapped in a sync.Notify so we can safely call
		// it in multiple places
		var notify sync.Once
		notifycb := func() {
			// close the ready channel so others can be notified
			// that there's at least 1 line in the buffer
			state.Hub().SendStatusMsg("")
			close(s.ready)
		}
		// Register this to be called in a defer, in case we bailed out
		// without reading a single line.
		// Note: this will be a no-op if notify.Do has been called before
		defer notify.Do(notifycb)

		if pdebug.Enabled {
			pdebug.Printf("Source: using buffer size of %dkb", state.maxScanBufferSize)
		}
		scanbuf := make([]byte, state.maxScanBufferSize*1024)
		scanner := bufio.NewScanner(s.in)
		scanner.Buffer(scanbuf, state.maxScanBufferSize*1024)

		// Close the input when it is not a tty (e.g. a pipe) and supports
		// closing.
		defer func() {
			if util.IsTty(s.in) {
				return
			}
			if closer, ok := s.in.(io.Closer); ok {
				closer.Close()
			}
		}()

		// Producer goroutine: push each scanned line onto lines, closing it
		// when the input is exhausted.
		lines := make(chan string)
		go func() {
			var scanned int
			if pdebug.Enabled {
				defer func() {
					pdebug.Printf("Source scanned %d lines", scanned)
				}()
			}
			defer close(lines)
			for scanner.Scan() {
				lines <- scanner.Text()
				scanned++
			}
		}()

		state.Hub().SendStatusMsg("Waiting for input...")

		// Consumer loop: append each line to the buffer until the producer
		// closes lines or ctx is canceled.
		readCount := 0
		for loop := true; loop; {
			select {
			case <-ctx.Done():
				if pdebug.Enabled {
					pdebug.Printf("Bailing out of source setup, because ctx was canceled")
				}
				return
			case l, ok := <-lines:
				if !ok {
					if pdebug.Enabled {
						pdebug.Printf("No more lines to read...")
					}
					loop = false
					break
				}
				readCount++
				s.Append(line.NewRaw(s.idgen.Next(), l, s.enableSep))
				notify.Do(notifycb)
			}
		}
		if pdebug.Enabled {
			pdebug.Printf("Read all %d lines from source", readCount)
		}
	})
}
go
{ "resource": "" }
q12329
Reset
train
func (s *Source) Reset() { if pdebug.Enabled { g := pdebug.Marker("Source.Reset") defer g.End() } s.ChanOutput = pipeline.ChanOutput(make(chan interface{})) }
go
{ "resource": "" }
q12330
GetScheduleTime
train
func (schedule Schedule) GetScheduleTime() *time.Time { if scheduleTime := schedule.ScheduleTime; scheduleTime != nil { if scheduleTime.After(time.Now().Add(time.Minute)) { return scheduleTime } } return nil }
go
{ "resource": "" }
q12331
Add
train
// Add registers job for execution. A job with a future schedule time is
// written out as a crontab entry that re-invokes this binary with --qor-job;
// an immediate job is started right away as a child process. Any existing
// entry for the same job ID is replaced. The rewritten job list is flushed
// by the deferred writeCronJob.
func (cron *Cron) Add(job QorJobInterface) (err error) {
	cron.parseJobs()
	defer cron.writeCronJob()

	var binaryFile string
	if binaryFile, err = filepath.Abs(os.Args[0]); err == nil {
		// Keep every entry except a stale one for this job ID.
		var jobs []*cronJob
		for _, cronJob := range cron.Jobs {
			if cronJob.JobID != job.GetJobID() {
				jobs = append(jobs, cronJob)
			}
		}

		if scheduler, ok := job.GetArgument().(Scheduler); ok && scheduler.GetScheduleTime() != nil {
			// Scheduled job: emit a "min hour day month *" crontab line.
			scheduleTime := scheduler.GetScheduleTime().In(time.Local)
			job.SetStatus(JobStatusScheduled)

			currentPath, _ := os.Getwd()
			jobs = append(jobs, &cronJob{
				JobID:   job.GetJobID(),
				Command: fmt.Sprintf("%d %d %d %d * cd %v; %v --qor-job %v\n", scheduleTime.Minute(), scheduleTime.Hour(), scheduleTime.Day(), scheduleTime.Month(), currentPath, binaryFile, job.GetJobID()),
			})
		} else {
			// Immediate job: spawn the worker process and record its PID so
			// Kill/Remove can find it later.
			cmd := exec.Command(binaryFile, "--qor-job", job.GetJobID())
			if err = cmd.Start(); err == nil {
				jobs = append(jobs, &cronJob{JobID: job.GetJobID(), Pid: cmd.Process.Pid})
				cmd.Process.Release()
			}
		}

		cron.Jobs = jobs
	}

	return
}
go
{ "resource": "" }
q12332
Run
train
// Run executes the job's handler in the current process, keeping the job
// record refreshed while it runs. A SIGINT/SIGTERM watcher marks the job as
// killed and exits the process. On success the job's cron entry is flagged
// for deletion and the crontab rewritten.
func (cron *Cron) Run(qorJob QorJobInterface) error {
	job := qorJob.GetJob()

	if job.Handler != nil {
		go func() {
			sigint := make(chan os.Signal, 1)

			// interrupt signal sent from terminal
			signal.Notify(sigint, syscall.SIGINT)
			// sigterm signal sent from kubernetes
			signal.Notify(sigint, syscall.SIGTERM)

			i := <-sigint

			// Record why we died before exiting with the signal number.
			qorJob.SetProgressText(fmt.Sprintf("Worker killed by signal %s", i.String()))
			qorJob.SetStatus(JobStatusKilled)

			qorJob.StopReferesh()
			os.Exit(int(reflect.ValueOf(i).Int()))
		}()

		qorJob.StartReferesh()
		defer qorJob.StopReferesh()

		err := job.Handler(qorJob.GetSerializableArgument(qorJob), qorJob)
		if err == nil {
			// Success: drop this job's crontab entry.
			cron.parseJobs()
			defer cron.writeCronJob()
			for _, cronJob := range cron.Jobs {
				if cronJob.JobID == qorJob.GetJobID() {
					cronJob.Delete = true
				}
			}
		}
		return err
	}

	return errors.New("no handler found for job " + job.Name)
}
go
{ "resource": "" }
q12333
Kill
train
func (cron *Cron) Kill(job QorJobInterface) (err error) { cron.parseJobs() defer cron.writeCronJob() for _, cronJob := range cron.Jobs { if cronJob.JobID == job.GetJobID() { if process, err := os.FindProcess(cronJob.Pid); err == nil { if err = process.Kill(); err == nil { cronJob.Delete = true return nil } } return err } } return errors.New("failed to find job") }
go
{ "resource": "" }
q12334
Remove
train
func (cron *Cron) Remove(job QorJobInterface) error { cron.parseJobs() defer cron.writeCronJob() for _, cronJob := range cron.Jobs { if cronJob.JobID == job.GetJobID() { if cronJob.Pid == 0 { cronJob.Delete = true return nil } return errors.New("failed to remove current job as it is running") } } return errors.New("failed to find job") }
go
{ "resource": "" }
q12335
New
train
func New(config ...*Config) *Worker { var cfg = &Config{} if len(config) > 0 { cfg = config[0] } if cfg.Job == nil { cfg.Job = &QorJob{} } if cfg.Queue == nil { cfg.Queue = NewCronQueue() } return &Worker{Config: cfg} }
go
{ "resource": "" }
q12336
ConfigureQorResourceBeforeInitialize
train
// ConfigureQorResourceBeforeInitialize wires the worker into the qor admin:
// it registers the view path and theme, creates the job resource with its
// meta/index configuration, installs per-status scopes plus a default scope
// filtering by job kind/group, auto-migrates the job model, and gives every
// registered job a resource.
func (worker *Worker) ConfigureQorResourceBeforeInitialize(res resource.Resourcer) {
	if res, ok := res.(*admin.Resource); ok {
		res.GetAdmin().RegisterViewPath("github.com/qor/worker/views")
		res.UseTheme("worker")

		worker.Admin = res.GetAdmin()
		worker.JobResource = worker.Admin.NewResource(worker.Config.Job)
		worker.JobResource.UseTheme("worker")
		worker.JobResource.Meta(&admin.Meta{Name: "Name", Valuer: func(record interface{}, context *qor.Context) interface{} {
			return record.(QorJobInterface).GetJobName()
		}})
		worker.JobResource.IndexAttrs("ID", "Name", "Status", "CreatedAt")
		worker.JobResource.Name = res.Name

		// One scope per job status.
		for _, status := range []string{JobStatusScheduled, JobStatusNew, JobStatusRunning, JobStatusDone, JobStatusException} {
			// rebind so each closure captures its own status value
			var status = status
			worker.JobResource.Scope(&admin.Scope{Name: status, Handler: func(db *gorm.DB, ctx *qor.Context) *gorm.DB {
				return db.Where("status = ?", status)
			}})
		}

		// default scope
		worker.JobResource.Scope(&admin.Scope{
			Handler: func(db *gorm.DB, ctx *qor.Context) *gorm.DB {
				// ?job=<name> filters on a single job kind.
				if jobName := ctx.Request.URL.Query().Get("job"); jobName != "" {
					return db.Where("kind = ?", jobName)
				}

				// ?group=<name> filters on every job in that group.
				if groupName := ctx.Request.URL.Query().Get("group"); groupName != "" {
					var jobNames []string
					for _, job := range worker.Jobs {
						if groupName == job.Group {
							jobNames = append(jobNames, job.Name)
						}
					}
					if len(jobNames) > 0 {
						return db.Where("kind IN (?)", jobNames)
					}
					return db.Where("kind IS NULL")
				}

				// Otherwise restrict to all registered job kinds.
				{
					var jobNames []string
					for _, job := range worker.Jobs {
						jobNames = append(jobNames, job.Name)
					}
					if len(jobNames) > 0 {
						return db.Where("kind IN (?)", jobNames)
					}
				}
				return db
			},
			Default: true,
		})

		// Auto Migration
		worker.Admin.DB.AutoMigrate(worker.Config.Job)

		// Configure jobs
		for _, job := range worker.Jobs {
			if job.Resource == nil {
				job.Resource = worker.Admin.NewResource(worker.JobResource.Value)
			}
		}
	}
}
go
{ "resource": "" }
q12337
RegisterJob
train
func (worker *Worker) RegisterJob(job *Job) error { if worker.mounted { debug.PrintStack() fmt.Printf("Job should be registered before Worker mounted into admin, but %v is registered after that", job.Name) } job.Worker = worker worker.Jobs = append(worker.Jobs, job) return nil }
go
{ "resource": "" }
q12338
GetRegisteredJob
train
func (worker *Worker) GetRegisteredJob(name string) *Job { for _, job := range worker.Jobs { if job.Name == name { return job } } return nil }
go
{ "resource": "" }
q12339
GetJob
train
func (worker *Worker) GetJob(jobID string) (QorJobInterface, error) { qorJob := worker.JobResource.NewStruct().(QorJobInterface) context := worker.Admin.NewContext(nil, nil) context.ResourceID = jobID context.Resource = worker.JobResource if err := worker.JobResource.FindOneHandler(qorJob, nil, context.Context); err == nil { for _, job := range worker.Jobs { if job.Name == qorJob.GetJobName() { qorJob.SetJob(job) return qorJob, nil } } return nil, fmt.Errorf("failed to load job: %v, unregistered job type: %v", jobID, qorJob.GetJobName()) } return nil, fmt.Errorf("failed to find job: %v", jobID) }
go
{ "resource": "" }
q12340
AddJob
train
// AddJob enqueues qorJob on the worker's configured queue.
func (worker *Worker) AddJob(qorJob QorJobInterface) error {
	return worker.Queue.Add(qorJob)
}
go
{ "resource": "" }
q12341
RunJob
train
// RunJob executes the job with jobID through its queue: it validates the
// current status (only new/scheduled jobs may run), flips it to running,
// runs it, and records done/exception accordingly. A panic in the handler is
// recovered, logged with a stack trace, and marked as an exception.
func (worker *Worker) RunJob(jobID string) error {
	qorJob, err := worker.GetJob(jobID)

	if qorJob != nil && err == nil {
		defer func() {
			if r := recover(); r != nil {
				qorJob.AddLog(string(debug.Stack()))
				qorJob.SetProgressText(fmt.Sprint(r))
				qorJob.SetStatus(JobStatusException)
			}
		}()

		if qorJob.GetStatus() != JobStatusNew && qorJob.GetStatus() != JobStatusScheduled {
			return errors.New("invalid job status, current status: " + qorJob.GetStatus())
		}

		if err = qorJob.SetStatus(JobStatusRunning); err == nil {
			if err = qorJob.GetJob().GetQueue().Run(qorJob); err == nil {
				return qorJob.SetStatus(JobStatusDone)
			}

			// Run failed: record the error on the job before returning it.
			qorJob.SetProgressText(err.Error())
			qorJob.SetStatus(JobStatusException)
		}
	}

	return err
}
go
{ "resource": "" }
q12342
KillJob
train
func (worker *Worker) KillJob(jobID string) error { if qorJob, err := worker.GetJob(jobID); err == nil { if qorJob.GetStatus() == JobStatusRunning { if err = qorJob.GetJob().GetQueue().Kill(qorJob); err == nil { qorJob.SetStatus(JobStatusKilled) return nil } return err } else if qorJob.GetStatus() == JobStatusScheduled || qorJob.GetStatus() == JobStatusNew { qorJob.SetStatus(JobStatusKilled) return worker.RemoveJob(jobID) } else { return errors.New("invalid job status") } } else { return err } }
go
{ "resource": "" }
q12343
RemoveJob
train
func (worker *Worker) RemoveJob(jobID string) error { qorJob, err := worker.GetJob(jobID) if err == nil { return qorJob.GetJob().GetQueue().Remove(qorJob) } return err }
go
{ "resource": "" }
q12344
NewStruct
train
func (job *Job) NewStruct() interface{} { qorJobInterface := job.Worker.JobResource.NewStruct().(QorJobInterface) qorJobInterface.SetJob(job) return qorJobInterface }
go
{ "resource": "" }
q12345
GetQueue
train
func (job *Job) GetQueue() Queue { if job.Queue != nil { return job.Queue } return job.Worker.Queue }
go
{ "resource": "" }
q12346
New
train
func New(config *Config) (*Kubernetes, error) { var err error if config == nil { config = &Config{} } if config.ClusterConfig == nil { config.ClusterConfig, err = rest.InClusterConfig() } if err != nil { return nil, err } clientset, err := kubernetes.NewForConfig(config.ClusterConfig) if err != nil { return nil, err } return &Kubernetes{Clientset: clientset, Config: config}, nil }
go
{ "resource": "" }
q12347
GetCurrentPod
train
func (k8s *Kubernetes) GetCurrentPod() *corev1.Pod { var ( podlist, err = k8s.Clientset.Core().Pods("").List(metav1.ListOptions{}) localeIP = GetLocalIP() ) if err == nil { for _, item := range podlist.Items { if item.Status.PodIP == localeIP { return &item } } } return nil }
go
{ "resource": "" }
q12348
GetJobSpec
train
func (k8s *Kubernetes) GetJobSpec(qorJob worker.QorJobInterface) (*v1.Job, error) { var ( k8sJob = &v1.Job{} currentPod = k8s.GetCurrentPod() namespace = currentPod.GetNamespace() ) if k8s.Config.Namespace != "" { namespace = k8s.Config.Namespace } if k8s.Config.JobTemplateMaker != nil { if err := yaml.Unmarshal([]byte(k8s.Config.JobTemplateMaker(qorJob)), k8sJob); err != nil { return nil, err } if k8sJob.ObjectMeta.Namespace != "" { namespace = k8sJob.ObjectMeta.Namespace } } else { if marshaledContainers, err := json.Marshal(currentPod.Spec.Containers); err == nil { json.Unmarshal(marshaledContainers, k8sJob.Spec.Template.Spec.Containers) } if marshaledVolumes, err := json.Marshal(currentPod.Spec.Volumes); err == nil { json.Unmarshal(marshaledVolumes, k8sJob.Spec.Template.Spec.Volumes) } } if k8sJob.TypeMeta.Kind == "" { k8sJob.TypeMeta.Kind = "Job" } if k8sJob.TypeMeta.APIVersion == "" { k8sJob.TypeMeta.APIVersion = "batch/v1" } if k8sJob.ObjectMeta.Namespace == "" { k8sJob.ObjectMeta.Namespace = namespace } return k8sJob, nil }
go
{ "resource": "" }
q12349
Add
train
func (k8s *Kubernetes) Add(qorJob worker.QorJobInterface) error { var ( jobName = fmt.Sprintf("qor-job-%v", qorJob.GetJobID()) k8sJob, err = k8s.GetJobSpec(qorJob) currentPath, _ = os.Getwd() binaryFile, _ = filepath.Abs(os.Args[0]) ) if err == nil { k8sJob.ObjectMeta.Name = jobName k8sJob.Spec.Template.ObjectMeta.Name = jobName if k8sJob.Spec.Template.Spec.RestartPolicy == "" { k8sJob.Spec.Template.Spec.RestartPolicy = "Never" } for idx, container := range k8sJob.Spec.Template.Spec.Containers { if len(container.Command) == 0 || k8s.Config.JobTemplateMaker == nil { container.Command = []string{binaryFile, "--qor-job", qorJob.GetJobID()} } if container.WorkingDir == "" || k8s.Config.JobTemplateMaker == nil { container.WorkingDir = currentPath } k8sJob.Spec.Template.Spec.Containers[idx] = container } _, err = k8s.Clientset.Batch().Jobs(k8sJob.ObjectMeta.GetNamespace()).Create(k8sJob) } return err }
go
{ "resource": "" }
q12350
Run
train
func (k8s *Kubernetes) Run(qorJob worker.QorJobInterface) error { job := qorJob.GetJob() if job.Handler != nil { return job.Handler(qorJob.GetSerializableArgument(qorJob), qorJob) } return errors.New("no handler found for job " + job.Name) }
go
{ "resource": "" }
q12351
Kill
train
func (k8s *Kubernetes) Kill(qorJob worker.QorJobInterface) error { var ( k8sJob, err = k8s.GetJobSpec(qorJob) jobName = fmt.Sprintf("qor-job-%v", qorJob.GetJobID()) ) if err == nil { return k8s.Clientset.Batch().Jobs(k8sJob.ObjectMeta.GetNamespace()).Delete(jobName, &metav1.DeleteOptions{}) } return err }
go
{ "resource": "" }
q12352
Scan
train
func (resultsTable *ResultsTable) Scan(data interface{}) error { switch values := data.(type) { case []byte: return json.Unmarshal(values, resultsTable) case string: return resultsTable.Scan([]byte(values)) default: return errors.New("unsupported data type for Qor Job error table") } }
go
{ "resource": "" }
q12353
Value
train
func (resultsTable ResultsTable) Value() (driver.Value, error) { result, err := json.Marshal(resultsTable) return string(result), err }
go
{ "resource": "" }
q12354
SetStatus
train
func (job *QorJob) SetStatus(status string) error { job.mutex.Lock() defer job.mutex.Unlock() job.Status = status if status == JobStatusDone { job.Progress = 100 } if job.shouldCallSave() { return job.callSave() } return nil }
go
{ "resource": "" }
q12355
SetJob
train
// SetJob attaches the job definition j and mirrors its name into Kind,
// which the admin scopes use to filter records by job type.
func (job *QorJob) SetJob(j *Job) {
	job.Kind = j.Name
	job.Job = j
}
go
{ "resource": "" }
q12356
GetJob
train
func (job *QorJob) GetJob() *Job { if job.Job != nil { return job.Job } return nil }
go
{ "resource": "" }
q12357
GetSerializableArgumentResource
train
func (job *QorJob) GetSerializableArgumentResource() *admin.Resource { if j := job.GetJob(); j != nil { return j.Resource } return nil }
go
{ "resource": "" }
q12358
SetProgress
train
func (job *QorJob) SetProgress(progress uint) error { job.mutex.Lock() defer job.mutex.Unlock() if progress > 100 { progress = 100 } job.Progress = progress if job.shouldCallSave() { return job.callSave() } return nil }
go
{ "resource": "" }
q12359
SetProgressText
train
func (job *QorJob) SetProgressText(str string) error { job.mutex.Lock() defer job.mutex.Unlock() job.ProgressText = str if job.shouldCallSave() { return job.callSave() } return nil }
go
{ "resource": "" }
q12360
AddLog
train
func (job *QorJob) AddLog(log string) error { job.mutex.Lock() defer job.mutex.Unlock() fmt.Println(log) job.Log += "\n" + log if job.shouldCallSave() { return job.callSave() } return nil }
go
{ "resource": "" }
q12361
AddResultsRow
train
func (job *QorJob) AddResultsRow(cells ...TableCell) error { job.mutex.Lock() defer job.mutex.Unlock() job.ResultsTable.TableCells = append(job.ResultsTable.TableCells, cells) if job.shouldCallSave() { return job.callSave() } return nil }
go
{ "resource": "" }
q12362
ParseChannelSizeFlag
train
// ParseChannelSizeFlag parses a "--channel_size=N" command line flag and
// returns N. It errors when the flag is malformed, has a different name, or
// N is not an integer.
func ParseChannelSizeFlag(flag string) (int, error) {
	parts := strings.Split(flag, "=")
	if len(parts) != 2 {
		return 0, fmt.Errorf("Failed split flag %s", flag)
	}
	if parts[0] != "--channel_size" {
		return 0, fmt.Errorf("Unknown flag %s", flag)
	}
	return strconv.Atoi(parts[1])
}
go
{ "resource": "" }
q12363
Help
train
func Help(args []string, cmds []*command.Command) int { var ( cmd string ) if len(args) >= 3 { cmd = args[2] } // Prints the help if the command exist. for _, c := range cmds { if c.Name() == cmd { return c.Usage() } } if cmd == "" { fmt.Fprintf(os.Stderr, "missing help command. Usage:\n\n\t$ bw help [command]\n\nAvailable help commands\n\n") var usage []string for _, c := range cmds { name := c.Name() for i := len(name); i < 12; i++ { name += " " } usage = append(usage, fmt.Sprintf("\t%s\t- %s\n", name, c.Short)) } sort.Strings(usage) for _, u := range usage { fmt.Fprint(os.Stderr, u) } fmt.Fprintln(os.Stderr, "") return 0 } fmt.Fprintf(os.Stderr, "help command %q not recognized. Usage:\n\n\t$ bw help\n\n", cmd) return 2 }
go
{ "resource": "" }
q12364
InitializeDriver
train
func InitializeDriver(driverName string, drivers map[string]StoreGenerator) (storage.Store, error) { f, ok := drivers[driverName] if !ok { var ds []string for k := range drivers { ds = append(ds, k) } return nil, fmt.Errorf("unknown driver name %q; valid drivers [%q]", driverName, strings.Join(ds, ", ")) } return f() }
go
{ "resource": "" }
q12365
InitializeCommands
train
// InitializeCommands returns the full bw command set, wiring each command to
// the given store driver, channel/bulk/builder sizing parameters, the REPL
// line reader, and the done channel.
func InitializeCommands(driver storage.Store, chanSize, bulkTripleOpSize, builderSize int, rl repl.ReadLiner, done chan bool) []*command.Command {
	return []*command.Command{
		assert.New(driver, literal.DefaultBuilder(), chanSize, bulkTripleOpSize),
		benchmark.New(driver, chanSize, bulkTripleOpSize),
		export.New(driver, bulkTripleOpSize),
		load.New(driver, bulkTripleOpSize, builderSize),
		run.New(driver, chanSize, bulkTripleOpSize),
		repl.New(driver, chanSize, bulkTripleOpSize, builderSize, rl, done),
		server.New(driver, chanSize, bulkTripleOpSize),
		version.New(),
	}
}
go
{ "resource": "" }
q12366
Eval
train
func Eval(ctx context.Context, args []string, cmds []*command.Command) int { // Retrieve the provided command. if len(args) < 1 { return Help(args, cmds) } cmd := args[0] // Check for help request. if cmd == "help" { return Help(args, cmds) } // Run the requested command. for _, c := range cmds { if c.Name() == cmd { return c.Run(ctx, args) } } // The command was not found. if cmd != "" { fmt.Fprintf(os.Stderr, "command %q not recognized. Usage:\n\n\t$ bw [command]\n\nPlease run\n\n\t$ bw help\n\n", cmd) } return 1 }
go
{ "resource": "" }
q12367
Run
train
func Run(driverName string, args []string, drivers map[string]StoreGenerator, chanSize, bulkTripleOpSize, builderSize int, rl repl.ReadLiner) int { driver, err := InitializeDriver(driverName, drivers) if err != nil { fmt.Fprintln(os.Stderr, err) return 2 } return Eval(context.Background(), args, InitializeCommands(driver, chanSize, bulkTripleOpSize, builderSize, rl, make(chan bool))) }
go
{ "resource": "" }
q12368
Name
train
// Name delegates to the wrapped store.
func (s *storeMemoizer) Name(ctx context.Context) string {
	return s.s.Name(ctx)
}
go
{ "resource": "" }
q12369
Version
train
// Version delegates to the wrapped store.
func (s *storeMemoizer) Version(ctx context.Context) string {
	return s.s.Version(ctx)
}
go
{ "resource": "" }
q12370
NewGraph
train
func (s *storeMemoizer) NewGraph(ctx context.Context, id string) (storage.Graph, error) { g, err := s.s.NewGraph(ctx, id) if err != nil { return nil, err } return &graphMemoizer{ g: g, memN: make(map[string][]*node.Node), memP: make(map[string][]*predicate.Predicate), memO: make(map[string][]*triple.Object), memT: make(map[string][]*triple.Triple), memE: make(map[string]bool), }, nil }
go
{ "resource": "" }
q12371
ID
train
// ID delegates to the wrapped graph.
func (g *graphMemoizer) ID(ctx context.Context) string {
	return g.g.ID(ctx)
}
go
{ "resource": "" }
q12372
AddTriples
train
// AddTriples adds the given triples to the underlying graph. Because any
// mutation can invalidate previously memoized query results, all caches are
// reset before delegating the write.
func (g *graphMemoizer) AddTriples(ctx context.Context, ts []*triple.Triple) error {
	g.mu.Lock()
	// Update operations reset the memoization.
	g.memN = make(map[string][]*node.Node)
	g.memP = make(map[string][]*predicate.Predicate)
	g.memO = make(map[string][]*triple.Object)
	g.memT = make(map[string][]*triple.Triple)
	g.memE = make(map[string]bool)
	g.mu.Unlock()
	// NOTE(review): the caches are cleared before the write completes, so a
	// concurrent query issued in between could memoize pre-update results —
	// confirm whether callers serialize updates with reads.
	return g.g.AddTriples(ctx, ts)
}
go
{ "resource": "" }
q12373
Objects
train
// Objects streams the objects for the given subject and predicate into objs,
// closing objs when done. Complete result sets are memoized per
// (subject, predicate, options) key and served from cache until the next
// mutation resets the memoizer. Partial (cancelled) runs are not memoized.
func (g *graphMemoizer) Objects(ctx context.Context, s *node.Node, p *predicate.Predicate, lo *storage.LookupOptions, objs chan<- *triple.Object) error {
	// Cache key combines the operation name, lookup options, and argument UUIDs.
	k := combinedUUID("Objects", lo, s.UUID(), p.UUID())
	g.mu.RLock()
	v := g.memO[k]
	g.mu.RUnlock()
	if v != nil {
		// Return the memoized results.
		defer close(objs)
		for _, o := range v {
			select {
			case <-ctx.Done():
				return nil
			case objs <- o:
				// Nothing to do.
			}
		}
		return nil
	}
	// Query and memoize the results.
	c := make(chan *triple.Object)
	defer close(objs)
	var (
		err   error
		wg    sync.WaitGroup
		mobjs []*triple.Object
	)
	wg.Add(1)
	go func() {
		// The underlying graph is expected to close c when finished.
		err = g.g.Objects(ctx, s, p, lo, c)
		wg.Done()
	}()
	for o := range c {
		select {
		case <-ctx.Done():
			// NOTE(review): returning here can leave the producer goroutine
			// blocked sending on c if the underlying store ignores ctx — confirm.
			return errors.New("context cancelled")
		case objs <- o:
			// memoize the object.
			mobjs = append(mobjs, o)
		}
	}
	wg.Wait()
	g.mu.Lock()
	g.memO[k] = mobjs
	g.mu.Unlock()
	return err
}
go
{ "resource": "" }
q12374
Subjects
train
// Subjects streams the subjects matching the given predicate and object into
// subs, closing subs when done. Complete result sets are memoized per
// (predicate, object, options) key until the next mutation resets the caches.
func (g *graphMemoizer) Subjects(ctx context.Context, p *predicate.Predicate, o *triple.Object, lo *storage.LookupOptions, subs chan<- *node.Node) error {
	// Cache key combines the operation name, lookup options, and argument UUIDs.
	k := combinedUUID("Subjects", lo, p.UUID(), o.UUID())
	g.mu.RLock()
	v := g.memN[k]
	g.mu.RUnlock()
	if v != nil {
		// Return the memoized results.
		defer close(subs)
		for _, s := range v {
			select {
			case <-ctx.Done():
				return nil
			case subs <- s:
				// Nothing to do.
			}
		}
		return nil
	}
	// Query and memoize the results.
	c := make(chan *node.Node)
	defer close(subs)
	var (
		err   error
		wg    sync.WaitGroup
		msubs []*node.Node
	)
	wg.Add(1)
	go func() {
		// The underlying graph is expected to close c when finished.
		err = g.g.Subjects(ctx, p, o, lo, c)
		wg.Done()
	}()
	for s := range c {
		select {
		case <-ctx.Done():
			// NOTE(review): returning here can leave the producer goroutine
			// blocked sending on c if the underlying store ignores ctx — confirm.
			return errors.New("context cancelled")
		case subs <- s:
			// Memoize the subject.
			msubs = append(msubs, s)
		}
	}
	wg.Wait()
	g.mu.Lock()
	g.memN[k] = msubs
	g.mu.Unlock()
	return err
}
go
{ "resource": "" }
q12375
PredicatesForSubjectAndObject
train
// PredicatesForSubjectAndObject streams the predicates linking the given
// subject and object into prds, closing prds when done. Complete result sets
// are memoized per (subject, object, options) key until the next mutation
// resets the caches.
func (g *graphMemoizer) PredicatesForSubjectAndObject(ctx context.Context, s *node.Node, o *triple.Object, lo *storage.LookupOptions, prds chan<- *predicate.Predicate) error {
	// Cache key combines the operation name, lookup options, and argument UUIDs.
	k := combinedUUID("PredicatesForSubjectAndObject", lo, s.UUID(), o.UUID())
	g.mu.RLock()
	v := g.memP[k]
	g.mu.RUnlock()
	if v != nil {
		// Return the memoized results.
		defer close(prds)
		for _, p := range v {
			select {
			case <-ctx.Done():
				return nil
			case prds <- p:
				// Nothing to do.
			}
		}
		return nil
	}
	// Query and memoize the results.
	c := make(chan *predicate.Predicate)
	defer close(prds)
	var (
		err    error
		wg     sync.WaitGroup
		mpreds []*predicate.Predicate
	)
	wg.Add(1)
	go func() {
		// The underlying graph is expected to close c when finished.
		err = g.g.PredicatesForSubjectAndObject(ctx, s, o, lo, c)
		wg.Done()
	}()
	for p := range c {
		select {
		case <-ctx.Done():
			// NOTE(review): returning here can leave the producer goroutine
			// blocked sending on c if the underlying store ignores ctx — confirm.
			return errors.New("context cancelled")
		case prds <- p:
			// Memoize the predicate.
			mpreds = append(mpreds, p)
		}
	}
	wg.Wait()
	g.mu.Lock()
	g.memP[k] = mpreds
	g.mu.Unlock()
	return err
}
go
{ "resource": "" }
q12376
TriplesForSubject
train
// TriplesForSubject streams all triples whose subject is s into trpls,
// closing trpls when done. Complete result sets are memoized per
// (subject, options) key until the next mutation resets the caches.
func (g *graphMemoizer) TriplesForSubject(ctx context.Context, s *node.Node, lo *storage.LookupOptions, trpls chan<- *triple.Triple) error {
	// Cache key combines the operation name, lookup options, and the subject UUID.
	k := combinedUUID("TriplesForSubject", lo, s.UUID())
	g.mu.RLock()
	v := g.memT[k]
	g.mu.RUnlock()
	if v != nil {
		// Return the memoized results.
		defer close(trpls)
		for _, t := range v {
			select {
			case <-ctx.Done():
				return nil
			case trpls <- t:
				// Nothing to do.
			}
		}
		return nil
	}
	// Query and memoize the results.
	c := make(chan *triple.Triple)
	defer close(trpls)
	var (
		err error
		wg  sync.WaitGroup
		mts []*triple.Triple
	)
	wg.Add(1)
	go func() {
		// The underlying graph is expected to close c when finished.
		err = g.g.TriplesForSubject(ctx, s, lo, c)
		wg.Done()
	}()
	for t := range c {
		select {
		case <-ctx.Done():
			// NOTE(review): returning here can leave the producer goroutine
			// blocked sending on c if the underlying store ignores ctx — confirm.
			return errors.New("context cancelled")
		case trpls <- t:
			// Memoize the triple.
			mts = append(mts, t)
		}
	}
	wg.Wait()
	g.mu.Lock()
	g.memT[k] = mts
	g.mu.Unlock()
	return err
}
go
{ "resource": "" }
q12377
Execute
train
func (p *createPlan) Execute(ctx context.Context) (*table.Table, error) { t, err := table.New([]string{}) if err != nil { return nil, err } errs := []string{} for _, g := range p.stm.GraphNames() { tracer.Trace(p.tracer, func() []string { return []string{"Creating new graph \"" + g + "\""} }) if _, err := p.store.NewGraph(ctx, g); err != nil { errs = append(errs, err.Error()) } } if len(errs) > 0 { return nil, errors.New(strings.Join(errs, "; ")) } return t, nil }
go
{ "resource": "" }
q12378
Execute
train
// Execute inserts the statement's triple data into every output graph via the
// shared update helper, returning an empty table on success.
func (p *insertPlan) Execute(ctx context.Context) (*table.Table, error) {
	t, err := table.New([]string{})
	if err != nil {
		return nil, err
	}
	// The callback is invoked once per target graph with the triples to add.
	return t, update(ctx, p.stm.Data(), p.stm.OutputGraphNames(), p.store, func(g storage.Graph, d []*triple.Triple) error {
		tracer.Trace(p.tracer, func() []string {
			return []string{"Inserting triples to graph \"" + g.ID(ctx) + "\""}
		})
		return g.AddTriples(ctx, d)
	})
}
go
{ "resource": "" }
q12379
newQueryPlan
train
func newQueryPlan(ctx context.Context, store storage.Store, stm *semantic.Statement, chanSize int, w io.Writer) (*queryPlan, error) { bs := []string{} for _, b := range stm.Bindings() { bs = append(bs, b) } t, err := table.New([]string{}) if err != nil { return nil, err } return &queryPlan{ stm: stm, store: store, bndgs: bs, grfsNames: stm.InputGraphNames(), cls: stm.GraphPatternClauses(), tbl: t, chanSize: chanSize, tracer: w, }, nil }
go
{ "resource": "" }
q12380
processClause
train
// processClause evaluates one graph clause against the cached graphs and
// folds the results into the working table. The returned bool reports whether
// the clause is unresolvable (a fully specified triple that does not exist).
func (p *queryPlan) processClause(ctx context.Context, cls *semantic.GraphClause, lo *storage.LookupOptions) (bool, error) {
	// This method decides how to process the clause based on the current
	// list of bindings solved and data available.
	if cls.Specificity() == 3 {
		// Fully grounded clause: a plain existence check suffices.
		tracer.Trace(p.tracer, func() []string {
			return []string{"Clause is fully specified"}
		})
		t, err := triple.New(cls.S, cls.P, cls.O)
		if err != nil {
			return false, err
		}
		b, tbl, err := simpleExist(ctx, p.grfs, cls, t)
		if err != nil {
			return false, err
		}
		if err := p.tbl.AppendTable(tbl); err != nil {
			return b, err
		}
		return b, nil
	}
	// Count how many of the clause's bindings are already solved.
	// NOTE(review): total is incremented but never read — candidate for removal.
	exist, total := 0, 0
	var existing []string
	for _, b := range cls.Bindings() {
		total++
		if p.tbl.HasBinding(b) {
			exist++
			existing = append(existing, b)
		}
	}
	if exist == 0 {
		tracer.Trace(p.tracer, func() []string {
			return []string{fmt.Sprintf("None of the clause binding exist %v/%v", cls.Bindings(), existing)}
		})
		// Data is new.
		// The statement limit can only be pushed down when no later
		// grouping/having stage could change the row count.
		stmLimit := int64(0)
		if len(p.stm.GraphPatternClauses()) == 1 && len(p.stm.GroupBy()) == 0 && len(p.stm.HavingExpression()) == 0 {
			stmLimit = p.stm.Limit()
		}
		tbl, err := simpleFetch(ctx, p.grfs, cls, lo, stmLimit, p.chanSize, p.tracer)
		if err != nil {
			return false, err
		}
		if len(p.tbl.Bindings()) > 0 {
			return false, p.tbl.DotProduct(tbl)
		}
		return false, p.tbl.AppendTable(tbl)
	}
	tracer.Trace(p.tracer, func() []string {
		return []string{fmt.Sprintf("Some clause binding exist %v/%v", cls.Bindings(), existing)}
	})
	// Some bindings already solved: specialize the clause per existing row.
	return false, p.specifyClauseWithTable(ctx, cls, lo)
}
go
{ "resource": "" }
q12381
getBoundValueForComponent
train
func getBoundValueForComponent(r table.Row, bs []string) *table.Cell { var cs []*table.Cell for _, b := range bs { if v, ok := r[b]; ok { cs = append(cs, v) } } if len(cs) == 1 || len(cs) == 2 && reflect.DeepEqual(cs[0], cs[1]) { return cs[0] } return nil }
go
{ "resource": "" }
q12382
specifyClauseWithTable
train
// specifyClauseWithTable re-evaluates the clause once per row of the current
// working table, concurrently. The table is truncated first and refilled by
// addSpecifiedData from each goroutine.
func (p *queryPlan) specifyClauseWithTable(ctx context.Context, cls *semantic.GraphClause, lo *storage.LookupOptions) error {
	rws := p.tbl.Rows()
	p.tbl.Truncate()
	var (
		gErr error // last error observed by any row goroutine ("last wins")
		mu   sync.Mutex
		wg   sync.WaitGroup
	)
	// NOTE(review): one goroutine per row is unbounded fan-out; confirm row
	// counts stay small enough that this does not need a worker pool.
	for _, tmpRow := range rws {
		wg.Add(1)
		go func(r table.Row) {
			defer wg.Done()
			// Copy the clause so each goroutine can specialize it independently.
			var tmpCls = *cls
			// The table manipulations are now thread safe.
			if err := p.addSpecifiedData(ctx, r, &tmpCls, lo); err != nil {
				mu.Lock()
				gErr = err
				mu.Unlock()
			}
		}(tmpRow)
	}
	wg.Wait()
	return gErr
}
go
{ "resource": "" }
q12383
cellToObject
train
func cellToObject(c *table.Cell) (*triple.Object, error) { if c == nil { return nil, errors.New("cannot create an object out of and empty cell") } if c.N != nil { return triple.NewNodeObject(c.N), nil } if c.P != nil { return triple.NewPredicateObject(c.P), nil } if c.L != nil { return triple.NewLiteralObject(c.L), nil } if c.S != nil { l, err := literal.DefaultBuilder().Parse(fmt.Sprintf(`"%s"^^type:string`, *c.S)) if err != nil { return nil, err } return triple.NewLiteralObject(l), nil } return nil, fmt.Errorf("invalid cell %v", c) }
go
{ "resource": "" }
q12384
processGraphPattern
train
// processGraphPattern evaluates every graph clause of the statement in order,
// accumulating intermediate bindings in p.tbl. If any clause turns out to be
// unresolvable the working table is truncated and the query yields no rows.
func (p *queryPlan) processGraphPattern(ctx context.Context, lo *storage.LookupOptions) error {
	tracer.Trace(p.tracer, func() []string {
		var res []string
		for i, cls := range p.cls {
			res = append(res, fmt.Sprintf("Clause %d to process: %v", i, cls))
		}
		return res
	})
	for i, c := range p.cls {
		// Shadow the loop variables so the traced closure captures stable copies.
		i, cls := i, *c
		tracer.Trace(p.tracer, func() []string {
			return []string{fmt.Sprintf("Processing clause %d: %v", i, &cls)}
		})
		// The current planner is based on naively executing clauses by
		// specificity.
		unresolvable, err := p.processClause(ctx, &cls, lo)
		if err != nil {
			return err
		}
		if unresolvable {
			// A fully specified clause failed to match: empty result set.
			p.tbl.Truncate()
			return nil
		}
	}
	return nil
}
go
{ "resource": "" }
q12385
orderBy
train
func (p *queryPlan) orderBy() { order := p.stm.OrderByConfig() if len(order) <= 0 { return } tracer.Trace(p.tracer, func() []string { return []string{"Ordering by " + order.String()} }) p.tbl.Sort(order) }
go
{ "resource": "" }
q12386
having
train
// having applies the statement's HAVING clause, if any, by filtering the
// result table. Rows for which the evaluator returns false are removed
// (Filter drops rows where the callback returns true). The first evaluation
// error encountered is returned after filtering completes.
func (p *queryPlan) having() error {
	if p.stm.HasHavingClause() {
		tracer.Trace(p.tracer, func() []string {
			return []string{"Having filtering"}
		})
		eval := p.stm.HavingEvaluator()
		ok := true
		var eErr error
		p.tbl.Filter(func(r table.Row) bool {
			b, err := eval.Evaluate(r)
			if err != nil {
				// Record the error; filtering still continues for the
				// remaining rows.
				ok, eErr = false, err
			}
			return !b
		})
		if !ok {
			return eErr
		}
	}
	return nil
}
go
{ "resource": "" }
q12387
limit
train
func (p *queryPlan) limit() { if p.stm.IsLimitSet() { tracer.Trace(p.tracer, func() []string { return []string{"Limit results to " + strconv.Itoa(int(p.stm.Limit()))} }) p.tbl.Limit(p.stm.Limit()) } }
go
{ "resource": "" }
q12388
Execute
train
// Execute runs the full query plan: it caches the input graph instances,
// evaluates the graph pattern, then applies projection/grouping, ordering,
// HAVING filtering, and LIMIT, returning the final result table. An empty
// result is rebuilt with the statement's output bindings so the header is
// always correct.
func (p *queryPlan) Execute(ctx context.Context) (*table.Table, error) {
	// Fetch and cache graph instances.
	tracer.Trace(p.tracer, func() []string {
		return []string{fmt.Sprintf("Caching graph instances for graphs %v", p.stm.InputGraphNames())}
	})
	if err := p.stm.Init(ctx, p.store); err != nil {
		return nil, err
	}
	p.grfs = p.stm.InputGraphs()
	// Retrieve the data.
	lo := p.stm.GlobalLookupOptions()
	tracer.Trace(p.tracer, func() []string {
		return []string{"Setting global lookup options to " + lo.String()}
	})
	if err := p.processGraphPattern(ctx, lo); err != nil {
		return nil, err
	}
	if err := p.projectAndGroupBy(); err != nil {
		return nil, err
	}
	p.orderBy()
	err := p.having()
	if err != nil {
		return nil, err
	}
	p.limit()
	if p.tbl.NumRows() == 0 {
		// Correct the bindings.
		t, err := table.New(p.stm.OutputBindings())
		if err != nil {
			return nil, err
		}
		p.tbl = t
	}
	return p.tbl, nil
}
go
{ "resource": "" }
q12389
Execute
train
func (p *showPlan) Execute(ctx context.Context) (*table.Table, error) { t, err := table.New([]string{"?graph_id"}) if err != nil { return nil, err } errs := make(chan error) names := make(chan string) go func() { errs <- p.store.GraphNames(ctx, names) close(errs) }() for name := range names { id := name t.AddRow(table.Row{ "?graph_id": &table.Cell{ S: &id, }, }) } if <-errs != nil { return nil, err } return t, nil }
go
{ "resource": "" }
q12390
New
train
func New(ctx context.Context, store storage.Store, stm *semantic.Statement, chanSize, bulkSize int, w io.Writer) (Executor, error) { switch stm.Type() { case semantic.Query: return newQueryPlan(ctx, store, stm, chanSize, w) case semantic.Insert: return &insertPlan{ stm: stm, store: store, tracer: w, }, nil case semantic.Delete: return &deletePlan{ stm: stm, store: store, tracer: w, }, nil case semantic.Create: return &createPlan{ stm: stm, store: store, tracer: w, }, nil case semantic.Drop: return &dropPlan{ stm: stm, store: store, tracer: w, }, nil case semantic.Construct: qp, _ := newQueryPlan(ctx, store, stm, chanSize, w) return &constructPlan{ stm: stm, store: store, tracer: w, bulkSize: bulkSize, queryPlan: qp, construct: true, }, nil case semantic.Deconstruct: qp, _ := newQueryPlan(ctx, store, stm, chanSize, w) return &constructPlan{ stm: stm, store: store, tracer: w, bulkSize: bulkSize, queryPlan: qp, construct: false, }, nil case semantic.Show: return &showPlan{ stm: stm, store: store, tracer: w, }, nil default: return nil, fmt.Errorf("planner.New: unknown statement type in statement %v", stm) } }
go
{ "resource": "" }
q12391
NewLLk
train
// NewLLk creates an LL(k) token reader over the given input, pre-filling the
// lookahead window with k+1 tokens (the current token plus k of lookahead).
func NewLLk(input string, k int) *LLk {
	// Lexer buffer sized 2*k to keep some slack beyond the k+1 tokens the
	// window needs. (Previous comment claimed "+2", which did not match the code.)
	c := lexer.New(input, 2*k)
	l := &LLk{
		k: k,
		c: c,
	}
	for i := 0; i < k+1; i++ {
		appendNextToken(l)
	}
	return l
}
go
{ "resource": "" }
q12392
appendNextToken
train
func appendNextToken(l *LLk) { for t := range l.c { l.tkns = append(l.tkns, t) return } l.tkns = append(l.tkns, lexer.Token{Type: lexer.ItemEOF}) }
go
{ "resource": "" }
q12393
Peek
train
func (l *LLk) Peek(k int) (*lexer.Token, error) { if k > l.k { return nil, fmt.Errorf("grammar.LLk: cannot look ahead %v beyond defined %v", k, l.k) } if k <= 0 { return nil, fmt.Errorf("grammar.LLk: invalid look ahead value %v", k) } return &l.tkns[k], nil }
go
{ "resource": "" }
q12394
CanAccept
train
// CanAccept reports whether the current token has the given type, without
// consuming it.
func (l *LLk) CanAccept(tt lexer.TokenType) bool {
	current := l.tkns[0].Type
	return current == tt
}
go
{ "resource": "" }
q12395
Consume
train
func (l *LLk) Consume(tt lexer.TokenType) bool { if l.tkns[0].Type != tt { return false } l.tkns = l.tkns[1:] appendNextToken(l) return true }
go
{ "resource": "" }
q12396
TrackDuration
train
func TrackDuration(f func() error) (time.Duration, error) { ts := timeNow() err := f() d := timeNow().Sub(ts) return d, err }
go
{ "resource": "" }
q12397
RepetitionDurationStats
train
func RepetitionDurationStats(reps int, setup, f, teardown func() error) (time.Duration, time.Duration, error) { if reps < 1 { return time.Duration(0), 0, fmt.Errorf("repetions need to be %d >= 1", reps) } if setup == nil { return time.Duration(0), 0, errors.New("setup function is required") } if f == nil { return time.Duration(0), 0, errors.New("benchmark function is required") } if teardown == nil { return time.Duration(0), 0, errors.New("teardown function is required") } var durations []time.Duration for i := 0; i < reps; i++ { if err := setup(); err != nil { return time.Duration(0), 0, err } d, err := TrackDuration(f) if err != nil { return 0, 0, err } durations = append(durations, d) if err := teardown(); err != nil { return time.Duration(0), 0, err } } mean := int64(0) for _, d := range durations { mean += int64(d) } mean /= int64(len(durations)) dev, expSquare := int64(0), mean*mean for _, d := range durations { dev = int64(d)*int64(d) - expSquare } dev = int64(math.Sqrt(math.Abs(float64(dev)))) return time.Duration(mean), time.Duration(dev), nil }
go
{ "resource": "" }
q12398
RunBenchmarkBatterySequentially
train
func RunBenchmarkBatterySequentially(entries []*BenchEntry) []*BenchResult { var res []*BenchResult for _, entry := range entries { m, d, err := RepetitionDurationStats(entry.Reps, entry.Setup, entry.F, entry.TearDown) res = append(res, &BenchResult{ BatteryID: entry.BatteryID, ID: entry.ID, Triples: entry.Triples, Err: err, Mean: m, StdDev: d, }) } return res }
go
{ "resource": "" }
q12399
RunBenchmarkBatteryConcurrently
train
// RunBenchmarkBatteryConcurrently runs every benchmark entry in its own
// goroutine and collects the results. Result order is nondeterministic;
// per-entry errors are recorded in BenchResult.Err rather than aborting the
// batch. Note that concurrent entries share machine resources, which can
// skew individual timings.
func RunBenchmarkBatteryConcurrently(entries []*BenchEntry) []*BenchResult {
	var (
		mu  sync.Mutex // guards res
		wg  sync.WaitGroup
		res []*BenchResult
	)
	for _, entry := range entries {
		wg.Add(1)
		go func(entry *BenchEntry) {
			m, d, err := RepetitionDurationStats(entry.Reps, entry.Setup, entry.F, entry.TearDown)
			mu.Lock()
			defer mu.Unlock()
			defer wg.Done()
			res = append(res, &BenchResult{
				BatteryID: entry.BatteryID,
				ID:        entry.ID,
				Triples:   entry.Triples,
				Err:       err,
				Mean:      m,
				StdDev:    d,
			})
		}(entry)
	}
	wg.Wait()
	return res
}
go
{ "resource": "" }