id
stringlengths
95
167
text
stringlengths
69
15.9k
title
stringclasses
1 value
https://github.com/gobuffalo/buffalo/blob/7f360181f4ccd79dcc9dcea2c904a4801f194f04/buffalo/cmd/fix/deprecations.go#L12-L58
func DeprecrationsCheck(r *Runner) error { fmt.Println("~~~ Checking for deprecations ~~~") b, err := ioutil.ReadFile("main.go") if err != nil { return err } if bytes.Contains(b, []byte("app.Start")) { r.Warnings = append(r.Warnings, "app.Start has been removed in v0.11.0. Use app.Serve Instead. [main.go]") } return filepath.Walk(filepath.Join(r.App.Root, "actions"), func(path string, info os.FileInfo, _ error) error { if info.IsDir() { return nil } if filepath.Ext(path) != ".go" { return nil } b, err := ioutil.ReadFile(path) if err != nil { return err } if bytes.Contains(b, []byte("Websocket()")) { r.Warnings = append(r.Warnings, fmt.Sprintf("buffalo.Context#Websocket has been deprecated in v0.11.0, and removed in v0.12.0. Use github.com/gorilla/websocket directly. [%s]", path)) } if bytes.Contains(b, []byte("meta.Name")) { r.Warnings = append(r.Warnings, fmt.Sprintf("meta.Name has been deprecated in v0.11.0, and removed in v0.12.0. Use github.com/markbates/inflect.Name directly. [%s]", path)) } if bytes.Contains(b, []byte("generators.Find(")) { r.Warnings = append(r.Warnings, fmt.Sprintf("generators.Find(string) has been deprecated in v0.11.0, and removed in v0.12.0. Use generators.FindByBox() instead. [%s]", path)) } // i18n middleware changes in v0.11.1 if bytes.Contains(b, []byte("T.CookieName")) { b = bytes.Replace(b, []byte("T.CookieName"), []byte("T.LanguageExtractorOptions[\"CookieName\"]"), -1) } if bytes.Contains(b, []byte("T.SessionName")) { b = bytes.Replace(b, []byte("T.SessionName"), []byte("T.LanguageExtractorOptions[\"SessionName\"]"), -1) } if bytes.Contains(b, []byte("T.LanguageFinder=")) || bytes.Contains(b, []byte("T.LanguageFinder ")) { r.Warnings = append(r.Warnings, fmt.Sprintf("i18n.Translator#LanguageFinder has been deprecated in v0.11.1, and has been removed in v0.12.0. Use i18n.Translator#LanguageExtractors instead. [%s]", path)) } ioutil.WriteFile(path, b, 0664) return nil }) }
https://github.com/kubernetes/test-infra/blob/8125fbda10178887be5dff9e901d6a0a519b67bc/prow/plugins/lgtm/lgtm.go#L504-L514
func getChangedFiles(gc githubClient, org, repo string, number int) ([]string, error) { changes, err := gc.GetPullRequestChanges(org, repo, number) if err != nil { return nil, fmt.Errorf("cannot get PR changes for %s/%s#%d", org, repo, number) } var filenames []string for _, change := range changes { filenames = append(filenames, change.Filename) } return filenames, nil }
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/etcdserver/api/v2store/watcher_hub.go#L59-L116
// watch registers a watcher on key at the given since-index. If the
// EventHistory scan already holds a matching event, a clone (with EtcdIndex
// set to storeIndex) is delivered immediately on the watcher's buffered
// channel and no list entry is made. Otherwise the watcher is appended to
// the per-key list (created on demand). The watcher's remove func is
// idempotent: it unlinks the list element, decrements the hub count, and
// deletes the key's list once it empties. All list/map mutation happens
// under wh.mutex.
func (wh *watcherHub) watch(key string, recursive, stream bool, index, storeIndex uint64) (Watcher, *v2error.Error) { reportWatchRequest() event, err := wh.EventHistory.scan(key, recursive, index) if err != nil { err.Index = storeIndex return nil, err } w := &watcher{ eventChan: make(chan *Event, 100), // use a buffered channel recursive: recursive, stream: stream, sinceIndex: index, startIndex: storeIndex, hub: wh, } wh.mutex.Lock() defer wh.mutex.Unlock() // If the event exists in the known history, append the EtcdIndex and return immediately if event != nil { ne := event.Clone() ne.EtcdIndex = storeIndex w.eventChan <- ne return w, nil } l, ok := wh.watchers[key] var elem *list.Element if ok { // add the new watcher to the back of the list elem = l.PushBack(w) } else { // create a new list and add the new watcher l = list.New() elem = l.PushBack(w) wh.watchers[key] = l } w.remove = func() { if w.removed { // avoid removing it twice return } w.removed = true l.Remove(elem) atomic.AddInt64(&wh.count, -1) reportWatcherRemoved() if l.Len() == 0 { delete(wh.watchers, key) } } atomic.AddInt64(&wh.count, 1) reportWatcherAdded() return w, nil }
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/pkg/transport/timeout_listener.go#L25-L39
func NewTimeoutListener(addr string, scheme string, tlsinfo *TLSInfo, rdtimeoutd, wtimeoutd time.Duration) (net.Listener, error) { ln, err := newListener(addr, scheme) if err != nil { return nil, err } ln = &rwTimeoutListener{ Listener: ln, rdtimeoutd: rdtimeoutd, wtimeoutd: wtimeoutd, } if ln, err = wrapTLS(scheme, tlsinfo, ln); err != nil { return nil, err } return ln, nil }
https://github.com/hashicorp/raft/blob/773bcaa2009bf059c5c06457b9fccd156d5e91e7/file_snapshot.go#L349-L365
func (f *FileSnapshotStore) ReapSnapshots() error { snapshots, err := f.getSnapshots() if err != nil { f.logger.Printf("[ERR] snapshot: Failed to get snapshots: %v", err) return err } for i := f.retain; i < len(snapshots); i++ { path := filepath.Join(f.path, snapshots[i].ID) f.logger.Printf("[INFO] snapshot: reaping snapshot %v", path) if err := os.RemoveAll(path); err != nil { f.logger.Printf("[ERR] snapshot: Failed to reap snapshot %v: %v", path, err) return err } } return nil }
https://github.com/hashicorp/raft/blob/773bcaa2009bf059c5c06457b9fccd156d5e91e7/file_snapshot.go#L482-L509
func (s *FileSnapshotSink) writeMeta() error { // Open the meta file metaPath := filepath.Join(s.dir, metaFilePath) fh, err := os.Create(metaPath) if err != nil { return err } defer fh.Close() // Buffer the file IO buffered := bufio.NewWriter(fh) // Write out as JSON enc := json.NewEncoder(buffered) if err := enc.Encode(&s.meta); err != nil { return err } if err = buffered.Flush(); err != nil { return err } if err = fh.Sync(); err != nil { return err } return nil }
https://github.com/kubernetes/test-infra/blob/8125fbda10178887be5dff9e901d6a0a519b67bc/prow/config/secret/agent.go#L95-L99
func (a *Agent) setSecret(secretPath string, secretValue []byte) { a.Lock() defer a.Unlock() a.secretsMap[secretPath] = secretValue }
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/proxy/grpcproxy/leader.go#L109-L113
func (l *leader) lostNotify() <-chan struct{} { l.mu.RLock() defer l.mu.RUnlock() return l.leaderc }
https://github.com/kubernetes/test-infra/blob/8125fbda10178887be5dff9e901d6a0a519b67bc/prow/config/tide.go#L419-L430
func (cp *TideContextPolicy) Validate() error { if inter := sets.NewString(cp.RequiredContexts...).Intersection(sets.NewString(cp.OptionalContexts...)); inter.Len() > 0 { return fmt.Errorf("contexts %s are defined as required and optional", strings.Join(inter.List(), ", ")) } if inter := sets.NewString(cp.RequiredContexts...).Intersection(sets.NewString(cp.RequiredIfPresentContexts...)); inter.Len() > 0 { return fmt.Errorf("contexts %s are defined as required and required if present", strings.Join(inter.List(), ", ")) } if inter := sets.NewString(cp.OptionalContexts...).Intersection(sets.NewString(cp.RequiredIfPresentContexts...)); inter.Len() > 0 { return fmt.Errorf("contexts %s are defined as optional and required if present", strings.Join(inter.List(), ", ")) } return nil }
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/etcdserver/api/v3election/v3electionpb/gw/v3election.pb.gw.go#L132-L134
// RegisterElectionHandler registers the Election HTTP handlers on mux,
// routing requests over the given gRPC client connection.
func RegisterElectionHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.ClientConn) error {
	client := v3electionpb.NewElectionClient(conn)
	return RegisterElectionHandlerClient(ctx, mux, client)
}
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/mvcc/key_index.go#L176-L218
// since returns all revisions of this keyIndex whose revision is at or after
// rev. It panics (via lg or plog) on an empty keyIndex. The backward scan
// first locates the newest generation created at or before {rev, 0}; it then
// walks forward through the remaining generations collecting qualifying
// revisions, and when several share the same main revision it keeps only the
// last seen (highest sub) by overwriting the previous slice entry in place.
func (ki *keyIndex) since(lg *zap.Logger, rev int64) []revision { if ki.isEmpty() { if lg != nil { lg.Panic( "'since' got an unexpected empty keyIndex", zap.String("key", string(ki.key)), ) } else { plog.Panicf("store.keyindex: unexpected get on empty keyIndex %s", string(ki.key)) } } since := revision{rev, 0} var gi int // find the generations to start checking for gi = len(ki.generations) - 1; gi > 0; gi-- { g := ki.generations[gi] if g.isEmpty() { continue } if since.GreaterThan(g.created) { break } } var revs []revision var last int64 for ; gi < len(ki.generations); gi++ { for _, r := range ki.generations[gi].revs { if since.GreaterThan(r) { continue } if r.main == last { // replace the revision with a new one that has higher sub value, // because the original one should not be seen by external revs[len(revs)-1] = r continue } revs = append(revs, r) last = r.main } } return revs }
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/etcdserver/api/v2store/node.go#L206-L256
// Remove deletes this node from the store. A key-value node is unlinked from
// its parent's Children map, reported via callback, and (when not permanent)
// dropped from the TTL heap. A directory requires dir=true, and a non-empty
// directory additionally requires recursive=true; children are removed
// recursively (with dir/recursive forced true) before the directory unlinks
// itself the same way. The parent-identity check (Children[name] == n)
// guards against removing a sibling that replaced this node.
func (n *node) Remove(dir, recursive bool, callback func(path string)) *v2error.Error { if !n.IsDir() { // key-value pair _, name := path.Split(n.Path) // find its parent and remove the node from the map if n.Parent != nil && n.Parent.Children[name] == n { delete(n.Parent.Children, name) } if callback != nil { callback(n.Path) } if !n.IsPermanent() { n.store.ttlKeyHeap.remove(n) } return nil } if !dir { // cannot delete a directory without dir set to true return v2error.NewError(v2error.EcodeNotFile, n.Path, n.store.CurrentIndex) } if len(n.Children) != 0 && !recursive { // cannot delete a directory if it is not empty and the operation // is not recursive return v2error.NewError(v2error.EcodeDirNotEmpty, n.Path, n.store.CurrentIndex) } for _, child := range n.Children { // delete all children child.Remove(true, true, callback) } // delete self _, name := path.Split(n.Path) if n.Parent != nil && n.Parent.Children[name] == n { delete(n.Parent.Children, name) if callback != nil { callback(n.Path) } if !n.IsPermanent() { n.store.ttlKeyHeap.remove(n) } } return nil }
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/contrib/recipes/rwmutex.go#L68-L86
func (rwm *RWMutex) waitOnLastRev(pfx string) (bool, error) { client := rwm.s.Client() // get key that's blocking myKey opts := append(v3.WithLastRev(), v3.WithMaxModRev(rwm.myKey.Revision()-1)) lastKey, err := client.Get(rwm.ctx, pfx, opts...) if err != nil { return false, err } if len(lastKey.Kvs) == 0 { return true, nil } // wait for release on blocking key _, err = WaitEvents( client, string(lastKey.Kvs[0].Key), rwm.myKey.Revision(), []mvccpb.Event_EventType{mvccpb.DELETE}) return false, err }
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/functional/rpcpb/member.go#L133-L153
func (m *Member) CheckCompact(rev int64) error { cli, err := m.CreateEtcdClient() if err != nil { return fmt.Errorf("%v (%q)", err, m.EtcdClientEndpoint) } defer cli.Close() ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) wch := cli.Watch(ctx, "\x00", clientv3.WithFromKey(), clientv3.WithRev(rev-1)) wr, ok := <-wch cancel() if !ok { return fmt.Errorf("watch channel terminated (endpoint %q)", m.EtcdClientEndpoint) } if wr.CompactRevision != rev { return fmt.Errorf("got compact revision %v, wanted %v (endpoint %q)", wr.CompactRevision, rev, m.EtcdClientEndpoint) } return nil }
https://github.com/kubernetes/test-infra/blob/8125fbda10178887be5dff9e901d6a0a519b67bc/prow/external-plugins/needs-rebase/plugin/plugin.go#L162-L181
func takeAction(log *logrus.Entry, ghc githubClient, org, repo string, num int, author string, hasLabel, mergeable bool) error { if !mergeable && !hasLabel { if err := ghc.AddLabel(org, repo, num, labels.NeedsRebase); err != nil { log.WithError(err).Errorf("Failed to add %q label.", labels.NeedsRebase) } msg := plugins.FormatSimpleResponse(author, needsRebaseMessage) return ghc.CreateComment(org, repo, num, msg) } else if mergeable && hasLabel { // remove label and prune comment if err := ghc.RemoveLabel(org, repo, num, labels.NeedsRebase); err != nil { log.WithError(err).Errorf("Failed to remove %q label.", labels.NeedsRebase) } botName, err := ghc.BotName() if err != nil { return err } return ghc.DeleteStaleComments(org, repo, num, nil, shouldPrune(botName)) } return nil }
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/etcdserver/api/v2store/store.go#L113-L117
func (s *store) Index() uint64 { s.worldLock.RLock() defer s.worldLock.RUnlock() return s.CurrentIndex }
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/etcdserver/api/v2store/node.go#L110-L116
func (n *node) Read() (string, *v2error.Error) { if n.IsDir() { return "", v2error.NewError(v2error.EcodeNotFile, "", n.store.CurrentIndex) } return n.Value, nil }
https://github.com/hashicorp/raft/blob/773bcaa2009bf059c5c06457b9fccd156d5e91e7/raft.go#L1466-L1472
func (r *Raft) setCurrentTerm(t uint64) { // Persist to disk first if err := r.stable.SetUint64(keyCurrentTerm, t); err != nil { panic(fmt.Errorf("failed to save current term: %v", err)) } r.raftState.setCurrentTerm(t) }
https://github.com/kubernetes/test-infra/blob/8125fbda10178887be5dff9e901d6a0a519b67bc/boskos/ranch/ranch.go#L319-L328
func (r *Ranch) SyncConfig(config string) error { resources, err := ParseConfig(config) if err != nil { return err } if err := r.Storage.SyncResources(resources); err != nil { return err } return nil }
https://github.com/gobuffalo/buffalo/blob/7f360181f4ccd79dcc9dcea2c904a4801f194f04/genny/ci/ci.go#L13-L64
// New builds a generator that emits a CI configuration file for the provider
// named in opts: "travis"/"travis-ci" or "gitlab"/"gitlab-ci" (the latter
// picking the pop or no-pop template based on opts.App.WithPop); any other
// provider is an error. The chosen template is rendered with "opts" and a
// "testDbUrl" derived from opts.DBType (postgres, mysql, or empty), and the
// "-no-pop" suffix plus the "-dot-" prefix are stripped via transformers.
func New(opts *Options) (*genny.Generator, error) { g := genny.New() if err := opts.Validate(); err != nil { return g, err } g.Transformer(genny.Replace("-no-pop", "")) g.Transformer(genny.Dot()) box := packr.New("buffalo:genny:ci", "../ci/templates") var fname string switch opts.Provider { case "travis", "travis-ci": fname = "-dot-travis.yml.tmpl" case "gitlab", "gitlab-ci": if opts.App.WithPop { fname = "-dot-gitlab-ci.yml.tmpl" } else { fname = "-dot-gitlab-ci-no-pop.yml.tmpl" } default: return g, fmt.Errorf("could not find a template for %s", opts.Provider) } f, err := box.FindString(fname) if err != nil { return g, err } g.File(genny.NewFileS(fname, f)) data := map[string]interface{}{ "opts": opts, } if opts.DBType == "postgres" { data["testDbUrl"] = "postgres://postgres:postgres@postgres:5432/" + opts.App.Name.File().String() + "_test?sslmode=disable" } else if opts.DBType == "mysql" { data["testDbUrl"] = "mysql://root:root@(mysql:3306)/" + opts.App.Name.File().String() + "_test?parseTime=true&multiStatements=true&readTimeout=1s" } else { data["testDbUrl"] = "" } helpers := template.FuncMap{} t := gogen.TemplateTransformer(data, helpers) g.Transformer(t) return g, nil }
https://github.com/kubernetes/test-infra/blob/8125fbda10178887be5dff9e901d6a0a519b67bc/prow/client/clientset/versioned/typed/prowjobs/v1/prowjob.go#L100-L109
func (c *prowJobs) Create(prowJob *v1.ProwJob) (result *v1.ProwJob, err error) { result = &v1.ProwJob{} err = c.client.Post(). Namespace(c.ns). Resource("prowjobs"). Body(prowJob). Do(). Into(result) return }
https://github.com/kubernetes/test-infra/blob/8125fbda10178887be5dff9e901d6a0a519b67bc/velodrome/fetcher/conversion.go#L114-L130
func newLabels(issueID int, gLabels []github.Label, repository string) ([]sql.Label, error) { labels := []sql.Label{} repository = strings.ToLower(repository) for _, label := range gLabels { if label.Name == nil { return nil, fmt.Errorf("Label is missing name field") } labels = append(labels, sql.Label{ IssueID: strconv.Itoa(issueID), Name: *label.Name, Repository: repository, }) } return labels, nil }
https://github.com/gobuffalo/buffalo/blob/7f360181f4ccd79dcc9dcea2c904a4801f194f04/genny/mail/mail.go#L12-L41
// New assembles the generator group for a new mailer. Unless opts.SkipInit
// is set it first adds the mailer-init generator; it then adds a generator
// that renders mailers/<name>.go and templates/mail/<name>.html from the
// embedded templates, exposing opts to the templates under "opts".
func New(opts *Options) (*genny.Group, error) { gg := &genny.Group{} if err := opts.Validate(); err != nil { return gg, err } if !opts.SkipInit { g, err := initGenerator(opts) if err != nil { return gg, err } gg.Add(g) } g := genny.New() h := template.FuncMap{} data := map[string]interface{}{ "opts": opts, } t := gogen.TemplateTransformer(data, h) g.Transformer(t) fn := opts.Name.File().String() g.File(genny.NewFileS("mailers/"+fn+".go.tmpl", mailerTmpl)) g.File(genny.NewFileS("templates/mail/"+fn+".html.tmpl", mailTmpl)) gg.Add(g) return gg, nil }
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/pkg/transport/listener.go#L324-L330
func (info TLSInfo) cafiles() []string { cs := make([]string, 0) if info.TrustedCAFile != "" { cs = append(cs, info.TrustedCAFile) } return cs }
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/raft/rawnode.go#L285-L287
// TransferLeader steps a MsgTransferLeader message naming transferee as the
// desired new leader; any step error is deliberately discarded.
func (rn *RawNode) TransferLeader(transferee uint64) {
	msg := pb.Message{Type: pb.MsgTransferLeader, From: transferee}
	_ = rn.raft.Step(msg)
}
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/pkg/transport/listener.go#L40-L45
func NewListener(addr, scheme string, tlsinfo *TLSInfo) (l net.Listener, err error) { if l, err = newListener(addr, scheme); err != nil { return nil, err } return wrapTLS(scheme, tlsinfo, l) }
https://github.com/kubernetes/test-infra/blob/8125fbda10178887be5dff9e901d6a0a519b67bc/prow/jenkins/jenkins.go#L130-L132
// IsSuccess reports whether the build finished and its result equals the
// success constant.
func (jb *Build) IsSuccess() bool {
	if jb.Result == nil {
		return false
	}
	return *jb.Result == success
}
https://github.com/hashicorp/raft/blob/773bcaa2009bf059c5c06457b9fccd156d5e91e7/configuration.go#L147-L154
func hasVote(configuration Configuration, id ServerID) bool { for _, server := range configuration.Servers { if server.ID == id { return server.Suffrage == Voter } } return false }
https://github.com/kubernetes/test-infra/blob/8125fbda10178887be5dff9e901d6a0a519b67bc/prow/config/agent.go#L116-L120
func (ca *Agent) Config() *Config { ca.mut.RLock() defer ca.mut.RUnlock() return ca.c }
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/clientv3/retry_interceptor.go#L346-L350
// withBackoff returns a retry option that installs bf as the backoff
// function used between retry attempts.
func withBackoff(bf backoffFunc) retryOption {
	apply := func(o *options) {
		o.backoffFunc = bf
	}
	return retryOption{applyFunc: apply}
}
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/pkg/netutil/routes.go#L31-L33
// GetDefaultInterfaces is not supported on this platform; it always returns
// an error naming the GOOS/GOARCH pair.
func GetDefaultInterfaces() (map[string]uint8, error) {
	err := fmt.Errorf("default host not supported on %s_%s", runtime.GOOS, runtime.GOARCH)
	return nil, err
}
https://github.com/gobuffalo/buffalo/blob/7f360181f4ccd79dcc9dcea2c904a4801f194f04/servers/simple.go#L21-L24
func (s *Simple) Start(c context.Context, h http.Handler) error { s.Handler = h return s.ListenAndServe() }
https://github.com/dgraph-io/badger/blob/6b796b3ebec3ff006fcb1b425836cd784651e9fd/y/y.go#L171-L180
func FixedDuration(d time.Duration) string { str := fmt.Sprintf("%02ds", int(d.Seconds())%60) if d >= time.Minute { str = fmt.Sprintf("%02dm", int(d.Minutes())%60) + str } if d >= time.Hour { str = fmt.Sprintf("%02dh", int(d.Hours())) + str } return str }
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/raft/rawnode.go#L72-L116
// NewRawNode instantiates a RawNode from config; a zero config.ID panics.
// When the backing Storage is empty (LastIndex == 0) this is a fresh start:
// the node becomes a follower at term 1, appends one ConfChangeAddNode entry
// per peer (index i+1), marks them committed, and adds each peer; a
// non-empty log means an existing node is being restored and peers are
// ignored. prevSoftSt/prevHardSt are set last so the first Ready is diffed
// against the correct baseline (emptyState for a fresh start).
func NewRawNode(config *Config, peers []Peer) (*RawNode, error) { if config.ID == 0 { panic("config.ID must not be zero") } r := newRaft(config) rn := &RawNode{ raft: r, } lastIndex, err := config.Storage.LastIndex() if err != nil { panic(err) // TODO(bdarnell) } // If the log is empty, this is a new RawNode (like StartNode); otherwise it's // restoring an existing RawNode (like RestartNode). // TODO(bdarnell): rethink RawNode initialization and whether the application needs // to be able to tell us when it expects the RawNode to exist. if lastIndex == 0 { r.becomeFollower(1, None) ents := make([]pb.Entry, len(peers)) for i, peer := range peers { cc := pb.ConfChange{Type: pb.ConfChangeAddNode, NodeID: peer.ID, Context: peer.Context} data, err := cc.Marshal() if err != nil { panic("unexpected marshal error") } ents[i] = pb.Entry{Type: pb.EntryConfChange, Term: 1, Index: uint64(i + 1), Data: data} } r.raftLog.append(ents...) r.raftLog.committed = uint64(len(ents)) for _, peer := range peers { r.addNode(peer.ID) } } // Set the initial hard and soft states after performing all initialization. rn.prevSoftSt = r.softState() if lastIndex == 0 { rn.prevHardSt = emptyState } else { rn.prevHardSt = r.hardState() } return rn, nil }
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/etcdserver/api/rafthttp/transport.go#L254-L266
func (t *Transport) MendPeer(id types.ID) { t.mu.RLock() p, pok := t.peers[id] g, gok := t.remotes[id] t.mu.RUnlock() if pok { p.(Pausable).Resume() } if gok { g.Resume() } }
https://github.com/gobuffalo/buffalo/blob/7f360181f4ccd79dcc9dcea2c904a4801f194f04/default_context.go#L70-L74
func (d *DefaultContext) Set(key string, value interface{}) { d.moot.Lock() d.data[key] = value d.moot.Unlock() }
https://github.com/gobuffalo/buffalo/blob/7f360181f4ccd79dcc9dcea2c904a4801f194f04/genny/build/options.go#L49-L70
func (opts *Options) Validate() error { pwd, _ := os.Getwd() if opts.App.IsZero() { opts.App = meta.New(pwd) } if len(opts.Environment) == 0 { opts.Environment = "development" } if opts.BuildTime.IsZero() { opts.BuildTime = time.Now() } if len(opts.BuildVersion) == 0 { opts.BuildVersion = opts.BuildTime.Format(time.RFC3339) } if opts.rollback == nil { opts.rollback = &sync.Map{} } if len(opts.GoCommand) == 0 { opts.GoCommand = "build" } return nil }
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/pkg/transport/listener_tls.go#L108-L167
// acceptLoop accepts raw connections and performs the TLS handshake in a
// per-connection goroutine. In-flight conns are tracked in `pending` so the
// deferred shutdown path can close every unfinished handshake, wait for the
// goroutines via the WaitGroup, and then close donec. Handshake or
// certificate-check failures go to handshakeFailure; successful conns are
// handed to l.connc (conn is set to nil so the goroutine's deferred Close
// skips them) unless the loop's context has been canceled.
func (l *tlsListener) acceptLoop() { var wg sync.WaitGroup var pendingMu sync.Mutex pending := make(map[net.Conn]struct{}) ctx, cancel := context.WithCancel(context.Background()) defer func() { cancel() pendingMu.Lock() for c := range pending { c.Close() } pendingMu.Unlock() wg.Wait() close(l.donec) }() for { conn, err := l.Listener.Accept() if err != nil { l.err = err return } pendingMu.Lock() pending[conn] = struct{}{} pendingMu.Unlock() wg.Add(1) go func() { defer func() { if conn != nil { conn.Close() } wg.Done() }() tlsConn := conn.(*tls.Conn) herr := tlsConn.Handshake() pendingMu.Lock() delete(pending, conn) pendingMu.Unlock() if herr != nil { l.handshakeFailure(tlsConn, herr) return } if err := l.check(ctx, tlsConn); err != nil { l.handshakeFailure(tlsConn, err) return } select { case l.connc <- tlsConn: conn = nil case <-ctx.Done(): } }() } }
https://github.com/kubernetes/test-infra/blob/8125fbda10178887be5dff9e901d6a0a519b67bc/prow/github/fakegithub/fakegithub.go#L339-L357
func (f *FakeClient) GetFile(org, repo, file, commit string) ([]byte, error) { contents, ok := f.RemoteFiles[file] if !ok { return nil, fmt.Errorf("could not find file %s", file) } if commit == "" { if master, ok := contents["master"]; ok { return []byte(master), nil } return nil, fmt.Errorf("could not find file %s in master", file) } if content, ok := contents[commit]; ok { return []byte(content), nil } return nil, fmt.Errorf("could not find file %s with ref %s", file, commit) }
https://github.com/hashicorp/raft/blob/773bcaa2009bf059c5c06457b9fccd156d5e91e7/net_transport.go#L388-L411
func (n *NetworkTransport) genericRPC(id ServerID, target ServerAddress, rpcType uint8, args interface{}, resp interface{}) error { // Get a conn conn, err := n.getConnFromAddressProvider(id, target) if err != nil { return err } // Set a deadline if n.timeout > 0 { conn.conn.SetDeadline(time.Now().Add(n.timeout)) } // Send the RPC if err = sendRPC(conn, rpcType, args); err != nil { return err } // Decode the response canReturn, err := decodeResponse(conn, resp) if canReturn { n.returnConn(conn) } return err }
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/clientv3/mirror/syncer.go#L39-L41
// NewSyncer creates a Syncer that mirrors keys under prefix starting from
// revision rev.
func NewSyncer(c *clientv3.Client, prefix string, rev int64) Syncer {
	s := &syncer{c: c, prefix: prefix, rev: rev}
	return s
}
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/functional/rpcpb/member.go#L124-L130
func (m *Member) CreateEtcdClient(opts ...grpc.DialOption) (*clientv3.Client, error) { cfg, err := m.CreateEtcdClientConfig(opts...) if err != nil { return nil, err } return clientv3.New(*cfg) }
https://github.com/kubernetes/test-infra/blob/8125fbda10178887be5dff9e901d6a0a519b67bc/prow/github/fakegithub/fakegithub.go#L401-L407
func (f *FakeClient) ListCollaborators(org, repo string) ([]github.User, error) { result := make([]github.User, 0, len(f.Collaborators)) for _, login := range f.Collaborators { result = append(result, github.User{Login: login}) } return result, nil }
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/etcdctl/ctlv2/command/exec_watch_command.go#L31-L45
func NewExecWatchCommand() cli.Command { return cli.Command{ Name: "exec-watch", Usage: "watch a key for changes and exec an executable", ArgsUsage: "<key> <command> [args...]", Flags: []cli.Flag{ cli.IntFlag{Name: "after-index", Value: 0, Usage: "watch after the given index"}, cli.BoolFlag{Name: "recursive, r", Usage: "watch all values for key and child keys"}, }, Action: func(c *cli.Context) error { execWatchCommandFunc(c, mustNewKeyAPI(c)) return nil }, } }
https://github.com/kubernetes/test-infra/blob/8125fbda10178887be5dff9e901d6a0a519b67bc/velodrome/fetcher/conversion.go#L81-L111
func NewIssueEvent(gIssueEvent *github.IssueEvent, issueID int, repository string) (*sql.IssueEvent, error) { if gIssueEvent.ID == nil || gIssueEvent.Event == nil || gIssueEvent.CreatedAt == nil { return nil, fmt.Errorf("IssueEvent is missing mandatory field: %+v", gIssueEvent) } var label *string if gIssueEvent.Label != nil { label = gIssueEvent.Label.Name } var assignee *string if gIssueEvent.Assignee != nil { assignee = gIssueEvent.Assignee.Login } var actor *string if gIssueEvent.Actor != nil { actor = gIssueEvent.Actor.Login } return &sql.IssueEvent{ ID: itoa(*gIssueEvent.ID), Label: label, Event: *gIssueEvent.Event, EventCreatedAt: *gIssueEvent.CreatedAt, IssueID: strconv.Itoa(issueID), Assignee: assignee, Actor: actor, Repository: strings.ToLower(repository), }, nil }
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/etcdserver/storage.go#L52-L66
func (st *storage) SaveSnap(snap raftpb.Snapshot) error { walsnap := walpb.Snapshot{ Index: snap.Metadata.Index, Term: snap.Metadata.Term, } err := st.WAL.SaveSnapshot(walsnap) if err != nil { return err } err = st.Snapshotter.SaveSnap(snap) if err != nil { return err } return st.WAL.ReleaseLockTo(snap.Metadata.Index) }
https://github.com/kubernetes/test-infra/blob/8125fbda10178887be5dff9e901d6a0a519b67bc/velodrome/sql/mysql.go#L50-L66
func (config *MySQLConfig) CreateDatabase() (*gorm.DB, error) { db, err := gorm.Open("mysql", config.getDSN("")) if err != nil { return nil, err } db.Exec(fmt.Sprintf("CREATE DATABASE IF NOT EXISTS %v;", config.Db)) db.Close() db, err = gorm.Open("mysql", config.getDSN(config.Db)) err = db.AutoMigrate(&Assignee{}, &Issue{}, &IssueEvent{}, &Label{}, &Comment{}).Error if err != nil { return nil, err } return db, nil }
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/etcdserver/server.go#L724-L788
// start prepares the server for request handling: it defaults SnapshotCount
// and SnapshotCatchUpEntries when unset, initializes the wait lists,
// stop/done/stopping channels, context, and read notifier, logs the startup
// (including the cluster version when it is already known, otherwise
// "to_be_decided"), and finally launches the main s.run loop in a goroutine.
// NOTE(review): the final plog.Infof format string below appears to have
// been split by a stray line break in this copy — verify against upstream
// before building.
func (s *EtcdServer) start() { lg := s.getLogger() if s.Cfg.SnapshotCount == 0 { if lg != nil { lg.Info( "updating snapshot-count to default", zap.Uint64("given-snapshot-count", s.Cfg.SnapshotCount), zap.Uint64("updated-snapshot-count", DefaultSnapshotCount), ) } else { plog.Infof("set snapshot count to default %d", DefaultSnapshotCount) } s.Cfg.SnapshotCount = DefaultSnapshotCount } if s.Cfg.SnapshotCatchUpEntries == 0 { if lg != nil { lg.Info( "updating snapshot catch-up entries to default", zap.Uint64("given-snapshot-catchup-entries", s.Cfg.SnapshotCatchUpEntries), zap.Uint64("updated-snapshot-catchup-entries", DefaultSnapshotCatchUpEntries), ) } s.Cfg.SnapshotCatchUpEntries = DefaultSnapshotCatchUpEntries } s.w = wait.New() s.applyWait = wait.NewTimeList() s.done = make(chan struct{}) s.stop = make(chan struct{}) s.stopping = make(chan struct{}) s.ctx, s.cancel = context.WithCancel(context.Background()) s.readwaitc = make(chan struct{}, 1) s.readNotifier = newNotifier() s.leaderChanged = make(chan struct{}) if s.ClusterVersion() != nil { if lg != nil { lg.Info( "starting etcd server", zap.String("local-member-id", s.ID().String()), zap.String("local-server-version", version.Version), zap.String("cluster-id", s.Cluster().ID().String()), zap.String("cluster-version", version.Cluster(s.ClusterVersion().String())), ) } else { plog.Infof("starting server... [version: %v, cluster version: %v]", version.Version, version.Cluster(s.ClusterVersion().String())) } membership.ClusterVersionMetrics.With(prometheus.Labels{"cluster_version": s.ClusterVersion().String()}).Set(1) } else { if lg != nil { lg.Info( "starting etcd server", zap.String("local-member-id", s.ID().String()), zap.String("local-server-version", version.Version), zap.String("cluster-version", "to_be_decided"), ) } else { plog.Infof("starting server... 
[version: %v, cluster version: to_be_decided]", version.Version) } } // TODO: if this is an empty log, writes all peer infos // into the first entry go s.run() }
https://github.com/kubernetes/test-infra/blob/8125fbda10178887be5dff9e901d6a0a519b67bc/prow/github/fakegithub/fakegithub.go#L120-L122
// ListIssueComments returns a fresh copy of the fake comments recorded for
// the given issue number.
func (f *FakeClient) ListIssueComments(owner, repo string, number int) ([]github.IssueComment, error) {
	out := []github.IssueComment{}
	out = append(out, f.IssueComments[number]...)
	return out, nil
}
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/proxy/grpcproxy/watch_broadcast.go#L91-L122
func (wb *watchBroadcast) add(w *watcher) bool { wb.mu.Lock() defer wb.mu.Unlock() if wb.nextrev > w.nextrev || (wb.nextrev == 0 && w.nextrev != 0) { // wb is too far ahead, w will miss events // or wb is being established with a current watcher return false } if wb.responses == 0 { // Newly created; create event will be sent by etcd. wb.receivers[w] = struct{}{} return true } // already sent by etcd; emulate create event ok := w.post(&pb.WatchResponse{ Header: &pb.ResponseHeader{ // todo: fill in ClusterId // todo: fill in MemberId: Revision: w.nextrev, // todo: fill in RaftTerm: }, WatchId: w.id, Created: true, }) if !ok { return false } wb.receivers[w] = struct{}{} watchersCoalescing.Inc() return true }
https://github.com/hashicorp/raft/blob/773bcaa2009bf059c5c06457b9fccd156d5e91e7/configuration.go#L302-L323
func decodePeers(buf []byte, trans Transport) Configuration { // Decode the buffer first. var encPeers [][]byte if err := decodeMsgPack(buf, &encPeers); err != nil { panic(fmt.Errorf("failed to decode peers: %v", err)) } // Deserialize each peer. var servers []Server for _, enc := range encPeers { p := trans.DecodePeer(enc) servers = append(servers, Server{ Suffrage: Voter, ID: ServerID(p), Address: ServerAddress(p), }) } return Configuration{ Servers: servers, } }
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/integration/cluster.go#L1064-L1099
func (m *member) Restart(t testing.TB) error { lg.Info( "restarting a member", zap.String("name", m.Name), zap.Strings("advertise-peer-urls", m.PeerURLs.StringSlice()), zap.Strings("listen-client-urls", m.ClientURLs.StringSlice()), zap.String("grpc-address", m.grpcAddr), ) newPeerListeners := make([]net.Listener, 0) for _, ln := range m.PeerListeners { newPeerListeners = append(newPeerListeners, NewListenerWithAddr(t, ln.Addr().String())) } m.PeerListeners = newPeerListeners newClientListeners := make([]net.Listener, 0) for _, ln := range m.ClientListeners { newClientListeners = append(newClientListeners, NewListenerWithAddr(t, ln.Addr().String())) } m.ClientListeners = newClientListeners if m.grpcListener != nil { if err := m.listenGRPC(); err != nil { t.Fatal(err) } } err := m.Launch() lg.Info( "restarted a member", zap.String("name", m.Name), zap.Strings("advertise-peer-urls", m.PeerURLs.StringSlice()), zap.Strings("listen-client-urls", m.ClientURLs.StringSlice()), zap.String("grpc-address", m.grpcAddr), zap.Error(err), ) return err }
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/raft/raft.go#L1534-L1548
// increaseUncommittedSize sums the payload size of ents and charges it
// against the uncommitted-tail budget. It returns false (rejecting the
// proposal) only when the tail is already non-empty AND adding ents would
// exceed maxUncommittedSize; an empty tail admits a proposal of any size
// so progress is always possible.
func (r *raft) increaseUncommittedSize(ents []pb.Entry) bool {
	var s uint64
	for _, e := range ents {
		s += uint64(PayloadSize(e))
	}

	if r.uncommittedSize > 0 && r.uncommittedSize+s > r.maxUncommittedSize {
		// If the uncommitted tail of the Raft log is empty, allow any size
		// proposal. Otherwise, limit the size of the uncommitted tail of the
		// log and drop any proposal that would push the size over the limit.
		return false
	}
	r.uncommittedSize += s
	return true
}
https://github.com/gobuffalo/buffalo/blob/7f360181f4ccd79dcc9dcea2c904a4801f194f04/worker/simple.go#L61-L65
// Start prepares the worker for background jobs: it logs startup and
// derives a cancellable context from ctx; w.cancel is later used to stop
// scheduled work. Always returns nil.
func (w *Simple) Start(ctx context.Context) error {
	w.Logger.Info("Starting Simple Background Worker")
	w.ctx, w.cancel = context.WithCancel(ctx)
	return nil
}
https://github.com/kubernetes/test-infra/blob/8125fbda10178887be5dff9e901d6a0a519b67bc/prow/github/fakegithub/fakegithub.go#L189-L201
// DeleteStaleComments mimics the real client: when comments is nil it
// lists the issue's comments itself (ignoring the list error), then
// deletes every comment for which isStale reports true, stopping at the
// first delete failure.
func (f *FakeClient) DeleteStaleComments(org, repo string, number int, comments []github.IssueComment, isStale func(github.IssueComment) bool) error {
	if comments == nil {
		comments, _ = f.ListIssueComments(org, repo, number)
	}
	for _, comment := range comments {
		if isStale(comment) {
			if err := f.DeleteComment(org, repo, comment.ID); err != nil {
				return fmt.Errorf("failed to delete stale comment with ID '%d'", comment.ID)
			}
		}
	}
	return nil
}
https://github.com/hashicorp/raft/blob/773bcaa2009bf059c5c06457b9fccd156d5e91e7/net_transport.go#L193-L201
// NewNetworkTransportWithLogger is a convenience constructor: it wraps
// the stream layer, pool size, timeout, and logger into a
// NetworkTransportConfig and delegates to NewNetworkTransportWithConfig.
func NewNetworkTransportWithLogger(
	stream StreamLayer,
	maxPool int,
	timeout time.Duration,
	logger *log.Logger,
) *NetworkTransport {
	config := &NetworkTransportConfig{Stream: stream, MaxPool: maxPool, Timeout: timeout, Logger: logger}
	return NewNetworkTransportWithConfig(config)
}
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/functional/agent/utils.go#L30-L51
// archive moves the etcd log file and data directory into a timestamped
// "etcd-failure-archive" directory under baseDir, preserving failure
// evidence while letting the next run start clean. If the RFC3339-named
// directory already exists, the timestamp is shifted forward one second.
// Sources that are already gone (os.IsNotExist) are tolerated.
func archive(baseDir, etcdLogPath, dataDir string) error {
	dir := filepath.Join(baseDir, "etcd-failure-archive", time.Now().Format(time.RFC3339))
	if existDir(dir) {
		dir = filepath.Join(baseDir, "etcd-failure-archive", time.Now().Add(time.Second).Format(time.RFC3339))
	}
	if err := fileutil.TouchDirAll(dir); err != nil {
		return err
	}

	if err := os.Rename(etcdLogPath, filepath.Join(dir, "etcd.log")); err != nil {
		if !os.IsNotExist(err) {
			return err
		}
	}
	if err := os.Rename(dataDir, filepath.Join(dir, filepath.Base(dataDir))); err != nil {
		if !os.IsNotExist(err) {
			return err
		}
	}
	return nil
}
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/clientv3/watch.go#L113-L126
// Err returns the error, if any, carried by this watch response.
// Precedence: a stream close error, then compaction of the watched
// revision, then cancellation (with the server-supplied reason when
// present, otherwise ErrFutureRev). A nil return means the response
// holds normal events.
func (wr *WatchResponse) Err() error {
	switch {
	case wr.closeErr != nil:
		return v3rpc.Error(wr.closeErr)
	case wr.CompactRevision != 0:
		return v3rpc.ErrCompacted
	case wr.Canceled:
		if len(wr.cancelReason) != 0 {
			return v3rpc.Error(status.Error(codes.FailedPrecondition, wr.cancelReason))
		}
		return v3rpc.ErrFutureRev
	}
	return nil
}
https://github.com/hashicorp/raft/blob/773bcaa2009bf059c5c06457b9fccd156d5e91e7/raft.go#L726-L812
// restoreUserSnapshot installs a user-provided snapshot on the leader:
// it writes the snapshot through the snapshot store at an index one past
// max(current last index, snapshot index) — deliberately leaving a hole
// in the log so follower replication faults and fetches the snapshot —
// feeds it to the FSM, and fast-forwards the last-log/applied/snapshot
// markers. Refused while a configuration change is outstanding; all
// inflight log futures are aborted with ErrAbortedByRestore.
func (r *Raft) restoreUserSnapshot(meta *SnapshotMeta, reader io.Reader) error {
	defer metrics.MeasureSince([]string{"raft", "restoreUserSnapshot"}, time.Now())

	// Sanity check the version.
	version := meta.Version
	if version < SnapshotVersionMin || version > SnapshotVersionMax {
		return fmt.Errorf("unsupported snapshot version %d", version)
	}

	// We don't support snapshots while there's a config change
	// outstanding since the snapshot doesn't have a means to
	// represent this state.
	committedIndex := r.configurations.committedIndex
	latestIndex := r.configurations.latestIndex
	if committedIndex != latestIndex {
		return fmt.Errorf("cannot restore snapshot now, wait until the configuration entry at %v has been applied (have applied %v)",
			latestIndex, committedIndex)
	}

	// Cancel any inflight requests.
	for {
		e := r.leaderState.inflight.Front()
		if e == nil {
			break
		}
		e.Value.(*logFuture).respond(ErrAbortedByRestore)
		r.leaderState.inflight.Remove(e)
	}

	// We will overwrite the snapshot metadata with the current term,
	// an index that's greater than the current index, or the last
	// index in the snapshot. It's important that we leave a hole in
	// the index so we know there's nothing in the Raft log there and
	// replication will fault and send the snapshot.
	term := r.getCurrentTerm()
	lastIndex := r.getLastIndex()
	if meta.Index > lastIndex {
		lastIndex = meta.Index
	}
	lastIndex++

	// Dump the snapshot. Note that we use the latest configuration,
	// not the one that came with the snapshot.
	sink, err := r.snapshots.Create(version, lastIndex, term,
		r.configurations.latest, r.configurations.latestIndex, r.trans)
	if err != nil {
		return fmt.Errorf("failed to create snapshot: %v", err)
	}
	n, err := io.Copy(sink, reader)
	if err != nil {
		sink.Cancel()
		return fmt.Errorf("failed to write snapshot: %v", err)
	}
	if n != meta.Size {
		sink.Cancel()
		return fmt.Errorf("failed to write snapshot, size didn't match (%d != %d)", n, meta.Size)
	}
	if err := sink.Close(); err != nil {
		return fmt.Errorf("failed to close snapshot: %v", err)
	}
	r.logger.Info(fmt.Sprintf("Copied %d bytes to local snapshot", n))

	// Restore the snapshot into the FSM. If this fails we are in a
	// bad state so we panic to take ourselves out.
	fsm := &restoreFuture{ID: sink.ID()}
	fsm.init()
	select {
	case r.fsmMutateCh <- fsm:
	case <-r.shutdownCh:
		return ErrRaftShutdown
	}
	if err := fsm.Error(); err != nil {
		panic(fmt.Errorf("failed to restore snapshot: %v", err))
	}

	// We set the last log so it looks like we've stored the empty
	// index we burned. The last applied is set because we made the
	// FSM take the snapshot state, and we store the last snapshot
	// in the stable store since we created a snapshot as part of
	// this process.
	r.setLastLog(lastIndex, term)
	r.setLastApplied(lastIndex)
	r.setLastSnapshot(lastIndex, term)

	r.logger.Info(fmt.Sprintf("Restored user snapshot (index %d)", lastIndex))
	return nil
}
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/etcdctl/ctlv3/command/snapshot_command.go#L45-L54
// NewSnapshotCommand returns the "snapshot" parent cobra command with its
// save, restore, and status subcommands attached.
func NewSnapshotCommand() *cobra.Command {
	cmd := &cobra.Command{
		Use:   "snapshot <subcommand>",
		Short: "Manages etcd node snapshots",
	}
	cmd.AddCommand(NewSnapshotSaveCommand())
	cmd.AddCommand(NewSnapshotRestoreCommand())
	cmd.AddCommand(newSnapshotStatusCommand())
	return cmd
}
https://github.com/gobuffalo/buffalo/blob/7f360181f4ccd79dcc9dcea2c904a4801f194f04/render/auto.go#L65-L84
// Auto picks a renderer for i based on the request content type, read
// from the "contentType" context value with the engine default as
// fallback. A type containing "json" or "xml" selects the dedicated
// renderer; everything else falls through to HTML auto-rendering.
func (e *Engine) Auto(ctx context.Context, i interface{}) Renderer {
	ct, _ := ctx.Value("contentType").(string)
	if ct == "" {
		ct = e.DefaultContentType
	}
	ct = strings.TrimSpace(strings.ToLower(ct))

	if strings.Contains(ct, "json") {
		return e.JSON(i)
	}

	if strings.Contains(ct, "xml") {
		return e.XML(i)
	}

	return htmlAutoRenderer{
		Engine: e,
		model:  i,
	}
}
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/mvcc/metrics.go#L247-L250
func ReportEventReceived(n int) { pendingEventsGauge.Sub(float64(n)) totalEventsCounter.Add(float64(n)) }
https://github.com/dgraph-io/badger/blob/6b796b3ebec3ff006fcb1b425836cd784651e9fd/stream.go#L332-L337
// NewStream creates a Stream for iterating over the DB. Calling it on a
// DB opened with managed transactions is a programming error and panics;
// such callers must use the managed variant instead.
func (db *DB) NewStream() *Stream {
	if db.opt.managedTxns {
		panic("This API can not be called in managed mode.")
	}
	return db.newStream()
}
https://github.com/kubernetes/test-infra/blob/8125fbda10178887be5dff9e901d6a0a519b67bc/prow/plugins/plugins.go#L260-L271
// GenericCommentHandlers returns, under the config lock, the generic
// comment handlers registered for the plugins enabled on owner/repo,
// keyed by plugin name. Plugins without a registered handler are skipped.
func (pa *ConfigAgent) GenericCommentHandlers(owner, repo string) map[string]GenericCommentHandler {
	pa.mut.Lock()
	defer pa.mut.Unlock()

	hs := map[string]GenericCommentHandler{}
	for _, p := range pa.getPlugins(owner, repo) {
		if h, ok := genericCommentHandlers[p]; ok {
			hs[p] = h
		}
	}
	return hs
}
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/raft/log_unstable.go#L56-L75
// maybeTerm returns the term of the entry at index i, if it can be
// answered from unstable state: indexes below offset are only known via
// the in-memory snapshot's metadata; indexes past the last unstable
// entry are unknown. The bool reports whether a term was determined.
func (u *unstable) maybeTerm(i uint64) (uint64, bool) {
	if i < u.offset {
		if u.snapshot == nil {
			return 0, false
		}
		if u.snapshot.Metadata.Index == i {
			return u.snapshot.Metadata.Term, true
		}
		return 0, false
	}

	last, ok := u.maybeLastIndex()
	if !ok {
		return 0, false
	}
	if i > last {
		return 0, false
	}

	return u.entries[i-u.offset].Term, true
}
https://github.com/hashicorp/raft/blob/773bcaa2009bf059c5c06457b9fccd156d5e91e7/inmem_store.go#L32-L36
// FirstIndex returns the lowest stored log index under the read lock
// (0 when the store is empty). The error is always nil; it exists to
// satisfy the LogStore interface.
func (i *InmemStore) FirstIndex() (uint64, error) {
	i.l.RLock()
	defer i.l.RUnlock()
	return i.lowIndex, nil
}
https://github.com/gobuffalo/buffalo/blob/7f360181f4ccd79dcc9dcea2c904a4801f194f04/servers/servers.go#L32-L37
// WrapListener pairs an *http.Server with an already-constructed
// net.Listener so the application serves on that listener rather than
// opening its own.
func WrapListener(s *http.Server, l net.Listener) Server {
	wrapped := &Listener{}
	wrapped.Server = s
	wrapped.Listener = l
	return wrapped
}
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/clientv3/retry.go#L87-L96
// isSafeRetryMutableRPC reports whether a failed mutable (write) RPC is
// safe to retry: only when the error is a gRPC Unavailable status AND
// its description shows the request never left the client ("no address
// available" / "no connection available"), so the server cannot have
// applied it.
func isSafeRetryMutableRPC(err error) bool {
	if ev, ok := status.FromError(err); ok && ev.Code() != codes.Unavailable {
		// not safe for mutable RPCs
		// e.g. interrupted by non-transient error that client cannot handle itself,
		// or transient error while the connection has already been established
		return false
	}
	desc := rpctypes.ErrorDesc(err)
	return desc == "there is no address available" || desc == "there is no connection available"
}
https://github.com/kubernetes/test-infra/blob/8125fbda10178887be5dff9e901d6a0a519b67bc/prow/pjutil/tot.go#L51-L56
// PresubmitToJobSpec converts a presubmit configuration into the minimal
// downward-API JobSpec: only the job type (presubmit) and job name are
// populated.
func PresubmitToJobSpec(pre config.Presubmit) *downwardapi.JobSpec {
	spec := &downwardapi.JobSpec{}
	spec.Type = prowapi.PresubmitJob
	spec.Job = pre.Name
	return spec
}
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/clientv3/client.go#L419-L422
func WithRequireLeader(ctx context.Context) context.Context { md := metadata.Pairs(rpctypes.MetadataRequireLeaderKey, rpctypes.MetadataHasLeader) return metadata.NewOutgoingContext(ctx, md) }
https://github.com/kubernetes/test-infra/blob/8125fbda10178887be5dff9e901d6a0a519b67bc/prow/client/clientset/versioned/typed/prowjobs/v1/fake/fake_prowjob.go#L42-L50
// Get takes the name of a prowJob and returns the corresponding prowJob
// object (and an error, if any) by invoking the fake's recorded reaction
// chain with a namespaced get action.
func (c *FakeProwJobs) Get(name string, options v1.GetOptions) (result *prowjobsv1.ProwJob, err error) {
	obj, err := c.Fake.
		Invokes(testing.NewGetAction(prowjobsResource, c.ns, name), &prowjobsv1.ProwJob{})

	if obj == nil {
		return nil, err
	}
	return obj.(*prowjobsv1.ProwJob), err
}
https://github.com/dgraph-io/badger/blob/6b796b3ebec3ff006fcb1b425836cd784651e9fd/skl/skl.go#L257-L276
// findSpliceForLevel walks right along the given level, starting at
// before (whose key must compare less than key), and returns the
// (before, next) pair between which key would splice in. On an exact
// match both return values are the matching node; next is nil when key
// belongs at the end of the level.
func (s *Skiplist) findSpliceForLevel(key []byte, before *node, level int) (*node, *node) {
	for {
		// Assume before.key < key.
		next := s.getNext(before, level)
		if next == nil {
			return before, next
		}
		nextKey := next.key(s.arena)
		cmp := y.CompareKeys(key, nextKey)
		if cmp == 0 {
			// Equality case.
			return next, next
		}
		if cmp < 0 {
			// before.key < key < next.key. We are done for this level.
			return before, next
		}
		before = next // Keep moving right on this level.
	}
}
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/clientv3/concurrency/stm.go#L84-L86
// WithPrefetch is an stmOption that causes the listed keys to be
// prefetched before the STM apply function runs, saving per-key round
// trips. Repeated use accumulates keys.
func WithPrefetch(keys ...string) stmOption {
	return func(so *stmOptions) { so.prefetch = append(so.prefetch, keys...) }
}
https://github.com/hashicorp/raft/blob/773bcaa2009bf059c5c06457b9fccd156d5e91e7/api.go#L695-L706
// AddPeer stages a new peer whose server ID equals its address, via a
// configuration-change request. It is refused (ErrUnsupportedProtocol)
// on protocol versions above 2; newer callers should use the
// AddVoter-style APIs that take explicit IDs.
func (r *Raft) AddPeer(peer ServerAddress) Future {
	if r.protocolVersion > 2 {
		return errorFuture{ErrUnsupportedProtocol}
	}

	return r.requestConfigChange(configurationChangeRequest{
		command:       AddStaging,
		serverID:      ServerID(peer),
		serverAddress: peer,
		prevIndex:     0,
	}, 0)
}
https://github.com/kubernetes/test-infra/blob/8125fbda10178887be5dff9e901d6a0a519b67bc/prow/github/fakegithub/fakegithub.go#L416-L422
// SetMilestone records milestoneNum as the fake client's current
// milestone. Negative milestone numbers are rejected; org, repo, and
// issueNum are accepted for interface parity but otherwise ignored.
func (f *FakeClient) SetMilestone(org, repo string, issueNum, milestoneNum int) error {
	if milestoneNum < 0 {
		return fmt.Errorf("Milestone Numbers Cannot Be Negative")
	}
	f.Milestone = milestoneNum
	return nil
}
https://github.com/dgraph-io/badger/blob/6b796b3ebec3ff006fcb1b425836cd784651e9fd/table/table.go#L82-L105
// DecrRef drops one reference to the table. When the count reaches zero
// the backing file is unmapped (if memory-mapped), truncated to zero,
// closed, and removed from disk.
func (t *Table) DecrRef() error {
	newRef := atomic.AddInt32(&t.ref, -1)
	if newRef == 0 {
		// We can safely delete this file, because for all the current files, we always have
		// at least one reference pointing to them.

		// It's necessary to delete windows files
		if t.loadingMode == options.MemoryMap {
			y.Munmap(t.mmap)
		}
		if err := t.fd.Truncate(0); err != nil {
			// This is very important to let the FS know that the file is deleted.
			return err
		}
		filename := t.fd.Name()
		if err := t.fd.Close(); err != nil {
			return err
		}
		if err := os.Remove(filename); err != nil {
			return err
		}
	}
	return nil
}
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/raft/storage.go#L216-L234
// Compact discards all log entries prior to compactIndex, keeping a
// single dummy entry at ents[0] that carries the index/term of the
// compaction point. Returns ErrCompacted when the index is already
// compacted; panics when it is beyond the last index (caller bug).
func (ms *MemoryStorage) Compact(compactIndex uint64) error {
	ms.Lock()
	defer ms.Unlock()
	offset := ms.ents[0].Index
	if compactIndex <= offset {
		return ErrCompacted
	}
	if compactIndex > ms.lastIndex() {
		raftLogger.Panicf("compact %d is out of bound lastindex(%d)", compactIndex, ms.lastIndex())
	}

	i := compactIndex - offset
	// Allocate room for the dummy entry plus everything after the
	// compaction point, then copy the surviving tail across.
	ents := make([]pb.Entry, 1, 1+uint64(len(ms.ents))-i)
	ents[0].Index = ms.ents[i].Index
	ents[0].Term = ms.ents[i].Term
	ents = append(ents, ms.ents[i+1:]...)
	ms.ents = ents
	return nil
}
https://github.com/kubernetes/test-infra/blob/8125fbda10178887be5dff9e901d6a0a519b67bc/prow/pjutil/pjutil.go#L196-L222
// PartitionActive splits pjs into two exactly-sized, closed channels:
// one holding pending jobs and one holding triggered jobs. Jobs in any
// other state are dropped. A first pass counts so each channel's buffer
// fits all of its jobs, letting the sends never block.
func PartitionActive(pjs []prowapi.ProwJob) (pending, triggered chan prowapi.ProwJob) {
	// Size channels correctly.
	pendingCount, triggeredCount := 0, 0
	for _, pj := range pjs {
		switch pj.Status.State {
		case prowapi.PendingState:
			pendingCount++
		case prowapi.TriggeredState:
			triggeredCount++
		}
	}
	pending = make(chan prowapi.ProwJob, pendingCount)
	triggered = make(chan prowapi.ProwJob, triggeredCount)

	// Partition the jobs into the two separate channels.
	for _, pj := range pjs {
		switch pj.Status.State {
		case prowapi.PendingState:
			pending <- pj
		case prowapi.TriggeredState:
			triggered <- pj
		}
	}
	close(pending)
	close(triggered)
	return pending, triggered
}
https://github.com/kubernetes/test-infra/blob/8125fbda10178887be5dff9e901d6a0a519b67bc/prow/plugins/plugins.go#L110-L113
// RegisterReviewEventHandler registers a plugin's review-event handler
// and its help provider under the given plugin name.
// NOTE(review): writes to package-level maps without synchronization —
// presumably only called during package init; confirm before invoking
// concurrently.
func RegisterReviewEventHandler(name string, fn ReviewEventHandler, help HelpProvider) {
	pluginHelp[name] = help
	reviewEventHandlers[name] = fn
}
https://github.com/dgraph-io/badger/blob/6b796b3ebec3ff006fcb1b425836cd784651e9fd/y/iterator.go#L257-L264
// Close closes every sub-iterator, stopping at — and wrapping — the
// first failure; later iterators remain open in that case.
func (s *MergeIterator) Close() error {
	for _, itr := range s.all {
		if err := itr.Close(); err != nil {
			return errors.Wrap(err, "MergeIterator")
		}
	}
	return nil
}
https://github.com/dgraph-io/badger/blob/6b796b3ebec3ff006fcb1b425836cd784651e9fd/db.go#L1044-L1055
// Next returns the next value in the sequence under the lock, leasing a
// fresh batch from the DB (updateLease) when the current lease is
// exhausted. The zero value is returned together with any lease error.
func (seq *Sequence) Next() (uint64, error) {
	seq.Lock()
	defer seq.Unlock()
	if seq.next >= seq.leased {
		if err := seq.updateLease(); err != nil {
			return 0, err
		}
	}
	val := seq.next
	seq.next++
	return val, nil
}
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/etcdctl/ctlv2/command/update_command.go#L27-L40
// NewUpdateCommand returns the CLI "update" command, which updates an
// existing key with a given value and an optional TTL (0 = no expiry).
func NewUpdateCommand() cli.Command {
	return cli.Command{
		Name:      "update",
		Usage:     "update an existing key with a given value",
		ArgsUsage: "<key> <value>",
		Flags: []cli.Flag{
			cli.IntFlag{Name: "ttl", Value: 0, Usage: "key time-to-live in seconds"},
		},
		Action: func(c *cli.Context) error {
			updateCommandFunc(c, mustNewKeyAPI(c))
			return nil
		},
	}
}
https://github.com/gobuffalo/buffalo/blob/7f360181f4ccd79dcc9dcea2c904a4801f194f04/worker/simple.go#L109-L119
// PerformIn schedules job to run after duration d on a background
// goroutine. If the worker's context is cancelled before the delay
// elapses, the job is skipped and w.cancel is invoked instead.
// Always returns nil; the goroutine's outcome is not reported.
func (w Simple) PerformIn(job Job, d time.Duration) error {
	go func() {
		select {
		case <-time.After(d):
			w.Perform(job)
		case <-w.ctx.Done():
			w.cancel()
		}
	}()
	return nil
}
https://github.com/gobuffalo/buffalo/blob/7f360181f4ccd79dcc9dcea2c904a4801f194f04/server.go#L21-L98
// Serve starts the application's servers — defaulting to a TCP server,
// or a unix socket when Addr has the "unix:" prefix — starts background
// workers unless disabled, and blocks until the signal-aware context is
// cancelled (SIGTERM/interrupt), then gracefully stops the app, workers,
// and every server.
func (a *App) Serve(srvs ...servers.Server) error {
	a.Logger.Infof("Starting application at %s", a.Options.Addr)

	payload := events.Payload{
		"app": a,
	}
	if err := events.EmitPayload(EvtAppStart, payload); err != nil {
		return err
	}

	if len(srvs) == 0 {
		if strings.HasPrefix(a.Options.Addr, "unix:") {
			// strip the "unix:" scheme prefix to get the socket path
			tcp, err := servers.UnixSocket(a.Options.Addr[5:])
			if err != nil {
				return err
			}
			srvs = append(srvs, tcp)
		} else {
			srvs = append(srvs, servers.New())
		}
	}

	ctx, cancel := sigtx.WithCancel(a.Context, syscall.SIGTERM, os.Interrupt)
	defer cancel()

	go func() {
		// gracefully shut down the application when the context is cancelled
		<-ctx.Done()
		a.Logger.Info("Shutting down application")

		events.EmitError(EvtAppStop, ctx.Err(), payload)

		if err := a.Stop(ctx.Err()); err != nil {
			events.EmitError(EvtAppStopErr, err, payload)
			a.Logger.Error(err)
		}

		if !a.WorkerOff {
			// stop the workers
			a.Logger.Info("Shutting down worker")
			events.EmitPayload(EvtWorkerStop, payload)
			if err := a.Worker.Stop(); err != nil {
				events.EmitError(EvtWorkerStopErr, err, payload)
				a.Logger.Error(err)
			}
		}

		for _, s := range srvs {
			if err := s.Shutdown(ctx); err != nil {
				a.Logger.Error(err)
			}
		}
	}()

	// if configured to do so, start the workers
	if !a.WorkerOff {
		go func() {
			events.EmitPayload(EvtWorkerStart, payload)
			if err := a.Worker.Start(ctx); err != nil {
				a.Stop(err)
			}
		}()
	}

	// launch every server; a server start failure stops the whole app
	for _, s := range srvs {
		s.SetAddr(a.Addr)
		go func(s servers.Server) {
			if err := s.Start(ctx, a); err != nil {
				a.Stop(err)
			}
		}(s)
	}

	<-ctx.Done()

	return a.Context.Err()
}
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/clientv3/retry.go#L57-L77
// isSafeRetryImmutableRPC reports whether a failed read-only RPC may be
// retried: only transport-level gRPC Unavailable errors qualify. Typed
// etcd server errors (e.g. ErrCompacted) and non-status errors (such as
// those from Dial) are never retried.
func isSafeRetryImmutableRPC(err error) bool {
	eErr := rpctypes.Error(err)
	if serverErr, ok := eErr.(rpctypes.EtcdError); ok && serverErr.Code() != codes.Unavailable {
		// interrupted by non-transient server-side or gRPC-side error
		// client cannot handle itself (e.g. rpctypes.ErrCompacted)
		return false
	}
	// only retry if unavailable
	ev, ok := status.FromError(err)
	if !ok {
		// all errors from RPC is typed "grpc/status.(*statusError)"
		// (ref. https://github.com/grpc/grpc-go/pull/1782)
		//
		// if the error type is not "grpc/status.(*statusError)",
		// it could be from "Dial"
		// TODO: do not retry for now
		// ref. https://github.com/grpc/grpc-go/issues/1581
		return false
	}
	return ev.Code() == codes.Unavailable
}
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/wal/wal.go#L583-L667
// cut finishes the current WAL segment and starts the next one:
// the tail is truncated at its current offset (reclaiming preallocated
// space from an early cut) and synced; a pre-created temp file is taken
// from the file pipeline, seeded with the rolling CRC, the metadata
// record, and the current hardstate; after a sync it is atomically
// renamed to walName(seq+1, enti+1), the directory is fsynced, and the
// file is re-locked under its final name so Name() reports the real
// segment path.
func (w *WAL) cut() error {
	// close old wal file; truncate to avoid wasting space if an early cut
	off, serr := w.tail().Seek(0, io.SeekCurrent)
	if serr != nil {
		return serr
	}

	if err := w.tail().Truncate(off); err != nil {
		return err
	}

	if err := w.sync(); err != nil {
		return err
	}

	fpath := filepath.Join(w.dir, walName(w.seq()+1, w.enti+1))

	// create a temp wal file with name sequence + 1, or truncate the existing one
	newTail, err := w.fp.Open()
	if err != nil {
		return err
	}

	// update writer and save the previous crc
	w.locks = append(w.locks, newTail)
	prevCrc := w.encoder.crc.Sum32()
	w.encoder, err = newFileEncoder(w.tail().File, prevCrc)
	if err != nil {
		return err
	}

	if err = w.saveCrc(prevCrc); err != nil {
		return err
	}

	if err = w.encoder.encode(&walpb.Record{Type: metadataType, Data: w.metadata}); err != nil {
		return err
	}

	if err = w.saveState(&w.state); err != nil {
		return err
	}

	// atomically move temp wal file to wal file
	if err = w.sync(); err != nil {
		return err
	}

	off, err = w.tail().Seek(0, io.SeekCurrent)
	if err != nil {
		return err
	}

	if err = os.Rename(newTail.Name(), fpath); err != nil {
		return err
	}
	if err = fileutil.Fsync(w.dirFile); err != nil {
		return err
	}

	// reopen newTail with its new path so calls to Name() match the wal filename format
	newTail.Close()

	if newTail, err = fileutil.LockFile(fpath, os.O_WRONLY, fileutil.PrivateFileMode); err != nil {
		return err
	}
	if _, err = newTail.Seek(off, io.SeekStart); err != nil {
		return err
	}

	w.locks[len(w.locks)-1] = newTail

	prevCrc = w.encoder.crc.Sum32()
	w.encoder, err = newFileEncoder(w.tail().File, prevCrc)
	if err != nil {
		return err
	}

	if w.lg != nil {
		w.lg.Info("created a new WAL segment", zap.String("path", fpath))
	} else {
		plog.Infof("segmented wal file %v is created", fpath)
	}
	return nil
}
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/raft/progress.go#L118-L129
// maybeUpdate is called when an append response acknowledges index n.
// It returns false when n comes from an outdated message (Match already
// covers n); otherwise it advances Match (resuming a paused follower)
// and keeps Next at least n+1, returning true.
func (pr *Progress) maybeUpdate(n uint64) bool {
	var updated bool
	if pr.Match < n {
		pr.Match = n
		updated = true
		pr.resume()
	}
	if pr.Next < n+1 {
		pr.Next = n + 1
	}
	return updated
}
https://github.com/kubernetes/test-infra/blob/8125fbda10178887be5dff9e901d6a0a519b67bc/prow/github/helpers.go#L37-L53
// ImageTooBig reports whether the image at url exceeds the 10MB limit by
// issuing an HTTP HEAD request and inspecting the Content-Length header.
// On any transport error or non-200 response it fails closed, returning
// true together with an error. A missing or unparsable Content-Length
// parses as 0 and is therefore treated as within the limit.
//
// Fix over the original: the HEAD response body is now closed, so the
// underlying connection can be reused instead of leaking.
func ImageTooBig(url string) (bool, error) {
	// limit is 10MB
	limit := 10000000
	// try to get the image size from Content-Length header
	resp, err := http.Head(url)
	if err != nil {
		return true, fmt.Errorf("HEAD error: %v", err)
	}
	// Response bodies must always be closed, even for HEAD requests,
	// so the transport can reuse the connection.
	defer resp.Body.Close()
	if sc := resp.StatusCode; sc != http.StatusOK {
		return true, fmt.Errorf("failing %d response", sc)
	}
	size, _ := strconv.Atoi(resp.Header.Get("Content-Length"))
	if size > limit {
		return true, nil
	}
	return false, nil
}
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/etcdserver/api/membership/member.go#L54-L74
// NewMember builds an unstarted Member whose ID is derived
// deterministically: the first 8 bytes (big-endian) of the SHA-1 over
// the sorted peer URLs, the cluster name, and — when now is non-nil —
// the unix timestamp, making the ID time-dependent in that case.
func NewMember(name string, peerURLs types.URLs, clusterName string, now *time.Time) *Member {
	m := &Member{
		RaftAttributes: RaftAttributes{PeerURLs: peerURLs.StringSlice()},
		Attributes:     Attributes{Name: name},
	}

	var b []byte
	// sort so the hash is independent of URL ordering
	sort.Strings(m.PeerURLs)
	for _, p := range m.PeerURLs {
		b = append(b, []byte(p)...)
	}

	b = append(b, []byte(clusterName)...)
	if now != nil {
		b = append(b, []byte(fmt.Sprintf("%d", now.Unix()))...)
	}

	hash := sha1.Sum(b)
	m.ID = types.ID(binary.BigEndian.Uint64(hash[:8]))
	return m
}
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/clientv3/concurrency/stm.go#L385-L387
// NewSTMReadCommitted launches a read-committed software transaction by
// delegating to NewSTM with the ReadCommitted isolation level and the
// given context as the abort context.
func NewSTMReadCommitted(ctx context.Context, c *v3.Client, apply func(STM) error) (*v3.TxnResponse, error) {
	opts := []stmOption{
		WithAbortContext(ctx),
		WithIsolation(ReadCommitted),
	}
	return NewSTM(c, apply, opts...)
}
https://github.com/kubernetes/test-infra/blob/8125fbda10178887be5dff9e901d6a0a519b67bc/prow/jenkins/jenkins.go#L153-L167
// ProwJobID scans the Jenkins build's action parameters for the prow job
// ID parameter and returns its string value. A matching parameter whose
// value is not a string is logged and skipped; "" is returned when no
// usable parameter is found.
func (jb *Build) ProwJobID() string {
	for _, action := range jb.Actions {
		for _, p := range action.Parameters {
			if p.Name == prowJobID {
				value, ok := p.Value.(string)
				if !ok {
					logrus.Errorf("Cannot determine %s value for %#v", p.Name, jb)
					continue
				}
				return value
			}
		}
	}
	return ""
}
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/embed/etcd.go#L803-L808
// GetLogger returns the embedded server's logger, reading it under the
// config's logger mutex.
func (e *Etcd) GetLogger() *zap.Logger {
	e.cfg.loggerMu.RLock()
	l := e.cfg.logger
	e.cfg.loggerMu.RUnlock()
	return l
}
https://github.com/hashicorp/raft/blob/773bcaa2009bf059c5c06457b9fccd156d5e91e7/api.go#L638-L661
// Barrier enqueues a LogBarrier entry; the returned future completes
// once all preceding operations have been applied. A timeout > 0 bounds
// only the wait to enqueue onto applyCh (a nil timer channel blocks
// forever when timeout is 0), not the time to commit or apply.
func (r *Raft) Barrier(timeout time.Duration) Future {
	metrics.IncrCounter([]string{"raft", "barrier"}, 1)
	var timer <-chan time.Time
	if timeout > 0 {
		timer = time.After(timeout)
	}

	// Create a log future, no index or term yet
	logFuture := &logFuture{
		log: Log{
			Type: LogBarrier,
		},
	}
	logFuture.init()

	select {
	case <-timer:
		return errorFuture{ErrEnqueueTimeout}
	case <-r.shutdownCh:
		return errorFuture{ErrRaftShutdown}
	case r.applyCh <- logFuture:
		return logFuture
	}
}
https://github.com/gobuffalo/buffalo/blob/7f360181f4ccd79dcc9dcea2c904a4801f194f04/genny/build/cleanup.go#L14-L46
// Cleanup returns a genny runner function that undoes build artifacts:
// it removes the packed-asset directory ("a") under the app root, cleans
// jam boxes, restores every file recorded for rollback, deletes all
// generated files from the virtual disk, and runs "go mod tidy" when Go
// modules are enabled.
func Cleanup(opts *Options) genny.RunFn {
	return func(r *genny.Runner) error {
		defer os.RemoveAll(filepath.Join(opts.Root, "a"))
		if err := jam.Clean(); err != nil {
			return err
		}

		// Restore rollback files; Range cannot return an error itself,
		// so the first failure is captured in err and re-checked below.
		var err error
		opts.rollback.Range(func(k, v interface{}) bool {
			f := genny.NewFileS(k.(string), v.(string))
			r.Logger.Debugf("Rollback: %s", f.Name())
			if err = r.File(f); err != nil {
				return false
			}
			r.Disk.Remove(f.Name())
			return true
		})
		if err != nil {
			return err
		}

		for _, f := range r.Disk.Files() {
			if err := r.Disk.Delete(f.Name()); err != nil {
				return err
			}
		}

		if envy.Mods() {
			if err := r.Exec(exec.Command(genny.GoBin(), "mod", "tidy")); err != nil {
				return err
			}
		}
		return nil
	}
}
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/clientv3/balancer/balancer.go#L53-L86
// Build constructs a baseBalancer for the given ClientConn: a unique
// time-derived id, the configured policy/name/logger (falling back to a
// no-op logger), empty subconn bookkeeping maps, and an initial picker
// that always returns ErrNoSubConnAvailable until the resolver reports
// addresses. Only one ClientConn is tracked at a time.
func (b *builder) Build(cc balancer.ClientConn, opt balancer.BuildOptions) balancer.Balancer {
	bb := &baseBalancer{
		id:     strconv.FormatInt(time.Now().UnixNano(), 36),
		policy: b.cfg.Policy,
		name:   b.cfg.Name,
		lg:     b.cfg.Logger,

		addrToSc: make(map[resolver.Address]balancer.SubConn),
		scToAddr: make(map[balancer.SubConn]resolver.Address),
		scToSt:   make(map[balancer.SubConn]connectivity.State),

		currentConn: nil,
		csEvltr:     &connectivityStateEvaluator{},

		// initialize picker always returns "ErrNoSubConnAvailable"
		Picker: picker.NewErr(balancer.ErrNoSubConnAvailable),
	}
	if bb.lg == nil {
		bb.lg = zap.NewNop()
	}

	// TODO: support multiple connections
	bb.mu.Lock()
	bb.currentConn = cc
	bb.mu.Unlock()

	bb.lg.Info(
		"built balancer",
		zap.String("balancer-id", bb.id),
		zap.String("policy", bb.policy.String()),
		zap.String("resolver-target", cc.Target()),
	)
	return bb
}
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/raft/log.go#L88-L104
// maybeAppend appends ents after index when the (index, logTerm) pair
// matches the local log. Entries from the first conflict onward replace
// existing ones; a conflict within already-committed entries is fatal.
// Commit is then advanced to min(committed, last new index). Returns
// the last new index and true on success, or (0, false) on mismatch.
func (l *raftLog) maybeAppend(index, logTerm, committed uint64, ents ...pb.Entry) (lastnewi uint64, ok bool) {
	if l.matchTerm(index, logTerm) {
		lastnewi = index + uint64(len(ents))
		ci := l.findConflict(ents)
		switch {
		case ci == 0:
			// no conflict and nothing new to append
		case ci <= l.committed:
			l.logger.Panicf("entry %d conflict with committed entry [committed(%d)]", ci, l.committed)
		default:
			offset := index + 1
			l.append(ents[ci-offset:]...)
		}
		l.commitTo(min(committed, lastnewi))
		return lastnewi, true
	}
	return 0, false
}
https://github.com/dgraph-io/badger/blob/6b796b3ebec3ff006fcb1b425836cd784651e9fd/y/watermark.go#L117-L130
// WaitForMark blocks until the watermark's DoneUntil reaches index or
// ctx is cancelled, registering a waiter channel with the mark processor
// when the index is not yet done.
func (w *WaterMark) WaitForMark(ctx context.Context, index uint64) error {
	if w.DoneUntil() >= index {
		return nil
	}
	waitCh := make(chan struct{})
	w.markCh <- mark{index: index, waiter: waitCh}

	select {
	case <-ctx.Done():
		return ctx.Err()
	case <-waitCh:
		return nil
	}
}
https://github.com/hashicorp/raft/blob/773bcaa2009bf059c5c06457b9fccd156d5e91e7/inmem_store.go#L46-L55
func (i *InmemStore) GetLog(index uint64, log *Log) error { i.l.RLock() defer i.l.RUnlock() l, ok := i.logs[index] if !ok { return ErrLogNotFound } *log = *l return nil }
https://github.com/gobuffalo/buffalo/blob/7f360181f4ccd79dcc9dcea2c904a4801f194f04/flash.go#L39-L43
// persist JSON-encodes the flash data into the session under flashKey
// and saves the session. The marshal error is deliberately ignored —
// flash messages are best-effort.
func (f Flash) persist(session *Session) {
	b, _ := json.Marshal(f.data)
	session.Set(flashKey, b)
	session.Save()
}
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/pkg/flags/selective_string.go#L61-L67
// NewSelectiveStringValue creates a string flag value restricted to the
// given set of valid strings; the first entry becomes the default.
// Callers must pass at least one value — an empty valids panics with an
// index-out-of-range on valids[0].
func NewSelectiveStringValue(valids ...string) *SelectiveStringValue {
	vm := make(map[string]struct{})
	for _, v := range valids {
		vm[v] = struct{}{}
	}
	return &SelectiveStringValue{valids: vm, v: valids[0]}
}