id stringlengths 95 167 | text stringlengths 69 15.9k | title stringclasses 1 value |
|---|---|---|
https://github.com/kubernetes/test-infra/blob/8125fbda10178887be5dff9e901d6a0a519b67bc/prow/gcsupload/run.go#L41-L62 | func (o Options) Run(spec *downwardapi.JobSpec, extra map[string]gcs.UploadFunc) error {
uploadTargets := o.assembleTargets(spec, extra)
if !o.DryRun {
ctx := context.Background()
gcsClient, err := storage.NewClient(ctx, option.WithCredentialsFile(o.GcsCredentialsFile))
if err != nil {
return fmt.Errorf("could not connect to GCS: %v", err)
}
if err := gcs.Upload(gcsClient.Bucket(o.Bucket), uploadTargets); err != nil {
return fmt.Errorf("failed to upload to GCS: %v", err)
}
} else {
for destination := range uploadTargets {
logrus.WithField("dest", destination).Info("Would upload")
}
}
logrus.Info("Finished upload to GCS")
return nil
} | |
https://github.com/hashicorp/raft/blob/773bcaa2009bf059c5c06457b9fccd156d5e91e7/fuzzy/cluster.go#L144-L162 | func (c *cluster) LeaderPlus(n int) []*raftNode {
r := make([]*raftNode, 0, n+1)
ldr := c.Leader(time.Second)
if ldr != nil {
r = append(r, ldr)
}
if len(r) >= n {
return r
}
for _, node := range c.nodes {
if !containsNode(r, node) {
r = append(r, node)
if len(r) >= n {
return r
}
}
}
return r
} | |
https://github.com/kubernetes/test-infra/blob/8125fbda10178887be5dff9e901d6a0a519b67bc/prow/cmd/pipeline/controller.go#L467-L473 | func finalState(status prowjobv1.ProwJobState) bool {
switch status {
case "", prowjobv1.PendingState, prowjobv1.TriggeredState:
return false
}
return true
} | |
https://github.com/kubernetes/test-infra/blob/8125fbda10178887be5dff9e901d6a0a519b67bc/prow/deck/jobs/jobs.go#L132-L147 | func (ja *JobAgent) GetProwJob(job, id string) (prowapi.ProwJob, error) {
if ja == nil {
return prowapi.ProwJob{}, fmt.Errorf("Prow job agent doesn't exist (are you running locally?)")
}
var j prowapi.ProwJob
ja.mut.Lock()
idMap, ok := ja.jobsIDMap[job]
if ok {
j, ok = idMap[id]
}
ja.mut.Unlock()
if !ok {
return prowapi.ProwJob{}, errProwjobNotFound
}
return j, nil
} | |
https://github.com/kubernetes/test-infra/blob/8125fbda10178887be5dff9e901d6a0a519b67bc/prow/plugins/plugins.go#L65-L68 | func RegisterIssueHandler(name string, fn IssueHandler, help HelpProvider) {
pluginHelp[name] = help
issueHandlers[name] = fn
} | |
https://github.com/dgraph-io/badger/blob/6b796b3ebec3ff006fcb1b425836cd784651e9fd/iterator.go#L120-L128 | func (item *Item) ValueCopy(dst []byte) ([]byte, error) {
item.wg.Wait()
if item.status == prefetched {
return y.SafeCopy(dst, item.val), item.err
}
buf, cb, err := item.yieldItemValue()
defer runCallback(cb)
return y.SafeCopy(dst, buf), err
} | |
https://github.com/kubernetes/test-infra/blob/8125fbda10178887be5dff9e901d6a0a519b67bc/prow/pubsub/reporter/reporter.go#L92-L120 | func (c *Client) Report(pj *prowapi.ProwJob) ([]*prowapi.ProwJob, error) {
message := c.generateMessageFromPJ(pj)
ctx := context.Background()
client, err := pubsub.NewClient(ctx, message.Project)
if err != nil {
return nil, fmt.Errorf("could not create pubsub Client: %v", err)
}
topic := client.Topic(message.Topic)
d, err := json.Marshal(message)
if err != nil {
return nil, fmt.Errorf("could not marshal pubsub report: %v", err)
}
res := topic.Publish(ctx, &pubsub.Message{
Data: d,
})
_, err = res.Get(ctx)
if err != nil {
return nil, fmt.Errorf(
"failed to publish pubsub message with run ID %q to topic: \"%s/%s\". %v",
message.RunID, message.Project, message.Topic, err)
}
return []*prowapi.ProwJob{pj}, nil
} | |
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/clientv3/op.go#L383-L391 | func WithPrefix() OpOption {
return func(op *Op) {
if len(op.key) == 0 {
op.key, op.end = []byte{0}, []byte{0}
return
}
op.end = getPrefix(op.key)
}
} | |
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/clientv3/watch.go#L688-L696 | func (w *watchGrpcStream) broadcastResponse(wr *WatchResponse) bool {
for _, ws := range w.substreams {
select {
case ws.recvc <- wr:
case <-ws.donec:
}
}
return true
} | |
https://github.com/dgraph-io/badger/blob/6b796b3ebec3ff006fcb1b425836cd784651e9fd/skl/skl.go#L472-L477 | func (s *Skiplist) NewUniIterator(reversed bool) *UniIterator {
return &UniIterator{
iter: s.NewIterator(),
reversed: reversed,
}
} | |
https://github.com/gobuffalo/buffalo/blob/7f360181f4ccd79dcc9dcea2c904a4801f194f04/render/js.go#L9-L12 | func JavaScript(names ...string) Renderer {
e := New(Options{})
return e.JavaScript(names...)
} | |
https://github.com/dgraph-io/badger/blob/6b796b3ebec3ff006fcb1b425836cd784651e9fd/db.go#L1110-L1130 | func (db *DB) GetSequence(key []byte, bandwidth uint64) (*Sequence, error) {
if db.opt.managedTxns {
panic("Cannot use GetSequence with managedDB=true.")
}
switch {
case len(key) == 0:
return nil, ErrEmptyKey
case bandwidth == 0:
return nil, ErrZeroBandwidth
}
seq := &Sequence{
db: db,
key: key,
next: 0,
leased: 0,
bandwidth: bandwidth,
}
err := seq.updateLease()
return seq, err
} | |
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/etcdserver/api/v2auth/auth.go#L197-L205 | func (s *store) CreateOrUpdateUser(user User) (out User, created bool, err error) {
_, err = s.getUser(user.User, true)
if err == nil {
out, err = s.UpdateUser(user)
return out, false, err
}
u, err := s.CreateUser(user)
return u, true, err
} | |
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/client/keys.go#L311-L313 | func (n *Node) TTLDuration() time.Duration {
return time.Duration(n.TTL) * time.Second
} | |
https://github.com/gobuffalo/buffalo/blob/7f360181f4ccd79dcc9dcea2c904a4801f194f04/binding/binding.go#L78-L91 | func Exec(req *http.Request, value interface{}) error {
if ba, ok := value.(Bindable); ok {
return ba.Bind(req)
}
ct := httpx.ContentType(req)
if ct == "" {
return errors.New("blank content type")
}
if b, ok := binders[ct]; ok {
return b(req, value)
}
return fmt.Errorf("could not find a binder for %s", ct)
} | |
https://github.com/dgraph-io/badger/blob/6b796b3ebec3ff006fcb1b425836cd784651e9fd/dir_unix.go#L45-L80 | func acquireDirectoryLock(dirPath string, pidFileName string, readOnly bool) (*directoryLockGuard, error) {
// Convert to absolute path so that Release still works even if we do an unbalanced
// chdir in the meantime.
absPidFilePath, err := filepath.Abs(filepath.Join(dirPath, pidFileName))
if err != nil {
return nil, errors.Wrap(err, "cannot get absolute path for pid lock file")
}
f, err := os.Open(dirPath)
if err != nil {
return nil, errors.Wrapf(err, "cannot open directory %q", dirPath)
}
opts := unix.LOCK_EX | unix.LOCK_NB
if readOnly {
opts = unix.LOCK_SH | unix.LOCK_NB
}
err = unix.Flock(int(f.Fd()), opts)
if err != nil {
f.Close()
return nil, errors.Wrapf(err,
"Cannot acquire directory lock on %q. Another process is using this Badger database.",
dirPath)
}
if !readOnly {
// Yes, we happily overwrite a pre-existing pid file. We're the
// only read-write badger process using this directory.
err = ioutil.WriteFile(absPidFilePath, []byte(fmt.Sprintf("%d\n", os.Getpid())), 0666)
if err != nil {
f.Close()
return nil, errors.Wrapf(err,
"Cannot write pid file %q", absPidFilePath)
}
}
return &directoryLockGuard{f, absPidFilePath, readOnly}, nil
} | |
https://github.com/kubernetes/test-infra/blob/8125fbda10178887be5dff9e901d6a0a519b67bc/prow/pluginhelp/externalplugins/externalplugins.go#L40-L85 | func ServeExternalPluginHelp(mux *http.ServeMux, log *logrus.Entry, provider ExternalPluginHelpProvider) {
mux.HandleFunc(
"/help",
func(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Cache-Control", "no-cache")
serverError := func(action string, err error) {
log.WithError(err).Errorf("Error %s.", action)
msg := fmt.Sprintf("500 Internal server error %s: %v", action, err)
http.Error(w, msg, http.StatusInternalServerError)
}
if r.Method != http.MethodPost {
log.Errorf("Invalid request method: %v.", r.Method)
http.Error(w, "405 Method not allowed", http.StatusMethodNotAllowed)
return
}
b, err := ioutil.ReadAll(r.Body)
if err != nil {
serverError("reading request body", err)
return
}
var enabledRepos []string
if err := json.Unmarshal(b, &enabledRepos); err != nil {
serverError("unmarshaling request body", err)
return
}
if provider == nil {
serverError("generating plugin help", errors.New("help provider is nil"))
return
}
help, err := provider(enabledRepos)
if err != nil {
serverError("generating plugin help", err)
return
}
b, err = json.Marshal(help)
if err != nil {
serverError("marshaling plugin help", err)
return
}
fmt.Fprint(w, string(b))
},
)
} | |
https://github.com/kubernetes/test-infra/blob/8125fbda10178887be5dff9e901d6a0a519b67bc/velodrome/transform/plugins/comment_counter.go#L35-L37 | func (c *CommentCounterPlugin) AddFlags(cmd *cobra.Command) {
cmd.Flags().StringSliceVar(&c.pattern, "comments", []string{}, "Regexps to match comments")
} | |
https://github.com/gobuffalo/buffalo/blob/7f360181f4ccd79dcc9dcea2c904a4801f194f04/render/string.go#L35-L38 | func String(s string, args ...interface{}) Renderer {
e := New(Options{})
return e.String(s, args...)
} | |
https://github.com/dgraph-io/badger/blob/6b796b3ebec3ff006fcb1b425836cd784651e9fd/skl/skl.go#L489-L495 | func (s *UniIterator) Rewind() {
if !s.reversed {
s.iter.SeekToFirst()
} else {
s.iter.SeekToLast()
}
} | |
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/clientv3/compact_op.go#L37-L41 | func OpCompact(rev int64, opts ...CompactOption) CompactOp {
ret := CompactOp{revision: rev}
ret.applyCompactOpts(opts)
return ret
} | |
https://github.com/kubernetes/test-infra/blob/8125fbda10178887be5dff9e901d6a0a519b67bc/maintenance/aws-janitor/resources/dhcp_options.go#L125-L159 | func defaultLookingDHCPOptions(dhcp *ec2.DhcpOptions, region string) bool {
if len(dhcp.Tags) != 0 {
return false
}
for _, conf := range dhcp.DhcpConfigurations {
switch *conf.Key {
case "domain-name":
var domain string
// TODO(akutz): Should this be updated to regions.Default, or is
// this relying on the default region for EC2 for North America?
// Because EC2's default region changed from us-east-1 to us-east-2
// depending on when the account was created.
if region == "us-east-1" {
domain = "ec2.internal"
} else {
domain = region + ".compute.internal"
}
// TODO(vincepri): Investigate this line, seems it might segfault if conf.Values is 0?
if len(conf.Values) != 1 || *conf.Values[0].Value != domain {
return false
}
case "domain-name-servers":
// TODO(vincepri): Same as above.
if len(conf.Values) != 1 || *conf.Values[0].Value != "AmazonProvidedDNS" {
return false
}
default:
return false
}
}
return true
} | |
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/integration/cluster.go#L427-L470 | func (c *cluster) waitLeader(t testing.TB, membs []*member) int {
possibleLead := make(map[uint64]bool)
var lead uint64
for _, m := range membs {
possibleLead[uint64(m.s.ID())] = true
}
cc := MustNewHTTPClient(t, getMembersURLs(membs), nil)
kapi := client.NewKeysAPI(cc)
// ensure leader is up via linearizable get
for {
ctx, cancel := context.WithTimeout(context.Background(), 10*tickDuration+time.Second)
_, err := kapi.Get(ctx, "0", &client.GetOptions{Quorum: true})
cancel()
if err == nil || strings.Contains(err.Error(), "Key not found") {
break
}
}
for lead == 0 || !possibleLead[lead] {
lead = 0
for _, m := range membs {
select {
case <-m.s.StopNotify():
continue
default:
}
if lead != 0 && lead != m.s.Lead() {
lead = 0
time.Sleep(10 * tickDuration)
break
}
lead = m.s.Lead()
}
}
for i, m := range membs {
if uint64(m.s.ID()) == lead {
return i
}
}
return -1
} | |
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/etcdserver/api/v2discovery/discovery.go#L95-L122 | func newProxyFunc(lg *zap.Logger, proxy string) (func(*http.Request) (*url.URL, error), error) {
if proxy == "" {
return nil, nil
}
// Do a small amount of URL sanitization to help the user
// Derived from net/http.ProxyFromEnvironment
proxyURL, err := url.Parse(proxy)
if err != nil || !strings.HasPrefix(proxyURL.Scheme, "http") {
// proxy was bogus. Try prepending "http://" to it and
// see if that parses correctly. If not, we ignore the
// error and complain about the original one
var err2 error
proxyURL, err2 = url.Parse("http://" + proxy)
if err2 == nil {
err = nil
}
}
if err != nil {
return nil, fmt.Errorf("invalid proxy address %q: %v", proxy, err)
}
if lg != nil {
lg.Info("running proxy with discovery", zap.String("proxy-url", proxyURL.String()))
} else {
plog.Infof("using proxy %q", proxyURL.String())
}
return http.ProxyURL(proxyURL), nil
} | |
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/etcdserver/snapshot_merge.go#L31-L62 | func (s *EtcdServer) createMergedSnapshotMessage(m raftpb.Message, snapt, snapi uint64, confState raftpb.ConfState) snap.Message {
// get a snapshot of v2 store as []byte
clone := s.v2store.Clone()
d, err := clone.SaveNoCopy()
if err != nil {
if lg := s.getLogger(); lg != nil {
lg.Panic("failed to save v2 store data", zap.Error(err))
} else {
plog.Panicf("store save should never fail: %v", err)
}
}
// commit kv to write metadata(for example: consistent index).
s.KV().Commit()
dbsnap := s.be.Snapshot()
// get a snapshot of v3 KV as readCloser
rc := newSnapshotReaderCloser(s.getLogger(), dbsnap)
// put the []byte snapshot of store into raft snapshot and return the merged snapshot with
// KV readCloser snapshot.
snapshot := raftpb.Snapshot{
Metadata: raftpb.SnapshotMetadata{
Index: snapi,
Term: snapt,
ConfState: confState,
},
Data: d,
}
m.Snapshot = snapshot
return *snap.NewMessage(m, rc, dbsnap.Size())
} | |
https://github.com/kubernetes/test-infra/blob/8125fbda10178887be5dff9e901d6a0a519b67bc/velodrome/fetcher/conversion.go#L156-L179 | func NewIssueComment(issueID int, gComment *github.IssueComment, repository string) (*sql.Comment, error) {
if gComment.ID == nil ||
gComment.Body == nil ||
gComment.CreatedAt == nil ||
gComment.UpdatedAt == nil {
return nil, fmt.Errorf("IssueComment is missing mandatory field: %s", gComment)
}
var login string
if gComment.User != nil && gComment.User.Login != nil {
login = *gComment.User.Login
}
return &sql.Comment{
ID: itoa(*gComment.ID),
IssueID: strconv.Itoa(issueID),
Body: *gComment.Body,
User: login,
CommentCreatedAt: *gComment.CreatedAt,
CommentUpdatedAt: *gComment.UpdatedAt,
PullRequest: false,
Repository: strings.ToLower(repository),
}, nil
} | |
https://github.com/kubernetes/test-infra/blob/8125fbda10178887be5dff9e901d6a0a519b67bc/prow/cmd/pipeline/controller.go#L476-L484 | func description(cond duckv1alpha1.Condition, fallback string) string {
switch {
case cond.Message != "":
return cond.Message
case cond.Reason != "":
return cond.Reason
}
return fallback
} | |
https://github.com/dgraph-io/badger/blob/6b796b3ebec3ff006fcb1b425836cd784651e9fd/txn.go#L741-L753 | func (db *DB) Update(fn func(txn *Txn) error) error {
if db.opt.managedTxns {
panic("Update can only be used with managedDB=false.")
}
txn := db.NewTransaction(true)
defer txn.Discard()
if err := fn(txn); err != nil {
return err
}
return txn.Commit()
} | |
https://github.com/dgraph-io/badger/blob/6b796b3ebec3ff006fcb1b425836cd784651e9fd/db.go#L330-L427 | func (db *DB) Close() (err error) {
db.elog.Printf("Closing database")
atomic.StoreInt32(&db.blockWrites, 1)
// Stop value GC first.
db.closers.valueGC.SignalAndWait()
// Stop writes next.
db.closers.writes.SignalAndWait()
// Now close the value log.
if vlogErr := db.vlog.Close(); vlogErr != nil {
err = errors.Wrap(vlogErr, "DB.Close")
}
// Make sure that block writer is done pushing stuff into memtable!
// Otherwise, you will have a race condition: we are trying to flush memtables
// and remove them completely, while the block / memtable writer is still
// trying to push stuff into the memtable. This will also resolve the value
// offset problem: as we push into memtable, we update value offsets there.
if !db.mt.Empty() {
db.elog.Printf("Flushing memtable")
for {
pushedFlushTask := func() bool {
db.Lock()
defer db.Unlock()
y.AssertTrue(db.mt != nil)
select {
case db.flushChan <- flushTask{mt: db.mt, vptr: db.vhead}:
db.imm = append(db.imm, db.mt) // Flusher will attempt to remove this from s.imm.
db.mt = nil // Will segfault if we try writing!
db.elog.Printf("pushed to flush chan\n")
return true
default:
// If we fail to push, we need to unlock and wait for a short while.
// The flushing operation needs to update s.imm. Otherwise, we have a deadlock.
// TODO: Think about how to do this more cleanly, maybe without any locks.
}
return false
}()
if pushedFlushTask {
break
}
time.Sleep(10 * time.Millisecond)
}
}
db.stopCompactions()
// Force Compact L0
// We don't need to care about cstatus since no parallel compaction is running.
if db.opt.CompactL0OnClose {
err := db.lc.doCompact(compactionPriority{level: 0, score: 1.73})
switch err {
case errFillTables:
// This error only means that there might be enough tables to do a compaction. So, we
// should not report it to the end user to avoid confusing them.
case nil:
db.opt.Infof("Force compaction on level 0 done")
default:
db.opt.Warningf("While forcing compaction on level 0: %v", err)
}
}
if lcErr := db.lc.close(); err == nil {
err = errors.Wrap(lcErr, "DB.Close")
}
db.elog.Printf("Waiting for closer")
db.closers.updateSize.SignalAndWait()
db.orc.Stop()
db.elog.Finish()
if db.dirLockGuard != nil {
if guardErr := db.dirLockGuard.release(); err == nil {
err = errors.Wrap(guardErr, "DB.Close")
}
}
if db.valueDirGuard != nil {
if guardErr := db.valueDirGuard.release(); err == nil {
err = errors.Wrap(guardErr, "DB.Close")
}
}
if manifestErr := db.manifest.close(); err == nil {
err = errors.Wrap(manifestErr, "DB.Close")
}
// Fsync directories to ensure that lock file, and any other removed files whose directory
// we haven't specifically fsynced, are guaranteed to have their directory entry removal
// persisted to disk.
if syncErr := syncDir(db.opt.Dir); err == nil {
err = errors.Wrap(syncErr, "DB.Close")
}
if syncErr := syncDir(db.opt.ValueDir); err == nil {
err = errors.Wrap(syncErr, "DB.Close")
}
return err
} | |
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/etcdserver/api/v2stats/leader.go#L124-L128 | func (fs *FollowerStats) Fail() {
fs.Lock()
defer fs.Unlock()
fs.Counts.Fail++
} | |
https://github.com/hashicorp/raft/blob/773bcaa2009bf059c5c06457b9fccd156d5e91e7/inmem_store.go#L107-L115 | func (i *InmemStore) Get(key []byte) ([]byte, error) {
i.l.RLock()
defer i.l.RUnlock()
val := i.kv[string(key)]
if val == nil {
return nil, errors.New("not found")
}
return val, nil
} | |
https://github.com/gobuffalo/buffalo/blob/7f360181f4ccd79dcc9dcea2c904a4801f194f04/worker/simple.go#L75-L100 | func (w Simple) Perform(job Job) error {
w.Logger.Debugf("Performing job %s", job)
if job.Handler == "" {
err := fmt.Errorf("no handler name given for %s", job)
w.Logger.Error(err)
return err
}
w.moot.Lock()
defer w.moot.Unlock()
if h, ok := w.handlers[job.Handler]; ok {
go func() {
err := safe.RunE(func() error {
return h(job.Args)
})
if err != nil {
w.Logger.Error(err)
}
w.Logger.Debugf("Completed job %s", job)
}()
return nil
}
err := fmt.Errorf("no handler mapped for name %s", job.Handler)
w.Logger.Error(err)
return err
} | |
https://github.com/kubernetes/test-infra/blob/8125fbda10178887be5dff9e901d6a0a519b67bc/prow/client/clientset/versioned/typed/prowjobs/v1/fake/fake_prowjob.go#L93-L101 | func (c *FakeProwJobs) Update(prowJob *prowjobsv1.ProwJob) (result *prowjobsv1.ProwJob, err error) {
obj, err := c.Fake.
Invokes(testing.NewUpdateAction(prowjobsResource, c.ns, prowJob), &prowjobsv1.ProwJob{})
if obj == nil {
return nil, err
}
return obj.(*prowjobsv1.ProwJob), err
} | |
https://github.com/kubernetes/test-infra/blob/8125fbda10178887be5dff9e901d6a0a519b67bc/prow/external-plugins/needs-rebase/plugin/plugin.go#L71-L96 | func HandleEvent(log *logrus.Entry, ghc githubClient, pre *github.PullRequestEvent) error {
if pre.Action != github.PullRequestActionOpened && pre.Action != github.PullRequestActionSynchronize && pre.Action != github.PullRequestActionReopened {
return nil
}
// Before checking mergeability wait a few seconds to give github a chance to calculate it.
// This initial delay prevents us from always wasting the first API token.
sleep(time.Second * 5)
org := pre.Repo.Owner.Login
repo := pre.Repo.Name
number := pre.Number
sha := pre.PullRequest.Head.SHA
mergeable, err := ghc.IsMergeable(org, repo, number, sha)
if err != nil {
return err
}
issueLabels, err := ghc.GetIssueLabels(org, repo, number)
if err != nil {
return err
}
hasLabel := github.HasLabel(labels.NeedsRebase, issueLabels)
return takeAction(log, ghc, org, repo, number, pre.PullRequest.User.Login, hasLabel, mergeable)
} | |
https://github.com/kubernetes/test-infra/blob/8125fbda10178887be5dff9e901d6a0a519b67bc/prow/plugins/trigger/trigger.go#L230-L247 | func RunRequested(c Client, pr *github.PullRequest, requestedJobs []config.Presubmit, eventGUID string) error {
baseSHA, err := c.GitHubClient.GetRef(pr.Base.Repo.Owner.Login, pr.Base.Repo.Name, "heads/"+pr.Base.Ref)
if err != nil {
return err
}
var errors []error
for _, job := range requestedJobs {
c.Logger.Infof("Starting %s build.", job.Name)
pj := pjutil.NewPresubmit(*pr, baseSHA, job, eventGUID)
c.Logger.WithFields(pjutil.ProwJobFields(&pj)).Info("Creating a new prowjob.")
if _, err := c.ProwJobClient.Create(&pj); err != nil {
c.Logger.WithError(err).Error("Failed to create prowjob.")
errors = append(errors, err)
}
}
return errorutil.NewAggregate(errors...)
} | |
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/raft/rawnode.go#L143-L150 | func (rn *RawNode) Propose(data []byte) error {
return rn.raft.Step(pb.Message{
Type: pb.MsgProp,
From: rn.raft.id,
Entries: []pb.Entry{
{Data: data},
}})
} | |
https://github.com/gobuffalo/buffalo/blob/7f360181f4ccd79dcc9dcea2c904a4801f194f04/mail/internal/mail/message.go#L309-L311 | func (m *Message) EmbedReader(name string, r io.Reader, settings ...FileSetting) {
m.embedded = m.appendFile(m.embedded, fileFromReader(name, r), settings)
} | |
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/functional/runner/watch_command.go#L41-L54 | func NewWatchCommand() *cobra.Command {
cmd := &cobra.Command{
Use: "watcher",
Short: "Performs watch operation",
Run: runWatcherFunc,
}
cmd.Flags().DurationVar(&runningTime, "running-time", 60, "number of seconds to run")
cmd.Flags().StringVar(&watchPrefix, "prefix", "", "the prefix to append on all keys")
cmd.Flags().IntVar(&noOfPrefixes, "total-prefixes", 10, "total no of prefixes to use")
cmd.Flags().IntVar(&watchPerPrefix, "watch-per-prefix", 10, "number of watchers per prefix")
cmd.Flags().IntVar(&totalKeys, "total-keys", 1000, "total number of keys to watch")
return cmd
} | |
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/pkg/tlsutil/tlsutil.go#L53-L73 | func NewCert(certfile, keyfile string, parseFunc func([]byte, []byte) (tls.Certificate, error)) (*tls.Certificate, error) {
cert, err := ioutil.ReadFile(certfile)
if err != nil {
return nil, err
}
key, err := ioutil.ReadFile(keyfile)
if err != nil {
return nil, err
}
if parseFunc == nil {
parseFunc = tls.X509KeyPair
}
tlsCert, err := parseFunc(cert, key)
if err != nil {
return nil, err
}
return &tlsCert, nil
} | |
https://github.com/dgraph-io/badger/blob/6b796b3ebec3ff006fcb1b425836cd784651e9fd/table/iterator.go#L508-L529 | func (s *ConcatIterator) Next() {
s.cur.Next()
if s.cur.Valid() {
// Nothing to do. Just stay with the current table.
return
}
for { // In case there are empty tables.
if !s.reversed {
s.setIdx(s.idx + 1)
} else {
s.setIdx(s.idx - 1)
}
if s.cur == nil {
// End of list. Valid will become false.
return
}
s.cur.Rewind()
if s.cur.Valid() {
break
}
}
} | |
https://github.com/kubernetes/test-infra/blob/8125fbda10178887be5dff9e901d6a0a519b67bc/prow/github/reporter/reporter.go#L75-L78 | func (c *Client) Report(pj *v1.ProwJob) ([]*v1.ProwJob, error) {
// TODO(krzyzacy): ditch ReportTemplate, and we can drop reference to config.Getter
return []*v1.ProwJob{pj}, report.Report(c.gc, c.config().Plank.ReportTemplate, *pj, c.config().GitHubReporter.JobTypesToReport)
} | |
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/clientv3/retry_interceptor.go#L339-L343 | func withMax(maxRetries uint) retryOption {
return retryOption{applyFunc: func(o *options) {
o.max = maxRetries
}}
} | |
https://github.com/dgraph-io/badger/blob/6b796b3ebec3ff006fcb1b425836cd784651e9fd/table/iterator.go#L485-L505 | func (s *ConcatIterator) Seek(key []byte) {
var idx int
if !s.reversed {
idx = sort.Search(len(s.tables), func(i int) bool {
return y.CompareKeys(s.tables[i].Biggest(), key) >= 0
})
} else {
n := len(s.tables)
idx = n - 1 - sort.Search(n, func(i int) bool {
return y.CompareKeys(s.tables[n-1-i].Smallest(), key) <= 0
})
}
if idx >= len(s.tables) || idx < 0 {
s.setIdx(-1)
return
}
// For reversed=false, we know s.tables[i-1].Biggest() < key. Thus, the
// previous table cannot possibly contain key.
s.setIdx(idx)
s.cur.Seek(key)
} | |
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/raft/progress.go#L213-L227 | func (in *inflights) add(inflight uint64) {
if in.full() {
panic("cannot add into a full inflights")
}
next := in.start + in.count
size := in.size
if next >= size {
next -= size
}
if next >= len(in.buffer) {
in.growBuf()
}
in.buffer[next] = inflight
in.count++
} | |
https://github.com/gobuffalo/buffalo/blob/7f360181f4ccd79dcc9dcea2c904a4801f194f04/worker/simple.go#L50-L58 | func (w *Simple) Register(name string, h Handler) error {
w.moot.Lock()
defer w.moot.Unlock()
if _, ok := w.handlers[name]; ok {
return fmt.Errorf("handler already mapped for name %s", name)
}
w.handlers[name] = h
return nil
} | |
https://github.com/kubernetes/test-infra/blob/8125fbda10178887be5dff9e901d6a0a519b67bc/prow/kube/client.go#L572-L578 | func (c *Client) GetContainerLog(pod, container string) ([]byte, error) {
c.log("GetContainerLog", pod)
return c.requestRetry(&request{
path: fmt.Sprintf("/api/v1/namespaces/%s/pods/%s/log", c.namespace, pod),
query: map[string]string{"container": container},
})
} | |
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/etcdserver/api/v3rpc/header.go#L39-L49 | func (h *header) fill(rh *pb.ResponseHeader) {
if rh == nil {
plog.Panic("unexpected nil resp.Header")
}
rh.ClusterId = uint64(h.clusterID)
rh.MemberId = uint64(h.memberID)
rh.RaftTerm = h.sg.Term()
if rh.Revision == 0 {
rh.Revision = h.rev()
}
} | |
https://github.com/kubernetes/test-infra/blob/8125fbda10178887be5dff9e901d6a0a519b67bc/velodrome/transform/plugins/comment_counter.go#L40-L49 | func (c *CommentCounterPlugin) CheckFlags() error {
for _, pattern := range c.pattern {
matcher, err := regexp.Compile(pattern)
if err != nil {
return err
}
c.matcher = append(c.matcher, matcher)
}
return nil
} | |
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/pkg/netutil/isolate_linux.go#L45-L67 | func SetLatency(ms, rv int) error {
ifces, err := GetDefaultInterfaces()
if err != nil {
return err
}
if rv > ms {
rv = 1
}
for ifce := range ifces {
cmdStr := fmt.Sprintf("sudo tc qdisc add dev %s root netem delay %dms %dms distribution normal", ifce, ms, rv)
_, err = exec.Command("/bin/sh", "-c", cmdStr).Output()
if err != nil {
// the rule has already been added. Overwrite it.
cmdStr = fmt.Sprintf("sudo tc qdisc change dev %s root netem delay %dms %dms distribution normal", ifce, ms, rv)
_, err = exec.Command("/bin/sh", "-c", cmdStr).Output()
if err != nil {
return err
}
}
}
return nil
} | |
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/etcdserver/api/v2http/client.go#L556-L578 | func writeKeyEvent(w http.ResponseWriter, resp etcdserver.Response, noValueOnSuccess bool) error {
ev := resp.Event
if ev == nil {
return errors.New("cannot write empty Event")
}
w.Header().Set("Content-Type", "application/json")
w.Header().Set("X-Etcd-Index", fmt.Sprint(ev.EtcdIndex))
w.Header().Set("X-Raft-Index", fmt.Sprint(resp.Index))
w.Header().Set("X-Raft-Term", fmt.Sprint(resp.Term))
if ev.IsCreated() {
w.WriteHeader(http.StatusCreated)
}
ev = trimEventPrefix(ev, etcdserver.StoreKeysPrefix)
if noValueOnSuccess &&
(ev.Action == v2store.Set || ev.Action == v2store.CompareAndSwap ||
ev.Action == v2store.Create || ev.Action == v2store.Update) {
ev.Node = nil
ev.PrevNode = nil
}
return json.NewEncoder(w).Encode(ev)
} | |
https://github.com/dgraph-io/badger/blob/6b796b3ebec3ff006fcb1b425836cd784651e9fd/logger.go#L50-L55 | func (opt *Options) Warningf(format string, v ...interface{}) {
if opt.Logger == nil {
return
}
opt.Logger.Warningf(format, v...)
} | |
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/etcdserver/api/v2store/store.go#L162-L188 | func (s *store) Create(nodePath string, dir bool, value string, unique bool, expireOpts TTLOptionSet) (*Event, error) {
var err *v2error.Error
s.worldLock.Lock()
defer s.worldLock.Unlock()
defer func() {
if err == nil {
s.Stats.Inc(CreateSuccess)
reportWriteSuccess(Create)
return
}
s.Stats.Inc(CreateFail)
reportWriteFailure(Create)
}()
e, err := s.internalCreate(nodePath, dir, value, unique, false, expireOpts.ExpireTime, Create)
if err != nil {
return nil, err
}
e.EtcdIndex = s.CurrentIndex
s.WatcherHub.notify(e)
return e, nil
} | |
https://github.com/gobuffalo/buffalo/blob/7f360181f4ccd79dcc9dcea2c904a4801f194f04/mail/internal/mail/message.go#L314-L316 | func (m *Message) Embed(filename string, settings ...FileSetting) {
m.embedded = m.appendFile(m.embedded, fileFromFilename(filename), settings)
} | |
https://github.com/hashicorp/raft/blob/773bcaa2009bf059c5c06457b9fccd156d5e91e7/fuzzy/cluster.go#L133-L140 | func containsNode(nodes []*raftNode, n *raftNode) bool {
for _, rn := range nodes {
if rn == n {
return true
}
}
return false
} | |
https://github.com/kubernetes/test-infra/blob/8125fbda10178887be5dff9e901d6a0a519b67bc/prow/github/fakegithub/fakegithub.go#L445-L447 | func (f *FakeClient) GetOrgProjects(org string) ([]github.Project, error) {
return f.RepoProjects[fmt.Sprintf("%s/*", org)], nil
} | |
https://github.com/gobuffalo/buffalo/blob/7f360181f4ccd79dcc9dcea2c904a4801f194f04/resource.go#L51-L53 | func (v BaseResource) List(c Context) error {
return c.Error(404, errors.New("resource not implemented"))
} | |
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/mvcc/watcher_group.go#L270-L292 | func (wg *watcherGroup) watcherSetByKey(key string) watcherSet {
wkeys := wg.keyWatchers[key]
wranges := wg.ranges.Stab(adt.NewStringAffinePoint(key))
// zero-copy cases
switch {
case len(wranges) == 0:
// no need to merge ranges or copy; reuse single-key set
return wkeys
case len(wranges) == 0 && len(wkeys) == 0:
return nil
case len(wranges) == 1 && len(wkeys) == 0:
return wranges[0].Val.(watcherSet)
}
// copy case
ret := make(watcherSet)
ret.union(wg.keyWatchers[key])
for _, item := range wranges {
ret.union(item.Val.(watcherSet))
}
return ret
} | |
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/pkg/netutil/routes_linux.go#L189-L224 | func GetDefaultInterfaces() (map[string]uint8, error) {
	// GetDefaultInterfaces returns the interfaces that carry a default route,
	// keyed by interface name. The value encodes the address family:
	// 2 (AF_INET), 10 (AF_INET6), or 12 for dual-stack (the sum of both,
	// accumulated via the += below).
	interfaces := make(map[string]uint8)
	rmsgs, rerr := getDefaultRoutes()
	if rerr != nil {
		return interfaces, rerr
	}
	for family, rmsg := range rmsgs {
		_, oif, err := parsePREFSRC(rmsg)
		if err != nil {
			return interfaces, err
		}
		ifmsg, ierr := getIfaceLink(oif)
		if ierr != nil {
			return interfaces, ierr
		}
		attrs, aerr := syscall.ParseNetlinkRouteAttr(ifmsg)
		if aerr != nil {
			return interfaces, aerr
		}
		for _, attr := range attrs {
			if attr.Attr.Type == syscall.IFLA_IFNAME {
				// key is an interface name
				// possible values: 2 - AF_INET, 10 - AF_INET6, 12 - dualstack
				// attr.Value appears to be NUL-terminated; the slice drops the
				// trailing byte — TODO confirm against netlink attr encoding.
				interfaces[string(attr.Value[:len(attr.Value)-1])] += family
			}
		}
	}
	if len(interfaces) > 0 {
		return interfaces, nil
	}
	return interfaces, errNoDefaultInterface
} | |
https://github.com/hashicorp/raft/blob/773bcaa2009bf059c5c06457b9fccd156d5e91e7/net_transport.go#L146-L169 | func NewNetworkTransportWithConfig(
	config *NetworkTransportConfig,
) *NetworkTransport {
	// NewNetworkTransportWithConfig builds a NetworkTransport from config,
	// defaulting the logger to stderr when none is supplied, and starts the
	// background accept loop before returning.
	if config.Logger == nil {
		config.Logger = log.New(os.Stderr, "", log.LstdFlags)
	}
	trans := &NetworkTransport{
		connPool:              make(map[ServerAddress][]*netConn),
		consumeCh:             make(chan RPC),
		logger:                config.Logger,
		maxPool:               config.MaxPool,
		shutdownCh:            make(chan struct{}),
		stream:                config.Stream,
		timeout:               config.Timeout,
		TimeoutScale:          DefaultTimeoutScale,
		serverAddressProvider: config.ServerAddressProvider,
	}
	// Create the connection context and then start our listener.
	trans.setupStreamContext()
	go trans.listen()
	return trans
} | |
https://github.com/kubernetes/test-infra/blob/8125fbda10178887be5dff9e901d6a0a519b67bc/prow/cmd/build/controller.go#L607-L614 | func injectEnvironment(b *buildv1alpha1.Build, rawEnv map[string]string) {
	// injectEnvironment propagates rawEnv into every build step and, when the
	// build uses a template, into the template's arguments as well.
	for i := range b.Spec.Steps { // Inject environment variables to each step
		defaultEnv(&b.Spec.Steps[i], rawEnv)
	}
	if b.Spec.Template != nil { // Also add it as template arguments
		defaultArguments(b.Spec.Template, rawEnv)
	}
} | |
https://github.com/kubernetes/test-infra/blob/8125fbda10178887be5dff9e901d6a0a519b67bc/prow/githuboauth/githuboauth.go#L93-L115 | func (ga *Agent) HandleLogin(client OAuthClient) http.HandlerFunc {
	// HandleLogin starts the OAuth flow: it stores an XSRF state token in a
	// short-lived session cookie and redirects the user to the provider's
	// authorization URL carrying the same state.
	return func(w http.ResponseWriter, r *http.Request) {
		stateToken := xsrftoken.Generate(ga.gc.ClientSecret, "", "")
		state := hex.EncodeToString([]byte(stateToken))
		oauthSession, err := ga.gc.CookieStore.New(r, oauthSessionCookie)
		// Check the error BEFORE touching oauthSession; the original code set
		// Options fields first, risking a nil dereference on failure.
		if err != nil {
			ga.serverError(w, "Creating new OAuth session", err)
			return
		}
		oauthSession.Options.Secure = true
		oauthSession.Options.HttpOnly = true
		// The state cookie only needs to survive the round trip to GitHub.
		oauthSession.Options.MaxAge = 10 * 60
		oauthSession.Values[stateKey] = state
		if err := oauthSession.Save(r, w); err != nil {
			ga.serverError(w, "Save oauth session", err)
			return
		}
		redirectURL := client.AuthCodeURL(state, oauth2.ApprovalForce, oauth2.AccessTypeOnline)
		http.Redirect(w, r, redirectURL, http.StatusFound)
	}
} | |
https://github.com/kubernetes/test-infra/blob/8125fbda10178887be5dff9e901d6a0a519b67bc/prow/plugins/updateconfig/updateconfig.go#L90-L164 | func Update(fg FileGetter, kc corev1.ConfigMapInterface, name, namespace string, updates []ConfigMapUpdate, logger *logrus.Entry) error {
	// Update applies the given key updates to the named ConfigMap, creating it
	// if it does not exist. Each update either deletes a key (empty Filename)
	// or populates it from the file's content, gzipped when requested; values
	// land in Data when valid UTF-8 and in BinaryData otherwise.
	cm, getErr := kc.Get(name, metav1.GetOptions{})
	isNotFound := errors.IsNotFound(getErr)
	if getErr != nil && !isNotFound {
		return fmt.Errorf("failed to fetch current state of configmap: %v", getErr)
	}
	// Start from an empty ConfigMap when none exists yet.
	if cm == nil || isNotFound {
		cm = &coreapi.ConfigMap{
			ObjectMeta: metav1.ObjectMeta{
				Name:      name,
				Namespace: namespace,
			},
		}
	}
	if cm.Data == nil {
		cm.Data = map[string]string{}
	}
	if cm.BinaryData == nil {
		cm.BinaryData = map[string][]byte{}
	}
	for _, upd := range updates {
		if upd.Filename == "" {
			logger.WithField("key", upd.Key).Debug("Deleting key.")
			// Delete from both maps; only one of them can hold the key.
			delete(cm.Data, upd.Key)
			delete(cm.BinaryData, upd.Key)
			continue
		}
		content, err := fg.GetFile(upd.Filename)
		if err != nil {
			return fmt.Errorf("get file err: %v", err)
		}
		logger.WithFields(logrus.Fields{"key": upd.Key, "filename": upd.Filename}).Debug("Populating key.")
		value := content
		if upd.GZIP {
			buff := bytes.NewBuffer([]byte{})
			// TODO: this error is wildly unlikely for anything that
			// would actually fit in a configmap, we could just as well return
			// the error instead of falling back to the raw content
			z := gzip.NewWriter(buff)
			if _, err := z.Write(content); err != nil {
				logger.WithError(err).Error("failed to gzip content, falling back to raw")
			} else {
				if err := z.Close(); err != nil {
					logger.WithError(err).Error("failed to flush gzipped content (!?), falling back to raw")
				} else {
					value = buff.Bytes()
				}
			}
		}
		// A key lives in exactly one of Data/BinaryData depending on whether
		// the (possibly gzipped) value is valid UTF-8; clear the other side.
		if utf8.ValidString(string(value)) {
			delete(cm.BinaryData, upd.Key)
			cm.Data[upd.Key] = string(value)
		} else {
			delete(cm.Data, upd.Key)
			cm.BinaryData[upd.Key] = value
		}
	}
	// Create when the Get reported not-found; otherwise update in place.
	var updateErr error
	var verb string
	if getErr != nil && isNotFound {
		verb = "create"
		_, updateErr = kc.Create(cm)
	} else {
		verb = "update"
		_, updateErr = kc.Update(cm)
	}
	if updateErr != nil {
		return fmt.Errorf("%s config map err: %v", verb, updateErr)
	}
	return nil
} | |
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/pkg/transport/listener.go#L333-L357 | func (info TLSInfo) ServerConfig() (*tls.Config, error) {
	// ServerConfig builds a *tls.Config for serving. Client certificates are
	// required and verified whenever a trusted CA is configured or
	// ClientCertAuth is explicitly enabled; otherwise none are requested.
	cfg, err := info.baseConfig()
	if err != nil {
		return nil, err
	}
	cfg.ClientAuth = tls.NoClientCert
	if info.TrustedCAFile != "" || info.ClientCertAuth {
		cfg.ClientAuth = tls.RequireAndVerifyClientCert
	}
	cs := info.cafiles()
	if len(cs) > 0 {
		cp, err := tlsutil.NewCertPool(cs)
		if err != nil {
			return nil, err
		}
		cfg.ClientCAs = cp
	}
	// "h2" NextProtos is necessary for enabling HTTP2 for go's HTTP server
	cfg.NextProtos = []string{"h2"}
	return cfg, nil
} | |
https://github.com/gobuffalo/buffalo/blob/7f360181f4ccd79dcc9dcea2c904a4801f194f04/cookies.go#L15-L22 | func (c *Cookies) Get(name string) (string, error) {
	// Get returns the value of the named cookie from the underlying request,
	// or the request's lookup error when the cookie is absent.
	cookie, err := c.req.Cookie(name)
	if err != nil {
		return "", err
	}
	return cookie.Value, nil
} | |
https://github.com/kubernetes/test-infra/blob/8125fbda10178887be5dff9e901d6a0a519b67bc/prow/external-plugins/cherrypicker/server.go#L484-L499 | func (s *Server) getPatch(org, repo, targetBranch string, num int) (string, error) {
	// getPatch downloads the patch for a pull request and writes it to a
	// deterministic path under /tmp, returning that path.
	patch, err := s.ghc.GetPullRequestPatch(org, repo, num)
	if err != nil {
		return "", err
	}
	localPath := fmt.Sprintf("/tmp/%s_%s_%d_%s.patch", org, repo, num, normalize(targetBranch))
	out, err := os.Create(localPath)
	if err != nil {
		return "", err
	}
	// patch is already in memory; write it directly instead of streaming it
	// through io.Copy(bytes.NewBuffer(...)).
	if _, err := out.Write(patch); err != nil {
		out.Close()
		return "", err
	}
	// Close errors matter: a failed flush would mean a truncated patch file
	// silently reported as success (the original deferred Close dropped this).
	if err := out.Close(); err != nil {
		return "", err
	}
	return localPath, nil
} | |
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/clientv3/utils.go#L31-L34 | func jitterUp(duration time.Duration, jitter float64) time.Duration {
	// Scale duration by a random factor drawn uniformly from
	// [1-jitter, 1+jitter).
	offset := (rand.Float64()*2 - 1) * jitter
	return time.Duration(float64(duration) * (1 + offset))
} | |
https://github.com/kubernetes/test-infra/blob/8125fbda10178887be5dff9e901d6a0a519b67bc/greenhouse/diskutil/diskutil.go#L29-L39 | func GetDiskUsage(path string) (percentBlocksFree float64, bytesFree, bytesUsed uint64, err error) {
	// GetDiskUsage reports filesystem statistics for the volume holding path:
	// the percentage of free blocks plus free and used byte counts.
	var fs syscall.Statfs_t
	if err = syscall.Statfs(path, &fs); err != nil {
		return 0, 0, 0, err
	}
	blockSize := uint64(fs.Bsize)
	bytesFree = fs.Bfree * blockSize
	bytesUsed = (fs.Blocks - fs.Bfree) * blockSize
	percentBlocksFree = 100 * float64(fs.Bfree) / float64(fs.Blocks)
	return percentBlocksFree, bytesFree, bytesUsed, nil
} | |
https://github.com/kubernetes/test-infra/blob/8125fbda10178887be5dff9e901d6a0a519b67bc/prow/crier/controller.go#L190-L285 | func (c *Controller) processNextItem() bool {
	// processNextItem pops one ProwJob key off the work queue, reports its
	// state via c.reporter if needed, and records the reported state back on
	// the job. It returns false only when the queue is shutting down.
	key, quit := c.queue.Get()
	if quit {
		return false
	}
	defer c.queue.Done(key)
	// assert the string out of the key (format `namespace/name`)
	keyRaw := key.(string)
	namespace, name, err := cache.SplitMetaNamespaceKey(keyRaw)
	if err != nil {
		logrus.WithError(err).WithField("prowjob", keyRaw).Error("invalid resource key")
		c.queue.Forget(key)
		return true
	}
	// take the string key and get the object out of the indexer
	//
	// item will contain the complex object for the resource and
	// exists is a bool that'll indicate whether or not the
	// resource was created (true) or deleted (false)
	//
	// if there is an error in getting the key from the index
	// then we want to retry this particular queue key a certain
	// number of times (5 here) before we forget the queue key
	// and throw an error
	pj, err := c.informer.Lister().ProwJobs(namespace).Get(name)
	if err != nil {
		if errors.IsNotFound(err) {
			logrus.WithField("prowjob", keyRaw).Info("object no longer exist")
			c.queue.Forget(key)
			return true
		}
		return c.retry(key, err)
	}
	// not belong to the current reporter
	if !c.reporter.ShouldReport(pj) {
		c.queue.Forget(key)
		return true
	}
	// we set omitempty on PrevReportStates, so here we need to init it if is nil
	if pj.Status.PrevReportStates == nil {
		pj.Status.PrevReportStates = map[string]v1.ProwJobState{}
	}
	// already reported current state
	if pj.Status.PrevReportStates[c.reporter.GetName()] == pj.Status.State {
		logrus.WithField("prowjob", keyRaw).Info("Already reported")
		c.queue.Forget(key)
		return true
	}
	logrus.WithField("prowjob", keyRaw).Infof("Will report state : %s", pj.Status.State)
	pjs, err := c.reporter.Report(pj)
	if err != nil {
		fields := logrus.Fields{
			"prowjob":   keyRaw,
			"jobName":   pj.Name,
			"jobStatus": pj.Status,
		}
		logrus.WithError(err).WithFields(fields).Error("failed to report job")
		// Reporting failed: requeue with backoff rather than dropping the key.
		return c.retry(key, err)
	}
	logrus.WithField("prowjob", keyRaw).Info("Updated job, now will update pj")
	for _, pjob := range pjs {
		if err := c.updateReportState(pjob); err != nil {
			logrus.WithError(err).WithField("prowjob", keyRaw).Error("failed to update report state")
			// theoretically patch should not have this issue, but in case:
			// it might be out-dated, try to re-fetch pj and try again
			updatedPJ, err := c.pjclientset.Prow().ProwJobs(pjob.Namespace).Get(pjob.Name, metav1.GetOptions{})
			if err != nil {
				logrus.WithError(err).WithField("prowjob", keyRaw).Error("failed to get prowjob from apiserver")
				c.queue.Forget(key)
				return true
			}
			if err := c.updateReportState(updatedPJ); err != nil {
				// shrug
				logrus.WithError(err).WithField("prowjob", keyRaw).Error("failed to update report state again, give up")
				c.queue.Forget(key)
				return true
			}
		}
		logrus.WithField("prowjob", keyRaw).Infof("Hunky Dory!, pj : %v, state : %s", pjob.Spec.Job, pjob.Status.State)
	}
	c.queue.Forget(key)
	return true
} | |
https://github.com/hashicorp/raft/blob/773bcaa2009bf059c5c06457b9fccd156d5e91e7/api.go#L938-L996 | func (r *Raft) Stats() map[string]string {
	// Stats returns a string/string map of node diagnostics: current state,
	// term, log/snapshot indices, protocol version ranges, the latest cluster
	// configuration, peer count, and time since last leader contact.
	toString := func(v uint64) string {
		return strconv.FormatUint(v, 10)
	}
	lastLogIndex, lastLogTerm := r.getLastLog()
	lastSnapIndex, lastSnapTerm := r.getLastSnapshot()
	s := map[string]string{
		"state":                r.getState().String(),
		"term":                 toString(r.getCurrentTerm()),
		"last_log_index":       toString(lastLogIndex),
		"last_log_term":        toString(lastLogTerm),
		"commit_index":         toString(r.getCommitIndex()),
		"applied_index":        toString(r.getLastApplied()),
		"fsm_pending":          toString(uint64(len(r.fsmMutateCh))),
		"last_snapshot_index":  toString(lastSnapIndex),
		"last_snapshot_term":   toString(lastSnapTerm),
		"protocol_version":     toString(uint64(r.protocolVersion)),
		"protocol_version_min": toString(uint64(ProtocolVersionMin)),
		"protocol_version_max": toString(uint64(ProtocolVersionMax)),
		"snapshot_version_min": toString(uint64(SnapshotVersionMin)),
		"snapshot_version_max": toString(uint64(SnapshotVersionMax)),
	}
	future := r.GetConfiguration()
	if err := future.Error(); err != nil {
		r.logger.Warn(fmt.Sprintf("could not get configuration for Stats: %v", err))
	} else {
		configuration := future.Configuration()
		s["latest_configuration_index"] = toString(future.Index())
		s["latest_configuration"] = fmt.Sprintf("%+v", configuration.Servers)
		// This is a legacy metric that we've seen people use in the wild.
		// num_peers counts voting servers other than this node; it is zeroed
		// when this node itself is not a voter.
		hasUs := false
		numPeers := 0
		for _, server := range configuration.Servers {
			if server.Suffrage == Voter {
				if server.ID == r.localID {
					hasUs = true
				} else {
					numPeers++
				}
			}
		}
		if !hasUs {
			numPeers = 0
		}
		s["num_peers"] = toString(uint64(numPeers))
	}
	// Leaders report "0" since they are, by definition, in contact.
	last := r.LastContact()
	if r.getState() == Leader {
		s["last_contact"] = "0"
	} else if last.IsZero() {
		s["last_contact"] = "never"
	} else {
		s["last_contact"] = fmt.Sprintf("%v", time.Now().Sub(last))
	}
	return s
} | |
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/etcdserver/api/snap/db.go#L36-L79 | func (s *Snapshotter) SaveDBFrom(r io.Reader, id uint64) (int64, error) {
	// SaveDBFrom streams a database snapshot from r into a temp file, fsyncs
	// it, and atomically renames it to the path for snapshot id. Returns the
	// number of bytes written.
	start := time.Now()
	f, err := ioutil.TempFile(s.dir, "tmp")
	if err != nil {
		return 0, err
	}
	var n int64
	n, err = io.Copy(f, r)
	if err == nil {
		// fsync before rename so the data is durable at its final path.
		fsyncStart := time.Now()
		err = fileutil.Fsync(f)
		snapDBFsyncSec.Observe(time.Since(fsyncStart).Seconds())
	}
	f.Close()
	if err != nil {
		os.Remove(f.Name())
		return n, err
	}
	fn := s.dbFilePath(id)
	if fileutil.Exist(fn) {
		// A snapshot with this id is already on disk; keep it and discard ours.
		os.Remove(f.Name())
		return n, nil
	}
	err = os.Rename(f.Name(), fn)
	if err != nil {
		os.Remove(f.Name())
		return n, err
	}
	if s.lg != nil {
		s.lg.Info(
			"saved database snapshot to disk",
			zap.String("path", fn),
			zap.Int64("bytes", n),
			zap.String("size", humanize.Bytes(uint64(n))),
		)
	} else {
		plog.Infof("saved database snapshot to disk [total bytes: %d]", n)
	}
	snapDBSaveSec.Observe(time.Since(start).Seconds())
	return n, nil
} | |
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/mvcc/kvstore.go#L569-L582 | func appendMarkTombstone(lg *zap.Logger, b []byte) []byte {
	// appendMarkTombstone appends the tombstone marker byte to a revision key.
	// The input must be exactly revBytesLen bytes; anything else indicates a
	// programmer error, so we panic rather than corrupt the keyspace.
	if len(b) != revBytesLen {
		if lg != nil {
			lg.Panic(
				"cannot append tombstone mark to non-normal revision bytes",
				zap.Int("expected-revision-bytes-size", revBytesLen),
				zap.Int("given-revision-bytes-size", len(b)),
			)
		} else {
			plog.Panicf("cannot append mark to non normal revision bytes")
		}
	}
	return append(b, markTombstone)
} | |
https://github.com/dgraph-io/badger/blob/6b796b3ebec3ff006fcb1b425836cd784651e9fd/util.go#L117-L120 | func (s *levelsController) reserveFileID() uint64 {
	// Atomically claim the next file ID; the value returned is the
	// pre-increment counter, so concurrent callers get distinct IDs.
	return atomic.AddUint64(&s.nextFileID, 1) - 1
} | |
https://github.com/kubernetes/test-infra/blob/8125fbda10178887be5dff9e901d6a0a519b67bc/prow/pluginhelp/pluginhelp.go#L71-L73 | func (pluginHelp *PluginHelp) AddCommand(command Command) {
	// AddCommand appends a command's help entry to this plugin's help.
	pluginHelp.Commands = append(pluginHelp.Commands, command)
} | |
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/pkg/transport/keepalive_listener.go#L70-L83 | func (l *tlsKeepaliveListener) Accept() (c net.Conn, err error) {
	// Accept accepts the next raw connection, enables TCP keepalives on it,
	// and wraps it as a server-side TLS connection.
	c, err = l.Listener.Accept()
	if err != nil {
		return
	}
	// The type assertion assumes the inner listener yields keepalive-capable
	// conns (e.g. *net.TCPConn); it would panic otherwise — confirmed by the
	// listener's construction elsewhere in this package.
	kac := c.(keepAliveConn)
	// detection time: tcp_keepalive_time + tcp_keepalive_probes + tcp_keepalive_intvl
	// default on linux: 30 + 8 * 30
	// default on osx: 30 + 8 * 75
	kac.SetKeepAlive(true)
	kac.SetKeepAlivePeriod(30 * time.Second)
	c = tls.Server(c, l.config)
	return c, nil
} | |
https://github.com/dgraph-io/badger/blob/6b796b3ebec3ff006fcb1b425836cd784651e9fd/util.go#L66-L93 | func (s *levelHandler) validate() error {
	// validate checks the level's invariants: tables are sorted with strictly
	// separated key ranges, and each table's smallest key does not exceed its
	// biggest. Level 0 is exempt (its tables may overlap).
	if s.level == 0 {
		return nil
	}
	s.RLock()
	defer s.RUnlock()
	numTables := len(s.tables)
	for j := 1; j < numTables; j++ {
		// Defensive re-check; numTables was captured before the loop.
		if j >= len(s.tables) {
			return errors.Errorf("Level %d, j=%d numTables=%d", s.level, j, numTables)
		}
		// Inter-table invariant: previous table ends before this one begins.
		if y.CompareKeys(s.tables[j-1].Biggest(), s.tables[j].Smallest()) >= 0 {
			return errors.Errorf(
				"Inter: Biggest(j-1) \n%s\n vs Smallest(j): \n%s\n: level=%d j=%d numTables=%d",
				hex.Dump(s.tables[j-1].Biggest()), hex.Dump(s.tables[j].Smallest()),
				s.level, j, numTables)
		}
		// Intra-table invariant: a table's own key range is well-formed.
		if y.CompareKeys(s.tables[j].Smallest(), s.tables[j].Biggest()) > 0 {
			return errors.Errorf(
				"Intra: %q vs %q: level=%d j=%d numTables=%d",
				s.tables[j].Smallest(), s.tables[j].Biggest(), s.level, j, numTables)
		}
	}
	return nil
} | |
https://github.com/kubernetes/test-infra/blob/8125fbda10178887be5dff9e901d6a0a519b67bc/ghproxy/ghproxy.go#L158-L172 | func diskMonitor(interval time.Duration, diskRoot string) {
	// diskMonitor periodically samples disk usage under diskRoot and exports
	// it via the diskFree/diskUsed/diskTotal gauges (in GB). It never returns,
	// so the ticker is intentionally never stopped.
	logger := logrus.WithField("sync-loop", "disk-monitor")
	ticker := time.NewTicker(interval)
	// This loop form runs the body immediately, then once per tick.
	for ; true; <-ticker.C {
		logger.Info("tick")
		_, bytesFree, bytesUsed, err := diskutil.GetDiskUsage(diskRoot)
		if err != nil {
			logger.WithError(err).Error("Failed to get disk metrics")
		} else {
			diskFree.Set(float64(bytesFree) / 1e9)
			diskUsed.Set(float64(bytesUsed) / 1e9)
			diskTotal.Set(float64(bytesFree+bytesUsed) / 1e9)
		}
	}
} | |
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/etcdctl/ctlv3/command/check.go#L286-L411 | func newCheckDatascaleCommand(cmd *cobra.Command, args []string) {
	// newCheckDatascaleCommand runs the "check datascale" workload: it writes
	// cfg.limit key-value pairs with cfg.clients concurrent clients and then
	// reports the approximate server memory consumed, computed from the
	// process_resident_memory_bytes metric sampled before and after the load.
	var checkDatascaleAlias = map[string]string{
		"s": "s", "small": "s",
		"m": "m", "medium": "m",
		"l": "l", "large": "l",
		"xl": "xl", "xLarge": "xl",
	}
	model, ok := checkDatascaleAlias[checkDatascaleLoad]
	if !ok {
		ExitWithError(ExitBadFeature, fmt.Errorf("unknown load option %v", checkDatascaleLoad))
	}
	cfg := checkDatascaleCfgMap[model]
	requests := make(chan v3.Op, cfg.clients)
	cc := clientConfigFromCmd(cmd)
	clients := make([]*v3.Client, cfg.clients)
	for i := 0; i < cfg.clients; i++ {
		clients[i] = cc.mustClient()
	}
	// get endpoints
	eps, errEndpoints := endpointsFromCmd(cmd)
	if errEndpoints != nil {
		ExitWithError(ExitError, errEndpoints)
	}
	// Refuse to run against a prefix that already holds data.
	ctx, cancel := context.WithCancel(context.Background())
	resp, err := clients[0].Get(ctx, checkDatascalePrefix, v3.WithPrefix(), v3.WithLimit(1))
	cancel()
	if err != nil {
		ExitWithError(ExitError, err)
	}
	if len(resp.Kvs) > 0 {
		ExitWithError(ExitInvalidInput, fmt.Errorf("prefix %q has keys. Delete with etcdctl del --prefix %s first", checkDatascalePrefix, checkDatascalePrefix))
	}
	ksize, vsize := 512, 512
	k, v := make([]byte, ksize), string(make([]byte, vsize))
	r := report.NewReport("%4.4f")
	var wg sync.WaitGroup
	wg.Add(len(clients))
	// get the process_resident_memory_bytes and process_virtual_memory_bytes before the put operations
	bytesBefore := endpointMemoryMetrics(eps[0])
	if bytesBefore == 0 {
		fmt.Println("FAIL: Could not read process_resident_memory_bytes before the put operations.")
		os.Exit(ExitError)
	}
	fmt.Printf("Start data scale check for work load [%v key-value pairs, %v bytes per key-value, %v concurrent clients].\n", cfg.limit, cfg.kvSize, cfg.clients)
	bar := pb.New(cfg.limit)
	bar.Format("Bom !")
	bar.Start()
	// One worker per client drains the shared request channel.
	for i := range clients {
		go func(c *v3.Client) {
			defer wg.Done()
			for op := range requests {
				st := time.Now()
				_, derr := c.Do(context.Background(), op)
				r.Results() <- report.Result{Err: derr, Start: st, End: time.Now()}
				bar.Increment()
			}
		}(clients[i])
	}
	// Produce randomly keyed puts, then close the channel to drain workers.
	go func() {
		for i := 0; i < cfg.limit; i++ {
			binary.PutVarint(k, rand.Int63n(math.MaxInt64))
			requests <- v3.OpPut(checkDatascalePrefix+string(k), v)
		}
		close(requests)
	}()
	sc := r.Stats()
	wg.Wait()
	close(r.Results())
	bar.Finish()
	s := <-sc
	// get the process_resident_memory_bytes after the put operations
	bytesAfter := endpointMemoryMetrics(eps[0])
	if bytesAfter == 0 {
		fmt.Println("FAIL: Could not read process_resident_memory_bytes after the put operations.")
		os.Exit(ExitError)
	}
	// delete the created kv pairs
	ctx, cancel = context.WithCancel(context.Background())
	dresp, derr := clients[0].Delete(ctx, checkDatascalePrefix, v3.WithPrefix())
	defer cancel()
	if derr != nil {
		ExitWithError(ExitError, derr)
	}
	if autoCompact {
		compact(clients[0], dresp.Header.Revision)
	}
	if autoDefrag {
		for _, ep := range clients[0].Endpoints() {
			defrag(clients[0], ep)
		}
	}
	// NOTE: a second, unreachable `bytesAfter == 0` re-check used to live here;
	// bytesAfter was already validated right after sampling, so it is removed.
	bytesUsed := bytesAfter - bytesBefore
	mbUsed := bytesUsed / (1024 * 1024)
	if len(s.ErrorDist) != 0 {
		fmt.Println("FAIL: too many errors")
		for k, v := range s.ErrorDist {
			fmt.Printf("FAIL: ERROR(%v) -> %d\n", k, v)
		}
		os.Exit(ExitError)
	} else {
		fmt.Printf("PASS: Approximate system memory used : %v MB.\n", strconv.FormatFloat(mbUsed, 'f', 2, 64))
	}
} | |
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/proxy/grpcproxy/cache/store.go#L44-L51 | func keyFunc(req *pb.RangeRequest) string {
	// The serialized request uniquely identifies it, so the marshaled bytes
	// serve as the cache key.
	// TODO: use marshalTo to reduce allocation
	data, merr := req.Marshal()
	if merr != nil {
		panic(merr)
	}
	return string(data)
} | |
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/etcdserver/api/etcdhttp/metrics.go#L94-L123 | func checkHealth(srv etcdserver.ServerV2) Health {
	// checkHealth reports "true" only when the member has no active alarms,
	// knows a leader, and can serve a quorum read within one second. The
	// outcome is also counted in the healthSuccess/healthFailed metrics.
	h := Health{Health: "true"}
	as := srv.Alarms()
	if len(as) > 0 {
		h.Health = "false"
	}
	if h.Health == "true" {
		if uint64(srv.Leader()) == raft.None {
			h.Health = "false"
		}
	}
	if h.Health == "true" {
		// A QGET exercises the quorum path end-to-end.
		ctx, cancel := context.WithTimeout(context.Background(), time.Second)
		_, err := srv.Do(ctx, etcdserverpb.Request{Method: "QGET"})
		cancel()
		if err != nil {
			h.Health = "false"
		}
	}
	if h.Health == "true" {
		healthSuccess.Inc()
	} else {
		healthFailed.Inc()
	}
	return h
} | |
https://github.com/hashicorp/raft/blob/773bcaa2009bf059c5c06457b9fccd156d5e91e7/fuzzy/cluster.go#L369-L388 | func assertLogEntryEqual(t *testing.T, node string, exp *raft.Log, act *raft.Log) bool {
	// assertLogEntryEqual compares two raft log entries field by field
	// (Term, Index, Type, Data), reporting each mismatch through t.Errorf.
	// It returns true only when every field matches.
	res := true
	if exp.Term != act.Term {
		t.Errorf("Log Entry at Index %d for node %v has mismatched terms %d/%d", exp.Index, node, exp.Term, act.Term)
		res = false
	}
	if exp.Index != act.Index {
		t.Errorf("Node %v, Log Entry should be Index %d,but is %d", node, exp.Index, act.Index)
		res = false
	}
	if exp.Type != act.Type {
		t.Errorf("Node %v, Log Entry at Index %d should have type %v but is %v", node, exp.Index, exp.Type, act.Type)
		res = false
	}
	if !bytes.Equal(exp.Data, act.Data) {
		t.Errorf("Node %v, Log Entry at Index %d should have data %v, but has %v", node, exp.Index, exp.Data, act.Data)
		res = false
	}
	return res
} | |
https://github.com/kubernetes/test-infra/blob/8125fbda10178887be5dff9e901d6a0a519b67bc/prow/plugins/plugins.go#L244-L257 | func (pa *ConfigAgent) Start(path string) error {
	// Perform an initial synchronous load so errors surface to the caller.
	if err := pa.Load(path); err != nil {
		return err
	}
	// Reload once a minute for the lifetime of the process; reload failures
	// are logged rather than returned.
	go func() {
		for range time.Tick(1 * time.Minute) {
			if err := pa.Load(path); err != nil {
				logrus.WithField("path", path).WithError(err).Error("Error loading plugin config.")
			}
		}
	}()
	return nil
} | |
https://github.com/kubernetes/test-infra/blob/8125fbda10178887be5dff9e901d6a0a519b67bc/prow/plugins/require-matching-label/require-matching-label.go#L143-L163 | func matchingConfigs(org, repo, branch, label string, allConfigs []plugins.RequireMatchingLabel) []plugins.RequireMatchingLabel {
	// matchingConfigs filters allConfigs down to those that apply to the given
	// org/repo/branch and, when label is non-empty, to the given label.
	// An empty branch means the subject is an issue rather than a PR.
	var filtered []plugins.RequireMatchingLabel
	for _, cfg := range allConfigs {
		// Check if the config applies to this issue type.
		if (branch == "" && !cfg.Issues) || (branch != "" && !cfg.PRs) {
			continue
		}
		// Check if the config applies to this 'org[/repo][/branch]'.
		// Empty Repo/Branch in the config act as wildcards.
		if org != cfg.Org ||
			(cfg.Repo != "" && cfg.Repo != repo) ||
			(cfg.Branch != "" && branch != "" && cfg.Branch != branch) {
			continue
		}
		// If we are reacting to a label event, see if it is relevant.
		if label != "" && !cfg.Re.MatchString(label) {
			continue
		}
		filtered = append(filtered, cfg)
	}
	return filtered
} | |
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/pkg/flags/urls.go#L41-L47 | func (us *URLsValue) String() string {
	// String renders the URL list in flag.Value form: comma-separated.
	parts := make([]string, 0, len(*us))
	for _, u := range *us {
		parts = append(parts, u.String())
	}
	return strings.Join(parts, ",")
} | |
https://github.com/kubernetes/test-infra/blob/8125fbda10178887be5dff9e901d6a0a519b67bc/prow/kube/dry_run_client.go#L55-L57 | func (c *dryRunProwJobClient) Update(*prowapi.ProwJob) (*prowapi.ProwJob, error) {
	// Dry-run client: updating is a no-op that reports success.
	return nil, nil
} | |
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/etcdctl/ctlv3/command/make_mirror_command.go#L44-L61 | func NewMakeMirrorCommand() *cobra.Command {
	// NewMakeMirrorCommand returns the "make-mirror" cobra command, which
	// mirrors key-values to a destination etcd cluster. Flag values are bound
	// to the package-level mm* variables consumed by makeMirrorCommandFunc.
	c := &cobra.Command{
		Use:   "make-mirror [options] <destination>",
		Short: "Makes a mirror at the destination etcd cluster",
		Run:   makeMirrorCommandFunc,
	}
	c.Flags().StringVar(&mmprefix, "prefix", "", "Key-value prefix to mirror")
	c.Flags().StringVar(&mmdestprefix, "dest-prefix", "", "destination prefix to mirror a prefix to a different prefix in the destination cluster")
	c.Flags().BoolVar(&mmnodestprefix, "no-dest-prefix", false, "mirror key-values to the root of the destination cluster")
	c.Flags().StringVar(&mmcert, "dest-cert", "", "Identify secure client using this TLS certificate file for the destination cluster")
	c.Flags().StringVar(&mmkey, "dest-key", "", "Identify secure client using this TLS key file")
	c.Flags().StringVar(&mmcacert, "dest-cacert", "", "Verify certificates of TLS enabled secure servers using this CA bundle")
	// TODO: secure by default when etcd enables secure gRPC by default.
	c.Flags().BoolVar(&mminsecureTr, "dest-insecure-transport", true, "Disable transport security for client connections")
	return c
} | |
https://github.com/kubernetes/test-infra/blob/8125fbda10178887be5dff9e901d6a0a519b67bc/prow/tide/search.go#L88-L100 | func dateToken(start, end time.Time) string {
	// GitHub's GraphQL API silently fails if you provide it with an invalid time
	// string. Dates before 1970 (unix epoch) are considered invalid, so they
	// are replaced by the "*" wildcard.
	format := func(t time.Time) string {
		if t.Year() < 1970 {
			return "*"
		}
		return t.Format(github.SearchTimeFormat)
	}
	return fmt.Sprintf("updated:%s..%s", format(start), format(end))
} | |
https://github.com/kubernetes/test-infra/blob/8125fbda10178887be5dff9e901d6a0a519b67bc/prow/cmd/pipeline/controller.go#L242-L248 | func fromKey(key string) (string, string, string, error) {
	// fromKey splits a slash-separated key into exactly three components;
	// any other shape is rejected.
	parts := strings.Split(key, "/")
	if len(parts) == 3 {
		return parts[0], parts[1], parts[2], nil
	}
	return "", "", "", fmt.Errorf("bad key: %q", key)
} | |
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/contrib/recipes/key.go#L154-L163 | func newUniqueEphemeralKV(s *concurrency.Session, prefix, val string) (ek *EphemeralKV, err error) {
	// newUniqueEphemeralKV creates an ephemeral key under prefix whose name is
	// the current nanosecond timestamp, retrying with a fresh timestamp while
	// the key already exists. Any other error ends the loop and is returned.
	for {
		newKey := fmt.Sprintf("%s/%v", prefix, time.Now().UnixNano())
		ek, err = newEphemeralKV(s, newKey, val)
		if err == nil || err != ErrKeyExists {
			break
		}
	}
	return ek, err
} | |
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/pkg/logutil/zap_grpc.go#L26-L32 | func NewGRPCLoggerV2(lcfg zap.Config) (grpclog.LoggerV2, error) {
	// NewGRPCLoggerV2 builds a grpclog.LoggerV2 backed by a zap logger built
	// from lcfg.
	lg, err := lcfg.Build(zap.AddCallerSkip(1)) // to annotate caller outside of "logutil"
	if err != nil {
		return nil, err
	}
	return &zapGRPCLogger{lg: lg, sugar: lg.Sugar()}, nil
} | |
https://github.com/kubernetes/test-infra/blob/8125fbda10178887be5dff9e901d6a0a519b67bc/prow/cmd/peribolos/main.go#L520-L530 | func findTeam(teams map[string]github.Team, name string, previousNames ...string) *github.Team {
	// findTeam looks the team up by its current name first, then by each
	// former name in order; nil means no match.
	candidates := append([]string{name}, previousNames...)
	for _, n := range candidates {
		if t, ok := teams[n]; ok {
			return &t
		}
	}
	return nil
} | |
https://github.com/kubernetes/test-infra/blob/8125fbda10178887be5dff9e901d6a0a519b67bc/prow/crier/controller.go#L134-L139 | func (c *Controller) runWorker() {
	// runWorker drains the work queue until processNextItem signals shutdown.
	// NOTE(review): wg.Add happens inside the worker itself; if the controller
	// can call wg.Wait before this goroutine is scheduled, the Add may race
	// with Wait — confirm the caller's startup sequence.
	c.wg.Add(1)
	for c.processNextItem() {
	}
	c.wg.Done()
} | |
https://github.com/kubernetes/test-infra/blob/8125fbda10178887be5dff9e901d6a0a519b67bc/prow/kube/dry_run_client.go#L177-L179 | func (c *dryRunProwJobClient) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *prowapi.ProwJob, err error) {
	// Dry-run client: patching is a no-op that reports success.
	return nil, nil
} | |
https://github.com/kubernetes/test-infra/blob/8125fbda10178887be5dff9e901d6a0a519b67bc/prow/jenkins/jenkins.go#L483-L491 | func (c *Client) JobParameterized(jobInfo *JobInfo) bool {
	// JobParameterized reports whether the Jenkins job declares any build
	// parameters in any of its properties.
	for _, prop := range jobInfo.Property {
		// len() of a nil slice is 0, so the separate nil check the original
		// carried was redundant (staticcheck S1009).
		if len(prop.ParameterDefinitions) > 0 {
			return true
		}
	}
	return false
} | |
https://github.com/kubernetes/test-infra/blob/8125fbda10178887be5dff9e901d6a0a519b67bc/prow/spyglass/lenses/lenses.go#L96-L112 | func RegisterLens(lens Lens) error {
	// RegisterLens validates a lens's config (unique name, non-empty title,
	// non-negative priority) and adds it to the package-level registry.
	config := lens.Config()
	_, ok := lensReg[config.Name]
	if ok {
		return fmt.Errorf("viewer already registered with name %s", config.Name)
	}
	if config.Title == "" {
		return errors.New("empty title field in view metadata")
	}
	if config.Priority < 0 {
		return errors.New("priority must be >=0")
	}
	lensReg[config.Name] = lens
	logrus.Infof("Spyglass registered viewer %s with title %s.", config.Name, config.Title)
	return nil
} | |
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/proxy/grpcproxy/cache/store.go#L158-L165 | func (c *cache) Compact(revision int64) {
	// Compact records the compaction revision under the cache lock; the value
	// is monotonic — a lower revision never overwrites a higher one.
	c.mu.Lock()
	defer c.mu.Unlock()
	if revision > c.compactedRev {
		c.compactedRev = revision
	}
} | |
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/pkg/logutil/zap.go#L57-L97 | // AddOutputPaths merges outputPaths into cfg.OutputPaths and errorOutputPaths
// into cfg.ErrorOutputPaths, deduplicating and sorting each list, and returns
// the updated config.
func AddOutputPaths(cfg zap.Config, outputPaths, errorOutputPaths []string) zap.Config {
	cfg.OutputPaths = mergeLogPaths(cfg.OutputPaths, outputPaths)
	cfg.ErrorOutputPaths = mergeLogPaths(cfg.ErrorOutputPaths, errorOutputPaths)
	return cfg
}

// mergeLogPaths returns the sorted, deduplicated union of the two path lists.
// If the union contains "/dev/null", that single sink replaces everything
// else (logs are being discarded anyway). This helper replaces the two
// verbatim copies of the same logic the original function carried.
func mergeLogPaths(existing, extra []string) []string {
	set := make(map[string]struct{})
	for _, p := range existing {
		set[p] = struct{}{}
	}
	for _, p := range extra {
		set[p] = struct{}{}
	}
	if _, ok := set["/dev/null"]; ok {
		// "/dev/null" to discard all
		return []string{"/dev/null"}
	}
	merged := make([]string, 0, len(set))
	for p := range set {
		merged = append(merged, p)
	}
	sort.Strings(merged)
	return merged
} | |
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/clientv3/compare.go#L117-L120 | func (cmp Cmp) WithPrefix() Cmp {
	// WithPrefix widens the comparison to cover every key that has cmp.Key as
	// a prefix by setting RangeEnd to the prefix's successor.
	cmp.RangeEnd = getPrefix(cmp.Key)
	return cmp
} | |
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/pkg/netutil/routes_linux.go#L36-L65 | func GetDefaultHost() (string, error) {
	// GetDefaultHost returns a host address reachable via a default route,
	// preferring IPv4 and falling back to the remaining address families in
	// deterministic (sorted) order.
	rmsgs, rerr := getDefaultRoutes()
	if rerr != nil {
		return "", rerr
	}
	// prioritize IPv4
	if rmsg, ok := rmsgs[syscall.AF_INET]; ok {
		if host, err := chooseHost(syscall.AF_INET, rmsg); host != "" || err != nil {
			return host, err
		}
		// IPv4 yielded nothing usable; drop it before scanning the rest.
		delete(rmsgs, syscall.AF_INET)
	}
	// sort so choice is deterministic
	var families []int
	for family := range rmsgs {
		families = append(families, int(family))
	}
	sort.Ints(families)
	for _, f := range families {
		family := uint8(f)
		if host, err := chooseHost(family, rmsgs[family]); host != "" || err != nil {
			return host, err
		}
	}
	return "", errNoDefaultHost
} | |
https://github.com/etcd-io/etcd/blob/616592d9ba993e3fe9798eef581316016df98906/etcdserver/etcdserverpb/rpc.pb.go#L690-L697 | func (*RequestOp) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) {
	// XXX_OneofFuncs is for the internal use of the proto package.
	// Generated code (protoc-gen-gogo) — do not edit by hand.
	return _RequestOp_OneofMarshaler, _RequestOp_OneofUnmarshaler, _RequestOp_OneofSizer, []interface{}{
		(*RequestOp_RequestRange)(nil),
		(*RequestOp_RequestPut)(nil),
		(*RequestOp_RequestDeleteRange)(nil),
		(*RequestOp_RequestTxn)(nil),
	}
} | |
https://github.com/kubernetes/test-infra/blob/8125fbda10178887be5dff9e901d6a0a519b67bc/prow/jenkins/jenkins.go#L260-L290 | func NewClient(
	url string,
	dryRun bool,
	tlsConfig *tls.Config,
	authConfig *AuthConfig,
	logger *logrus.Entry,
	metrics *ClientMetrics,
) (*Client, error) {
	// NewClient builds a Jenkins client with a 30s HTTP timeout, optional TLS
	// configuration, and — when CSRF protection is enabled — an initial crumb
	// request.
	// NOTE(review): authConfig is dereferenced unconditionally below; callers
	// presumably always pass a non-nil AuthConfig — confirm at call sites.
	if logger == nil {
		logger = logrus.NewEntry(logrus.StandardLogger())
	}
	c := &Client{
		logger:     logger.WithField("client", "jenkins"),
		dryRun:     dryRun,
		baseURL:    url,
		authConfig: authConfig,
		client: &http.Client{
			Timeout: 30 * time.Second,
		},
		metrics: metrics,
	}
	if tlsConfig != nil {
		c.client.Transport = &http.Transport{TLSClientConfig: tlsConfig}
	}
	if c.authConfig.CSRFProtect {
		if err := c.CrumbRequest(); err != nil {
			return nil, fmt.Errorf("cannot get Jenkins crumb: %v", err)
		}
	}
	return c, nil
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.