c176300
		pr.Repo.Name, pr.Number, comments,
		func(c github.IssueComment) bool { // isStale function
			return c.User.Login == botName &&
				(strings.Contains(c.Body, releaseNoteBody) ||
					strings.Contains(c.Body, parentReleaseNoteBody))
		},
	)
}
c176301
	if strings.Contains(composedReleaseNote, actionRequiredNote) {
		return releaseNoteActionRequired
	}
	return releaseNote
}
c176302
	if potentialMatch == nil {
		return ""
	}
	return strings.TrimSpace(potentialMatch[1])
}
c176303
		resources: map[string]common.Resource{},
	}
}
c176304
parse %s from User Data", LeasedResources)
			return nil, err
		}
	}
	resourcesToRelease = append(resourcesToRelease, *res)
	resources, err := c.basic.AcquireByState(res.Name, dest, leasedResources)
	if err != nil {
		releaseOnFailure()
		return nil, err
	}
	resourcesToRelease = append(resourcesToRelease, resources...)
	c.updateResource(*res)
	return res, nil
}
c176305
				allErrors = multierror.Append(allErrors, err)
			}
			return
		}
	}
	resourceNames = append(resourceNames, leasedResources...)
	for _, n := range resourceNames {
		if err := c.basic.ReleaseOne(n, dest); err != nil {
			logrus.WithError(err).Warningf("failed to release resource %s", n)
			allErrors = multierror.Append(allErrors, err)
		}
	}
	c.deleteResource(name)
	return
}
c176306
	c.basic.UpdateAll(state)
}
c176307
			oauth2.StaticTokenSource(&oauth2.Token{AccessToken: token}),
		),
	)
}
c176308
nil {
		return "", errors.New("Users.Get(\"\") returned empty login")
	}
	return *user.Login, nil
}
c176309
	client := GetGitHubClient(strings.TrimSpace(string(token)))
	login, err := GetUsername(client) // Get user name for token
	if err != nil {
		return nil, err
	}
	return &TokenHandler{
		gClient:  client,
		login:    login,
		influxdb: influxdb,
	}, nil
}
c176310
		token, err := CreateTokenHandler(f, influxdb)
		if err != nil {
			return nil, fmt.Errorf("Failed to create token (%s): %s", tokenFile, err)
		}
		tokens = append(tokens, *token)
	}
	return tokens, nil
}
c176311
		i.organization, i.repository, i.pullRequest)
}
c176312
			cancelIndex = prev
			dupes[ji] = i
		}
		toCancel := pjs[cancelIndex]
		// TODO cancel the prow job before cleaning up its resources and make this system
		// independent.
		// See this discussion for more details: https://github.com/kubernetes/test-infra/pull/11451#discussion_r263523932
		if err := cleanup(toCancel); err != nil {
			log.WithError(err).WithFields(ProwJobFields(&toCancel)).Warn("Cannot clean up job resources")
		}
		toCancel.SetComplete()
		prevState := toCancel.Status.State
		toCancel.Status.State = prowapi.AbortedState
		log.WithFields(ProwJobFields(&toCancel)).
			WithField("from", prevState).
			WithField("to", toCancel.Status.State).Info("Transitioning states")
		npj, err := pjc.ReplaceProwJob(toCancel.ObjectMeta.Name, toCancel)
		if err != nil {
			return err
		}
		pjs[cancelIndex] = npj
	}
	return nil
}
c176313
logrus.WithField("component", component).WithError(err).Error("Failed to push metrics.") } case <-sig: logrus.WithField("component", component).Infof("Metrics pusher shutting down...") return } } }
c176314
		rate.NewLimiter(rate.Limit(1000), 50000)},
	)
	return workqueue.NewNamedRateLimitingQueue(rl, controllerName)
}
c176315
statuses {
		if status.Context != dcoContextName {
			continue
		}
		existingStatus = status.State
		break
	}
	l.Debugf("Existing DCO status context status is %q", existingStatus)
	return existingStatus, nil
}
c176316
!= nil {
		return false, false, fmt.Errorf("error getting pull request labels: %v", err)
	}
	for _, l := range labels {
		if l.Name == dcoYesLabel {
			hasYesLabel = true
		}
		if l.Name == dcoNoLabel {
			hasNoLabel = true
		}
	}
	return hasYesLabel, hasNoLabel, nil
}
c176317
	}
	hasYesLabel, hasNoLabel, err := checkExistingLabels(gc, l, org, repo, pr.Number)
	if err != nil {
		l.WithError(err).Infof("Error checking existing PR labels")
		return err
	}
	return takeAction(gc, cp, l, org, repo, pr, commitsMissingDCO, existingStatus, hasYesLabel, hasNoLabel, addComment)
}
c176318
shortSHA[:7]
		}
		// get the first line of the commit
		message := strings.Split(commit.Message, "\n")[0]
		lines[i] = fmt.Sprintf(lineFmt, shortSHA, org, repo, commit.SHA, message)
	}
	return strings.Join(lines, "\n")
}
c176319
			spec.BuildID)
	case prowapi.BatchJob:
		return path.Join(PRLogs, "pull", "batch", spec.Job, spec.BuildID)
	default:
		logrus.Fatalf("unknown job spec type: %v", spec.Type)
	}
	return ""
}
c176320
		return path.Join(PRLogs, "directory", spec.Job, fmt.Sprintf("%s.txt", spec.BuildID))
	default:
		logrus.Fatalf("unknown job spec type: %v", spec.Type)
	}
	return ""
}
c176321
"directory", spec.Job) default: logrus.Errorf("unknown job spec type: %v", spec.Type) } return "" }
c176322
		}
		// handle gerrit repo
		repo = strings.Replace(repo, "/", "_", -1)
		return fmt.Sprintf("%s_%s", org, repo)
	}
}
c176323
		repo = strings.Replace(repo, "/", "_", -1)
		return fmt.Sprintf("%s_%s", org, repo)
	}
}
c176324
	}
	sources[name] = src
	glog.Infof("Registered issue source '%s'.", name)
}
c176325
			continue
		}
		// Note: We assume that no issues made by this bot with ID's matching issues generated by
		// sources will be created while this code is creating issues. If this is a possibility then
		// this loop should be updated to fetch recently changed issues from github after every issue
		// sync that results in an issue being created.
		glog.Infof("Syncing issues from source: %s.", srcName)
		created := 0
		for _, issue := range issues {
			if c.sync(issue) {
				created++
			}
		}
		glog.Infof(
			"Created issues for %d of the %d issues synced from source: %s.",
			created,
			len(issues),
			srcName,
		)
	}
}
c176326
Allowing all assignees. errmsg: %v\n", c.org, c.project, err)
	} else {
		c.Collaborators = make([]string, 0, len(collaborators))
		for _, user := range collaborators {
			if user.Login != nil && *user.Login != "" {
				c.Collaborators = append(c.Collaborators, strings.ToLower(*user.Login))
			}
		}
	}
	// Populate the issue cache (allIssues).
	issues, err := c.client.GetIssues(
		c.org,
		c.project,
		&github.IssueListByRepoOptions{
			State:   "all",
			Creator: c.authorName,
		},
	)
	if err != nil {
		return fmt.Errorf("failed to refresh the list of all issues created by %s in repo '%s/%s'. errmsg: %v", c.authorName, c.org, c.project, err)
	}
	if len(issues) == 0 {
		glog.Warningf("IssueCreator found no issues in the repo '%s/%s' authored by '%s'.\n", c.org, c.project, c.authorName)
	}
	c.allIssues = make(map[int]*github.Issue)
	for _, i := range issues {
		c.allIssues[*i.Number] = i
	}
	return nil
}
c176327
			filtered = append(filtered, elemA)
		} else {
			removed = append(removed, elemA)
		}
	}
	return
}
c176328
	}
	labels := issue.Labels()
	if prio, ok := issue.Priority(); ok {
		labels = append(labels, "priority/"+prio)
	}
	if c.validLabels != nil {
		var removedLabels []string
		labels, removedLabels = setIntersect(labels, c.validLabels)
		if len(removedLabels) > 0 {
			glog.Errorf("Filtered the following invalid labels from issue %q: %q.", title, removedLabels)
		}
	}
	glog.Infof("Create Issue: %q Assigned to: %q\n", title, owners)
	if c.dryRun {
		return true
	}
	created, err := c.client.CreateIssue(c.org, c.project, title, body, labels, owners)
	if err != nil {
		glog.Errorf("Failed to create a new github issue for issue ID '%s'.\n", id)
		return false
	}
	c.allIssues[*created.Number] = created
	return true
}
c176329
ok := r.UserData.Map.Load(UserDataSecretAccessKey)
	if !ok {
		return val, errors.New("No Secret Access Key in UserData")
	}
	val.AccessKeyID = accessKey.(string)
	val.SecretAccessKey = secretKey.(string)
	return val, nil
}
c176330
	go func() {
		<-c
		logrus.Warn("Interrupt received, attempting clean shutdown...")
		close(stop)
		<-c
		logrus.Error("Second interrupt received, force exiting...")
		os.Exit(1)
	}()
	return stop
}
c176331
!= nil {
		return nil, err
	}
	// Assume watches receive updates, but resync every 30m in case something wonky happens
	bif := pipelineinfo.NewSharedInformerFactory(bc, 30*time.Minute)
	bif.Tekton().V1alpha1().PipelineRuns().Lister()
	go bif.Start(stop)
	return &pipelineConfig{
		client:   bc,
		informer: bif.Tekton().V1alpha1().PipelineRuns(),
	}, nil
}
c176332
	kube.GetKubernetesClient(o.masterURL, o.kubeConfig)
}
c176333
	kube.GetProwJobClient(o.masterURL, o.kubeConfig)
}
c176334
return "", fmt.Errorf("failed to read %s: %v", symLink, err) } // strip gs://<bucket-name> from global address `u` u := string(data) return prefixRe.ReplaceAllString(u, ""), nil }
c176335
		return fmt.Errorf("failed to read %s: %v", key, err)
	}
	err = json.Unmarshal(rawData, &data)
	if err != nil {
		return fmt.Errorf("failed to parse %s: %v", key, err)
	}
	return nil
}
c176336
		Prefix:    prefix,
		Delimiter: "/",
	})
	for {
		attrs, err := it.Next()
		if err == iterator.Done {
			break
		}
		if err != nil {
			return dirs, err
		}
		if attrs.Prefix != "" {
			dirs = append(dirs, attrs.Prefix)
		}
	}
	return dirs, nil
}
c176337
err == iterator.Done {
			break
		}
		if err != nil {
			return keys, err
		}
		keys = append(keys, attrs.Name)
	}
	return keys, nil
}
c176338
			} else {
				logrus.Warningf("unrecognized directory name (expected int64): %s", dir)
			}
		}
	} else {
		keys, err := bucket.listAll(root)
		if err != nil {
			return ids, fmt.Errorf("failed to list GCS keys: %v", err)
		}
		for _, key := range keys {
			matches := linkRe.FindStringSubmatch(key)
			if len(matches) == 2 {
				i, err := strconv.ParseInt(matches[1], 10, 64)
				if err == nil {
					ids = append(ids, i)
				} else {
					logrus.Warningf("unrecognized file name (expected <int64>.txt): %s", key)
				}
			}
		}
	}
	return ids, nil
}
c176339
nil {
		return nil, err
	}
	result := make([]*cover.Profile, 0, len(profile))
	for _, p := range profile {
		if re.MatchString(p.FileName) == include {
			result = append(result, p)
		}
	}
	return result, nil
}
c176340
path := range paths {
		secretValue, err := LoadSingleSecret(path)
		if err != nil {
			return nil, err
		}
		secretsMap[path] = secretValue
	}
	return secretsMap, nil
}
c176341
nil, fmt.Errorf("error reading %s: %v", path, err)
	}
	return bytes.TrimSpace(b), nil
}
c176342
err
	}
	b.Explicit = true
	b.Value = v
	return nil
}
c176343
	client, err := storage.NewClient(ctx, options...)
	if err != nil {
		if creds != "" {
			return nil, err
		}
		logrus.WithError(err).Debug("Cannot load application default gcp credentials")
		client = nil
	}
	return opener{gcs: client}, nil
}
c176344
	os.IsNotExist(err) || err == storage.ErrObjectNotExist
}
c176345
		logrus.WithError(err).Error("Failed to close")
	}
}
c176346
	if err != nil {
		return nil, fmt.Errorf("bad gcs path: %v", err)
	}
	if g == nil {
		return os.Create(path)
	}
	return g.NewWriter(ctx), nil
}
c176347
*sessions.CookieStore) {
	gob.Register(&oauth2.Token{})
	gac.CookieStore = cookie
}
c176348
	return fmt.Sprintf("%.1f", (change.newRatio-change.baseRatio)*100)
}
c176349
			filePath, formatPercentage(change.baseRatio),
			formatPercentage(change.newRatio), deltaDisplayed(change)))
		if change.newRatio < coverageThreshold {
			isCoverageLow = true
		}
	}
	return strings.Join(rows, "\n"), isCoverageLow
}
c176350
	}
	table, isCoverageLow := makeTable(calculation.ProduceCovList(baseProfiles),
		calculation.ProduceCovList(newProfiles), coverageThreshold)
	if table == "" {
		return "", false
	}
	rows = append(rows, table)
	rows = append(rows, "")
	return strings.Join(rows, "\n"), isCoverageLow
}
c176351
"", "The file containing the OAuth Token to use for requests.") cmd.PersistentFlags().StringVar(&client.Org, "organization", "", "The github organization to scan") cmd.PersistentFlags().StringVar(&client.Project, "project", "", "The github project to scan") }
c176352
{
		return fmt.Errorf("project flag must be set")
	}
	client.Project = strings.ToLower(client.Project)
	return nil
}
c176353
		if err != nil {
			return nil, err
		}
		token = strings.TrimSpace(string(data))
	}
	if len(token) > 0 {
		ts := oauth2.StaticTokenSource(&oauth2.Token{AccessToken: token})
		tc := oauth2.NewClient(oauth2.NoContext, ts)
		client.githubClient = github.NewClient(tc)
	} else {
		client.githubClient = github.NewClient(nil)
	}
	return client.githubClient, nil
}
c176354
		if err != nil {
			glog.Error("Failed to get RateLimits:", err)
			sleep = time.Minute
		}
		if limits != nil && limits.Core != nil && limits.Core.Remaining < tokenLimit {
			sleep = limits.Core.Reset.Sub(time.Now())
			glog.Warning("RateLimits: reached. Sleeping for ", sleep)
		}
	}
	time.Sleep(sleep)
}
c176355
nil {
			close(c)
			glog.Error(err)
			return
		}
		for _, issue := range issues {
			c <- issue
			count++
		}
		if resp.NextPage == 0 {
			break
		}
		opt.ListOptions.Page = resp.NextPage
	}
	glog.Infof("Fetched %d issues updated since %v.", count, latest)
	close(c)
}
c176356
== int64(id) {
			return true
		}
	}
	return false
}
c176357
Retrying...", err) time.Sleep(time.Second) continue } for _, event := range events { c <- event count++ } if resp.NextPage == 0 || (latest != nil && hasID(events, *latest)) { break } opt.Page = resp.NextPage } glog.Infof("Fetched %d events.", count) close(c) }
c176358
		return true
	case github.PullRequestActionEdited:
		return true
	default:
		return false
	}
}
c176359
		EventsCommentsChannel: make(chan interface{}, 100),
		repository:            repository,
	}
}
c176360
		f.repository).
		Order("issue_updated_at").
		Preload("Labels").
		Find(&issues)
	if query.Error != nil {
		return query.Error
	}
	count := len(issues)
	for _, issue := range issues {
		f.IssuesChannel <- issue
		f.lastIssue = issue.IssueUpdatedAt
	}
	glog.Infof("Found and pushed %d updated/new issues", count)
	return nil
}
c176361
	}
	for event != nil || comment != nil {
		if event == nil || (comment != nil && comment.CommentCreatedAt.Before(event.EventCreatedAt)) {
			f.EventsCommentsChannel <- *comment
			f.lastComment = comment.CommentCreatedAt
			if commentRows.Next() {
				db.ScanRows(commentRows, comment)
			} else {
				comment = nil
			}
		} else {
			f.EventsCommentsChannel <- *event
			f.lastEvent = event.EventCreatedAt
			if eventRows.Next() {
				db.ScanRows(eventRows, event)
			} else {
				event = nil
			}
		}
		count++
	}
	glog.Infof("Found and pushed %d new events/comments", count)
	return nil
}
c176362
	}
	if err := f.fetchRecentEventsAndComments(db); err != nil {
		return err
	}
	return nil
}
c176363
fjr.parseFlakyJobs(json)
	if err != nil {
		return nil, err
	}
	count := fjr.syncCount
	if len(flakyJobs) < count {
		count = len(flakyJobs)
	}
	issues := make([]creator.Issue, 0, count)
	for _, fj := range flakyJobs[0:count] {
		issues = append(issues, fj)
	}
	return issues, nil
}
c176364
the past week", fj.Name, *fj.FlakeCount) }
c176365
range fj.reporter.creator.TestsSIGs(fj.TestsSorted()) {
		labels = append(labels, "sig/"+sig)
	}
	return labels
}
c176366
nil {
			return nil, err
		}
		defer resp.Body.Close()
		body, err := ioutil.ReadAll(resp.Body)
		if err != nil {
			continue
		}
		return body, nil
	}
	return nil, fmt.Errorf("ran out of retries reading from '%s'. Last error was %v", url, err)
}
c176367
			log += fmt.Sprintf("%s,\n", strings.TrimSuffix(string(line.actual), "\n"))
		}
	}
	return fmt.Sprintf("[%s]", log)
}
c176368
		ProwJobID: prowJobID,
		Refs:      spec.Refs,
		ExtraRefs: spec.ExtraRefs,
		agent:     spec.Agent,
	}
}
c176369
	}
	spec := &JobSpec{}
	if err := json.Unmarshal([]byte(specEnv), spec); err != nil {
		return nil, fmt.Errorf("malformed $%s: %v", JobSpecEnv, err)
	}
	return spec, nil
}
c176370
spec.Type == prowapi.PeriodicJob {
		return env, nil
	}
	env[repoOwnerEnv] = spec.Refs.Org
	env[repoNameEnv] = spec.Refs.Repo
	env[pullBaseRefEnv] = spec.Refs.BaseRef
	env[pullBaseShaEnv] = spec.Refs.BaseSHA
	env[pullRefsEnv] = spec.Refs.String()
	if spec.Type == prowapi.PostsubmitJob || spec.Type == prowapi.BatchJob {
		return env, nil
	}
	env[pullNumberEnv] = strconv.Itoa(spec.Refs.Pulls[0].Number)
	env[pullPullShaEnv] = spec.Refs.Pulls[0].SHA
	return env, nil
}
c176371
	case prowapi.PostsubmitJob, prowapi.BatchJob:
		return append(baseEnv, refsEnv...)
	case prowapi.PresubmitJob:
		return append(append(baseEnv, refsEnv...), pullEnv...)
	default:
		return []string{}
	}
}
c176372
	if refs.BaseSHA != "" {
		return refs.BaseSHA
	}
	return refs.BaseRef
}
c176373
	} else if len(spec.ExtraRefs) > 0 {
		return getRevisionFromRef(&spec.ExtraRefs[0])
	}
	return ""
}
c176374
	// not triggered with commands and is not configurable.
	return &pluginhelp.PluginHelp{
		Description: fmt.Sprintf("The merge commit blocker plugin adds the %s label to pull requests that contain merge commits", labels.MergeCommits),
	}, nil
}
c176375
case "paths-from-repo": // Despite the name, this command actually requires a file // of paths from the _same_ repo in which the .generated_files // config lives. repoPaths = append(repoPaths, fs[1]) default: return repoPaths, &ParseError{line: l} } } if err := s.Err(); err != nil { return repoPaths, err } return repoPaths, nil }
c176376
			continue
		}
		g.Paths[l] = true
	}
	if err := s.Err(); err != nil {
		return fmt.Errorf("scan error: %v", err)
	}
	return nil
}
c176377
			return true
		}
	}
	base := filepath.Base(path)
	if g.FileNames[base] {
		return true
	}
	for prefix := range g.FilePrefixes {
		if strings.HasPrefix(base, prefix) {
			return true
		}
	}
	return false
}
c176378
Precision: "s", }) if err != nil { return nil, err } return &InfluxDB{ client: client, database: config.DB, batch: bp, tags: tags, measurement: measurement, }, err }
c176379
	for k, v := range defaultTags {
		newTags[k] = v
	}
	for k, v := range extraTags {
		newTags[k] = v
	}
	return newTags
}
c176380
		conditions = append(conditions, fmt.Sprintf(`"%s" = '%v'`, key, tags[key]))
	}
	return "WHERE " + strings.Join(conditions, " AND ")
}
c176381
:= influxdb.NewPoint(i.measurement, mergeTags(i.tags, tags), fields, date)
	if err != nil {
		return err
	}
	i.batch.AddPoint(pt)
	i.batchSize++
	return nil
}
c176382
nil {
		return nil, fmt.Errorf("Error accessing pod log from given source: %v", err)
	}
	return podLog, nil
}
c176383
	logrus.WithError(http.ListenAndServe(":8080", nil)).Fatal("ListenAndServe returned.")
}
c176384
logrus.WithField("metrics-duration", fmt.Sprintf("%v", time.Since(start))).Debug("Metrics synced") case <-sig: logrus.Debug("Plank gatherer is shutting down...") return } } }
c176385
		RequiredPullRequestReviews: makeReviews(policy.RequiredPullRequestReviews),
		RequiredStatusChecks:       makeChecks(policy.RequiredStatusChecks),
		Restrictions:               makeRestrictions(policy.Restrictions),
	}
}
c176386
makeBool(rp.RequireOwners),
		RequiredApprovingReviewCount: *rp.Approvals,
	}
	if rp.DismissalRestrictions != nil {
		rprr.DismissalRestrictions = *makeRestrictions(rp.DismissalRestrictions)
	}
	return &rprr
}
c176387
	return executeTemplate(resourceDir, "header", BuildLogsView{})
}
c176388
{
		lines, err = logLines(artifact, request.Offset, request.Length)
	}
	if err != nil {
		return fmt.Sprintf("failed to retrieve log lines: %v", err)
	}
	logLines := highlightLines(lines, request.StartLine)
	return executeTemplate(resourceDir, "line group", logLines)
}
c176389
log %q: %v", artifact.JobPath(), err) } logLines := strings.Split(string(read), "\n") return logLines, nil }
c176390
	var buf bytes.Buffer
	if err := t.ExecuteTemplate(&buf, templateName, data); err != nil {
		logrus.WithError(err).Error("Error executing template.")
	}
	return buf.String()
}
c176391
{
	if c := in.deepCopy(); c != nil {
		return c
	}
	return nil
}
c176392
	if err == nil {
		in.fromResource(r)
	}
}
c176393
		items = append(items, b.(*ResourceObject))
	}
	in.Items = items
}
c176394
:= in.deepCopy(); c != nil {
		return c
	}
	return nil
}
c176395
"use-context", ctx) return cmd.Run() }
c176396
err := cmd.Output()
	return strings.TrimSpace(string(b)), err
}
c176397
	cmd.Stderr = os.Stderr
	return append([]string{bin}, args...), cmd
}
c176398
err := cmd.Output()
	if err != nil {
		return nil, fmt.Errorf("%s: %v", strings.Join(args, " "), err)
	}
	var d describe
	// check the unmarshal error itself rather than the stale, already-checked err
	if err := yaml.Unmarshal(data, &d); err != nil {
		return nil, fmt.Errorf("unmarshal gcloud: %v", err)
	}
	if d.Endpoint == "" {
		return nil, errors.New("empty endpoint")
	}
	if len(d.Auth.ClusterCACertificate) == 0 {
		return nil, errors.New("empty clusterCaCertificate")
	}
	if len(d.Auth.ClientKey) == 0 {
		return nil, errors.New("empty clientKey, consider running with --get-client-cert")
	}
	if len(d.Auth.ClientCertificate) == 0 {
		return nil, errors.New("empty clientCertificate, consider running with --get-client-cert")
	}
	return &d, nil
}
c176399
= append(*ss, value)
	return nil
}