repo stringlengths 6 47 | file_url stringlengths 77 269 | file_path stringlengths 5 186 | content stringlengths 0 32.8k | language stringclasses 1
value | license stringclasses 7
values | commit_sha stringlengths 40 40 | retrieved_at stringdate 2026-01-07 08:35:43 2026-01-07 08:55:24 | truncated bool 2
classes |
|---|---|---|---|---|---|---|---|---|
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/api/queries_comments.go | api/queries_comments.go | package api
import (
"time"
"github.com/shurcooL/githubv4"
)
// Comments is a paginated list of issue/PR comments as returned by the
// GraphQL API.
type Comments struct {
	Nodes      []Comment
	TotalCount int
	// PageInfo carries the GraphQL cursor for fetching the next page.
	PageInfo struct {
		HasNextPage bool
		EndCursor   string
	}
}
// CurrentUserComments filters the fetched comments down to those authored
// by the current viewer (per the viewerDidAuthor GraphQL field).
func (cs Comments) CurrentUserComments() []Comment {
	var mine []Comment
	for _, comment := range cs.Nodes {
		if !comment.ViewerDidAuthor {
			continue
		}
		mine = append(mine, comment)
	}
	return mine
}
// Comment is a single issue or pull request comment.
type Comment struct {
	ID                  string         `json:"id"`
	Author              CommentAuthor  `json:"author"`
	AuthorAssociation   string         `json:"authorAssociation"`
	Body                string         `json:"body"`
	CreatedAt           time.Time      `json:"createdAt"`
	IncludesCreatedEdit bool           `json:"includesCreatedEdit"`
	IsMinimized         bool           `json:"isMinimized"`
	MinimizedReason     string         `json:"minimizedReason"`
	ReactionGroups      ReactionGroups `json:"reactionGroups"`
	URL                 string         `json:"url,omitempty"`
	ViewerDidAuthor     bool           `json:"viewerDidAuthor"`
}

// CommentCreateInput carries the parameters for CommentCreate.
type CommentCreateInput struct {
	Body      string // comment text
	SubjectId string // GraphQL node ID of the issue or pull request to comment on
}

// CommentDeleteInput carries the parameters for CommentDelete.
type CommentDeleteInput struct {
	CommentId string // GraphQL node ID of the comment to delete
}

// CommentUpdateInput carries the parameters for CommentUpdate.
type CommentUpdateInput struct {
	Body      string // replacement comment text
	CommentId string // GraphQL node ID of the comment to edit
}
// CommentCreate adds a comment to the issue or pull request identified by
// params.SubjectId and returns the URL of the newly created comment.
func CommentCreate(client *Client, repoHost string, params CommentCreateInput) (string, error) {
	var m struct {
		AddComment struct {
			CommentEdge struct {
				Node struct {
					URL string
				}
			}
		} `graphql:"addComment(input: $input)"`
	}

	input := githubv4.AddCommentInput{
		Body:      githubv4.String(params.Body),
		SubjectID: githubv4.ID(params.SubjectId),
	}
	if err := client.Mutate(repoHost, "CommentCreate", &m, map[string]interface{}{"input": input}); err != nil {
		return "", err
	}

	return m.AddComment.CommentEdge.Node.URL, nil
}
// CommentUpdate replaces the body of an existing issue comment and returns
// the comment's URL.
func CommentUpdate(client *Client, repoHost string, params CommentUpdateInput) (string, error) {
	var m struct {
		UpdateIssueComment struct {
			IssueComment struct {
				URL string
			}
		} `graphql:"updateIssueComment(input: $input)"`
	}

	input := githubv4.UpdateIssueCommentInput{
		Body: githubv4.String(params.Body),
		ID:   githubv4.ID(params.CommentId),
	}
	if err := client.Mutate(repoHost, "CommentUpdate", &m, map[string]interface{}{"input": input}); err != nil {
		return "", err
	}

	return m.UpdateIssueComment.IssueComment.URL, nil
}
// CommentDelete deletes the issue comment identified by params.CommentId.
func CommentDelete(client *Client, repoHost string, params CommentDeleteInput) error {
	var mutation struct {
		DeleteIssueComment struct {
			ClientMutationID string
		} `graphql:"deleteIssueComment(input: $input)"`
	}
	variables := map[string]interface{}{
		"input": githubv4.DeleteIssueCommentInput{
			ID: githubv4.ID(params.CommentId),
		},
	}
	// Mutate's error is the only meaningful result here; return it directly
	// instead of the redundant err-check-then-return-nil plumbing.
	return client.Mutate(repoHost, "CommentDelete", &mutation, variables)
}
// The accessors below adapt Comment to a shared comment-display interface
// (presumably also implemented by PullRequestReview — confirm at call sites).

// Identifier returns the comment's GraphQL node ID.
func (c Comment) Identifier() string {
	return c.ID
}

// AuthorLogin returns the login of the comment's author.
func (c Comment) AuthorLogin() string {
	return c.Author.Login
}

// Association returns the author's association with the repository
// (e.g. the authorAssociation GraphQL field).
func (c Comment) Association() string {
	return c.AuthorAssociation
}

// Content returns the comment body.
func (c Comment) Content() string {
	return c.Body
}

// Created returns the comment's creation time.
func (c Comment) Created() time.Time {
	return c.CreatedAt
}

// HiddenReason returns why the comment was minimized, if it was.
func (c Comment) HiddenReason() string {
	return c.MinimizedReason
}

// IsEdited reports whether the comment has been edited since creation.
func (c Comment) IsEdited() bool {
	return c.IncludesCreatedEdit
}

// IsHidden reports whether the comment is minimized.
func (c Comment) IsHidden() bool {
	return c.IsMinimized
}

// Link returns the comment's web URL.
func (c Comment) Link() string {
	return c.URL
}

// Reactions returns the comment's reaction groups.
func (c Comment) Reactions() ReactionGroups {
	return c.ReactionGroups
}

// Status returns the review state; plain comments have none, so this is
// always empty.
func (c Comment) Status() string {
	return ""
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/api/export_pr_test.go | api/export_pr_test.go | package api
import (
"bytes"
"encoding/json"
"strings"
"testing"
"github.com/MakeNowJust/heredoc"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// TestIssue_ExportData checks that Issue.ExportData emits exactly the
// requested fields: each case decodes GraphQL-shaped input JSON into an
// Issue, exports the listed fields, and compares the re-encoded result
// against the expected JSON (as decoded values, so formatting is ignored).
func TestIssue_ExportData(t *testing.T) {
	tests := []struct {
		name       string
		fields     []string
		inputJSON  string
		outputJSON string
	}{
		{
			name:   "simple",
			fields: []string{"number", "title"},
			inputJSON: heredoc.Doc(`
				{ "title": "Bugs hugs", "number": 2345 }
			`),
			outputJSON: heredoc.Doc(`
				{
					"number": 2345,
					"title": "Bugs hugs"
				}
			`),
		},
		{
			// Unset milestone fields are exported with their zero values.
			name:   "milestone",
			fields: []string{"number", "milestone"},
			inputJSON: heredoc.Doc(`
				{ "number": 2345, "milestone": {"title": "The next big thing"} }
			`),
			outputJSON: heredoc.Doc(`
				{
					"milestone": {
						"number": 0,
						"title": "The next big thing",
						"description": "",
						"dueOn": null
					},
					"number": 2345
				}
			`),
		},
		{
			// Project cards are flattened from the GraphQL "nodes" wrapper.
			name:   "project cards",
			fields: []string{"projectCards"},
			inputJSON: heredoc.Doc(`
				{ "projectCards": { "nodes": [
					{
						"project": { "name": "Rewrite" },
						"column": { "name": "TO DO" }
					}
				] } }
			`),
			outputJSON: heredoc.Doc(`
				{
					"projectCards": [
						{
							"project": {
								"name": "Rewrite"
							},
							"column": {
								"name": "TO DO"
							}
						}
					]
				}
			`),
		},
		{
			// ProjectsV2 items export only status and project title; raw IDs
			// are dropped.
			name:   "project items",
			fields: []string{"projectItems"},
			inputJSON: heredoc.Doc(`
				{ "projectItems": { "nodes": [
					{
						"id": "PVTI_id",
						"project": {
							"id": "PVT_id",
							"title": "Some Project"
						},
						"status": {
							"name": "Todo",
							"optionId": "abc123"
						}
					}
				] } }
			`),
			outputJSON: heredoc.Doc(`
				{
					"projectItems": [
						{
							"status": {
								"optionId": "abc123",
								"name": "Todo"
							},
							"title": "Some Project"
						}
					]
				}
			`),
		},
		{
			// Linked PRs keep their identifying fields and repository owner.
			name:   "linked pull requests",
			fields: []string{"closedByPullRequestsReferences"},
			inputJSON: heredoc.Doc(`
				{ "closedByPullRequestsReferences": { "nodes": [
					{
						"id": "I_123",
						"number": 123,
						"url": "https://github.com/cli/cli/pull/123",
						"repository": {
							"id": "R_123",
							"name": "cli",
							"owner": {
								"id": "O_123",
								"login": "cli"
							}
						}
					},
					{
						"id": "I_456",
						"number": 456,
						"url": "https://github.com/cli/cli/pull/456",
						"repository": {
							"id": "R_456",
							"name": "cli",
							"owner": {
								"id": "O_456",
								"login": "cli"
							}
						}
					}
				] } }
			`),
			outputJSON: heredoc.Doc(`
				{ "closedByPullRequestsReferences": [
					{
						"id": "I_123",
						"number": 123,
						"repository": {
							"id": "R_123",
							"name": "cli",
							"owner": {
								"id": "O_123",
								"login": "cli"
							}
						},
						"url": "https://github.com/cli/cli/pull/123"
					},
					{
						"id": "I_456",
						"number": 456,
						"repository": {
							"id": "R_456",
							"name": "cli",
							"owner": {
								"id": "O_456",
								"login": "cli"
							}
						},
						"url": "https://github.com/cli/cli/pull/456"
					}
				] }
			`),
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			var issue Issue
			dec := json.NewDecoder(strings.NewReader(tt.inputJSON))
			require.NoError(t, dec.Decode(&issue))

			exported := issue.ExportData(tt.fields)

			// Round-trip through the encoder so the comparison is on decoded
			// values rather than on exact JSON formatting.
			buf := bytes.Buffer{}
			enc := json.NewEncoder(&buf)
			enc.SetIndent("", "\t")
			require.NoError(t, enc.Encode(exported))

			var gotData interface{}
			dec = json.NewDecoder(&buf)
			require.NoError(t, dec.Decode(&gotData))
			var expectData interface{}
			require.NoError(t, json.Unmarshal([]byte(tt.outputJSON), &expectData))

			assert.Equal(t, expectData, gotData)
		})
	}
}
// TestPullRequest_ExportData mirrors TestIssue_ExportData for PullRequest:
// decode GraphQL-shaped input, export the requested fields, and compare the
// re-encoded JSON against the expected document (as decoded values).
func TestPullRequest_ExportData(t *testing.T) {
	tests := []struct {
		name       string
		fields     []string
		inputJSON  string
		outputJSON string
	}{
		{
			name:   "simple",
			fields: []string{"number", "title"},
			inputJSON: heredoc.Doc(`
				{ "title": "Bugs hugs", "number": 2345 }
			`),
			outputJSON: heredoc.Doc(`
				{
					"number": 2345,
					"title": "Bugs hugs"
				}
			`),
		},
		{
			name:   "milestone",
			fields: []string{"number", "milestone"},
			inputJSON: heredoc.Doc(`
				{ "number": 2345, "milestone": {"title": "The next big thing"} }
			`),
			outputJSON: heredoc.Doc(`
				{
					"milestone": {
						"number": 0,
						"title": "The next big thing",
						"description": "",
						"dueOn": null
					},
					"number": 2345
				}
			`),
		},
		{
			// The nested commit/rollup/contexts wrappers are flattened into a
			// single list; CheckRun entries gain workflowName, StatusContext
			// entries have createdAt exported as startedAt.
			name:   "status checks",
			fields: []string{"statusCheckRollup"},
			inputJSON: heredoc.Doc(`
				{ "statusCheckRollup": { "nodes": [
					{ "commit": { "statusCheckRollup": { "contexts": { "nodes": [
						{
							"__typename": "CheckRun",
							"name": "mycheck",
							"checkSuite": {"workflowRun": {"workflow": {"name": "myworkflow"}}},
							"status": "COMPLETED",
							"conclusion": "SUCCESS",
							"startedAt": "2020-08-31T15:44:24+02:00",
							"completedAt": "2020-08-31T15:45:24+02:00",
							"detailsUrl": "http://example.com/details"
						},
						{
							"__typename": "StatusContext",
							"context": "mycontext",
							"state": "SUCCESS",
							"createdAt": "2020-08-31T15:44:24+02:00",
							"targetUrl": "http://example.com/details"
						}
					] } } } }
				] } }
			`),
			outputJSON: heredoc.Doc(`
				{
					"statusCheckRollup": [
						{
							"__typename": "CheckRun",
							"name": "mycheck",
							"workflowName": "myworkflow",
							"status": "COMPLETED",
							"conclusion": "SUCCESS",
							"startedAt": "2020-08-31T15:44:24+02:00",
							"completedAt": "2020-08-31T15:45:24+02:00",
							"detailsUrl": "http://example.com/details"
						},
						{
							"__typename": "StatusContext",
							"context": "mycontext",
							"state": "SUCCESS",
							"startedAt": "2020-08-31T15:44:24+02:00",
							"targetUrl": "http://example.com/details"
						}
					]
				}
			`),
		},
		{
			name:   "project items",
			fields: []string{"projectItems"},
			inputJSON: heredoc.Doc(`
				{ "projectItems": { "nodes": [
					{
						"id": "PVTPR_id",
						"project": {
							"id": "PVT_id",
							"title": "Some Project"
						},
						"status": {
							"name": "Todo",
							"optionId": "abc123"
						}
					}
				] } }
			`),
			outputJSON: heredoc.Doc(`
				{
					"projectItems": [
						{
							"status": {
								"optionId": "abc123",
								"name": "Todo"
							},
							"title": "Some Project"
						}
					]
				}
			`),
		},
		{
			name:   "linked issues",
			fields: []string{"closingIssuesReferences"},
			inputJSON: heredoc.Doc(`
				{ "closingIssuesReferences": { "nodes": [
					{
						"id": "I_123",
						"number": 123,
						"url": "https://github.com/cli/cli/issues/123",
						"repository": {
							"id": "R_123",
							"name": "cli",
							"owner": {
								"id": "O_123",
								"login": "cli"
							}
						}
					},
					{
						"id": "I_456",
						"number": 456,
						"url": "https://github.com/cli/cli/issues/456",
						"repository": {
							"id": "R_456",
							"name": "cli",
							"owner": {
								"id": "O_456",
								"login": "cli"
							}
						}
					}
				] } }
			`),
			outputJSON: heredoc.Doc(`
				{ "closingIssuesReferences": [
					{
						"id": "I_123",
						"number": 123,
						"repository": {
							"id": "R_123",
							"name": "cli",
							"owner": {
								"id": "O_123",
								"login": "cli"
							}
						},
						"url": "https://github.com/cli/cli/issues/123"
					},
					{
						"id": "I_456",
						"number": 456,
						"repository": {
							"id": "R_456",
							"name": "cli",
							"owner": {
								"id": "O_456",
								"login": "cli"
							}
						},
						"url": "https://github.com/cli/cli/issues/456"
					}
				] }
			`),
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			var pr PullRequest
			dec := json.NewDecoder(strings.NewReader(tt.inputJSON))
			require.NoError(t, dec.Decode(&pr))

			exported := pr.ExportData(tt.fields)

			// Compare decoded values so JSON formatting differences are ignored.
			buf := bytes.Buffer{}
			enc := json.NewEncoder(&buf)
			enc.SetIndent("", "\t")
			require.NoError(t, enc.Encode(exported))

			var gotData interface{}
			dec = json.NewDecoder(&buf)
			require.NoError(t, dec.Decode(&gotData))
			var expectData interface{}
			require.NoError(t, json.Unmarshal([]byte(tt.outputJSON), &expectData))

			assert.Equal(t, expectData, gotData)
		})
	}
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/api/client.go | api/client.go | package api
import (
"context"
"encoding/json"
"errors"
"fmt"
"io"
"net/http"
"regexp"
"strings"
ghAPI "github.com/cli/go-gh/v2/pkg/api"
ghauth "github.com/cli/go-gh/v2/pkg/auth"
)
// HTTP header names (and one header value) used when talking to GitHub.
const (
	accept          = "Accept"
	authorization   = "Authorization"
	cacheTTL        = "X-GH-CACHE-TTL"
	graphqlFeatures = "GraphQL-Features"
	features        = "merge_queue" // opt-in GraphQL feature flags sent on every GraphQL call
	userAgent       = "User-Agent"
)

// linkRE matches one `<url>; rel="name"` entry of an RFC 5988 Link header.
var linkRE = regexp.MustCompile(`<([^>]+)>;\s*rel="([^"]+)"`)
// NewClientFromHTTP wraps an existing http.Client in an API Client.
func NewClientFromHTTP(httpClient *http.Client) *Client {
	client := &Client{http: httpClient}
	return client
}

// Client issues REST and GraphQL requests to GitHub hosts via go-gh.
type Client struct {
	http *http.Client
}

// HTTP exposes the underlying http.Client.
func (c *Client) HTTP() *http.Client {
	return c.http
}

// GraphQLError wraps a go-gh GraphQL error.
type GraphQLError struct {
	*ghAPI.GraphQLError
}

// HTTPError wraps a go-gh HTTP error and carries an optional suggestion
// about OAuth scopes the user may be missing.
type HTTPError struct {
	*ghAPI.HTTPError
	scopesSuggestion string
}

// ScopesSuggestion returns advice on requesting missing OAuth scopes, or ""
// when there is none.
func (err HTTPError) ScopesSuggestion() string {
	return err.scopesSuggestion
}
// GraphQL performs a GraphQL request using the query string and parses the
// response into the data receiver. If the response contains errors, a
// GraphQLError is returned, but the receiver will also be partially populated.
func (c Client) GraphQL(hostname string, query string, variables map[string]interface{}, data interface{}) error {
	opts := clientOptions(hostname, c.http.Transport)
	opts.Headers[graphqlFeatures] = features

	gql, err := ghAPI.NewGraphQLClient(opts)
	if err != nil {
		return err
	}

	return handleResponse(gql.Do(query, variables, data))
}
// Mutate performs a GraphQL mutation based on a struct and parses the
// response into the same struct. If the response contains errors, a
// GraphQLError is returned, but the receiver will also be partially populated.
func (c Client) Mutate(hostname, name string, mutation interface{}, variables map[string]interface{}) error {
	opts := clientOptions(hostname, c.http.Transport)
	opts.Headers[graphqlFeatures] = features

	gql, err := ghAPI.NewGraphQLClient(opts)
	if err != nil {
		return err
	}

	return handleResponse(gql.Mutate(name, mutation, variables))
}
// Query performs a GraphQL query based on a struct and parses the response
// into the same struct. If the response contains errors, a GraphQLError is
// returned, but the receiver will also be partially populated.
func (c Client) Query(hostname, name string, query interface{}, variables map[string]interface{}) error {
	opts := clientOptions(hostname, c.http.Transport)
	opts.Headers[graphqlFeatures] = features

	gql, err := ghAPI.NewGraphQLClient(opts)
	if err != nil {
		return err
	}

	return handleResponse(gql.Query(name, query, variables))
}
// QueryWithContext is Query with cancellation: it performs a GraphQL query
// based on a struct and parses the response into the same struct. If the
// response contains errors, a GraphQLError is returned, but the receiver
// will also be partially populated.
func (c Client) QueryWithContext(ctx context.Context, hostname, name string, query interface{}, variables map[string]interface{}) error {
	opts := clientOptions(hostname, c.http.Transport)
	opts.Headers[graphqlFeatures] = features

	gql, err := ghAPI.NewGraphQLClient(opts)
	if err != nil {
		return err
	}

	return handleResponse(gql.QueryWithContext(ctx, name, query, variables))
}
// REST performs a REST request and parses the response into data.
func (c Client) REST(hostname string, method string, p string, body io.Reader, data interface{}) error {
	opts := clientOptions(hostname, c.http.Transport)

	rest, err := ghAPI.NewRESTClient(opts)
	if err != nil {
		return err
	}

	return handleResponse(rest.Do(method, p, body, data))
}
// RESTWithNext performs a REST request, decodes the response into data, and
// returns the URL of the next page from the Link header ("" when there is
// no next page).
func (c Client) RESTWithNext(hostname string, method string, p string, body io.Reader, data interface{}) (string, error) {
	opts := clientOptions(hostname, c.http.Transport)
	restClient, err := ghAPI.NewRESTClient(opts)
	if err != nil {
		return "", err
	}

	resp, err := restClient.Request(method, p, body)
	if err != nil {
		return "", err
	}
	defer resp.Body.Close()

	success := resp.StatusCode >= 200 && resp.StatusCode < 300
	if !success {
		return "", HandleHTTPError(resp)
	}

	// 204 No Content carries no body to decode.
	if resp.StatusCode == http.StatusNoContent {
		return "", nil
	}

	b, err := io.ReadAll(resp.Body)
	if err != nil {
		return "", err
	}

	err = json.Unmarshal(b, &data)
	if err != nil {
		return "", err
	}

	// Pick the rel="next" entry out of the Link header; if several are
	// present, the last one wins.
	var next string
	for _, m := range linkRE.FindAllStringSubmatch(resp.Header.Get("Link"), -1) {
		if len(m) > 2 && m[2] == "next" {
			next = m[1]
		}
	}

	return next, nil
}
// HandleHTTPError parses a http.Response into a HTTPError.
//
// The caller is responsible to close the response body stream.
func HandleHTTPError(resp *http.Response) error {
	return handleResponse(ghAPI.HandleHTTPError(resp))
}

// handleResponse takes a ghAPI.HTTPError or ghAPI.GraphQLError and converts it into an
// HTTPError or GraphQLError respectively. Any other error (including nil)
// is passed through unchanged.
func handleResponse(err error) error {
	if err == nil {
		return nil
	}

	var restErr *ghAPI.HTTPError
	if errors.As(err, &restErr) {
		// Attach scope advice derived from the OAuth-scope headers so the
		// CLI can tell the user which scope to request.
		return HTTPError{
			HTTPError: restErr,
			scopesSuggestion: generateScopesSuggestion(restErr.StatusCode,
				restErr.Headers.Get("X-Accepted-Oauth-Scopes"),
				restErr.Headers.Get("X-Oauth-Scopes"),
				restErr.RequestURL.Hostname()),
		}
	}

	var gqlErr *ghAPI.GraphQLError
	if errors.As(err, &gqlErr) {
		return GraphQLError{
			GraphQLError: gqlErr,
		}
	}

	return err
}
// ScopesSuggestion is an error messaging utility that prints the suggestion to request additional OAuth
// scopes in case a server response indicates that there are missing scopes.
func ScopesSuggestion(resp *http.Response) string {
	return generateScopesSuggestion(resp.StatusCode,
		resp.Header.Get("X-Accepted-Oauth-Scopes"),
		resp.Header.Get("X-Oauth-Scopes"),
		resp.Request.URL.Hostname())
}
// EndpointNeedsScopes adds additional OAuth scopes to an HTTP response as if they were returned from the
// server endpoint. This improves HTTP 4xx error messaging for endpoints that don't explicitly list the
// OAuth scopes they need.
func EndpointNeedsScopes(resp *http.Response, s string) {
if resp.StatusCode >= 400 && resp.StatusCode < 500 {
oldScopes := resp.Header.Get("X-Accepted-Oauth-Scopes")
resp.Header.Set("X-Accepted-Oauth-Scopes", fmt.Sprintf("%s, %s", oldScopes, s))
}
}
// generateScopesSuggestion compares the scopes the endpoint accepts against
// the scopes the token has, and returns a `gh auth refresh` suggestion for
// the first missing scope, or "" when nothing is missing or not applicable.
func generateScopesSuggestion(statusCode int, endpointNeedsScopes, tokenHasScopes, hostname string) string {
	// Only 4xx responses can signal missing scopes; 422 is a validation
	// error, not an authorization problem.
	if statusCode < 400 || statusCode > 499 || statusCode == 422 {
		return ""
	}

	// No X-Oauth-Scopes header means scope information is unavailable.
	if tokenHasScopes == "" {
		return ""
	}

	gotScopes := map[string]struct{}{}
	for _, s := range strings.Split(tokenHasScopes, ",") {
		s = strings.TrimSpace(s)
		gotScopes[s] = struct{}{}

		// Certain scopes may be grouped under a single "top-level" scope. The following switch
		// cases include these grouped/implied scopes when the top-level scope is encountered.
		// See https://docs.github.com/en/developers/apps/building-oauth-apps/scopes-for-oauth-apps.
		switch {
		case s == "repo":
			gotScopes["repo:status"] = struct{}{}
			gotScopes["repo_deployment"] = struct{}{}
			gotScopes["public_repo"] = struct{}{}
			gotScopes["repo:invite"] = struct{}{}
			gotScopes["security_events"] = struct{}{}
		case s == "user":
			gotScopes["read:user"] = struct{}{}
			gotScopes["user:email"] = struct{}{}
			gotScopes["user:follow"] = struct{}{}
		case s == "codespace":
			gotScopes["codespace:secrets"] = struct{}{}
		case strings.HasPrefix(s, "admin:"):
			// admin:X implies both read:X and write:X.
			sub := strings.TrimPrefix(s, "admin:")
			gotScopes["read:"+sub] = struct{}{}
			gotScopes["write:"+sub] = struct{}{}
		case strings.HasPrefix(s, "write:"):
			gotScopes["read:"+strings.TrimPrefix(s, "write:")] = struct{}{}
		}
	}

	for _, s := range strings.Split(endpointNeedsScopes, ",") {
		s = strings.TrimSpace(s)
		if _, gotScope := gotScopes[s]; s == "" || gotScope {
			continue
		}
		return fmt.Sprintf(
			"This API operation needs the %[1]q scope. To request it, run: gh auth refresh -h %[2]s -s %[1]s",
			s,
			ghauth.NormalizeHostname(hostname),
		)
	}

	return ""
}
// clientOptions builds go-gh client options for the given host, delegating
// auth and default-header handling entirely to the supplied transport.
func clientOptions(hostname string, transport http.RoundTripper) ghAPI.ClientOptions {
	// AuthToken, and Headers are being handled by transport,
	// so let go-gh know that it does not need to resolve them.
	opts := ghAPI.ClientOptions{
		AuthToken: "none",
		Headers: map[string]string{
			authorization: "",
		},
		Host:               hostname,
		SkipDefaultHeaders: true,
		Transport:          transport,
		LogIgnoreEnv:       true,
	}
	return opts
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/api/queries_org.go | api/queries_org.go | package api
import (
"fmt"
"github.com/cli/cli/v2/internal/ghrepo"
"github.com/shurcooL/githubv4"
)
// OrganizationProjects fetches all open projects for an organization,
// following GraphQL pagination until every page has been collected.
func OrganizationProjects(client *Client, repo ghrepo.Interface) ([]RepoProject, error) {
	type responseData struct {
		Organization struct {
			Projects struct {
				Nodes    []RepoProject
				PageInfo struct {
					HasNextPage bool
					EndCursor   string
				}
			} `graphql:"projects(states: [OPEN], first: 100, orderBy: {field: NAME, direction: ASC}, after: $endCursor)"`
		} `graphql:"organization(login: $owner)"`
	}

	variables := map[string]interface{}{
		"owner":     githubv4.String(repo.RepoOwner()),
		"endCursor": (*githubv4.String)(nil),
	}

	var projects []RepoProject
	for {
		var query responseData
		if err := client.Query(repo.RepoHost(), "OrganizationProjectList", &query, variables); err != nil {
			return nil, err
		}

		page := query.Organization.Projects
		projects = append(projects, page.Nodes...)
		if !page.PageInfo.HasNextPage {
			return projects, nil
		}
		variables["endCursor"] = githubv4.String(page.PageInfo.EndCursor)
	}
}
// OrgTeam identifies a team within an organization.
type OrgTeam struct {
	ID   string
	Slug string
}

// OrganizationTeam fetches the team in an organization with the given slug.
func OrganizationTeam(client *Client, hostname string, org string, teamSlug string) (*OrgTeam, error) {
	type responseData struct {
		Organization struct {
			Team OrgTeam `graphql:"team(slug: $teamSlug)"`
		} `graphql:"organization(login: $owner)"`
	}

	variables := map[string]interface{}{
		"owner":    githubv4.String(org),
		"teamSlug": githubv4.String(teamSlug),
	}

	var query responseData
	err := client.Query(hostname, "OrganizationTeam", &query, variables)
	if err != nil {
		return nil, err
	}
	// GraphQL returns an empty team (rather than an error) for an unknown
	// slug, so detect that via the missing ID.
	if query.Organization.Team.ID == "" {
		return nil, fmt.Errorf("could not resolve to a Team with the slug of '%s'", teamSlug)
	}

	return &query.Organization.Team, nil
}
// OrganizationTeams fetches all the teams in an organization, following
// GraphQL pagination until every page has been collected.
func OrganizationTeams(client *Client, repo ghrepo.Interface) ([]OrgTeam, error) {
	type responseData struct {
		Organization struct {
			Teams struct {
				Nodes    []OrgTeam
				PageInfo struct {
					HasNextPage bool
					EndCursor   string
				}
			} `graphql:"teams(first: 100, orderBy: {field: NAME, direction: ASC}, after: $endCursor)"`
		} `graphql:"organization(login: $owner)"`
	}

	variables := map[string]interface{}{
		"owner":     githubv4.String(repo.RepoOwner()),
		"endCursor": (*githubv4.String)(nil),
	}

	var teams []OrgTeam
	for {
		var query responseData
		if err := client.Query(repo.RepoHost(), "OrganizationTeamList", &query, variables); err != nil {
			return nil, err
		}

		page := query.Organization.Teams
		teams = append(teams, page.Nodes...)
		if !page.PageInfo.HasNextPage {
			return teams, nil
		}
		variables["endCursor"] = githubv4.String(page.PageInfo.EndCursor)
	}
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/api/queries_issue.go | api/queries_issue.go | package api
import (
"encoding/json"
"fmt"
"time"
"github.com/cli/cli/v2/internal/ghrepo"
)
// IssuesPayload groups the three issue lists returned by IssueStatus.
type IssuesPayload struct {
	Assigned  IssuesAndTotalCount
	Mentioned IssuesAndTotalCount
	Authored  IssuesAndTotalCount
}

// IssuesAndTotalCount is a list of issues along with the server-side total.
type IssuesAndTotalCount struct {
	Issues     []Issue
	TotalCount int
	// SearchCapped signals the total was capped by a search limit —
	// NOTE(review): set by a producer outside this file; confirm semantics there.
	SearchCapped bool
}

// Issue models a GitHub issue (or, via Typename, a pull request) as
// returned by the GraphQL API.
type Issue struct {
	Typename         string `json:"__typename"`
	ID               string
	Number           int
	Title            string
	URL              string
	State            string
	StateReason      string
	Closed           bool
	Body             string
	ActiveLockReason string
	Locked           bool
	CreatedAt        time.Time
	UpdatedAt        time.Time
	ClosedAt         *time.Time
	Comments         Comments
	Author           Author
	Assignees        Assignees
	AssignedActors   AssignedActors
	Labels           Labels
	ProjectCards     ProjectCards
	ProjectItems     ProjectItems
	Milestone        *Milestone
	ReactionGroups   ReactionGroups
	IsPinned         bool

	ClosedByPullRequestsReferences ClosedByPullRequestsReferences
}

// ClosedByPullRequestsReferences is the paginated list of pull requests
// that closed (or would close) an issue.
type ClosedByPullRequestsReferences struct {
	Nodes []struct {
		ID         string
		Number     int
		URL        string
		Repository struct {
			ID    string
			Name  string
			Owner struct {
				ID    string
				Login string
			}
		}
	}
	PageInfo struct {
		HasNextPage bool
		EndCursor   string
	}
}

// return values for Issue.Typename
const (
	TypeIssue       string = "Issue"
	TypePullRequest string = "PullRequest"
)

// IsPullRequest reports whether this record is actually a pull request.
func (i Issue) IsPullRequest() bool {
	return i.Typename == TypePullRequest
}
// Assignees is the list of users assigned to an issue or pull request.
type Assignees struct {
	Nodes      []GitHubUser
	TotalCount int
}

// Logins returns the login of every assigned user, in node order.
func (a Assignees) Logins() []string {
	out := make([]string, 0, len(a.Nodes))
	for _, node := range a.Nodes {
		out = append(out, node.Login)
	}
	return out
}
// AssignedActors is the list of actors (users or bots) assigned to an issue
// or pull request.
type AssignedActors struct {
	Nodes      []Actor
	TotalCount int
}

// Logins returns the login of every assigned actor, in node order.
func (a AssignedActors) Logins() []string {
	out := make([]string, 0, len(a.Nodes))
	for _, node := range a.Nodes {
		out = append(out, node.Login)
	}
	return out
}
// DisplayNames returns a list of display names for the assigned actors.
//
// These names seed the "default" selections when editing assignees. Other
// queries later fetch the full list of assignable actors for the repository,
// and the two lists are reconciled by display name — any mismatch means an
// assigned actor is not recognized as a default and could be silently
// unassigned when edits are submitted. To keep the names consistent, each
// actor is converted to the same assignable type used by the repository
// queries, whose DisplayName methods are the single source of truth.
func (a AssignedActors) DisplayNames() []string {
	var displayNames []string
	for _, a := range a.Nodes {
		if a.TypeName == "User" {
			u := NewAssignableUser(
				a.ID,
				a.Login,
				a.Name,
			)
			displayNames = append(displayNames, u.DisplayName())
		} else if a.TypeName == "Bot" {
			b := NewAssignableBot(
				a.ID,
				a.Login,
			)
			displayNames = append(displayNames, b.DisplayName())
		}
		// Actors of any other type are skipped.
	}
	return displayNames
}
// Labels is the list of labels attached to an issue or pull request.
type Labels struct {
	Nodes      []IssueLabel
	TotalCount int
}

// Names returns the name of every label, in node order.
func (l Labels) Names() []string {
	out := make([]string, 0, len(l.Nodes))
	for _, label := range l.Nodes {
		out = append(out, label.Name)
	}
	return out
}
// ProjectCards is the list of classic-project cards an issue belongs to.
type ProjectCards struct {
	Nodes      []*ProjectInfo
	TotalCount int
}

// ProjectItems is the list of ProjectsV2 items an issue belongs to.
type ProjectItems struct {
	Nodes      []*ProjectV2Item
	TotalCount int
}

// ProjectInfo is one classic-project card: the project and its column.
type ProjectInfo struct {
	Project struct {
		Name string `json:"name"`
	} `json:"project"`
	Column struct {
		Name string `json:"name"`
	} `json:"column"`
}

// ProjectV2Item is one ProjectsV2 item with its project and status field.
type ProjectV2Item struct {
	ID      string `json:"id"`
	Project ProjectV2ItemProject
	Status  ProjectV2ItemStatus
}

// ProjectV2ItemProject identifies the project a V2 item belongs to.
type ProjectV2ItemProject struct {
	ID    string `json:"id"`
	Title string `json:"title"`
}

// ProjectV2ItemStatus is the single-select "Status" field of a V2 item.
type ProjectV2ItemStatus struct {
	OptionID string `json:"optionId"`
	Name     string `json:"name"`
}
// ProjectNames returns the name of each classic project, in node order.
func (p ProjectCards) ProjectNames() []string {
	out := make([]string, 0, len(p.Nodes))
	for _, card := range p.Nodes {
		out = append(out, card.Project.Name)
	}
	return out
}

// ProjectTitles returns the title of each V2 project, in node order.
func (p ProjectItems) ProjectTitles() []string {
	out := make([]string, 0, len(p.Nodes))
	for _, item := range p.Nodes {
		out = append(out, item.Project.Title)
	}
	return out
}
// Milestone is an issue/PR milestone.
type Milestone struct {
	Number      int        `json:"number"`
	Title       string     `json:"title"`
	Description string     `json:"description"`
	DueOn       *time.Time `json:"dueOn"`
}

// IssuesDisabledError signals that a repository has issues disabled.
type IssuesDisabledError struct {
	error
}

// Owner identifies a repository owner (user or organization).
type Owner struct {
	ID    string `json:"id,omitempty"`
	Name  string `json:"name,omitempty"`
	Login string `json:"login"`
}
// Author is the author of an issue or pull request.
type Author struct {
	ID    string
	Name  string
	Login string
}

// MarshalJSON serializes an Author, flagging accounts without an ID as bots
// and prefixing their login with "app/"; regular users are emitted with
// their id, login, and name.
func (author Author) MarshalJSON() ([]byte, error) {
	var payload map[string]interface{}
	if author.ID == "" {
		payload = map[string]interface{}{
			"is_bot": true,
			"login":  "app/" + author.Login,
		}
	} else {
		payload = map[string]interface{}{
			"is_bot": false,
			"login":  author.Login,
			"id":     author.ID,
			"name":   author.Name,
		}
	}
	return json.Marshal(payload)
}
// CommentAuthor is the author of a comment or review, limited to login only.
type CommentAuthor struct {
	Login string `json:"login"`
	// Unfortunately, there is no easy way to add "id" and "name" fields to this struct because it's being
	// used in both shurcool-graphql type queries and string-based queries where the response gets parsed
	// by an ordinary JSON decoder that doesn't understand "graphql" directives via struct tags.
	// User *struct {
	// 	ID   string
	// 	Name string
	// } `graphql:"... on User"`
}
// IssueCreate creates an issue in a GitHub repository. params maps
// CreateIssueInput field names to values; projectV2Ids is handled
// separately because the createIssue mutation does not accept it.
func IssueCreate(client *Client, repo *Repository, params map[string]interface{}) (*Issue, error) {
	query := `
	mutation IssueCreate($input: CreateIssueInput!) {
		createIssue(input: $input) {
			issue {
				id
				url
			}
		}
	}`

	inputParams := map[string]interface{}{
		"repositoryId": repo.ID,
	}
	// Whitelist the supported mutation parameters; anything else is a
	// programmer error.
	for key, val := range params {
		switch key {
		case "assigneeIds", "body", "issueTemplate", "labelIds", "milestoneId", "projectIds", "repositoryId", "title":
			inputParams[key] = val
		case "projectV2Ids":
			// handled below, after the issue exists
		default:
			return nil, fmt.Errorf("invalid IssueCreate mutation parameter %s", key)
		}
	}
	variables := map[string]interface{}{
		"input": inputParams,
	}

	result := struct {
		CreateIssue struct {
			Issue Issue
		}
	}{}

	err := client.GraphQL(repo.RepoHost(), query, variables, &result)
	if err != nil {
		return nil, err
	}
	issue := &result.CreateIssue.Issue

	// projectV2 parameters aren't supported in the `createIssue` mutation,
	// so add them after the issue has been created.
	projectV2Ids, ok := params["projectV2Ids"].([]string)
	if ok {
		projectItems := make(map[string]string, len(projectV2Ids))
		for _, p := range projectV2Ids {
			projectItems[p] = issue.ID
		}
		// On failure the issue still exists, so return it alongside the error.
		err = UpdateProjectV2Items(client, repo, projectItems, nil)
		if err != nil {
			return issue, err
		}
	}

	return issue, nil
}
// IssueStatusOptions configures IssueStatus: which user to report on and
// which issue fields to fetch.
type IssueStatusOptions struct {
	Username string
	Fields   []string
}

// IssueStatus fetches, in one query, the open issues assigned to, mentioning,
// and created by the given user. Returns an error if the repository has
// issues disabled.
func IssueStatus(client *Client, repo ghrepo.Interface, options IssueStatusOptions) (*IssuesPayload, error) {
	type response struct {
		Repository struct {
			Assigned struct {
				TotalCount int
				Nodes      []Issue
			}
			Mentioned struct {
				TotalCount int
				Nodes      []Issue
			}
			Authored struct {
				TotalCount int
				Nodes      []Issue
			}
			HasIssuesEnabled bool
		}
	}

	// Build the issue fragment dynamically from the requested fields.
	fragments := fmt.Sprintf("fragment issue on Issue{%s}", IssueGraphQL(options.Fields))
	query := fragments + `
	query IssueStatus($owner: String!, $repo: String!, $viewer: String!, $per_page: Int = 10) {
		repository(owner: $owner, name: $repo) {
			hasIssuesEnabled
			assigned: issues(filterBy: {assignee: $viewer, states: OPEN}, first: $per_page, orderBy: {field: UPDATED_AT, direction: DESC}) {
				totalCount
				nodes {
					...issue
				}
			}
			mentioned: issues(filterBy: {mentioned: $viewer, states: OPEN}, first: $per_page, orderBy: {field: UPDATED_AT, direction: DESC}) {
				totalCount
				nodes {
					...issue
				}
			}
			authored: issues(filterBy: {createdBy: $viewer, states: OPEN}, first: $per_page, orderBy: {field: UPDATED_AT, direction: DESC}) {
				totalCount
				nodes {
					...issue
				}
			}
		}
	}`

	variables := map[string]interface{}{
		"owner":  repo.RepoOwner(),
		"repo":   repo.RepoName(),
		"viewer": options.Username,
	}

	var resp response
	err := client.GraphQL(repo.RepoHost(), query, variables, &resp)
	if err != nil {
		return nil, err
	}

	if !resp.Repository.HasIssuesEnabled {
		return nil, fmt.Errorf("the '%s' repository has disabled issues", ghrepo.FullName(repo))
	}

	payload := IssuesPayload{
		Assigned: IssuesAndTotalCount{
			Issues:     resp.Repository.Assigned.Nodes,
			TotalCount: resp.Repository.Assigned.TotalCount,
		},
		Mentioned: IssuesAndTotalCount{
			Issues:     resp.Repository.Mentioned.Nodes,
			TotalCount: resp.Repository.Mentioned.TotalCount,
		},
		Authored: IssuesAndTotalCount{
			Issues:     resp.Repository.Authored.Nodes,
			TotalCount: resp.Repository.Authored.TotalCount,
		},
	}

	return &payload, nil
}
// Link returns the issue's web URL.
func (i Issue) Link() string {
	return i.URL
}

// Identifier returns the issue's GraphQL node ID.
func (i Issue) Identifier() string {
	return i.ID
}

// CurrentUserComments returns the issue comments authored by the viewer.
func (i Issue) CurrentUserComments() []Comment {
	return i.Comments.CurrentUserComments()
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/api/query_builder_test.go | api/query_builder_test.go | package api
import "testing"
// TestPullRequestGraphQL checks the GraphQL field-selection string produced
// by PullRequestGraphQL for various requested field sets, including that
// issue-only fields ("isPinned", "stateReason") are dropped.
func TestPullRequestGraphQL(t *testing.T) {
	tests := []struct {
		name   string
		fields []string
		want   string
	}{
		{
			name:   "empty",
			fields: []string(nil),
			want:   "",
		},
		{
			name:   "simple fields",
			fields: []string{"number", "title"},
			want:   "number,title",
		},
		{
			name:   "fields with nested structures",
			fields: []string{"author", "assignees"},
			want:   "author{login,...on User{id,name}},assignees(first:100){nodes{id,login,name},totalCount}",
		},
		{
			name:   "compressed query",
			fields: []string{"files"},
			want:   "files(first: 100) {nodes {additions,deletions,path}}",
		},
		{
			name:   "invalid fields",
			fields: []string{"isPinned", "stateReason", "number"},
			want:   "number",
		},
		{
			name:   "projectItems",
			fields: []string{"projectItems"},
			want:   `projectItems(first:100){nodes{id, project{id,title}, status:fieldValueByName(name: "Status") { ... on ProjectV2ItemFieldSingleSelectValue{optionId,name}}},totalCount}`,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			if got := PullRequestGraphQL(tt.fields); got != tt.want {
				t.Errorf("PullRequestGraphQL() = %v, want %v", got, tt.want)
			}
		})
	}
}
func TestIssueGraphQL(t *testing.T) {
tests := []struct {
name string
fields []string
want string
}{
{
name: "empty",
fields: []string(nil),
want: "",
},
{
name: "simple fields",
fields: []string{"number", "title"},
want: "number,title",
},
{
name: "fields with nested structures",
fields: []string{"author", "assignees"},
want: "author{login,...on User{id,name}},assignees(first:100){nodes{id,login,name},totalCount}",
},
{
name: "compressed query",
fields: []string{"files"},
want: "files(first: 100) {nodes {additions,deletions,path}}",
},
{
name: "projectItems",
fields: []string{"projectItems"},
want: `projectItems(first:100){nodes{id, project{id,title}, status:fieldValueByName(name: "Status") { ... on ProjectV2ItemFieldSingleSelectValue{optionId,name}}},totalCount}`,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
if got := IssueGraphQL(tt.fields); got != tt.want {
t.Errorf("IssueGraphQL() = %v, want %v", got, tt.want)
}
})
}
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/api/queries_pr_review.go | api/queries_pr_review.go | package api
import (
"time"
"github.com/cli/cli/v2/internal/ghrepo"
"github.com/shurcooL/githubv4"
)
type PullRequestReviewState int
const (
ReviewApprove PullRequestReviewState = iota
ReviewRequestChanges
ReviewComment
)
type PullRequestReviewInput struct {
Body string
State PullRequestReviewState
}
type PullRequestReviews struct {
Nodes []PullRequestReview
PageInfo struct {
HasNextPage bool
EndCursor string
}
TotalCount int
}
type PullRequestReview struct {
ID string `json:"id"`
Author CommentAuthor `json:"author"`
AuthorAssociation string `json:"authorAssociation"`
Body string `json:"body"`
SubmittedAt *time.Time `json:"submittedAt"`
IncludesCreatedEdit bool `json:"includesCreatedEdit"`
ReactionGroups ReactionGroups `json:"reactionGroups"`
State string `json:"state"`
URL string `json:"url,omitempty"`
Commit Commit `json:"commit"`
}
func AddReview(client *Client, repo ghrepo.Interface, pr *PullRequest, input *PullRequestReviewInput) error {
var mutation struct {
AddPullRequestReview struct {
ClientMutationID string
} `graphql:"addPullRequestReview(input:$input)"`
}
state := githubv4.PullRequestReviewEventComment
switch input.State {
case ReviewApprove:
state = githubv4.PullRequestReviewEventApprove
case ReviewRequestChanges:
state = githubv4.PullRequestReviewEventRequestChanges
}
body := githubv4.String(input.Body)
variables := map[string]interface{}{
"input": githubv4.AddPullRequestReviewInput{
PullRequestID: pr.ID,
Event: &state,
Body: &body,
},
}
return client.Mutate(repo.RepoHost(), "PullRequestReviewAdd", &mutation, variables)
}
func (prr PullRequestReview) Identifier() string {
return prr.ID
}
func (prr PullRequestReview) AuthorLogin() string {
return prr.Author.Login
}
func (prr PullRequestReview) Association() string {
return prr.AuthorAssociation
}
func (prr PullRequestReview) Content() string {
return prr.Body
}
func (prr PullRequestReview) Created() time.Time {
if prr.SubmittedAt == nil {
return time.Time{}
}
return *prr.SubmittedAt
}
func (prr PullRequestReview) HiddenReason() string {
return ""
}
func (prr PullRequestReview) IsEdited() bool {
return prr.IncludesCreatedEdit
}
func (prr PullRequestReview) IsHidden() bool {
return false
}
func (prr PullRequestReview) Link() string {
return prr.URL
}
func (prr PullRequestReview) Reactions() ReactionGroups {
return prr.ReactionGroups
}
func (prr PullRequestReview) Status() string {
return prr.State
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/api/queries_projects_v2.go | api/queries_projects_v2.go | package api
import (
"fmt"
"strings"
"github.com/cli/cli/v2/internal/ghrepo"
"github.com/shurcooL/githubv4"
)
const (
errorProjectsV2ReadScope = "field requires one of the following scopes: ['read:project']"
errorProjectsV2UserField = "Field 'projectsV2' doesn't exist on type 'User'"
errorProjectsV2RepositoryField = "Field 'projectsV2' doesn't exist on type 'Repository'"
errorProjectsV2OrganizationField = "Field 'projectsV2' doesn't exist on type 'Organization'"
errorProjectsV2IssueField = "Field 'projectItems' doesn't exist on type 'Issue'"
errorProjectsV2PullRequestField = "Field 'projectItems' doesn't exist on type 'PullRequest'"
)
type ProjectV2 struct {
ID string `json:"id"`
Title string `json:"title"`
Number int `json:"number"`
ResourcePath string `json:"resourcePath"`
Closed bool `json:"closed"`
URL string `json:"url"`
}
// UpdateProjectV2Items uses the addProjectV2ItemById and the deleteProjectV2Item mutations
// to add and delete items from projects. The addProjectItems and deleteProjectItems arguments are
// mappings between a project and an item. This function can be used across multiple projects
// and items. Note that the deleteProjectV2Item mutation requires the item id from the project not
// the global id.
func UpdateProjectV2Items(client *Client, repo ghrepo.Interface, addProjectItems, deleteProjectItems map[string]string) error {
l := len(addProjectItems) + len(deleteProjectItems)
if l == 0 {
return nil
}
inputs := make([]string, 0, l)
mutations := make([]string, 0, l)
variables := make(map[string]interface{}, l)
var i int
for project, item := range addProjectItems {
inputs = append(inputs, fmt.Sprintf("$input_%03d: AddProjectV2ItemByIdInput!", i))
mutations = append(mutations, fmt.Sprintf("add_%03d: addProjectV2ItemById(input: $input_%03d) { item { id } }", i, i))
variables[fmt.Sprintf("input_%03d", i)] = map[string]interface{}{"contentId": item, "projectId": project}
i++
}
for project, item := range deleteProjectItems {
inputs = append(inputs, fmt.Sprintf("$input_%03d: DeleteProjectV2ItemInput!", i))
mutations = append(mutations, fmt.Sprintf("delete_%03d: deleteProjectV2Item(input: $input_%03d) { deletedItemId }", i, i))
variables[fmt.Sprintf("input_%03d", i)] = map[string]interface{}{"itemId": item, "projectId": project}
i++
}
query := fmt.Sprintf(`mutation UpdateProjectV2Items(%s) {%s}`, strings.Join(inputs, " "), strings.Join(mutations, " "))
return client.GraphQL(repo.RepoHost(), query, variables, nil)
}
// ProjectsV2ItemsForIssue fetches all ProjectItems for an issue.
func ProjectsV2ItemsForIssue(client *Client, repo ghrepo.Interface, issue *Issue) error {
type projectV2ItemStatus struct {
StatusFragment struct {
OptionID string `json:"optionId"`
Name string `json:"name"`
} `graphql:"... on ProjectV2ItemFieldSingleSelectValue"`
}
type projectV2Item struct {
ID string `json:"id"`
Project struct {
ID string `json:"id"`
Title string `json:"title"`
}
Status projectV2ItemStatus `graphql:"status:fieldValueByName(name: \"Status\")"`
}
type response struct {
Repository struct {
Issue struct {
ProjectItems struct {
TotalCount int
Nodes []*projectV2Item
PageInfo struct {
HasNextPage bool
EndCursor string
}
} `graphql:"projectItems(first: 100, after: $endCursor)"`
} `graphql:"issue(number: $number)"`
} `graphql:"repository(owner: $owner, name: $name)"`
}
variables := map[string]interface{}{
"owner": githubv4.String(repo.RepoOwner()),
"name": githubv4.String(repo.RepoName()),
"number": githubv4.Int(issue.Number),
"endCursor": (*githubv4.String)(nil),
}
var items ProjectItems
for {
var query response
err := client.Query(repo.RepoHost(), "IssueProjectItems", &query, variables)
if err != nil {
return err
}
for _, projectItemNode := range query.Repository.Issue.ProjectItems.Nodes {
if projectItemNode == nil {
continue
}
items.Nodes = append(items.Nodes, &ProjectV2Item{
ID: projectItemNode.ID,
Project: ProjectV2ItemProject{
ID: projectItemNode.Project.ID,
Title: projectItemNode.Project.Title,
},
Status: ProjectV2ItemStatus{
OptionID: projectItemNode.Status.StatusFragment.OptionID,
Name: projectItemNode.Status.StatusFragment.Name,
},
})
}
if !query.Repository.Issue.ProjectItems.PageInfo.HasNextPage {
break
}
variables["endCursor"] = githubv4.String(query.Repository.Issue.ProjectItems.PageInfo.EndCursor)
}
issue.ProjectItems = items
return nil
}
// ProjectsV2ItemsForPullRequest fetches all ProjectItems for a pull request.
func ProjectsV2ItemsForPullRequest(client *Client, repo ghrepo.Interface, pr *PullRequest) error {
type projectV2ItemStatus struct {
StatusFragment struct {
OptionID string `json:"optionId"`
Name string `json:"name"`
} `graphql:"... on ProjectV2ItemFieldSingleSelectValue"`
}
type projectV2Item struct {
ID string `json:"id"`
Project struct {
ID string `json:"id"`
Title string `json:"title"`
}
Status projectV2ItemStatus `graphql:"status:fieldValueByName(name: \"Status\")"`
}
type response struct {
Repository struct {
PullRequest struct {
ProjectItems struct {
TotalCount int
Nodes []*projectV2Item
PageInfo struct {
HasNextPage bool
EndCursor string
}
} `graphql:"projectItems(first: 100, after: $endCursor)"`
} `graphql:"pullRequest(number: $number)"`
} `graphql:"repository(owner: $owner, name: $name)"`
}
variables := map[string]interface{}{
"owner": githubv4.String(repo.RepoOwner()),
"name": githubv4.String(repo.RepoName()),
"number": githubv4.Int(pr.Number),
"endCursor": (*githubv4.String)(nil),
}
var items ProjectItems
for {
var query response
err := client.Query(repo.RepoHost(), "PullRequestProjectItems", &query, variables)
if err != nil {
return err
}
for _, projectItemNode := range query.Repository.PullRequest.ProjectItems.Nodes {
if projectItemNode == nil {
continue
}
items.Nodes = append(items.Nodes, &ProjectV2Item{
ID: projectItemNode.ID,
Project: ProjectV2ItemProject{
ID: projectItemNode.Project.ID,
Title: projectItemNode.Project.Title,
},
Status: ProjectV2ItemStatus{
OptionID: projectItemNode.Status.StatusFragment.OptionID,
Name: projectItemNode.Status.StatusFragment.Name,
},
})
}
if !query.Repository.PullRequest.ProjectItems.PageInfo.HasNextPage {
break
}
variables["endCursor"] = githubv4.String(query.Repository.PullRequest.ProjectItems.PageInfo.EndCursor)
}
pr.ProjectItems = items
return nil
}
// OrganizationProjectsV2 fetches all open projectsV2 for an organization.
func OrganizationProjectsV2(client *Client, repo ghrepo.Interface) ([]ProjectV2, error) {
type responseData struct {
Organization struct {
ProjectsV2 struct {
Nodes []ProjectV2
PageInfo struct {
HasNextPage bool
EndCursor string
}
} `graphql:"projectsV2(first: 100, orderBy: {field: TITLE, direction: ASC}, after: $endCursor, query: $query)"`
} `graphql:"organization(login: $owner)"`
}
variables := map[string]interface{}{
"owner": githubv4.String(repo.RepoOwner()),
"endCursor": (*githubv4.String)(nil),
"query": githubv4.String("is:open"),
}
var projectsV2 []ProjectV2
for {
var query responseData
err := client.Query(repo.RepoHost(), "OrganizationProjectV2List", &query, variables)
if err != nil {
return nil, err
}
projectsV2 = append(projectsV2, query.Organization.ProjectsV2.Nodes...)
if !query.Organization.ProjectsV2.PageInfo.HasNextPage {
break
}
variables["endCursor"] = githubv4.String(query.Organization.ProjectsV2.PageInfo.EndCursor)
}
return projectsV2, nil
}
// RepoProjectsV2 fetches all open projectsV2 for a repository.
func RepoProjectsV2(client *Client, repo ghrepo.Interface) ([]ProjectV2, error) {
type responseData struct {
Repository struct {
ProjectsV2 struct {
Nodes []ProjectV2
PageInfo struct {
HasNextPage bool
EndCursor string
}
} `graphql:"projectsV2(first: 100, orderBy: {field: TITLE, direction: ASC}, after: $endCursor, query: $query)"`
} `graphql:"repository(owner: $owner, name: $name)"`
}
variables := map[string]interface{}{
"owner": githubv4.String(repo.RepoOwner()),
"name": githubv4.String(repo.RepoName()),
"endCursor": (*githubv4.String)(nil),
"query": githubv4.String("is:open"),
}
var projectsV2 []ProjectV2
for {
var query responseData
err := client.Query(repo.RepoHost(), "RepositoryProjectV2List", &query, variables)
if err != nil {
return nil, err
}
projectsV2 = append(projectsV2, query.Repository.ProjectsV2.Nodes...)
if !query.Repository.ProjectsV2.PageInfo.HasNextPage {
break
}
variables["endCursor"] = githubv4.String(query.Repository.ProjectsV2.PageInfo.EndCursor)
}
return projectsV2, nil
}
// CurrentUserProjectsV2 fetches all open projectsV2 for current user.
func CurrentUserProjectsV2(client *Client, hostname string) ([]ProjectV2, error) {
type responseData struct {
Viewer struct {
ProjectsV2 struct {
Nodes []ProjectV2
PageInfo struct {
HasNextPage bool
EndCursor string
}
} `graphql:"projectsV2(first: 100, orderBy: {field: TITLE, direction: ASC}, after: $endCursor, query: $query)"`
} `graphql:"viewer"`
}
variables := map[string]interface{}{
"endCursor": (*githubv4.String)(nil),
"query": githubv4.String("is:open"),
}
var projectsV2 []ProjectV2
for {
var query responseData
err := client.Query(hostname, "UserProjectV2List", &query, variables)
if err != nil {
return nil, err
}
projectsV2 = append(projectsV2, query.Viewer.ProjectsV2.Nodes...)
if !query.Viewer.ProjectsV2.PageInfo.HasNextPage {
break
}
variables["endCursor"] = githubv4.String(query.Viewer.ProjectsV2.PageInfo.EndCursor)
}
return projectsV2, nil
}
// When querying ProjectsV2 fields we generally dont want to show the user
// scope errors and field does not exist errors. ProjectsV2IgnorableError
// checks against known error strings to see if an error can be safely ignored.
// Due to the fact that the GraphQLClient can return multiple types of errors
// this uses brittle string comparison to check against the known error strings.
func ProjectsV2IgnorableError(err error) bool {
msg := err.Error()
if strings.Contains(msg, errorProjectsV2ReadScope) ||
strings.Contains(msg, errorProjectsV2UserField) ||
strings.Contains(msg, errorProjectsV2RepositoryField) ||
strings.Contains(msg, errorProjectsV2OrganizationField) ||
strings.Contains(msg, errorProjectsV2IssueField) ||
strings.Contains(msg, errorProjectsV2PullRequestField) {
return true
}
return false
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/api/pull_request_test.go | api/pull_request_test.go | package api
import (
"encoding/json"
"fmt"
"testing"
"github.com/stretchr/testify/require"
)
func TestChecksStatus_NoCheckRunsOrStatusContexts(t *testing.T) {
t.Parallel()
payload := `
{ "statusCheckRollup": { "nodes": [] } }
`
var pr PullRequest
require.NoError(t, json.Unmarshal([]byte(payload), &pr))
expectedChecksStatus := PullRequestChecksStatus{
Pending: 0,
Failing: 0,
Passing: 0,
Total: 0,
}
require.Equal(t, expectedChecksStatus, pr.ChecksStatus())
}
func TestChecksStatus_SummarisingCheckRuns(t *testing.T) {
t.Parallel()
tests := []struct {
name string
payload string
expectedChecksStatus PullRequestChecksStatus
}{
{
name: "QUEUED is treated as Pending",
payload: singleCheckRunWithStatus("QUEUED"),
expectedChecksStatus: PullRequestChecksStatus{Pending: 1, Total: 1},
},
{
name: "IN_PROGRESS is treated as Pending",
payload: singleCheckRunWithStatus("IN_PROGRESS"),
expectedChecksStatus: PullRequestChecksStatus{Pending: 1, Total: 1},
},
{
name: "WAITING is treated as Pending",
payload: singleCheckRunWithStatus("WAITING"),
expectedChecksStatus: PullRequestChecksStatus{Pending: 1, Total: 1},
},
{
name: "PENDING is treated as Pending",
payload: singleCheckRunWithStatus("PENDING"),
expectedChecksStatus: PullRequestChecksStatus{Pending: 1, Total: 1},
},
{
name: "REQUESTED is treated as Pending",
payload: singleCheckRunWithStatus("REQUESTED"),
expectedChecksStatus: PullRequestChecksStatus{Pending: 1, Total: 1},
},
{
name: "COMPLETED with no conclusion is treated as Pending",
payload: singleCheckRunWithStatus("COMPLETED"),
expectedChecksStatus: PullRequestChecksStatus{Pending: 1, Total: 1},
},
{
name: "COMPLETED / STARTUP_FAILURE is treated as Pending",
payload: singleCompletedCheckRunWithConclusion("STARTUP_FAILURE"),
expectedChecksStatus: PullRequestChecksStatus{Pending: 1, Total: 1},
},
{
name: "COMPLETED / STALE is treated as Pending",
payload: singleCompletedCheckRunWithConclusion("STALE"),
expectedChecksStatus: PullRequestChecksStatus{Pending: 1, Total: 1},
},
{
name: "COMPLETED / SUCCESS is treated as Passing",
payload: singleCompletedCheckRunWithConclusion("SUCCESS"),
expectedChecksStatus: PullRequestChecksStatus{Passing: 1, Total: 1},
},
{
name: "COMPLETED / NEUTRAL is treated as Passing",
payload: singleCompletedCheckRunWithConclusion("NEUTRAL"),
expectedChecksStatus: PullRequestChecksStatus{Passing: 1, Total: 1},
},
{
name: "COMPLETED / SKIPPED is treated as Passing",
payload: singleCompletedCheckRunWithConclusion("SKIPPED"),
expectedChecksStatus: PullRequestChecksStatus{Passing: 1, Total: 1},
},
{
name: "COMPLETED / ACTION_REQUIRED is treated as Failing",
payload: singleCompletedCheckRunWithConclusion("ACTION_REQUIRED"),
expectedChecksStatus: PullRequestChecksStatus{Failing: 1, Total: 1},
},
{
name: "COMPLETED / TIMED_OUT is treated as Failing",
payload: singleCompletedCheckRunWithConclusion("TIMED_OUT"),
expectedChecksStatus: PullRequestChecksStatus{Failing: 1, Total: 1},
},
{
name: "COMPLETED / CANCELLED is treated as Failing",
payload: singleCompletedCheckRunWithConclusion("CANCELLED"),
expectedChecksStatus: PullRequestChecksStatus{Failing: 1, Total: 1},
},
{
name: "COMPLETED / CANCELLED is treated as Failing",
payload: singleCompletedCheckRunWithConclusion("CANCELLED"),
expectedChecksStatus: PullRequestChecksStatus{Failing: 1, Total: 1},
},
{
name: "COMPLETED / FAILURE is treated as Failing",
payload: singleCompletedCheckRunWithConclusion("FAILURE"),
expectedChecksStatus: PullRequestChecksStatus{Failing: 1, Total: 1},
},
{
name: "Unrecognized Status are treated as Pending",
payload: singleCheckRunWithStatus("AnUnrecognizedStatusJustForThisTest"),
expectedChecksStatus: PullRequestChecksStatus{Pending: 1, Total: 1},
},
{
name: "Unrecognized Conclusions are treated as Pending",
payload: singleCompletedCheckRunWithConclusion("AnUnrecognizedConclusionJustForThisTest"),
expectedChecksStatus: PullRequestChecksStatus{Pending: 1, Total: 1},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
t.Parallel()
var pr PullRequest
require.NoError(t, json.Unmarshal([]byte(tt.payload), &pr))
require.Equal(t, tt.expectedChecksStatus, pr.ChecksStatus())
})
}
}
func TestChecksStatus_SummarisingStatusContexts(t *testing.T) {
t.Parallel()
tests := []struct {
name string
payload string
expectedChecksStatus PullRequestChecksStatus
}{
{
name: "EXPECTED is treated as Pending",
payload: singleStatusContextWithState("EXPECTED"),
expectedChecksStatus: PullRequestChecksStatus{Pending: 1, Total: 1},
},
{
name: "PENDING is treated as Pending",
payload: singleStatusContextWithState("PENDING"),
expectedChecksStatus: PullRequestChecksStatus{Pending: 1, Total: 1},
},
{
name: "SUCCESS is treated as Passing",
payload: singleStatusContextWithState("SUCCESS"),
expectedChecksStatus: PullRequestChecksStatus{Passing: 1, Total: 1},
},
{
name: "ERROR is treated as Failing",
payload: singleStatusContextWithState("ERROR"),
expectedChecksStatus: PullRequestChecksStatus{Failing: 1, Total: 1},
},
{
name: "FAILURE is treated as Failing",
payload: singleStatusContextWithState("FAILURE"),
expectedChecksStatus: PullRequestChecksStatus{Failing: 1, Total: 1},
},
{
name: "Unrecognized States are treated as Pending",
payload: singleStatusContextWithState("AnUnrecognizedStateJustForThisTest"),
expectedChecksStatus: PullRequestChecksStatus{Pending: 1, Total: 1},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
t.Parallel()
var pr PullRequest
require.NoError(t, json.Unmarshal([]byte(tt.payload), &pr))
require.Equal(t, tt.expectedChecksStatus, pr.ChecksStatus())
})
}
}
func TestChecksStatus_SummarisingCheckRunsAndStatusContexts(t *testing.T) {
t.Parallel()
// This might look a bit intimidating, but we're just inserting three nodes
// into the rollup, two completed check run nodes and one status context node.
payload := fmt.Sprintf(`
{ "statusCheckRollup": { "nodes": [{ "commit": {
"statusCheckRollup": {
"contexts": {
"nodes": [
%s,
%s,
%s
]
}
}
} }] } }
`,
completedCheckRunNode("SUCCESS"),
statusContextNode("PENDING"),
completedCheckRunNode("FAILURE"),
)
var pr PullRequest
require.NoError(t, json.Unmarshal([]byte(payload), &pr))
expectedChecksStatus := PullRequestChecksStatus{
Pending: 1,
Failing: 1,
Passing: 1,
Total: 3,
}
require.Equal(t, expectedChecksStatus, pr.ChecksStatus())
}
func TestChecksStatus_SummarisingCheckRunAndStatusContextCountsByState(t *testing.T) {
t.Parallel()
payload := `
{ "statusCheckRollup": { "nodes": [{ "commit": {
"statusCheckRollup": {
"contexts": {
"checkRunCount": 14,
"checkRunCountsByState": [
{
"state": "ACTION_REQUIRED",
"count": 1
},
{
"state": "CANCELLED",
"count": 1
},
{
"state": "COMPLETED",
"count": 1
},
{
"state": "FAILURE",
"count": 1
},
{
"state": "IN_PROGRESS",
"count": 1
},
{
"state": "NEUTRAL",
"count": 1
},
{
"state": "PENDING",
"count": 1
},
{
"state": "QUEUED",
"count": 1
},
{
"state": "SKIPPED",
"count": 1
},
{
"state": "STALE",
"count": 1
},
{
"state": "STARTUP_FAILURE",
"count": 1
},
{
"state": "SUCCESS",
"count": 1
},
{
"state": "TIMED_OUT",
"count": 1
},
{
"state": "WAITING",
"count": 1
},
{
"state": "AnUnrecognizedStateJustForThisTest",
"count": 1
}
],
"statusContextCount": 6,
"statusContextCountsByState": [
{
"state": "EXPECTED",
"count": 1
},
{
"state": "ERROR",
"count": 1
},
{
"state": "FAILURE",
"count": 1
},
{
"state": "PENDING",
"count": 1
},
{
"state": "SUCCESS",
"count": 1
},
{
"state": "AnUnrecognizedStateJustForThisTest",
"count": 1
}
]
}
}
} }] } }
`
var pr PullRequest
require.NoError(t, json.Unmarshal([]byte(payload), &pr))
expectedChecksStatus := PullRequestChecksStatus{
Pending: 11,
Failing: 6,
Passing: 4,
Total: 20,
}
require.Equal(t, expectedChecksStatus, pr.ChecksStatus())
}
// Note that it would be incorrect to provide a status of COMPLETED here
// as the conclusion is always set to null. If you want a COMPLETED status,
// use `singleCompletedCheckRunWithConclusion`.
func singleCheckRunWithStatus(status string) string {
return fmt.Sprintf(`
{ "statusCheckRollup": { "nodes": [{ "commit": {
"statusCheckRollup": {
"contexts": {
"nodes": [
{
"__typename": "CheckRun",
"status": "%s",
"conclusion": null
}
]
}
}
} }] } }
`, status)
}
func singleCompletedCheckRunWithConclusion(conclusion string) string {
return fmt.Sprintf(`
{ "statusCheckRollup": { "nodes": [{ "commit": {
"statusCheckRollup": {
"contexts": {
"nodes": [
{
"__typename": "CheckRun",
"status": "COMPLETED",
"conclusion": "%s"
}
]
}
}
} }] } }
`, conclusion)
}
func singleStatusContextWithState(state string) string {
return fmt.Sprintf(`
{ "statusCheckRollup": { "nodes": [{ "commit": {
"statusCheckRollup": {
"contexts": {
"nodes": [
{
"__typename": "StatusContext",
"state": "%s"
}
]
}
}
} }] } }
`, state)
}
func completedCheckRunNode(conclusion string) string {
return fmt.Sprintf(`
{
"__typename": "CheckRun",
"status": "COMPLETED",
"conclusion": "%s"
}`, conclusion)
}
func statusContextNode(state string) string {
return fmt.Sprintf(`
{
"__typename": "StatusContext",
"state": "%s"
}`, state)
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/api/reaction_groups.go | api/reaction_groups.go | package api
import (
"bytes"
"encoding/json"
)
type ReactionGroups []ReactionGroup
func (rg ReactionGroups) MarshalJSON() ([]byte, error) {
buf := bytes.Buffer{}
buf.WriteRune('[')
encoder := json.NewEncoder(&buf)
encoder.SetEscapeHTML(false)
hasPrev := false
for _, g := range rg {
if g.Users.TotalCount == 0 {
continue
}
if hasPrev {
buf.WriteRune(',')
}
if err := encoder.Encode(&g); err != nil {
return nil, err
}
hasPrev = true
}
buf.WriteRune(']')
return buf.Bytes(), nil
}
type ReactionGroup struct {
Content string `json:"content"`
Users ReactionGroupUsers `json:"users"`
}
type ReactionGroupUsers struct {
TotalCount int `json:"totalCount"`
}
func (rg ReactionGroup) Count() int {
return rg.Users.TotalCount
}
func (rg ReactionGroup) Emoji() string {
return reactionEmoji[rg.Content]
}
var reactionEmoji = map[string]string{
"THUMBS_UP": "\U0001f44d",
"THUMBS_DOWN": "\U0001f44e",
"LAUGH": "\U0001f604",
"HOORAY": "\U0001f389",
"CONFUSED": "\U0001f615",
"HEART": "\u2764\ufe0f",
"ROCKET": "\U0001f680",
"EYES": "\U0001f440",
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/api/client_test.go | api/client_test.go | package api
import (
"bytes"
"errors"
"io"
"net/http"
"net/http/httptest"
"testing"
"github.com/cli/cli/v2/pkg/httpmock"
"github.com/cli/cli/v2/pkg/iostreams"
"github.com/stretchr/testify/assert"
)
func newTestClient(reg *httpmock.Registry) *Client {
client := &http.Client{}
httpmock.ReplaceTripper(client, reg)
return NewClientFromHTTP(client)
}
func TestGraphQL(t *testing.T) {
http := &httpmock.Registry{}
client := newTestClient(http)
vars := map[string]interface{}{"name": "Mona"}
response := struct {
Viewer struct {
Login string
}
}{}
http.Register(
httpmock.GraphQL("QUERY"),
httpmock.StringResponse(`{"data":{"viewer":{"login":"hubot"}}}`),
)
err := client.GraphQL("github.com", "QUERY", vars, &response)
assert.NoError(t, err)
assert.Equal(t, "hubot", response.Viewer.Login)
req := http.Requests[0]
reqBody, _ := io.ReadAll(req.Body)
assert.Equal(t, `{"query":"QUERY","variables":{"name":"Mona"}}`, string(reqBody))
}
func TestGraphQLError(t *testing.T) {
reg := &httpmock.Registry{}
client := newTestClient(reg)
response := struct{}{}
reg.Register(
httpmock.GraphQL(""),
httpmock.StringResponse(`
{ "errors": [
{
"type": "NOT_FOUND",
"message": "OH NO",
"path": ["repository", "issue"]
},
{
"type": "ACTUALLY_ITS_FINE",
"message": "this is fine",
"path": ["repository", "issues", 0, "comments"]
}
]
}
`),
)
err := client.GraphQL("github.com", "", nil, &response)
if err == nil || err.Error() != "GraphQL: OH NO (repository.issue), this is fine (repository.issues.0.comments)" {
t.Fatalf("got %q", err.Error())
}
}
func TestRESTGetDelete(t *testing.T) {
http := &httpmock.Registry{}
client := newTestClient(http)
http.Register(
httpmock.REST("DELETE", "applications/CLIENTID/grant"),
httpmock.StatusStringResponse(204, "{}"),
)
r := bytes.NewReader([]byte(`{}`))
err := client.REST("github.com", "DELETE", "applications/CLIENTID/grant", r, nil)
assert.NoError(t, err)
}
func TestRESTWithFullURL(t *testing.T) {
http := &httpmock.Registry{}
client := newTestClient(http)
http.Register(
httpmock.REST("GET", "api/v3/user/repos"),
httpmock.StatusStringResponse(200, "{}"))
http.Register(
httpmock.REST("GET", "user/repos"),
httpmock.StatusStringResponse(200, "{}"))
err := client.REST("example.com", "GET", "user/repos", nil, nil)
assert.NoError(t, err)
err = client.REST("example.com", "GET", "https://another.net/user/repos", nil, nil)
assert.NoError(t, err)
assert.Equal(t, "example.com", http.Requests[0].URL.Hostname())
assert.Equal(t, "another.net", http.Requests[1].URL.Hostname())
}
func TestRESTError(t *testing.T) {
fakehttp := &httpmock.Registry{}
client := newTestClient(fakehttp)
fakehttp.Register(httpmock.MatchAny, func(req *http.Request) (*http.Response, error) {
return &http.Response{
Request: req,
StatusCode: 422,
Body: io.NopCloser(bytes.NewBufferString(`{"message": "OH NO"}`)),
Header: map[string][]string{
"Content-Type": {"application/json; charset=utf-8"},
},
}, nil
})
var httpErr HTTPError
err := client.REST("github.com", "DELETE", "repos/branch", nil, nil)
if err == nil || !errors.As(err, &httpErr) {
t.Fatalf("got %v", err)
}
if httpErr.StatusCode != 422 {
t.Errorf("expected status code 422, got %d", httpErr.StatusCode)
}
if httpErr.Error() != "HTTP 422: OH NO (https://api.github.com/repos/branch)" {
t.Errorf("got %q", httpErr.Error())
}
}
func TestHandleHTTPError_GraphQL502(t *testing.T) {
req, err := http.NewRequest("GET", "https://api.github.com/user", nil)
if err != nil {
t.Fatal(err)
}
resp := &http.Response{
Request: req,
StatusCode: 502,
Body: io.NopCloser(bytes.NewBufferString(`{ "data": null, "errors": [{ "message": "Something went wrong" }] }`)),
Header: map[string][]string{"Content-Type": {"application/json"}},
}
err = HandleHTTPError(resp)
if err == nil || err.Error() != "HTTP 502: Something went wrong (https://api.github.com/user)" {
t.Errorf("got error: %v", err)
}
}
func TestHTTPError_ScopesSuggestion(t *testing.T) {
makeResponse := func(s int, u, haveScopes, needScopes string) *http.Response {
req, err := http.NewRequest("GET", u, nil)
if err != nil {
t.Fatal(err)
}
return &http.Response{
Request: req,
StatusCode: s,
Body: io.NopCloser(bytes.NewBufferString(`{}`)),
Header: map[string][]string{
"Content-Type": {"application/json"},
"X-Oauth-Scopes": {haveScopes},
"X-Accepted-Oauth-Scopes": {needScopes},
},
}
}
tests := []struct {
name string
resp *http.Response
want string
}{
{
name: "has necessary scopes",
resp: makeResponse(404, "https://api.github.com/gists", "repo, gist, read:org", "gist"),
want: ``,
},
{
name: "normalizes scopes",
resp: makeResponse(404, "https://api.github.com/orgs/ORG/discussions", "admin:org, write:discussion", "read:org, read:discussion"),
want: ``,
},
{
name: "no scopes on endpoint",
resp: makeResponse(404, "https://api.github.com/user", "repo", ""),
want: ``,
},
{
name: "missing a scope",
resp: makeResponse(404, "https://api.github.com/gists", "repo, read:org", "gist, delete_repo"),
want: `This API operation needs the "gist" scope. To request it, run: gh auth refresh -h github.com -s gist`,
},
{
name: "server error",
resp: makeResponse(500, "https://api.github.com/gists", "repo", "gist"),
want: ``,
},
{
name: "no scopes on token",
resp: makeResponse(404, "https://api.github.com/gists", "", "gist, delete_repo"),
want: ``,
},
{
name: "http code is 422",
resp: makeResponse(422, "https://api.github.com/gists", "", "gist"),
want: "",
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
httpError := HandleHTTPError(tt.resp)
if got := httpError.(HTTPError).ScopesSuggestion(); got != tt.want {
t.Errorf("HTTPError.ScopesSuggestion() = %v, want %v", got, tt.want)
}
})
}
}
func TestHTTPHeaders(t *testing.T) {
var gotReq *http.Request
ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
gotReq = r
w.WriteHeader(http.StatusNoContent)
}))
defer ts.Close()
ios, _, _, stderr := iostreams.Test()
httpClient, err := NewHTTPClient(HTTPClientOptions{
AppVersion: "v1.2.3",
Config: tinyConfig{ts.URL[7:] + ":oauth_token": "MYTOKEN"},
Log: ios.ErrOut,
})
assert.NoError(t, err)
client := NewClientFromHTTP(httpClient)
err = client.REST(ts.URL, "GET", ts.URL+"/user/repos", nil, nil)
assert.NoError(t, err)
wantHeader := map[string]string{
"Accept": "application/vnd.github.merge-info-preview+json, application/vnd.github.nebula-preview",
"Authorization": "token MYTOKEN",
"Content-Type": "application/json; charset=utf-8",
"User-Agent": "GitHub CLI v1.2.3",
}
for name, value := range wantHeader {
assert.Equal(t, value, gotReq.Header.Get(name), name)
}
assert.Equal(t, "", stderr.String())
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/api/queries_pr_test.go | api/queries_pr_test.go | package api
import (
"encoding/json"
"testing"
"github.com/cli/cli/v2/internal/ghrepo"
"github.com/cli/cli/v2/pkg/httpmock"
"github.com/stretchr/testify/assert"
)
// TestBranchDeleteRemote checks that remote branch deletion URL-encodes the
// branch name and that HTTP failures surface as errors.
func TestBranchDeleteRemote(t *testing.T) {
	tests := []struct {
		name        string
		branch      string
		httpStubs   func(*httpmock.Registry)
		expectError bool
	}{
		{
			// Branch names containing "/" and "#" must be percent-encoded in the path.
			name:   "success",
			branch: "owner/branch#123",
			httpStubs: func(reg *httpmock.Registry) {
				reg.Register(
					httpmock.REST("DELETE", "repos/OWNER/REPO/git/refs/heads/owner%2Fbranch%23123"),
					httpmock.StatusStringResponse(204, ""))
			},
			expectError: false,
		},
		{
			name:   "error",
			branch: "my-branch",
			httpStubs: func(reg *httpmock.Registry) {
				reg.Register(
					httpmock.REST("DELETE", "repos/OWNER/REPO/git/refs/heads/my-branch"),
					httpmock.StatusStringResponse(500, `{"message": "oh no"}`))
			},
			expectError: true,
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			reg := &httpmock.Registry{}
			if tt.httpStubs != nil {
				tt.httpStubs(reg)
			}
			client := newTestClient(reg)
			repo, _ := ghrepo.FromFullName("OWNER/REPO")

			err := BranchDeleteRemote(client, repo, tt.branch)
			if gotError := err != nil; gotError != tt.expectError {
				t.Fatalf("unexpected result: %v", err)
			}
		})
	}
}
// Test_Logins verifies that ReviewRequests.Logins flattens requested
// reviewers into login strings: bare logins for users and "org/slug" for
// teams.
func Test_Logins(t *testing.T) {
	var tests = []struct {
		name             string
		requestedReviews string
		want             []string
	}{
		{
			name:             "no requested reviewers",
			requestedReviews: `{"nodes": []}`,
			want:             []string{},
		},
		{
			name: "user",
			requestedReviews: `{"nodes": [
				{
					"requestedreviewer": {
						"__typename": "User", "login": "testuser"
					}
				}
			]}`,
			want: []string{"testuser"},
		},
		{
			name: "team",
			requestedReviews: `{"nodes": [
				{
					"requestedreviewer": {
						"__typename": "Team",
						"name": "Test Team",
						"slug": "test-team",
						"organization": {"login": "myorg"}
					}
				}
			]}`,
			want: []string{"myorg/test-team"},
		},
		{
			name: "multiple users and teams",
			requestedReviews: `{"nodes": [
				{
					"requestedreviewer": {
						"__typename": "User", "login": "user1"
					}
				},
				{
					"requestedreviewer": {
						"__typename": "User", "login": "user2"
					}
				},
				{
					"requestedreviewer": {
						"__typename": "Team",
						"name": "Test Team",
						"slug": "test-team",
						"organization": {"login": "myorg"}
					}
				},
				{
					"requestedreviewer": {
						"__typename": "Team",
						"name": "Dev Team",
						"slug": "dev-team",
						"organization": {"login": "myorg"}
					}
				}
			]}`,
			want: []string{"user1", "user2", "myorg/test-team", "myorg/dev-team"},
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			// Use a fresh ReviewRequests per subtest. Previously a single
			// value was shared across all table cases, so state decoded by
			// one case could leak into the next (e.g. if a future fixture
			// omitted the "nodes" key). Declaring it here keeps every
			// subtest isolated.
			var rr ReviewRequests
			err := json.Unmarshal([]byte(tt.requestedReviews), &rr)
			assert.NoError(t, err, "Failed to unmarshal json string as ReviewRequests")
			logins := rr.Logins()
			assert.Equal(t, tt.want, logins)
		})
	}
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/api/http_client_test.go | api/http_client_test.go | package api
import (
"encoding/json"
"fmt"
"io"
"net/http"
"net/http/httptest"
"regexp"
"strings"
"testing"
"github.com/MakeNowJust/heredoc"
"github.com/cli/cli/v2/pkg/iostreams"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// TestNewHTTPClient exercises the header wiring of NewHTTPClient: tokens are
// attached only when the request host matches the configured host, default
// headers can be skipped, and verbose mode logs the exchange to the Log
// writer (compared after normalizeVerboseLog strips volatile values).
func TestNewHTTPClient(t *testing.T) {
	type args struct {
		config             tokenGetter
		appVersion         string
		logVerboseHTTP     bool
		skipDefaultHeaders bool
	}
	tests := []struct {
		name       string
		args       args
		host       string
		wantHeader map[string][]string
		wantStderr string
	}{
		{
			name: "github.com",
			args: args{
				config:         tinyConfig{"github.com:oauth_token": "MYTOKEN"},
				appVersion:     "v1.2.3",
				logVerboseHTTP: false,
			},
			host: "github.com",
			wantHeader: map[string][]string{
				"authorization": {"token MYTOKEN"},
				"user-agent":    {"GitHub CLI v1.2.3"},
				"accept":        {"application/vnd.github.merge-info-preview+json, application/vnd.github.nebula-preview"},
			},
			wantStderr: "",
		},
		{
			name: "GHES",
			args: args{
				config:     tinyConfig{"example.com:oauth_token": "GHETOKEN"},
				appVersion: "v1.2.3",
			},
			host: "example.com",
			wantHeader: map[string][]string{
				"authorization": {"token GHETOKEN"},
				"user-agent":    {"GitHub CLI v1.2.3"},
				"accept":        {"application/vnd.github.merge-info-preview+json, application/vnd.github.nebula-preview"},
			},
			wantStderr: "",
		},
		{
			// A token configured for example.com must not leak to github.com.
			name: "github.com no authentication token",
			args: args{
				config:         tinyConfig{"example.com:oauth_token": "MYTOKEN"},
				appVersion:     "v1.2.3",
				logVerboseHTTP: false,
			},
			host: "github.com",
			wantHeader: map[string][]string{
				"authorization": nil, // should not be set
				"user-agent":    {"GitHub CLI v1.2.3"},
				"accept":        {"application/vnd.github.merge-info-preview+json, application/vnd.github.nebula-preview"},
			},
			wantStderr: "",
		},
		{
			// A token configured for github.com must not leak to a GHES host.
			name: "GHES no authentication token",
			args: args{
				config:         tinyConfig{"github.com:oauth_token": "MYTOKEN"},
				appVersion:     "v1.2.3",
				logVerboseHTTP: false,
			},
			host: "example.com",
			wantHeader: map[string][]string{
				"authorization": nil, // should not be set
				"user-agent":    {"GitHub CLI v1.2.3"},
				"accept":        {"application/vnd.github.merge-info-preview+json, application/vnd.github.nebula-preview"},
			},
			wantStderr: "",
		},
		{
			// Verbose mode must log headers with the token masked.
			name: "github.com in verbose mode",
			args: args{
				config:         tinyConfig{"github.com:oauth_token": "MYTOKEN"},
				appVersion:     "v1.2.3",
				logVerboseHTTP: true,
			},
			host: "github.com",
			wantHeader: map[string][]string{
				"authorization": {"token MYTOKEN"},
				"user-agent":    {"GitHub CLI v1.2.3"},
				"accept":        {"application/vnd.github.merge-info-preview+json, application/vnd.github.nebula-preview"},
			},
			wantStderr: heredoc.Doc(`
				* Request at <time>
				* Request to http://<host>:<port>
				> GET / HTTP/1.1
				> Host: github.com
				> Accept: application/vnd.github.merge-info-preview+json, application/vnd.github.nebula-preview
				> Authorization: token ████████████████████
				> Content-Type: application/json; charset=utf-8
				> Time-Zone: <timezone>
				> User-Agent: GitHub CLI v1.2.3
				< HTTP/1.1 204 No Content
				< Date: <time>
				* Request took <duration>
			`),
		},
		{
			name: "respect skip default headers option",
			args: args{
				appVersion:         "v1.2.3",
				logVerboseHTTP:     true,
				skipDefaultHeaders: true,
			},
			host: "github.com",
			wantHeader: map[string][]string{
				"accept":        nil,
				"authorization": nil,
				"content-type":  nil,
				"user-agent":    {"GitHub CLI v1.2.3"},
			},
			wantStderr: heredoc.Doc(`
				* Request at <time>
				* Request to http://<host>:<port>
				> GET / HTTP/1.1
				> Host: github.com
				> Time-Zone: <timezone>
				> User-Agent: GitHub CLI v1.2.3
				< HTTP/1.1 204 No Content
				< Date: <time>
				* Request took <duration>
			`),
		},
	}

	// One shared local server; the logical host under test is selected per
	// case via req.Host below.
	var gotReq *http.Request
	ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		gotReq = r
		w.WriteHeader(http.StatusNoContent)
	}))
	defer ts.Close()

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			ios, _, _, stderr := iostreams.Test()
			client, err := NewHTTPClient(HTTPClientOptions{
				AppVersion:         tt.args.appVersion,
				Config:             tt.args.config,
				Log:                ios.ErrOut,
				LogVerboseHTTP:     tt.args.logVerboseHTTP,
				SkipDefaultHeaders: tt.args.skipDefaultHeaders,
			})
			require.NoError(t, err)
			req, err := http.NewRequest("GET", ts.URL, nil)
			req.Header.Set("time-zone", "Europe/Amsterdam")
			// req.Host controls which hostname the token lookup sees.
			req.Host = tt.host
			require.NoError(t, err)

			res, err := client.Do(req)
			require.NoError(t, err)

			for name, value := range tt.wantHeader {
				assert.Equal(t, value, gotReq.Header.Values(name), name)
			}

			assert.Equal(t, 204, res.StatusCode)
			assert.Equal(t, tt.wantStderr, normalizeVerboseLog(stderr.String()))
		})
	}
}
// TestHTTPClientRedirectAuthenticationHeaderHandling verifies that the token
// for the originally requested host is sent, but is not forwarded to a
// different host when the request is redirected.
func TestHTTPClientRedirectAuthenticationHeaderHandling(t *testing.T) {
	var finalReq *http.Request
	finalServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		finalReq = r
		w.WriteHeader(http.StatusNoContent)
	}))
	defer finalServer.Close()

	var firstReq *http.Request
	redirectingServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		firstReq = r
		http.Redirect(w, r, finalServer.URL, http.StatusFound)
	}))
	defer redirectingServer.Close()

	client, err := NewHTTPClient(HTTPClientOptions{
		Config: tinyConfig{
			strings.TrimPrefix(redirectingServer.URL, "http://") + ":oauth_token": "REDIRECT-TOKEN",
			strings.TrimPrefix(finalServer.URL, "http://") + ":oauth_token":       "TOKEN",
		},
	})
	require.NoError(t, err)

	req, err := http.NewRequest("GET", redirectingServer.URL, nil)
	require.NoError(t, err)
	res, err := client.Do(req)
	require.NoError(t, err)

	// The original host's token is attached; the cross-host redirect target
	// must receive no Authorization header at all.
	assert.Equal(t, "token REDIRECT-TOKEN", firstReq.Header.Get(authorization))
	assert.Equal(t, "", finalReq.Header.Get(authorization))
	assert.Equal(t, 204, res.StatusCode)
}
// TestHTTPClientSanitizeJSONControlCharactersC0 verifies that C0 control
// characters (U+0001–U+001F) in JSON response bodies are rewritten to
// printable caret notation (e.g. ESC -> ^[), while backspace, tab, CR, LF,
// VT, and FF survive, and already-escaped sequences like "\u001B" in the
// text are left alone.
func TestHTTPClientSanitizeJSONControlCharactersC0(t *testing.T) {
	ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		issue := Issue{
			Title: "\u001B[31mRed Title\u001B[0m",
			Body:  "1\u0001 2\u0002 3\u0003 4\u0004 5\u0005 6\u0006 7\u0007 8\u0008 9\t A\r\n B\u000b C\u000c D\r\n E\u000e F\u000f",
			Author: Author{
				ID:    "1",
				Name:  "10\u0010 11\u0011 12\u0012 13\u0013 14\u0014 15\u0015 16\u0016 17\u0017 18\u0018 19\u0019 1A\u001a 1B\u001b 1C\u001c 1D\u001d 1E\u001e 1F\u001f",
				Login: "monalisa \\u00\u001b",
			},
			ActiveLockReason: "Escaped \u001B \\u001B \\\u001B \\\\u001B",
		}
		responseData, _ := json.Marshal(issue)
		w.Header().Set("Content-Type", "application/json; charset=utf-8")
		fmt.Fprint(w, string(responseData))
	}))
	defer ts.Close()

	client, err := NewHTTPClient(HTTPClientOptions{})
	require.NoError(t, err)
	req, err := http.NewRequest("GET", ts.URL, nil)
	require.NoError(t, err)

	res, err := client.Do(req)
	require.NoError(t, err)

	body, err := io.ReadAll(res.Body)
	res.Body.Close()
	require.NoError(t, err)

	var issue Issue
	err = json.Unmarshal(body, &issue)
	require.NoError(t, err)
	// Raw ESC and other C0 bytes come back caret-escaped; whitespace
	// controls (\b \t \r\n \v \f) are preserved as-is.
	assert.Equal(t, "^[[31mRed Title^[[0m", issue.Title)
	assert.Equal(t, "1^A 2^B 3^C 4^D 5^E 6^F 7^G 8\b 9\t A\r\n B\v C\f D\r\n E^N F^O", issue.Body)
	assert.Equal(t, "10^P 11^Q 12^R 13^S 14^T 15^U 16^V 17^W 18^X 19^Y 1A^Z 1B^[ 1C^\\ 1D^] 1E^^ 1F^_", issue.Author.Name)
	assert.Equal(t, "monalisa \\u00^[", issue.Author.Login)
	assert.Equal(t, "Escaped ^[ \\^[ \\^[ \\\\^[", issue.ActiveLockReason)
}
// TestHTTPClientSanitizeControlCharactersC1 verifies that C1 control
// characters (U+0080–U+009F, UTF-8 encoded as \xC2\x80..\xC2\x9F) are
// rewritten to caret notation, while ordinary non-ASCII characters such as
// U+00A1 ("¡") are passed through untouched.
func TestHTTPClientSanitizeControlCharactersC1(t *testing.T) {
	ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		issue := Issue{
			Title: "\xC2\x9B[31mRed Title\xC2\x9B[0m",
			Body:  "80\xC2\x80 81\xC2\x81 82\xC2\x82 83\xC2\x83 84\xC2\x84 85\xC2\x85 86\xC2\x86 87\xC2\x87 88\xC2\x88 89\xC2\x89 8A\xC2\x8A 8B\xC2\x8B 8C\xC2\x8C 8D\xC2\x8D 8E\xC2\x8E 8F\xC2\x8F",
			Author: Author{
				ID:    "1",
				Name:  "90\xC2\x90 91\xC2\x91 92\xC2\x92 93\xC2\x93 94\xC2\x94 95\xC2\x95 96\xC2\x96 97\xC2\x97 98\xC2\x98 99\xC2\x99 9A\xC2\x9A 9B\xC2\x9B 9C\xC2\x9C 9D\xC2\x9D 9E\xC2\x9E 9F\xC2\x9F",
				Login: "monalisa\xC2\xA1",
			},
		}
		responseData, _ := json.Marshal(issue)
		w.Header().Set("Content-Type", "application/json; charset=utf-8")
		fmt.Fprint(w, string(responseData))
	}))
	defer ts.Close()

	client, err := NewHTTPClient(HTTPClientOptions{})
	require.NoError(t, err)
	req, err := http.NewRequest("GET", ts.URL, nil)
	require.NoError(t, err)

	res, err := client.Do(req)
	require.NoError(t, err)

	body, err := io.ReadAll(res.Body)
	res.Body.Close()
	require.NoError(t, err)

	var issue Issue
	err = json.Unmarshal(body, &issue)
	require.NoError(t, err)
	assert.Equal(t, "^[[31mRed Title^[[0m", issue.Title)
	assert.Equal(t, "80^@ 81^A 82^B 83^C 84^D 85^E 86^F 87^G 88^H 89^I 8A^J 8B^K 8C^L 8D^M 8E^N 8F^O", issue.Body)
	assert.Equal(t, "90^P 91^Q 92^R 93^S 94^T 95^U 96^V 97^W 98^X 99^Y 9A^Z 9B^[ 9C^\\ 9D^] 9E^^ 9F^_", issue.Author.Name)
	// U+00A1 is not a control character and must survive sanitization.
	assert.Equal(t, "monalisa¡", issue.Author.Login)
}
// tinyConfig is a minimal tokenGetter for tests, backed by a map keyed as
// "<host>:oauth_token".
type tinyConfig map[string]string

// ActiveToken returns the token stored for host (empty when absent) and the
// fixed source name "oauth_token".
func (c tinyConfig) ActiveToken(host string) (string, string) {
	const source = "oauth_token"
	key := fmt.Sprintf("%s:%s", host, source)
	return c[key], source
}
// verboseLogScrubbers maps volatile fragments of the verbose HTTP log
// (timestamps, local host:port, durations, time zones) to stable
// placeholders so tests can compare output deterministically. Patterns are
// applied in order.
var verboseLogScrubbers = []struct {
	re          *regexp.Regexp
	replacement string
}{
	{regexp.MustCompile(`(?m)^\* Request at .+`), "* Request at <time>"},
	{regexp.MustCompile(`127\.0\.0\.1:\d+`), "<host>:<port>"},
	{regexp.MustCompile(`(?m)^< Date: .+`), "< Date: <time>"},
	{regexp.MustCompile(`(?m)^\* Request took .+`), "* Request took <duration>"},
	{regexp.MustCompile(`(?m)^> Time-Zone: .+`), "> Time-Zone: <timezone>"},
}

// normalizeVerboseLog replaces every volatile value in a verbose HTTP log
// with a fixed placeholder and returns the normalized text.
func normalizeVerboseLog(t string) string {
	for _, s := range verboseLogScrubbers {
		t = s.re.ReplaceAllString(t, s.replacement)
	}
	return t
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/api/queries_repo.go | api/queries_repo.go | package api
import (
"bytes"
"context"
"encoding/json"
"errors"
"fmt"
"io"
"net/http"
"sort"
"strings"
"time"
"github.com/cli/cli/v2/internal/gh"
"github.com/cli/cli/v2/internal/ghinstance"
"golang.org/x/sync/errgroup"
"github.com/cli/cli/v2/internal/ghrepo"
ghAPI "github.com/cli/go-gh/v2/pkg/api"
"github.com/shurcooL/githubv4"
)
const (
	// errorResolvingOrganization is the message substring the GitHub API
	// returns when an organization login cannot be resolved.
	errorResolvingOrganization = "Could not resolve to an Organization"
)
// Repository contains information about a GitHub repo
type Repository struct {
	// Identity and URLs.
	ID                       string
	Name                     string
	NameWithOwner            string
	Owner                    RepositoryOwner
	Parent                   *Repository
	TemplateRepository       *Repository
	Description              string
	HomepageURL              string
	OpenGraphImageURL        string
	UsesCustomOpenGraphImage bool
	URL                      string
	SSHURL                   string
	MirrorURL                string
	SecurityPolicyURL        string
	// Timestamps; pointer fields are nil when the event never happened.
	CreatedAt  time.Time
	PushedAt   *time.Time
	UpdatedAt  time.Time
	ArchivedAt *time.Time
	// Feature toggles and merge settings.
	IsBlankIssuesEnabled    bool
	IsSecurityPolicyEnabled bool
	HasIssuesEnabled        bool
	HasProjectsEnabled      bool
	HasDiscussionsEnabled   bool
	HasWikiEnabled          bool
	MergeCommitAllowed      bool
	SquashMergeAllowed      bool
	RebaseMergeAllowed      bool
	AutoMergeAllowed        bool
	// Counters.
	ForkCount      int
	StargazerCount int
	Watchers       struct {
		TotalCount int `json:"totalCount"`
	}
	Issues struct {
		TotalCount int `json:"totalCount"`
	}
	PullRequests struct {
		TotalCount int `json:"totalCount"`
	}
	// Community files, flags, and viewer-specific fields.
	CodeOfConduct                 *CodeOfConduct
	ContactLinks                  []ContactLink
	DefaultBranchRef              BranchRef
	DeleteBranchOnMerge           bool
	DiskUsage                     int
	FundingLinks                  []FundingLink
	IsArchived                    bool
	IsEmpty                       bool
	IsFork                        bool
	ForkingAllowed                bool
	IsInOrganization              bool
	IsMirror                      bool
	IsPrivate                     bool
	IsTemplate                    bool
	IsUserConfigurationRepository bool
	LicenseInfo                   *RepositoryLicense
	ViewerCanAdminister           bool
	ViewerDefaultCommitEmail      string
	ViewerDefaultMergeMethod      string
	ViewerHasStarred              bool
	ViewerPermission              string
	ViewerPossibleCommitEmails    []string
	ViewerSubscription            string
	Visibility                    string
	// Topics and languages.
	RepositoryTopics struct {
		Nodes []struct {
			Topic RepositoryTopic
		}
	}
	PrimaryLanguage *CodingLanguage
	Languages       struct {
		Edges []struct {
			Size int            `json:"size"`
			Node CodingLanguage `json:"node"`
		}
	}
	// Templates, labels, milestones, releases.
	IssueTemplates       []IssueTemplate
	PullRequestTemplates []PullRequestTemplate
	Labels               struct {
		Nodes []IssueLabel
	}
	Milestones struct {
		Nodes []Milestone
	}
	LatestRelease *RepositoryRelease
	// Users and projects associated with the repository.
	AssignableUsers struct {
		Nodes []GitHubUser
	}
	MentionableUsers struct {
		Nodes []GitHubUser
	}
	Projects struct {
		Nodes []RepoProject
	}
	ProjectsV2 struct {
		Nodes []ProjectV2
	}

	// pseudo-field that keeps track of host name of this repo
	hostname string
}
// RepositoryOwner is the owner of a GitHub repository
type RepositoryOwner struct {
	ID    string `json:"id"`
	Login string `json:"login"`
}

// GitHubUser is a user account that can be assigned or mentioned.
type GitHubUser struct {
	ID         string `json:"id"`
	Login      string `json:"login"`
	Name       string `json:"name"`
	DatabaseID int64  `json:"databaseId"`
}

// Actor is a superset of User and Bot, among others.
// At the time of writing, some of these fields
// are not directly supported by the Actor type and
// instead are only available on the User or Bot types
// directly.
type Actor struct {
	ID       string `json:"id"`
	Login    string `json:"login"`
	Name     string `json:"name"`
	TypeName string `json:"__typename"`
}

// BranchRef is the branch name in a GitHub repository
type BranchRef struct {
	Name string `json:"name"`
}

// CodeOfConduct describes a repository's code-of-conduct file.
type CodeOfConduct struct {
	Key  string `json:"key"`
	Name string `json:"name"`
	URL  string `json:"url"`
}

// RepositoryLicense identifies the license attached to a repository.
type RepositoryLicense struct {
	Key      string `json:"key"`
	Name     string `json:"name"`
	Nickname string `json:"nickname"`
}

// ContactLink is an external support/contact link configured for the repo.
type ContactLink struct {
	About string `json:"about"`
	Name  string `json:"name"`
	URL   string `json:"url"`
}

// FundingLink is a sponsorship link configured for the repository.
type FundingLink struct {
	Platform string `json:"platform"`
	URL      string `json:"url"`
}

// CodingLanguage is a programming language detected in a repository.
type CodingLanguage struct {
	Name string `json:"name"`
}

// IssueTemplate is an issue template configured in the repository.
type IssueTemplate struct {
	Name  string `json:"name"`
	Title string `json:"title"`
	Body  string `json:"body"`
	About string `json:"about"`
}

// PullRequestTemplate is a pull request template configured in the repository.
type PullRequestTemplate struct {
	Filename string `json:"filename"`
	Body     string `json:"body"`
}

// RepositoryTopic is a topic label attached to a repository.
type RepositoryTopic struct {
	Name string `json:"name"`
}

// RepositoryRelease describes a published release of a repository.
type RepositoryRelease struct {
	Name        string    `json:"name"`
	TagName     string    `json:"tagName"`
	URL         string    `json:"url"`
	PublishedAt time.Time `json:"publishedAt"`
}

// IssueLabel is a label that can be applied to issues and pull requests.
type IssueLabel struct {
	ID          string `json:"id"`
	Name        string `json:"name"`
	Description string `json:"description"`
	Color       string `json:"color"`
}

// License is a license definition from the GitHub REST API
// (note the snake_case JSON field names).
type License struct {
	Key            string   `json:"key"`
	Name           string   `json:"name"`
	SPDXID         string   `json:"spdx_id"`
	URL            string   `json:"url"`
	NodeID         string   `json:"node_id"`
	HTMLURL        string   `json:"html_url"`
	Description    string   `json:"description"`
	Implementation string   `json:"implementation"`
	Permissions    []string `json:"permissions"`
	Conditions     []string `json:"conditions"`
	Limitations    []string `json:"limitations"`
	Body           string   `json:"body"`
	Featured       bool     `json:"featured"`
}

// GitIgnore is a gitignore template from the GitHub REST API.
type GitIgnore struct {
	Name   string `json:"name"`
	Source string `json:"source"`
}
// RepoOwner is the login name of the owner
func (r Repository) RepoOwner() string {
	return r.Owner.Login
}

// RepoName is the name of the repository
func (r Repository) RepoName() string {
	return r.Name
}

// RepoHost is the GitHub hostname of the repository
// (populated by InitRepoHostname, not by the API response).
func (r Repository) RepoHost() string {
	return r.hostname
}
// ViewerCanPush reports whether the requesting user has push access to the
// repository, i.e. WRITE permission or higher.
func (r Repository) ViewerCanPush() bool {
	return r.ViewerPermission == "ADMIN" ||
		r.ViewerPermission == "MAINTAIN" ||
		r.ViewerPermission == "WRITE"
}

// ViewerCanTriage reports whether the requesting user can triage issues and
// pull requests, i.e. TRIAGE permission or higher.
func (r Repository) ViewerCanTriage() bool {
	return r.ViewerCanPush() || r.ViewerPermission == "TRIAGE"
}
// FetchRepository fetches a repository with the given GraphQL fields
// (assembled into a selection set by RepositoryGraphQL) and stamps the host
// name onto the result.
func FetchRepository(client *Client, repo ghrepo.Interface, fields []string) (*Repository, error) {
	query := fmt.Sprintf(`query RepositoryInfo($owner: String!, $name: String!) {
		repository(owner: $owner, name: $name) {%s}
	}`, RepositoryGraphQL(fields))

	variables := map[string]interface{}{
		"owner": repo.RepoOwner(),
		"name":  repo.RepoName(),
	}

	var result struct {
		Repository *Repository
	}
	if err := client.GraphQL(repo.RepoHost(), query, variables, &result); err != nil {
		return nil, err
	}
	// The GraphQL API should have returned an error in case of a missing repository, but this isn't
	// guaranteed to happen when an authentication token with insufficient permissions is being used.
	if result.Repository == nil {
		return nil, GraphQLError{
			GraphQLError: &ghAPI.GraphQLError{
				Errors: []ghAPI.GraphQLErrorItem{{
					Type:    "NOT_FOUND",
					Message: fmt.Sprintf("Could not resolve to a Repository with the name '%s/%s'.", repo.RepoOwner(), repo.RepoName()),
				}},
			},
		}
	}

	return InitRepoHostname(result.Repository, repo.RepoHost()), nil
}
// GitHubRepo fetches a fixed set of core repository fields (identity,
// permissions, default branch, merge settings, and the fork parent) and
// stamps the host name onto the result.
func GitHubRepo(client *Client, repo ghrepo.Interface) (*Repository, error) {
	query := `
	fragment repo on Repository {
		id
		name
		owner { login }
		hasIssuesEnabled
		description
		hasWikiEnabled
		viewerPermission
		defaultBranchRef {
			name
		}
	}

	query RepositoryInfo($owner: String!, $name: String!) {
		repository(owner: $owner, name: $name) {
			...repo
			parent {
				...repo
			}
			mergeCommitAllowed
			rebaseMergeAllowed
			squashMergeAllowed
		}
	}`
	variables := map[string]interface{}{
		"owner": repo.RepoOwner(),
		"name":  repo.RepoName(),
	}

	var result struct {
		Repository *Repository
	}
	if err := client.GraphQL(repo.RepoHost(), query, variables, &result); err != nil {
		return nil, err
	}
	// The GraphQL API should have returned an error in case of a missing repository, but this isn't
	// guaranteed to happen when an authentication token with insufficient permissions is being used.
	if result.Repository == nil {
		return nil, GraphQLError{
			GraphQLError: &ghAPI.GraphQLError{
				Errors: []ghAPI.GraphQLErrorItem{{
					Type:    "NOT_FOUND",
					Message: fmt.Sprintf("Could not resolve to a Repository with the name '%s/%s'.", repo.RepoOwner(), repo.RepoName()),
				}},
			},
		}
	}

	return InitRepoHostname(result.Repository, repo.RepoHost()), nil
}
// RepoDefaultBranch returns the name of the default branch for repo, using
// the value already cached on a *Repository when available and querying the
// API otherwise.
func RepoDefaultBranch(client *Client, repo ghrepo.Interface) (string, error) {
	if cached, ok := repo.(*Repository); ok {
		if name := cached.DefaultBranchRef.Name; name != "" {
			return name, nil
		}
	}

	fetched, err := GitHubRepo(client, repo)
	if err != nil {
		return "", err
	}
	return fetched.DefaultBranchRef.Name, nil
}
// CanPushToRepo reports whether the authenticated user has push access to
// repo, consulting the cached ViewerPermission when available and querying
// the API otherwise.
func CanPushToRepo(httpClient *http.Client, repo ghrepo.Interface) (bool, error) {
	if cached, ok := repo.(*Repository); ok && cached.ViewerPermission != "" {
		return cached.ViewerCanPush(), nil
	}

	fetched, err := GitHubRepo(NewClientFromHTTP(httpClient), repo)
	if err != nil {
		return false, err
	}
	return fetched.ViewerCanPush(), nil
}
// RepoParent finds out the parent repository of a fork.
// It returns (nil, nil) when the repository is not a fork.
func RepoParent(client *Client, repo ghrepo.Interface) (ghrepo.Interface, error) {
	var query struct {
		Repository struct {
			Parent *struct {
				Name  string
				Owner struct {
					Login string
				}
			}
		} `graphql:"repository(owner: $owner, name: $name)"`
	}
	variables := map[string]interface{}{
		"owner": githubv4.String(repo.RepoOwner()),
		"name":  githubv4.String(repo.RepoName()),
	}

	err := client.Query(repo.RepoHost(), "RepositoryFindParent", &query, variables)
	if err != nil {
		return nil, err
	}
	if query.Repository.Parent == nil {
		return nil, nil
	}

	// The parent is assumed to live on the same host as the fork.
	parent := ghrepo.NewWithHost(query.Repository.Parent.Owner.Login, query.Repository.Parent.Name, repo.RepoHost())
	return parent, nil
}
// RepoNetworkResult describes the relationship between related repositories
type RepoNetworkResult struct {
	// ViewerLogin is the login of the authenticated user.
	ViewerLogin string
	// Repositories holds one entry per requested repository, in request
	// order; entries are nil for repositories that could not be found.
	Repositories []*Repository
}
// RepoNetwork inspects the relationship between multiple GitHub repositories.
// All repos are assumed to live on the same host as the first one; NOT_FOUND
// errors are tolerated and produce nil entries in the result.
func RepoNetwork(client *Client, repos []ghrepo.Interface) (RepoNetworkResult, error) {
	var hostname string
	if len(repos) > 0 {
		hostname = repos[0].RepoHost()
	}

	// Build one aliased sub-query per repository (repo_000, repo_001, ...).
	queries := make([]string, 0, len(repos))
	for i, repo := range repos {
		queries = append(queries, fmt.Sprintf(`
		repo_%03d: repository(owner: %q, name: %q) {
			...repo
			parent {
				...repo
			}
		}
		`, i, repo.RepoOwner(), repo.RepoName()))
	}

	// Since the query is constructed dynamically, we can't parse a response
	// format using a static struct. Instead, hold the raw JSON data until we
	// decide how to parse it manually.
	graphqlResult := make(map[string]*json.RawMessage)
	var result RepoNetworkResult

	err := client.GraphQL(hostname, fmt.Sprintf(`
	fragment repo on Repository {
		id
		name
		owner { login }
		viewerPermission
		defaultBranchRef {
			name
		}
		isPrivate
	}
	query RepositoryNetwork {
		viewer { login }
		%s
	}
	`, strings.Join(queries, "")), nil, &graphqlResult)
	var graphqlError GraphQLError
	if errors.As(err, &graphqlError) {
		// If the only errors are that certain repositories are not found,
		// continue processing this response instead of returning an error
		tolerated := true
		for _, ge := range graphqlError.Errors {
			if ge.Type != "NOT_FOUND" {
				tolerated = false
			}
		}
		if tolerated {
			err = nil
		}
	}
	if err != nil {
		return result, err
	}

	keys := make([]string, 0, len(graphqlResult))
	for key := range graphqlResult {
		keys = append(keys, key)
	}
	// sort keys to ensure `repo_{N}` entries are processed in order
	sort.Strings(keys)

	// Iterate over keys of GraphQL response data and, based on its name,
	// dynamically allocate the target struct an individual message gets decoded to.
	for _, name := range keys {
		jsonMessage := graphqlResult[name]
		if name == "viewer" {
			viewerResult := struct {
				Login string
			}{}
			decoder := json.NewDecoder(bytes.NewReader([]byte(*jsonMessage)))
			if err := decoder.Decode(&viewerResult); err != nil {
				return result, err
			}
			result.ViewerLogin = viewerResult.Login
		} else if strings.HasPrefix(name, "repo_") {
			// A nil message means the repository was not found; keep a nil
			// placeholder so indexes still line up with the request order.
			if jsonMessage == nil {
				result.Repositories = append(result.Repositories, nil)
				continue
			}
			var repo Repository
			decoder := json.NewDecoder(bytes.NewReader(*jsonMessage))
			if err := decoder.Decode(&repo); err != nil {
				return result, err
			}
			result.Repositories = append(result.Repositories, InitRepoHostname(&repo, hostname))
		} else {
			return result, fmt.Errorf("unknown GraphQL result key %q", name)
		}
	}
	return result, nil
}
// InitRepoHostname stamps hostname onto repo (and onto its fork parent, when
// present) and returns repo for convenient chaining.
func InitRepoHostname(repo *Repository, hostname string) *Repository {
	repo.hostname = hostname
	if parent := repo.Parent; parent != nil {
		parent.hostname = hostname
	}
	return repo
}
// RepositoryV3 is the repository result from GitHub API v3
// (REST; note the snake_case JSON field names).
type repositoryV3 struct {
	NodeID    string `json:"node_id"`
	Name      string
	CreatedAt time.Time `json:"created_at"`
	Owner     struct {
		Login string
	}
	Private bool
	HTMLUrl string `json:"html_url"`
	// Parent is set when the repository is a fork.
	Parent *repositoryV3
}
// ForkRepo forks the repository on GitHub and returns the new repository.
// org (optional) forks into an organization, newName (optional) renames the
// fork, and defaultBranchOnly limits the fork to the default branch.
func ForkRepo(client *Client, repo ghrepo.Interface, org, newName string, defaultBranchOnly bool) (*Repository, error) {
	path := fmt.Sprintf("repos/%s/forks", ghrepo.FullName(repo))

	params := map[string]interface{}{}
	if org != "" {
		params["organization"] = org
	}
	if newName != "" {
		params["name"] = newName
	}
	if defaultBranchOnly {
		params["default_branch_only"] = true
	}

	body := &bytes.Buffer{}
	enc := json.NewEncoder(body)
	if err := enc.Encode(params); err != nil {
		return nil, err
	}

	result := repositoryV3{}
	err := client.REST(repo.RepoHost(), "POST", path, body, &result)
	if err != nil {
		return nil, err
	}

	// The REST response does not include permissions; forks are always
	// pushable by their creator, hence the hard-coded WRITE.
	newRepo := &Repository{
		ID:        result.NodeID,
		Name:      result.Name,
		CreatedAt: result.CreatedAt,
		Owner: RepositoryOwner{
			Login: result.Owner.Login,
		},
		ViewerPermission: "WRITE",
		hostname:         repo.RepoHost(),
	}

	// The GitHub API will happily return a HTTP 200 when attempting to fork own repo even though no forking
	// actually took place. Ensure that we raise an error instead.
	if ghrepo.IsSame(repo, newRepo) {
		return newRepo, fmt.Errorf("%s cannot be forked. A single user account cannot own both a parent and fork.", ghrepo.FullName(repo))
	}

	return newRepo, nil
}
// RenameRepo renames the repository on GitHub and returns the renamed
// repository.
func RenameRepo(client *Client, repo ghrepo.Interface, newRepoName string) (*Repository, error) {
	body := &bytes.Buffer{}
	if err := json.NewEncoder(body).Encode(map[string]string{"name": newRepoName}); err != nil {
		return nil, err
	}

	path := fmt.Sprintf("%srepos/%s",
		ghinstance.RESTPrefix(repo.RepoHost()),
		ghrepo.FullName(repo))

	var result repositoryV3
	if err := client.REST(repo.RepoHost(), "PATCH", path, body, &result); err != nil {
		return nil, err
	}

	renamed := &Repository{
		ID:        result.NodeID,
		Name:      result.Name,
		CreatedAt: result.CreatedAt,
		Owner: RepositoryOwner{
			Login: result.Owner.Login,
		},
		ViewerPermission: "WRITE",
		hostname:         repo.RepoHost(),
	}
	return renamed, nil
}
// LastCommit returns the most recent commit on the default branch of repo.
func LastCommit(client *Client, repo ghrepo.Interface) (*Commit, error) {
	var responseData struct {
		Repository struct {
			DefaultBranchRef struct {
				Target struct {
					// Inline fragment: the ref target is resolved as a Commit.
					Commit `graphql:"... on Commit"`
				}
			}
		} `graphql:"repository(owner: $owner, name: $repo)"`
	}
	variables := map[string]interface{}{
		"owner": githubv4.String(repo.RepoOwner()), "repo": githubv4.String(repo.RepoName()),
	}
	if err := client.Query(repo.RepoHost(), "LastCommit", &responseData, variables); err != nil {
		return nil, err
	}
	return &responseData.Repository.DefaultBranchRef.Target.Commit, nil
}
// RepoFindForks finds forks of the repo that are affiliated with the viewer,
// returning at most limit results that the viewer can push to.
func RepoFindForks(client *Client, repo ghrepo.Interface, limit int) ([]*Repository, error) {
	result := struct {
		Repository struct {
			Forks struct {
				Nodes []Repository
			}
		}
	}{}

	variables := map[string]interface{}{
		"owner": repo.RepoOwner(),
		"repo":  repo.RepoName(),
		"limit": limit,
	}

	if err := client.GraphQL(repo.RepoHost(), `
	query RepositoryFindFork($owner: String!, $repo: String!, $limit: Int!) {
		repository(owner: $owner, name: $repo) {
			forks(first: $limit, affiliations: [OWNER, COLLABORATOR]) {
				nodes {
					id
					name
					owner { login }
					url
					viewerPermission
				}
			}
		}
	}
	`, variables, &result); err != nil {
		return nil, err
	}

	var results []*Repository
	for _, r := range result.Repository.Forks.Nodes {
		// Copy the loop variable before taking its address: on toolchains
		// before Go 1.22 every iteration reuses a single variable, so &r
		// would make all entries alias the last element.
		r := r
		// we check ViewerCanPush, even though we expect it to always be true per
		// `affiliations` condition, to guard against versions of GitHub with a
		// faulty `affiliations` implementation
		if !r.ViewerCanPush() {
			continue
		}
		results = append(results, InitRepoHostname(&r, repo.RepoHost()))
	}

	return results, nil
}
// RepoMetadataResult caches repository metadata used when attaching
// assignees, reviewers, labels, projects, and milestones to issues and
// pull requests.
type RepoMetadataResult struct {
	CurrentLogin     string
	AssignableUsers  []AssignableUser
	AssignableActors []AssignableActor
	Labels           []RepoLabel
	Projects         []RepoProject
	ProjectsV2       []ProjectV2
	Milestones       []RepoMilestone
	Teams            []OrgTeam
}
// MembersToIDs resolves each assignee login to its node ID, checking
// assignable users first and then assignable actors (matching either login
// or display name, case-insensitively). An error is returned for any name
// with no match.
func (m *RepoMetadataResult) MembersToIDs(names []string) ([]string, error) {
	var ids []string
nextName:
	for _, assigneeLogin := range names {
		for _, u := range m.AssignableUsers {
			if strings.EqualFold(assigneeLogin, u.Login()) {
				ids = append(ids, u.ID())
				continue nextName
			}
		}
		// Look for ID in assignable actors if not found in assignable users
		for _, a := range m.AssignableActors {
			if strings.EqualFold(assigneeLogin, a.Login()) || strings.EqualFold(assigneeLogin, a.DisplayName()) {
				ids = append(ids, a.ID())
				continue nextName
			}
		}
		// And if we still didn't find an ID, return an error
		return nil, fmt.Errorf("'%s' not found", assigneeLogin)
	}
	return ids, nil
}
// TeamsToIDs resolves each "org/slug" (or bare slug) team name to its node
// ID. An error is returned for any team with no match.
func (m *RepoMetadataResult) TeamsToIDs(names []string) ([]string, error) {
	var ids []string
	for _, teamSlug := range names {
		// Everything after the first "/" is the slug; a name without a
		// slash is used as-is.
		slug := teamSlug
		if idx := strings.IndexRune(teamSlug, '/'); idx >= 0 {
			slug = teamSlug[idx+1:]
		}
		matched := false
		for _, t := range m.Teams {
			if strings.EqualFold(slug, t.Slug) {
				ids = append(ids, t.ID)
				matched = true
				break
			}
		}
		if !matched {
			return nil, fmt.Errorf("'%s' not found", teamSlug)
		}
	}
	return ids, nil
}
// LabelsToIDs resolves each label name (case-insensitively) to its node ID.
// An error is returned for any label with no match.
func (m *RepoMetadataResult) LabelsToIDs(names []string) ([]string, error) {
	var ids []string
nextLabel:
	for _, labelName := range names {
		for _, l := range m.Labels {
			if strings.EqualFold(labelName, l.Name) {
				ids = append(ids, l.ID)
				continue nextLabel
			}
		}
		return nil, fmt.Errorf("'%s' not found", labelName)
	}
	return ids, nil
}
// ProjectsTitlesToIDs returns two arrays:
//   - the first contains IDs of projects V1
//   - the second contains IDs of projects V2
//   - if neither project V1 or project V2 can be found with a given name,
//     then an error is returned
func (m *RepoMetadataResult) ProjectsTitlesToIDs(titles []string) ([]string, []string, error) {
	var v1IDs []string
	var v2IDs []string
	for _, title := range titles {
		if id, ok := m.v1ProjectNameToID(title); ok {
			v1IDs = append(v1IDs, id)
		} else if id, ok := m.v2ProjectTitleToID(title); ok {
			v2IDs = append(v2IDs, id)
		} else {
			return nil, nil, fmt.Errorf("'%s' not found", title)
		}
	}
	return v1IDs, v2IDs, nil
}
// We use the word "titles" when referring to v1 and v2 projects.
// In reality, v1 projects really have "names", so there is a bit of a
// mismatch we just need to gloss over.

// v1ProjectNameToID returns the ID of the classic project whose name matches
// case-insensitively, and whether one was found.
func (m *RepoMetadataResult) v1ProjectNameToID(name string) (string, bool) {
	for i := range m.Projects {
		if strings.EqualFold(name, m.Projects[i].Name) {
			return m.Projects[i].ID, true
		}
	}
	return "", false
}

// v2ProjectTitleToID returns the ID of the Projects v2 project whose title
// matches case-insensitively, and whether one was found.
func (m *RepoMetadataResult) v2ProjectTitleToID(title string) (string, bool) {
	for i := range m.ProjectsV2 {
		if strings.EqualFold(title, m.ProjectsV2[i].Title) {
			return m.ProjectsV2[i].ID, true
		}
	}
	return "", false
}
// projectResourcePathToPath converts a project resourcePath — either
// "/orgs/ORG/projects/NUMBER", "/users/USER/projects/NUMBER", or
// "/OWNER/REPO/projects/NUMBER" — into the short "OWNER/NUMBER" or
// "OWNER/REPO/NUMBER" form. Extracted to remove the identical derivation
// previously duplicated in the v1 and v2 matching loops.
func projectResourcePathToPath(resourcePath string) string {
	pathParts := strings.Split(resourcePath, "/")
	if pathParts[1] == "orgs" || pathParts[1] == "users" {
		return fmt.Sprintf("%s/%s", pathParts[2], pathParts[4])
	}
	return fmt.Sprintf("%s/%s/%s", pathParts[1], pathParts[2], pathParts[4])
}

// ProjectTitlesToPaths resolves each project title to its short path,
// matching classic (v1) projects first when supported and Projects v2
// otherwise. An error is returned for any title with no match.
func ProjectTitlesToPaths(client *Client, repo ghrepo.Interface, titles []string, projectsV1Support gh.ProjectsV1Support) ([]string, error) {
	paths := make([]string, 0, len(titles))
	matchedPaths := map[string]struct{}{}

	// TODO: ProjectsV1Cleanup
	// At this point, we only know the names that the user has provided, so we can't push this conditional up the stack.
	// First we'll try to match against v1 projects, if supported
	if projectsV1Support == gh.ProjectsV1Supported {
		v1Projects, err := v1Projects(client, repo)
		if err != nil {
			return nil, err
		}

		for _, title := range titles {
			for _, p := range v1Projects {
				if strings.EqualFold(title, p.Name) {
					paths = append(paths, projectResourcePathToPath(p.ResourcePath))
					matchedPaths[title] = struct{}{}
					break
				}
			}
		}
	}

	// Then we'll try to match against v2 projects
	v2Projects, err := v2Projects(client, repo)
	if err != nil {
		return nil, err
	}

	for _, title := range titles {
		// If we already found a v1 project with this name, skip it
		if _, ok := matchedPaths[title]; ok {
			continue
		}
		found := false
		for _, p := range v2Projects {
			if strings.EqualFold(title, p.Title) {
				paths = append(paths, projectResourcePathToPath(p.ResourcePath))
				found = true
				break
			}
		}
		if !found {
			return nil, fmt.Errorf("'%s' not found", title)
		}
	}
	return paths, nil
}
// MilestoneToID resolves a milestone title (case-insensitively) to its node
// ID, returning an error when no milestone matches.
func (m *RepoMetadataResult) MilestoneToID(title string) (string, error) {
	for _, milestone := range m.Milestones {
		if !strings.EqualFold(title, milestone.Title) {
			continue
		}
		return milestone.ID, nil
	}
	return "", fmt.Errorf("'%s' not found", title)
}
// Merge overlays metadata from m2 onto m. Each collection in m is replaced
// when m2 has entries for it, or when m has none of its own (so an empty m2
// collection can only overwrite an already-empty one).
func (m *RepoMetadataResult) Merge(m2 *RepoMetadataResult) {
	if len(m.AssignableUsers) == 0 || len(m2.AssignableUsers) > 0 {
		m.AssignableUsers = m2.AssignableUsers
	}

	if len(m.Teams) == 0 || len(m2.Teams) > 0 {
		m.Teams = m2.Teams
	}

	if len(m.Labels) == 0 || len(m2.Labels) > 0 {
		m.Labels = m2.Labels
	}

	if len(m.Projects) == 0 || len(m2.Projects) > 0 {
		m.Projects = m2.Projects
	}

	if len(m.Milestones) == 0 || len(m2.Milestones) > 0 {
		m.Milestones = m2.Milestones
	}
}
// RepoMetadataInput selects which categories of repository metadata
// RepoMetadata should fetch.
type RepoMetadataInput struct {
	Assignees bool // fetch users assignable to issues/PRs
	ActorAssignees bool // use the Actors API (users and bots) instead of legacy assignable users
	Reviewers bool // fetch data needed for requesting PR reviewers (users + current login)
	TeamReviewers bool // additionally fetch organization teams (only when Reviewers is set)
	Labels bool // fetch repository labels
	ProjectsV1 bool // fetch classic (v1) projects
	ProjectsV2 bool // fetch ProjectsV2
	Milestones bool // fetch open milestones
}
// RepoMetadata pre-fetches the metadata for attaching to issues and pull requests
//
// The categories to fetch are selected via input. Independent categories are
// fetched concurrently through an errgroup; the first error encountered
// aborts the whole call.
func RepoMetadata(client *Client, repo ghrepo.Interface, input RepoMetadataInput) (*RepoMetadataResult, error) {
	var result RepoMetadataResult
	var g errgroup.Group

	if input.Assignees || input.Reviewers {
		if input.ActorAssignees {
			g.Go(func() error {
				actors, err := RepoAssignableActors(client, repo)
				if err != nil {
					return fmt.Errorf("error fetching assignable actors: %w", err)
				}
				result.AssignableActors = actors

				// Filter actors for users to use for pull request reviewers,
				// skip retrieving the same info through RepoAssignableUsers().
				var users []AssignableUser
				for _, a := range actors {
					// Comma-ok assertion avoids asserting the same value twice.
					if u, ok := a.(AssignableUser); ok {
						users = append(users, u)
					}
				}
				result.AssignableUsers = users
				return nil
			})
		} else {
			// Not using Actors, fetch legacy assignable users.
			g.Go(func() error {
				users, err := RepoAssignableUsers(client, repo)
				if err != nil {
					err = fmt.Errorf("error fetching assignable users: %w", err)
				}
				result.AssignableUsers = users
				return err
			})
		}
	}
	if input.Reviewers && input.TeamReviewers {
		g.Go(func() error {
			teams, err := OrganizationTeams(client, repo)
			// TODO: better detection of non-org repos
			if err != nil && !strings.Contains(err.Error(), errorResolvingOrganization) {
				err = fmt.Errorf("error fetching organization teams: %w", err)
				return err
			}
			result.Teams = teams
			return nil
		})
	}
	if input.Reviewers {
		g.Go(func() error {
			login, err := CurrentLoginName(client, repo.RepoHost())
			if err != nil {
				err = fmt.Errorf("error fetching current login: %w", err)
			}
			result.CurrentLogin = login
			return err
		})
	}
	if input.Labels {
		g.Go(func() error {
			labels, err := RepoLabels(client, repo)
			if err != nil {
				err = fmt.Errorf("error fetching labels: %w", err)
			}
			result.Labels = labels
			return err
		})
	}
	if input.ProjectsV1 {
		g.Go(func() error {
			var err error
			result.Projects, err = v1Projects(client, repo)
			return err
		})
	}
	if input.ProjectsV2 {
		g.Go(func() error {
			var err error
			result.ProjectsV2, err = v2Projects(client, repo)
			return err
		})
	}
	if input.Milestones {
		g.Go(func() error {
			milestones, err := RepoMilestones(client, repo, "open")
			if err != nil {
				err = fmt.Errorf("error fetching milestones: %w", err)
			}
			result.Milestones = milestones
			return err
		})
	}

	if err := g.Wait(); err != nil {
		return nil, err
	}

	return &result, nil
}
// RepoProject represents a classic (v1) project as returned by the GraphQL
// API.
type RepoProject struct {
	ID string `json:"id"`
	Name string `json:"name"`
	Number int `json:"number"`
	ResourcePath string `json:"resourcePath"`
}
// RepoProjects fetches all open projects for a repository.
//
// Results are paginated 100 at a time and accumulated until the API reports
// no further pages.
func RepoProjects(client *Client, repo ghrepo.Interface) ([]RepoProject, error) {
	type responseData struct {
		Repository struct {
			Projects struct {
				Nodes []RepoProject
				PageInfo struct {
					HasNextPage bool
					EndCursor string
				}
			} `graphql:"projects(states: [OPEN], first: 100, orderBy: {field: NAME, direction: ASC}, after: $endCursor)"`
		} `graphql:"repository(owner: $owner, name: $name)"`
	}

	variables := map[string]interface{}{
		"owner": githubv4.String(repo.RepoOwner()),
		"name": githubv4.String(repo.RepoName()),
		// nil cursor fetches the first page.
		"endCursor": (*githubv4.String)(nil),
	}

	var projects []RepoProject
	for {
		var query responseData
		err := client.Query(repo.RepoHost(), "RepositoryProjectList", &query, variables)
		if err != nil {
			return nil, err
		}

		projects = append(projects, query.Repository.Projects.Nodes...)
		if !query.Repository.Projects.PageInfo.HasNextPage {
			break
		}
		// Advance the cursor for the next page.
		variables["endCursor"] = githubv4.String(query.Repository.Projects.PageInfo.EndCursor)
	}

	return projects, nil
}
// Expected login for Copilot when retrieved as an Actor
// This is returned from assignable actors and issue/pr assigned actors.
// We use this to check if the actor is Copilot.
const CopilotActorLogin = "copilot-swe-agent"

// Display name used for the Copilot actor.
const CopilotActorName = "Copilot"

// AssignableActor is a user or bot that can be assigned to an issue or pull
// request. The unexported sealedAssignableActor method restricts
// implementations to this package (AssignableUser and AssignableBot).
type AssignableActor interface {
	DisplayName() string
	ID() string
	Login() string

	sealedAssignableActor()
}
// AssignableUser is an AssignableActor that is always a user.
type AssignableUser struct {
	id    string
	login string
	name  string
}

// NewAssignableUser builds an AssignableUser from its ID, login, and
// (possibly empty) display name.
func NewAssignableUser(id, login, name string) AssignableUser {
	return AssignableUser{id: id, login: login, name: name}
}

// DisplayName returns a formatted string that uses Login and Name to be displayed e.g. 'Login (Name)' or 'Login'
func (u AssignableUser) DisplayName() string {
	if u.name == "" {
		return u.login
	}
	return fmt.Sprintf("%s (%s)", u.login, u.name)
}

// ID returns the user's node ID.
func (u AssignableUser) ID() string { return u.id }

// Login returns the user's login handle.
func (u AssignableUser) Login() string { return u.login }

// Name returns the user's display name, which may be empty.
func (u AssignableUser) Name() string { return u.name }

func (u AssignableUser) sealedAssignableActor() {}
// AssignableBot is an AssignableActor backed by a Bot account.
type AssignableBot struct {
	id    string
	login string
}

// NewAssignableBot builds an AssignableBot from its ID and login.
func NewAssignableBot(id, login string) AssignableBot {
	return AssignableBot{id: id, login: login}
}

// DisplayName returns a human-friendly name for the bot; Copilot gets a
// special "(AI)" suffix, every other bot shows its login.
func (b AssignableBot) DisplayName() string {
	switch b.login {
	case CopilotActorLogin:
		return fmt.Sprintf("%s (AI)", CopilotActorName)
	default:
		return b.Login()
	}
}

// ID returns the bot's node ID.
func (b AssignableBot) ID() string { return b.id }

// Login returns the bot's login handle.
func (b AssignableBot) Login() string { return b.login }

// Name returns the empty string: bots have no display name.
func (b AssignableBot) Name() string { return "" }

func (b AssignableBot) sealedAssignableActor() {}
// RepoAssignableUsers fetches all the assignable users for a repository
//
// Results are paginated 100 at a time and accumulated until the API reports
// no further pages.
func RepoAssignableUsers(client *Client, repo ghrepo.Interface) ([]AssignableUser, error) {
	type responseData struct {
		Repository struct {
			AssignableUsers struct {
				Nodes []struct {
					ID string
					Login string
					Name string
				}
				PageInfo struct {
					HasNextPage bool
					EndCursor string
				}
			} `graphql:"assignableUsers(first: 100, after: $endCursor)"`
		} `graphql:"repository(owner: $owner, name: $name)"`
	}

	variables := map[string]interface{}{
		"owner": githubv4.String(repo.RepoOwner()),
		"name": githubv4.String(repo.RepoName()),
		// nil cursor fetches the first page.
		"endCursor": (*githubv4.String)(nil),
	}

	var users []AssignableUser
	for {
		var query responseData
		err := client.Query(repo.RepoHost(), "RepositoryAssignableUsers", &query, variables)
		if err != nil {
			return nil, err
		}

		for _, node := range query.Repository.AssignableUsers.Nodes {
			user := AssignableUser{
				id: node.ID,
				login: node.Login,
				name: node.Name,
			}
			users = append(users, user)
		}
		if !query.Repository.AssignableUsers.PageInfo.HasNextPage {
			break
		}
		// Advance the cursor for the next page.
		variables["endCursor"] = githubv4.String(query.Repository.AssignableUsers.PageInfo.EndCursor)
	}

	return users, nil
}
// RepoAssignableActors fetches all the assignable actors for a repository on
// GitHub hosts that support Actor assignees.
//
// Each node is queried with inline fragments for both User and Bot; the
// populated __typename field determines which concrete type to construct.
// Results are paginated 100 at a time.
func RepoAssignableActors(client *Client, repo ghrepo.Interface) ([]AssignableActor, error) {
	type responseData struct {
		Repository struct {
			SuggestedActors struct {
				Nodes []struct {
					User struct {
						ID string
						Login string
						Name string
						TypeName string `graphql:"__typename"`
					} `graphql:"... on User"`
					Bot struct {
						ID string
						Login string
						TypeName string `graphql:"__typename"`
					} `graphql:"... on Bot"`
				}
				PageInfo struct {
					HasNextPage bool
					EndCursor string
				}
			} `graphql:"suggestedActors(first: 100, after: $endCursor, capabilities: CAN_BE_ASSIGNED)"`
		} `graphql:"repository(owner: $owner, name: $name)"`
	}

	variables := map[string]interface{}{
		"owner": githubv4.String(repo.RepoOwner()),
		"name": githubv4.String(repo.RepoName()),
		// nil cursor fetches the first page.
		"endCursor": (*githubv4.String)(nil),
	}

	var actors []AssignableActor
	for {
		var query responseData
		err := client.Query(repo.RepoHost(), "RepositoryAssignableActors", &query, variables)
		if err != nil {
			return nil, err
		}

		for _, node := range query.Repository.SuggestedActors.Nodes {
			// Only the fragment matching the actor's concrete type has its
			// TypeName filled in; nodes that are neither are skipped.
			if node.User.TypeName == "User" {
				actor := AssignableUser{
					id: node.User.ID,
					login: node.User.Login,
					name: node.User.Name,
				}
				actors = append(actors, actor)
			} else if node.Bot.TypeName == "Bot" {
				actor := AssignableBot{
					id: node.Bot.ID,
					login: node.Bot.Login,
				}
				actors = append(actors, actor)
			}
		}
		if !query.Repository.SuggestedActors.PageInfo.HasNextPage {
			break
		}
		// Advance the cursor for the next page.
		variables["endCursor"] = githubv4.String(query.Repository.SuggestedActors.PageInfo.EndCursor)
	}

	return actors, nil
}
// RepoLabel is a repository label's node ID and name.
type RepoLabel struct {
	ID string
	Name string
}
// RepoLabels fetches all the labels in a repository
func RepoLabels(client *Client, repo ghrepo.Interface) ([]RepoLabel, error) {
type responseData struct {
Repository struct {
Labels struct {
Nodes []RepoLabel
PageInfo struct {
HasNextPage bool
EndCursor string
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | true |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/api/export_pr.go | api/export_pr.go | package api
import (
"reflect"
"strings"
)
// ExportData serializes the requested fields of the issue into a map suitable
// for JSON output. Connection-style fields are flattened to their nodes (or
// to hand-built maps); every other field is read off the struct via
// case-insensitive reflection (see fieldByName).
func (issue *Issue) ExportData(fields []string) map[string]interface{} {
	v := reflect.ValueOf(issue).Elem()
	data := map[string]interface{}{}

	for _, f := range fields {
		switch f {
		case "comments":
			data[f] = issue.Comments.Nodes
		case "assignees":
			data[f] = issue.Assignees.Nodes
		case "labels":
			data[f] = issue.Labels.Nodes
		case "projectCards":
			data[f] = issue.ProjectCards.Nodes
		case "projectItems":
			// Reduce each ProjectsV2 item to its status and project title.
			items := make([]map[string]interface{}, 0, len(issue.ProjectItems.Nodes))
			for _, n := range issue.ProjectItems.Nodes {
				items = append(items, map[string]interface{}{
					"status": n.Status,
					"title": n.Project.Title,
				})
			}
			data[f] = items
		case "closedByPullRequestsReferences":
			// Expand each referencing PR into a nested map with its repo/owner.
			items := make([]map[string]interface{}, 0, len(issue.ClosedByPullRequestsReferences.Nodes))
			for _, n := range issue.ClosedByPullRequestsReferences.Nodes {
				items = append(items, map[string]interface{}{
					"id": n.ID,
					"number": n.Number,
					"url": n.URL,
					"repository": map[string]interface{}{
						"id": n.Repository.ID,
						"name": n.Repository.Name,
						"owner": map[string]interface{}{
							"id": n.Repository.Owner.ID,
							"login": n.Repository.Owner.Login,
						},
					},
				})
			}
			data[f] = items
		default:
			// Fall back to a case-insensitive struct field lookup.
			sf := fieldByName(v, f)
			data[f] = sf.Interface()
		}
	}

	return data
}
// ExportData serializes the requested fields of the pull request into a map
// suitable for JSON output. Connection-style fields are flattened to their
// nodes or hand-built maps; every other field is read off the struct via
// case-insensitive reflection (see fieldByName).
func (pr *PullRequest) ExportData(fields []string) map[string]interface{} {
	v := reflect.ValueOf(pr).Elem()
	data := map[string]interface{}{}

	for _, f := range fields {
		switch f {
		case "headRepository":
			data[f] = pr.HeadRepository
		case "statusCheckRollup":
			// Only the rollup of the first (last-commit) node is exported;
			// each context is shaped differently for CheckRun vs StatusContext.
			if n := pr.StatusCheckRollup.Nodes; len(n) > 0 {
				checks := make([]interface{}, 0, len(n[0].Commit.StatusCheckRollup.Contexts.Nodes))
				for _, c := range n[0].Commit.StatusCheckRollup.Contexts.Nodes {
					if c.TypeName == "CheckRun" {
						checks = append(checks, map[string]interface{}{
							"__typename": c.TypeName,
							"name": c.Name,
							"workflowName": c.CheckSuite.WorkflowRun.Workflow.Name,
							"status": c.Status,
							"conclusion": c.Conclusion,
							"startedAt": c.StartedAt,
							"completedAt": c.CompletedAt,
							"detailsUrl": c.DetailsURL,
						})
					} else {
						checks = append(checks, map[string]interface{}{
							"__typename": c.TypeName,
							"context": c.Context,
							"state": c.State,
							"targetUrl": c.TargetURL,
							// StatusContext has no startedAt; its createdAt is
							// exported under that key.
							"startedAt": c.CreatedAt,
						})
					}
				}
				data[f] = checks
			} else {
				data[f] = nil
			}
		case "commits":
			commits := make([]interface{}, 0, len(pr.Commits.Nodes))
			for _, c := range pr.Commits.Nodes {
				commit := c.Commit
				authors := make([]interface{}, 0, len(commit.Authors.Nodes))
				for _, author := range commit.Authors.Nodes {
					authors = append(authors, map[string]interface{}{
						"name": author.Name,
						"email": author.Email,
						"id": author.User.ID,
						"login": author.User.Login,
					})
				}
				commits = append(commits, map[string]interface{}{
					"oid": commit.OID,
					"messageHeadline": commit.MessageHeadline,
					"messageBody": commit.MessageBody,
					"committedDate": commit.CommittedDate,
					"authoredDate": commit.AuthoredDate,
					"authors": authors,
				})
			}
			data[f] = commits
		case "comments":
			data[f] = pr.Comments.Nodes
		case "assignees":
			data[f] = pr.Assignees.Nodes
		case "labels":
			data[f] = pr.Labels.Nodes
		case "projectCards":
			data[f] = pr.ProjectCards.Nodes
		case "projectItems":
			// Reduce each ProjectsV2 item to its status and project title.
			items := make([]map[string]interface{}, 0, len(pr.ProjectItems.Nodes))
			for _, n := range pr.ProjectItems.Nodes {
				items = append(items, map[string]interface{}{
					"status": n.Status,
					"title": n.Project.Title,
				})
			}
			data[f] = items
		case "reviews":
			data[f] = pr.Reviews.Nodes
		case "latestReviews":
			data[f] = pr.LatestReviews.Nodes
		case "files":
			data[f] = pr.Files.Nodes
		case "reviewRequests":
			// Requested reviewers are a union of User and Team; each variant
			// exports a different set of keys.
			requests := make([]interface{}, 0, len(pr.ReviewRequests.Nodes))
			for _, req := range pr.ReviewRequests.Nodes {
				r := req.RequestedReviewer
				switch r.TypeName {
				case "User":
					requests = append(requests, map[string]string{
						"__typename": r.TypeName,
						"login": r.Login,
					})
				case "Team":
					requests = append(requests, map[string]string{
						"__typename": r.TypeName,
						"name": r.Name,
						"slug": r.LoginOrSlug(),
					})
				}
			}
			data[f] = &requests
		case "closingIssuesReferences":
			// Expand each linked issue into a nested map with its repo/owner.
			items := make([]map[string]interface{}, 0, len(pr.ClosingIssuesReferences.Nodes))
			for _, n := range pr.ClosingIssuesReferences.Nodes {
				items = append(items, map[string]interface{}{
					"id": n.ID,
					"number": n.Number,
					"url": n.URL,
					"repository": map[string]interface{}{
						"id": n.Repository.ID,
						"name": n.Repository.Name,
						"owner": map[string]interface{}{
							"id": n.Repository.Owner.ID,
							"login": n.Repository.Owner.Login,
						},
					},
				})
			}
			data[f] = items
		default:
			// Fall back to a case-insensitive struct field lookup.
			sf := fieldByName(v, f)
			data[f] = sf.Interface()
		}
	}

	return data
}
func fieldByName(v reflect.Value, field string) reflect.Value {
return v.FieldByNameFunc(func(s string) bool {
return strings.EqualFold(field, s)
})
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/api/query_builder.go | api/query_builder.go | package api
import (
"fmt"
"strings"
"github.com/cli/cli/v2/pkg/set"
)
// squeeze maps newline and tab runes to -1 so strings.Map drops them; every
// other rune passes through unchanged.
func squeeze(r rune) rune {
	if r == '\n' || r == '\t' {
		return -1
	}
	return r
}

// shortenQuery strips newlines and tabs from a GraphQL query literal so the
// multi-line source formatting does not bloat the request payload.
func shortenQuery(q string) string {
	return strings.Map(squeeze, q)
}
// assignedActors selects an issue/PR's assigned actors (users and bots),
// capped at the first 10.
var assignedActors = shortenQuery(`
	assignedActors(first: 10) {
		nodes {
			...on User {
				id,
				login,
				name,
				__typename
			}
			...on Bot {
				id,
				login,
				__typename
			}
		},
		totalCount
	}
`)

// issueComments selects the first 100 comments with full detail and paging
// info.
var issueComments = shortenQuery(`
	comments(first: 100) {
		nodes {
			id,
			author{login,...on User{id,name}},
			authorAssociation,
			body,
			createdAt,
			includesCreatedEdit,
			isMinimized,
			minimizedReason,
			reactionGroups{content,users{totalCount}},
			url,
			viewerDidAuthor
		},
		pageInfo{hasNextPage,endCursor},
		totalCount
	}
`)

// issueCommentLast selects only the most recent comment (used for the
// "lastComment" pseudo-field).
var issueCommentLast = shortenQuery(`
	comments(last: 1) {
		nodes {
			author{login,...on User{id,name}},
			authorAssociation,
			body,
			createdAt,
			includesCreatedEdit,
			isMinimized,
			minimizedReason,
			reactionGroups{content,users{totalCount}}
		},
		totalCount
	}
`)

// issueClosedByPullRequestsReferences selects PRs that close the issue,
// including the referencing repo and owner.
var issueClosedByPullRequestsReferences = shortenQuery(`
	closedByPullRequestsReferences(first: 100) {
		nodes {
			id,
			number,
			url,
			repository {
				id,
				name,
				owner {
					id,
					login
				}
			}
		}
		pageInfo{hasNextPage,endCursor}
	}
`)

// prReviewRequests selects requested reviewers, which are a union of User
// and Team.
var prReviewRequests = shortenQuery(`
	reviewRequests(first: 100) {
		nodes {
			requestedReviewer {
				__typename,
				...on User{login},
				...on Team{
					organization{login}
					name,
					slug
				}
			}
		}
	}
`)

// prReviews selects up to 100 reviews with paging info.
var prReviews = shortenQuery(`
	reviews(first: 100) {
		nodes {
			id,
			author{login},
			authorAssociation,
			submittedAt,
			body,
			state,
			commit{oid},
			reactionGroups{content,users{totalCount}}
		}
		pageInfo{hasNextPage,endCursor}
		totalCount
	}
`)

// prLatestReviews selects the most recent review per reviewer.
var prLatestReviews = shortenQuery(`
	latestReviews(first: 100) {
		nodes {
			author{login},
			authorAssociation,
			submittedAt,
			body,
			state
		}
	}
`)

// prFiles selects per-file additions/deletions for the PR diff.
var prFiles = shortenQuery(`
	files(first: 100) {
		nodes {
			additions,
			deletions,
			path
		}
	}
`)

// prCommits selects the PR's commits with authorship details.
var prCommits = shortenQuery(`
	commits(first: 100) {
		nodes {
			commit {
				authors(first:100) {
					nodes {
						name,
						email,
						user{id,login}
					}
				},
				messageHeadline,
				messageBody,
				oid,
				committedDate,
				authoredDate
			}
		}
	}
`)

// prClosingIssuesReferences selects issues that the PR closes, including the
// referencing repo and owner.
var prClosingIssuesReferences = shortenQuery(`
	closingIssuesReferences(first: 100) {
		nodes {
			id,
			number,
			url,
			repository {
				id,
				name,
				owner {
					id,
					login
				}
			}
		}
		pageInfo{hasNextPage,endCursor}
	}
`)

// autoMergeRequest selects the PR's pending auto-merge configuration, if any.
var autoMergeRequest = shortenQuery(`
	autoMergeRequest {
		authorEmail,
		commitBody,
		commitHeadline,
		mergeMethod,
		enabledAt,
		enabledBy{login,...on User{id,name}}
	}
`)
// StatusCheckRollupGraphQLWithCountByState returns a GraphQL selection that
// fetches aggregate check-run and status-context counts, broken down per
// state, for the PR's last commit.
func StatusCheckRollupGraphQLWithCountByState() string {
	return shortenQuery(`
	statusCheckRollup: commits(last: 1) {
		nodes {
			commit {
				statusCheckRollup {
					contexts {
						checkRunCount,
						checkRunCountsByState {
							state,
							count
						},
						statusContextCount,
						statusContextCountsByState {
							state,
							count
						}
					}
				}
			}
		}
	}`)
}
// StatusCheckRollupGraphQLWithoutCountByState returns a GraphQL selection
// that fetches the individual status contexts and check runs (first 100) for
// the PR's last commit.
//
// after, when non-empty, is spliced verbatim into the query as a pagination
// cursor — it must already be a valid GraphQL string literal (quoted).
func StatusCheckRollupGraphQLWithoutCountByState(after string) string {
	var afterClause string
	if after != "" {
		afterClause = ",after:" + after
	}
	return fmt.Sprintf(shortenQuery(`
	statusCheckRollup: commits(last: 1) {
		nodes {
			commit {
				statusCheckRollup {
					contexts(first:100%s) {
						nodes {
							__typename
							...on StatusContext {
								context,
								state,
								targetUrl,
								createdAt,
								description
							},
							...on CheckRun {
								name,
								checkSuite{workflowRun{workflow{name}}},
								status,
								conclusion,
								startedAt,
								completedAt,
								detailsUrl
							}
						},
						pageInfo{hasNextPage,endCursor}
					}
				}
			}
		}
	}`), afterClause)
}
// RequiredStatusCheckRollupGraphQL is like
// StatusCheckRollupGraphQLWithoutCountByState but additionally selects each
// context's isRequired flag for the given pull request node ID, and
// optionally the workflow run event.
//
// prID and after are spliced verbatim into the query; both must already be
// valid GraphQL literals. includeEvent toggles selection of the workflowRun
// "event" field.
func RequiredStatusCheckRollupGraphQL(prID, after string, includeEvent bool) string {
	var afterClause string
	if after != "" {
		afterClause = ",after:" + after
	}
	eventField := "event,"
	if !includeEvent {
		eventField = ""
	}
	return fmt.Sprintf(shortenQuery(`
	statusCheckRollup: commits(last: 1) {
		nodes {
			commit {
				statusCheckRollup {
					contexts(first:100%[1]s) {
						nodes {
							__typename
							...on StatusContext {
								context,
								state,
								targetUrl,
								createdAt,
								description,
								isRequired(pullRequestId: %[2]s)
							},
							...on CheckRun {
								name,
								checkSuite{workflowRun{%[3]sworkflow{name}}},
								status,
								conclusion,
								startedAt,
								completedAt,
								detailsUrl,
								isRequired(pullRequestId: %[2]s)
							}
						},
						pageInfo{hasNextPage,endCursor}
					}
				}
			}
		}
	}`), afterClause, prID, eventField)
}
// sharedIssuePRFields are the exportable field names valid for both issues
// and pull requests.
var sharedIssuePRFields = []string{
	"assignees",
	"author",
	"body",
	"closed",
	"comments",
	"createdAt",
	"closedAt",
	"id",
	"labels",
	"milestone",
	"number",
	"projectCards",
	"projectItems",
	"reactionGroups",
	"state",
	"title",
	"updatedAt",
	"url",
}
// Some fields are only valid in the context of issues.
// They need to be enumerated separately in order to be filtered
// from existing code that expects to be able to pass Issue fields
// to PR queries, e.g. the PullRequestGraphql function.
var issueOnlyFields = []string{
	"isPinned",
	"stateReason",
	"closedByPullRequestsReferences",
}

// IssueFields is the full set of exportable issue field names.
//
// NOTE(review): appending to sharedIssuePRFields is safe only because a
// slice literal's cap equals its len, so append allocates a fresh backing
// array; confirm this invariant if sharedIssuePRFields is ever built
// differently.
var IssueFields = append(sharedIssuePRFields, issueOnlyFields...)
// PullRequestFields is the full set of exportable pull request field names.
//
// NOTE(review): appending to sharedIssuePRFields is safe only because a
// slice literal's cap equals its len, so append allocates a fresh backing
// array; confirm this invariant if sharedIssuePRFields is ever built
// differently.
var PullRequestFields = append(sharedIssuePRFields,
	"additions",
	"autoMergeRequest",
	"baseRefName",
	"baseRefOid",
	"changedFiles",
	"closingIssuesReferences",
	"commits",
	"deletions",
	"files",
	"fullDatabaseId",
	"headRefName",
	"headRefOid",
	"headRepository",
	"headRepositoryOwner",
	"isCrossRepository",
	"isDraft",
	"latestReviews",
	"maintainerCanModify",
	"mergeable",
	"mergeCommit",
	"mergedAt",
	"mergedBy",
	"mergeStateStatus",
	"potentialMergeCommit",
	"reviewDecision",
	"reviewRequests",
	"reviews",
	"statusCheckRollup",
)
// IssueGraphQL constructs a GraphQL query fragment for a set of issue fields.
//
// Compound fields expand to sub-selections; entries marked "pseudo-field"
// are cli-only names that map onto different GraphQL selections. Any field
// without special handling is passed through verbatim.
func IssueGraphQL(fields []string) string {
	var q []string
	for _, field := range fields {
		switch field {
		case "author":
			q = append(q, `author{login,...on User{id,name}}`)
		case "mergedBy":
			q = append(q, `mergedBy{login,...on User{id,name}}`)
		case "headRepositoryOwner":
			q = append(q, `headRepositoryOwner{id,login,...on User{name}}`)
		case "headRepository":
			q = append(q, `headRepository{id,name}`)
		case "assignees":
			q = append(q, `assignees(first:100){nodes{id,login,name},totalCount}`)
		case "assignedActors":
			q = append(q, assignedActors)
		case "labels":
			q = append(q, `labels(first:100){nodes{id,name,description,color},totalCount}`)
		case "projectCards":
			q = append(q, `projectCards(first:100){nodes{project{name}column{name}},totalCount}`)
		case "projectItems":
			q = append(q, `projectItems(first:100){nodes{id, project{id,title}, status:fieldValueByName(name: "Status") { ... on ProjectV2ItemFieldSingleSelectValue{optionId,name}}},totalCount}`)
		case "milestone":
			q = append(q, `milestone{number,title,description,dueOn}`)
		case "reactionGroups":
			q = append(q, `reactionGroups{content,users{totalCount}}`)
		case "mergeCommit":
			q = append(q, `mergeCommit{oid}`)
		case "potentialMergeCommit":
			q = append(q, `potentialMergeCommit{oid}`)
		case "autoMergeRequest":
			q = append(q, autoMergeRequest)
		case "comments":
			q = append(q, issueComments)
		case "lastComment": // pseudo-field
			q = append(q, issueCommentLast)
		case "reviewRequests":
			q = append(q, prReviewRequests)
		case "reviews":
			q = append(q, prReviews)
		case "latestReviews":
			q = append(q, prLatestReviews)
		case "files":
			q = append(q, prFiles)
		case "commits":
			q = append(q, prCommits)
		case "lastCommit": // pseudo-field
			q = append(q, `commits(last:1){nodes{commit{oid}}}`)
		case "commitsCount": // pseudo-field
			q = append(q, `commits{totalCount}`)
		case "requiresStrictStatusChecks": // pseudo-field
			q = append(q, `baseRef{branchProtectionRule{requiresStrictStatusChecks}}`)
		case "statusCheckRollup":
			q = append(q, StatusCheckRollupGraphQLWithoutCountByState(""))
		case "statusCheckRollupWithCountByState": // pseudo-field
			q = append(q, StatusCheckRollupGraphQLWithCountByState())
		case "closingIssuesReferences":
			q = append(q, prClosingIssuesReferences)
		case "closedByPullRequestsReferences":
			q = append(q, issueClosedByPullRequestsReferences)
		default:
			q = append(q, field)
		}
	}
	return strings.Join(q, ",")
}
// PullRequestGraphQL constructs a GraphQL query fragment for a set of pull request fields.
// It will try to sanitize the fields to just those available on pull request.
//
// NOTE(review): the set round-trip also deduplicates fields; the output
// ordering depends on the set implementation — confirm it preserves
// insertion order before relying on fragment order.
func PullRequestGraphQL(fields []string) string {
	s := set.NewStringSet()
	s.AddValues(fields)
	s.RemoveValues(issueOnlyFields)
	return IssueGraphQL(s.ToSlice())
}
// RepositoryFields lists the repository field names that can be exported via
// RepositoryGraphQL.
var RepositoryFields = []string{
	"id",
	"name",
	"nameWithOwner",
	"owner",
	"parent",
	"templateRepository",
	"description",
	"homepageUrl",
	"openGraphImageUrl",
	"usesCustomOpenGraphImage",
	"url",
	"sshUrl",
	"mirrorUrl",
	"securityPolicyUrl",
	"createdAt",
	"pushedAt",
	"updatedAt",
	"archivedAt",
	"isBlankIssuesEnabled",
	"isSecurityPolicyEnabled",
	"hasIssuesEnabled",
	"hasProjectsEnabled",
	"hasWikiEnabled",
	"hasDiscussionsEnabled",
	"mergeCommitAllowed",
	"squashMergeAllowed",
	"rebaseMergeAllowed",
	"forkCount",
	"stargazerCount",
	"watchers",
	"issues",
	"pullRequests",
	"codeOfConduct",
	"contactLinks",
	"defaultBranchRef",
	"deleteBranchOnMerge",
	"diskUsage",
	"fundingLinks",
	"isArchived",
	"isEmpty",
	"isFork",
	"isInOrganization",
	"isMirror",
	"isPrivate",
	"visibility",
	"isTemplate",
	"isUserConfigurationRepository",
	"licenseInfo",
	"viewerCanAdminister",
	"viewerDefaultCommitEmail",
	"viewerDefaultMergeMethod",
	"viewerHasStarred",
	"viewerPermission",
	"viewerPossibleCommitEmails",
	"viewerSubscription",
	"repositoryTopics",
	"primaryLanguage",
	"languages",
	"issueTemplates",
	"pullRequestTemplates",
	"labels",
	"milestones",
	"latestRelease",
	"assignableUsers",
	"mentionableUsers",
	"projects",
	"projectsV2",

	// "branchProtectionRules", // too complex to expose
	// "collaborators", // does it make sense to expose without affiliation filter?
}
// RepositoryGraphQL constructs a GraphQL query fragment for a set of
// repository fields. Compound fields expand into sub-selections; any field
// without special handling is passed through verbatim.
func RepositoryGraphQL(fields []string) string {
	var q []string
	for _, field := range fields {
		switch field {
		case "codeOfConduct":
			q = append(q, "codeOfConduct{key,name,url}")
		case "contactLinks":
			q = append(q, "contactLinks{about,name,url}")
		case "fundingLinks":
			q = append(q, "fundingLinks{platform,url}")
		case "licenseInfo":
			q = append(q, "licenseInfo{key,name,nickname}")
		case "owner":
			q = append(q, "owner{id,login}")
		case "parent":
			q = append(q, "parent{id,name,owner{id,login}}")
		case "templateRepository":
			q = append(q, "templateRepository{id,name,owner{id,login}}")
		case "repositoryTopics":
			q = append(q, "repositoryTopics(first:100){nodes{topic{name}}}")
		case "issueTemplates":
			q = append(q, "issueTemplates{name,title,body,about}")
		case "pullRequestTemplates":
			q = append(q, "pullRequestTemplates{body,filename}")
		case "labels":
			q = append(q, "labels(first:100){nodes{id,color,name,description}}")
		case "languages":
			q = append(q, "languages(first:100){edges{size,node{name}}}")
		case "primaryLanguage":
			q = append(q, "primaryLanguage{name}")
		case "latestRelease":
			q = append(q, "latestRelease{publishedAt,tagName,name,url}")
		case "milestones":
			q = append(q, "milestones(first:100,states:OPEN){nodes{number,title,description,dueOn}}")
		case "assignableUsers":
			q = append(q, "assignableUsers(first:100){nodes{id,login,name}}")
		case "mentionableUsers":
			q = append(q, "mentionableUsers(first:100){nodes{id,login,name}}")
		case "projects":
			q = append(q, "projects(first:100,states:OPEN){nodes{id,name,number,body,resourcePath}}")
		case "projectsV2":
			q = append(q, "projectsV2(first:100,query:\"is:open\"){nodes{id,number,title,resourcePath,closed,url}}")
		case "watchers":
			q = append(q, "watchers{totalCount}")
		case "issues":
			q = append(q, "issues(states:OPEN){totalCount}")
		case "pullRequests":
			q = append(q, "pullRequests(states:OPEN){totalCount}")
		case "defaultBranchRef":
			q = append(q, "defaultBranchRef{name}")
		default:
			q = append(q, field)
		}
	}
	return strings.Join(q, ",")
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/api/reaction_groups_test.go | api/reaction_groups_test.go | package api
import (
"testing"
"github.com/stretchr/testify/assert"
)
// Test_String verifies ReactionGroup's Emoji and Count mappings for every
// supported reaction content value, plus the empty and unknown cases.
//
// NOTE(review): despite the name, this exercises Emoji() and Count(), not a
// String() method.
func Test_String(t *testing.T) {
	tests := map[string]struct {
		rg ReactionGroup
		emoji string
		count int
	}{
		"empty reaction group": {
			rg: ReactionGroup{},
			emoji: "",
			count: 0,
		},
		"unknown reaction group": {
			rg: ReactionGroup{
				Content: "UNKNOWN",
				Users: ReactionGroupUsers{TotalCount: 1},
			},
			emoji: "",
			count: 1,
		},
		"thumbs up reaction group": {
			rg: ReactionGroup{
				Content: "THUMBS_UP",
				Users: ReactionGroupUsers{TotalCount: 2},
			},
			emoji: "\U0001f44d",
			count: 2,
		},
		"thumbs down reaction group": {
			rg: ReactionGroup{
				Content: "THUMBS_DOWN",
				Users: ReactionGroupUsers{TotalCount: 3},
			},
			emoji: "\U0001f44e",
			count: 3,
		},
		"laugh reaction group": {
			rg: ReactionGroup{
				Content: "LAUGH",
				Users: ReactionGroupUsers{TotalCount: 4},
			},
			emoji: "\U0001f604",
			count: 4,
		},
		"hooray reaction group": {
			rg: ReactionGroup{
				Content: "HOORAY",
				Users: ReactionGroupUsers{TotalCount: 5},
			},
			emoji: "\U0001f389",
			count: 5,
		},
		"confused reaction group": {
			rg: ReactionGroup{
				Content: "CONFUSED",
				Users: ReactionGroupUsers{TotalCount: 6},
			},
			emoji: "\U0001f615",
			count: 6,
		},
		"heart reaction group": {
			rg: ReactionGroup{
				Content: "HEART",
				Users: ReactionGroupUsers{TotalCount: 7},
			},
			emoji: "\u2764\ufe0f",
			count: 7,
		},
		"rocket reaction group": {
			rg: ReactionGroup{
				Content: "ROCKET",
				Users: ReactionGroupUsers{TotalCount: 8},
			},
			emoji: "\U0001f680",
			count: 8,
		},
		"eyes reaction group": {
			rg: ReactionGroup{
				Content: "EYES",
				Users: ReactionGroupUsers{TotalCount: 9},
			},
			emoji: "\U0001f440",
			count: 9,
		},
	}

	for name, tt := range tests {
		t.Run(name, func(t *testing.T) {
			assert.Equal(t, tt.emoji, tt.rg.Emoji())
			assert.Equal(t, tt.count, tt.rg.Count())
		})
	}
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/api/queries_repo_test.go | api/queries_repo_test.go | package api
import (
"fmt"
"io"
"net/http"
"slices"
"strings"
"testing"
"github.com/MakeNowJust/heredoc"
"github.com/cli/cli/v2/internal/gh"
"github.com/cli/cli/v2/internal/ghrepo"
"github.com/cli/cli/v2/pkg/httpmock"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// TestGitHubRepo_notFound verifies that looking up a repository the API
// cannot resolve surfaces the GraphQL error message and a nil repo.
func TestGitHubRepo_notFound(t *testing.T) {
	reg := &httpmock.Registry{}
	defer reg.Verify(t)
	reg.Register(
		httpmock.GraphQL(`query RepositoryInfo\b`),
		httpmock.StringResponse(`{ "data": { "repository": null } }`))

	client := newTestClient(reg)
	repo, err := GitHubRepo(client, ghrepo.New("OWNER", "REPO"))
	if err == nil {
		t.Fatal("GitHubRepo did not return an error")
	}

	wants := "GraphQL: Could not resolve to a Repository with the name 'OWNER/REPO'."
	if got := err.Error(); got != wants {
		t.Errorf("GitHubRepo error: want %q, got %q", wants, got)
	}
	if repo != nil {
		t.Errorf("GitHubRepo: expected nil repo, got %v", repo)
	}
}
// Test_RepoMetadata exercises RepoMetadata with every metadata category
// enabled, mocking each GraphQL query it issues, then verifies the
// name-to-ID resolution helpers on the returned RepoMetadataResult.
func Test_RepoMetadata(t *testing.T) {
	http := &httpmock.Registry{}
	client := newTestClient(http)

	repo, _ := ghrepo.FromFullName("OWNER/REPO")
	input := RepoMetadataInput{
		Assignees: true,
		Reviewers: true,
		TeamReviewers: true,
		Labels: true,
		ProjectsV1: true,
		ProjectsV2: true,
		Milestones: true,
	}

	// One stub per query RepoMetadata fires concurrently.
	http.Register(
		httpmock.GraphQL(`query RepositoryAssignableUsers\b`),
		httpmock.StringResponse(`
		{ "data": { "repository": { "assignableUsers": {
			"nodes": [
				{ "login": "hubot", "id": "HUBOTID" },
				{ "login": "MonaLisa", "id": "MONAID" }
			],
			"pageInfo": { "hasNextPage": false }
		} } } }
		`))
	http.Register(
		httpmock.GraphQL(`query RepositoryLabelList\b`),
		httpmock.StringResponse(`
		{ "data": { "repository": { "labels": {
			"nodes": [
				{ "name": "feature", "id": "FEATUREID" },
				{ "name": "TODO", "id": "TODOID" },
				{ "name": "bug", "id": "BUGID" }
			],
			"pageInfo": { "hasNextPage": false }
		} } } }
		`))
	http.Register(
		httpmock.GraphQL(`query RepositoryMilestoneList\b`),
		httpmock.StringResponse(`
		{ "data": { "repository": { "milestones": {
			"nodes": [
				{ "title": "GA", "id": "GAID" },
				{ "title": "Big One.oh", "id": "BIGONEID" }
			],
			"pageInfo": { "hasNextPage": false }
		} } } }
		`))
	http.Register(
		httpmock.GraphQL(`query RepositoryProjectList\b`),
		httpmock.StringResponse(`
		{ "data": { "repository": { "projects": {
			"nodes": [
				{ "name": "Cleanup", "id": "CLEANUPID" },
				{ "name": "Roadmap", "id": "ROADMAPID" }
			],
			"pageInfo": { "hasNextPage": false }
		} } } }
		`))
	http.Register(
		httpmock.GraphQL(`query RepositoryProjectV2List\b`),
		httpmock.StringResponse(`
		{ "data": { "repository": { "projectsV2": {
			"nodes": [
				{ "title": "CleanupV2", "id": "CLEANUPV2ID" },
				{ "title": "RoadmapV2", "id": "ROADMAPV2ID" }
			],
			"pageInfo": { "hasNextPage": false }
		} } } }
		`))
	http.Register(
		httpmock.GraphQL(`query OrganizationProjectList\b`),
		httpmock.StringResponse(`
		{ "data": { "organization": { "projects": {
			"nodes": [
				{ "name": "Triage", "id": "TRIAGEID" }
			],
			"pageInfo": { "hasNextPage": false }
		} } } }
		`))
	http.Register(
		httpmock.GraphQL(`query OrganizationProjectV2List\b`),
		httpmock.StringResponse(`
		{ "data": { "organization": { "projectsV2": {
			"nodes": [
				{ "title": "TriageV2", "id": "TRIAGEV2ID" }
			],
			"pageInfo": { "hasNextPage": false }
		} } } }
		`))
	http.Register(
		httpmock.GraphQL(`query UserProjectV2List\b`),
		httpmock.StringResponse(`
		{ "data": { "viewer": { "projectsV2": {
			"nodes": [
				{ "title": "MonalisaV2", "id": "MONALISAV2ID" }
			],
			"pageInfo": { "hasNextPage": false }
		} } } }
		`))
	http.Register(
		httpmock.GraphQL(`query OrganizationTeamList\b`),
		httpmock.StringResponse(`
		{ "data": { "organization": { "teams": {
			"nodes": [
				{ "slug": "owners", "id": "OWNERSID" },
				{ "slug": "Core", "id": "COREID" }
			],
			"pageInfo": { "hasNextPage": false }
		} } } }
		`))
	http.Register(
		httpmock.GraphQL(`query UserCurrent\b`),
		httpmock.StringResponse(`
		{ "data": { "viewer": { "login": "monalisa" } } }
		`))

	result, err := RepoMetadata(client, repo, input)
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}

	// Resolution helpers are expected to match case-insensitively.
	expectedMemberIDs := []string{"MONAID", "HUBOTID"}
	memberIDs, err := result.MembersToIDs([]string{"monalisa", "hubot"})
	if err != nil {
		t.Errorf("error resolving members: %v", err)
	}
	if !slices.Equal(memberIDs, expectedMemberIDs) {
		t.Errorf("expected members %v, got %v", expectedMemberIDs, memberIDs)
	}

	expectedTeamIDs := []string{"COREID", "OWNERSID"}
	teamIDs, err := result.TeamsToIDs([]string{"OWNER/core", "/owners"})
	if err != nil {
		t.Errorf("error resolving teams: %v", err)
	}
	if !slices.Equal(teamIDs, expectedTeamIDs) {
		t.Errorf("expected teams %v, got %v", expectedTeamIDs, teamIDs)
	}

	expectedLabelIDs := []string{"BUGID", "TODOID"}
	labelIDs, err := result.LabelsToIDs([]string{"bug", "todo"})
	if err != nil {
		t.Errorf("error resolving labels: %v", err)
	}
	if !slices.Equal(labelIDs, expectedLabelIDs) {
		t.Errorf("expected labels %v, got %v", expectedLabelIDs, labelIDs)
	}

	// Titles resolve across repo/org v1 projects and repo/org/user v2 projects.
	expectedProjectIDs := []string{"TRIAGEID", "ROADMAPID"}
	expectedProjectV2IDs := []string{"TRIAGEV2ID", "ROADMAPV2ID", "MONALISAV2ID"}
	projectIDs, projectV2IDs, err := result.ProjectsTitlesToIDs([]string{"triage", "roadmap", "triagev2", "roadmapv2", "monalisav2"})
	if err != nil {
		t.Errorf("error resolving projects: %v", err)
	}
	if !slices.Equal(projectIDs, expectedProjectIDs) {
		t.Errorf("expected projects %v, got %v", expectedProjectIDs, projectIDs)
	}
	if !slices.Equal(projectV2IDs, expectedProjectV2IDs) {
		t.Errorf("expected projectsV2 %v, got %v", expectedProjectV2IDs, projectV2IDs)
	}

	expectedMilestoneID := "BIGONEID"
	milestoneID, err := result.MilestoneToID("big one.oh")
	if err != nil {
		t.Errorf("error resolving milestone: %v", err)
	}
	if milestoneID != expectedMilestoneID {
		t.Errorf("expected milestone %v, got %v", expectedMilestoneID, milestoneID)
	}

	expectedCurrentLogin := "monalisa"
	if result.CurrentLogin != expectedCurrentLogin {
		t.Errorf("expected current user %v, got %v", expectedCurrentLogin, result.CurrentLogin)
	}
}
// Test that RepoMetadata only fetches teams if the input specifies it
func Test_RepoMetadata_TeamsAreConditionallyFetched(t *testing.T) {
	http := &httpmock.Registry{}
	client := newTestClient(http)
	repo, _ := ghrepo.FromFullName("OWNER/REPO")
	// Reviewers are requested, but team reviewers are explicitly opted out of.
	input := RepoMetadataInput{
		Reviewers:     true,
		TeamReviewers: false, // Do not fetch teams
	}
	http.Register(
		httpmock.GraphQL(`query RepositoryAssignableUsers\b`),
		httpmock.StringResponse(`
{ "data": { "repository": { "assignableUsers": {
"nodes": [
{ "login": "hubot", "id": "HUBOTID" },
{ "login": "MonaLisa", "id": "MONAID" }
],
"pageInfo": { "hasNextPage": false }
} } } }
`))
	http.Register(
		httpmock.GraphQL(`query UserCurrent\b`),
		httpmock.StringResponse(`
{ "data": { "viewer": { "login": "monalisa" } } }
`))
	// Exclude fails the test if the team-list query is ever issued.
	http.Exclude(
		t,
		httpmock.GraphQL(`query OrganizationTeamList\b`),
	)
	_, err := RepoMetadata(client, repo, input)
	require.NoError(t, err)
}
// Test_ProjectNamesToPaths exercises ProjectTitlesToPaths, which resolves
// project titles (classic Projects and ProjectsV2, across repo, org, and
// viewer scopes) into owner-scoped paths derived from each resourcePath.
// Classic (V1) project queries are only issued when V1 is supported.
func Test_ProjectNamesToPaths(t *testing.T) {
	t.Run("when projectsV1 is supported, requests them", func(t *testing.T) {
		http := &httpmock.Registry{}
		client := newTestClient(http)
		repo, _ := ghrepo.FromFullName("OWNER/REPO")
		http.Register(
			httpmock.GraphQL(`query RepositoryProjectList\b`),
			httpmock.StringResponse(`
{ "data": { "repository": { "projects": {
"nodes": [
{ "name": "Cleanup", "id": "CLEANUPID", "resourcePath": "/OWNER/REPO/projects/1" },
{ "name": "Roadmap", "id": "ROADMAPID", "resourcePath": "/OWNER/REPO/projects/2" }
],
"pageInfo": { "hasNextPage": false }
} } } }
`))
		http.Register(
			httpmock.GraphQL(`query OrganizationProjectList\b`),
			httpmock.StringResponse(`
{ "data": { "organization": { "projects": {
"nodes": [
{ "name": "Triage", "id": "TRIAGEID", "resourcePath": "/orgs/ORG/projects/1" }
],
"pageInfo": { "hasNextPage": false }
} } } }
`))
		http.Register(
			httpmock.GraphQL(`query RepositoryProjectV2List\b`),
			httpmock.StringResponse(`
{ "data": { "repository": { "projectsV2": {
"nodes": [
{ "title": "CleanupV2", "id": "CLEANUPV2ID", "resourcePath": "/OWNER/REPO/projects/3" },
{ "title": "RoadmapV2", "id": "ROADMAPV2ID", "resourcePath": "/OWNER/REPO/projects/4" }
],
"pageInfo": { "hasNextPage": false }
} } } }
`))
		http.Register(
			httpmock.GraphQL(`query OrganizationProjectV2List\b`),
			httpmock.StringResponse(`
{ "data": { "organization": { "projectsV2": {
"nodes": [
{ "title": "TriageV2", "id": "TRIAGEV2ID", "resourcePath": "/orgs/ORG/projects/2" }
],
"pageInfo": { "hasNextPage": false }
} } } }
`))
		http.Register(
			httpmock.GraphQL(`query UserProjectV2List\b`),
			httpmock.StringResponse(`
{ "data": { "viewer": { "projectsV2": {
"nodes": [
{ "title": "MonalisaV2", "id": "MONALISAV2ID", "resourcePath": "/users/MONALISA/projects/5" }
],
"pageInfo": { "hasNextPage": false }
} } } }
`))
		projectPaths, err := ProjectTitlesToPaths(client, repo, []string{"Triage", "Roadmap", "TriageV2", "RoadmapV2", "MonalisaV2"}, gh.ProjectsV1Supported)
		if err != nil {
			t.Fatalf("unexpected error: %v", err)
		}
		// Paths come from each matched project's resourcePath, e.g.
		// "/orgs/ORG/projects/1" -> "ORG/1".
		expectedProjectPaths := []string{"ORG/1", "OWNER/REPO/2", "ORG/2", "OWNER/REPO/4", "MONALISA/5"}
		if !slices.Equal(projectPaths, expectedProjectPaths) {
			t.Errorf("expected projects paths %v, got %v", expectedProjectPaths, projectPaths)
		}
	})
	t.Run("when projectsV1 is not supported, does not request them", func(t *testing.T) {
		http := &httpmock.Registry{}
		client := newTestClient(http)
		repo, _ := ghrepo.FromFullName("OWNER/REPO")
		// Exclude fails the test if either classic-project query is issued.
		http.Exclude(
			t,
			httpmock.GraphQL(`query RepositoryProjectList\b`),
		)
		http.Exclude(
			t,
			httpmock.GraphQL(`query OrganizationProjectList\b`),
		)
		http.Register(
			httpmock.GraphQL(`query RepositoryProjectV2List\b`),
			httpmock.StringResponse(`
{ "data": { "repository": { "projectsV2": {
"nodes": [
{ "title": "CleanupV2", "id": "CLEANUPV2ID", "resourcePath": "/OWNER/REPO/projects/3" },
{ "title": "RoadmapV2", "id": "ROADMAPV2ID", "resourcePath": "/OWNER/REPO/projects/4" }
],
"pageInfo": { "hasNextPage": false }
} } } }
`))
		http.Register(
			httpmock.GraphQL(`query OrganizationProjectV2List\b`),
			httpmock.StringResponse(`
{ "data": { "organization": { "projectsV2": {
"nodes": [
{ "title": "TriageV2", "id": "TRIAGEV2ID", "resourcePath": "/orgs/ORG/projects/2" }
],
"pageInfo": { "hasNextPage": false }
} } } }
`))
		http.Register(
			httpmock.GraphQL(`query UserProjectV2List\b`),
			httpmock.StringResponse(`
{ "data": { "viewer": { "projectsV2": {
"nodes": [
{ "title": "MonalisaV2", "id": "MONALISAV2ID", "resourcePath": "/users/MONALISA/projects/5" }
],
"pageInfo": { "hasNextPage": false }
} } } }
`))
		projectPaths, err := ProjectTitlesToPaths(client, repo, []string{"TriageV2", "RoadmapV2", "MonalisaV2"}, gh.ProjectsV1Unsupported)
		if err != nil {
			t.Fatalf("unexpected error: %v", err)
		}
		expectedProjectPaths := []string{"ORG/2", "OWNER/REPO/4", "MONALISA/5"}
		if !slices.Equal(projectPaths, expectedProjectPaths) {
			t.Errorf("expected projects paths %v, got %v", expectedProjectPaths, projectPaths)
		}
	})
	t.Run("when a project is not found, returns an error", func(t *testing.T) {
		http := &httpmock.Registry{}
		client := newTestClient(http)
		repo, _ := ghrepo.FromFullName("OWNER/REPO")
		// No projects found
		http.Register(
			httpmock.GraphQL(`query RepositoryProjectV2List\b`),
			httpmock.StringResponse(`
{ "data": { "repository": { "projectsV2": {
"nodes": [],
"pageInfo": { "hasNextPage": false }
} } } }
`))
		http.Register(
			httpmock.GraphQL(`query OrganizationProjectV2List\b`),
			httpmock.StringResponse(`
{ "data": { "organization": { "projectsV2": {
"nodes": [],
"pageInfo": { "hasNextPage": false }
} } } }
`))
		http.Register(
			httpmock.GraphQL(`query UserProjectV2List\b`),
			httpmock.StringResponse(`
{ "data": { "viewer": { "projectsV2": {
"nodes": [],
"pageInfo": { "hasNextPage": false }
} } } }
`))
		_, err := ProjectTitlesToPaths(client, repo, []string{"TriageV2"}, gh.ProjectsV1Unsupported)
		require.Equal(t, err, fmt.Errorf("'TriageV2' not found"))
	})
}
// TestMembersToIDs covers RepoMetadataResult.MembersToIDs, which resolves
// member names against both AssignableUsers and AssignableActors, including
// the "login (Display Name)" form and de-duplication of overlapping entries.
func TestMembersToIDs(t *testing.T) {
	t.Parallel()
	t.Run("finds ids in assignable users", func(t *testing.T) {
		t.Parallel()
		repoMetadataResult := RepoMetadataResult{
			AssignableUsers: []AssignableUser{
				NewAssignableUser("MONAID", "monalisa", ""),
				NewAssignableUser("MONAID2", "monalisa2", ""),
			},
			AssignableActors: []AssignableActor{
				NewAssignableBot("HUBOTID", "hubot"),
			},
		}
		ids, err := repoMetadataResult.MembersToIDs([]string{"monalisa"})
		require.NoError(t, err)
		require.Equal(t, []string{"MONAID"}, ids)
	})
	t.Run("finds ids by assignable actor logins", func(t *testing.T) {
		t.Parallel()
		repoMetadataResult := RepoMetadataResult{
			AssignableActors: []AssignableActor{
				NewAssignableBot("HUBOTID", "hubot"),
				NewAssignableUser("MONAID", "monalisa", ""),
			},
		}
		ids, err := repoMetadataResult.MembersToIDs([]string{"monalisa"})
		require.NoError(t, err)
		require.Equal(t, []string{"MONAID"}, ids)
	})
	t.Run("finds ids by assignable actor display names", func(t *testing.T) {
		t.Parallel()
		repoMetadataResult := RepoMetadataResult{
			AssignableActors: []AssignableActor{
				NewAssignableUser("MONAID", "monalisa", "mona"),
			},
		}
		// The "login (name)" form should match an actor with a display name.
		ids, err := repoMetadataResult.MembersToIDs([]string{"monalisa (mona)"})
		require.NoError(t, err)
		require.Equal(t, []string{"MONAID"}, ids)
	})
	t.Run("when a name appears in both assignable users and actors, the id is only returned once", func(t *testing.T) {
		t.Parallel()
		repoMetadataResult := RepoMetadataResult{
			AssignableUsers: []AssignableUser{
				NewAssignableUser("MONAID", "monalisa", ""),
			},
			AssignableActors: []AssignableActor{
				NewAssignableUser("MONAID", "monalisa", ""),
			},
		}
		ids, err := repoMetadataResult.MembersToIDs([]string{"monalisa"})
		require.NoError(t, err)
		require.Equal(t, []string{"MONAID"}, ids)
	})
	t.Run("when id is not found, returns an error", func(t *testing.T) {
		t.Parallel()
		repoMetadataResult := RepoMetadataResult{}
		_, err := repoMetadataResult.MembersToIDs([]string{"monalisa"})
		require.Error(t, err)
	})
}
// Test_RepoMilestones verifies that RepoMilestones maps the "open", "closed",
// and "all" filter states to the correct GraphQL `states` argument, and that
// an unrecognized state yields an error.
func Test_RepoMilestones(t *testing.T) {
	tests := []struct {
		state   string
		want    string
		wantErr bool
	}{
		{
			state: "open",
			want:  `"states":["OPEN"]`,
		},
		{
			state: "closed",
			want:  `"states":["CLOSED"]`,
		},
		{
			state: "all",
			want:  `"states":["OPEN","CLOSED"]`,
		},
		{
			state:   "invalid state",
			wantErr: true,
		},
	}
	for _, tt := range tests {
		// Run each table case as its own subtest. Previously a bare `return`
		// on a wantErr mismatch aborted the whole test function and silently
		// skipped every remaining case.
		t.Run(tt.state, func(t *testing.T) {
			// Capture the raw GraphQL request body so we can assert on the
			// serialized `states` variable below.
			var query string
			reg := &httpmock.Registry{}
			reg.Register(httpmock.MatchAny, func(req *http.Request) (*http.Response, error) {
				buf := new(strings.Builder)
				if _, err := io.Copy(buf, req.Body); err != nil {
					return nil, err
				}
				query = buf.String()
				return httpmock.StringResponse("{}")(req)
			})
			client := newTestClient(reg)
			_, err := RepoMilestones(client, ghrepo.New("OWNER", "REPO"), tt.state)
			if (err != nil) != tt.wantErr {
				t.Fatalf("RepoMilestones() error = %v, wantErr %v", err, tt.wantErr)
			}
			// For the error case tt.want is empty and Contains trivially holds.
			if !strings.Contains(query, tt.want) {
				t.Errorf("query does not contain %v", tt.want)
			}
		})
	}
}
// TestDisplayName verifies that AssignableUser.DisplayName appends the full
// name in parentheses only when one is present.
func TestDisplayName(t *testing.T) {
	tests := []struct {
		name     string
		assignee AssignableUser
		want     string
	}{
		{
			name:     "assignee with name",
			assignee: AssignableUser{"123", "octocat123", "Octavious Cath"},
			want:     "octocat123 (Octavious Cath)",
		},
		{
			name:     "assignee without name",
			assignee: AssignableUser{"123", "octocat123", ""},
			want:     "octocat123",
		},
	}
	for _, tt := range tests {
		// The `name` field was previously declared but never used; surface it
		// as the subtest name for clearer failure reporting.
		t.Run(tt.name, func(t *testing.T) {
			if actual := tt.assignee.DisplayName(); actual != tt.want {
				t.Errorf("display name was %s wanted %s", actual, tt.want)
			}
		})
	}
}
// TestRepoExists verifies how RepoExists interprets the HEAD request outcome:
// a success response means the repository exists, a 404 means it does not,
// and any other failure surfaces as an error.
func TestRepoExists(t *testing.T) {
	tests := []struct {
		name       string
		httpStub   func(*httpmock.Registry)
		repo       ghrepo.Interface
		existCheck bool
		wantErrMsg string
	}{
		{
			name: "repo exists",
			httpStub: func(r *httpmock.Registry) {
				r.Register(
					httpmock.REST("HEAD", "repos/OWNER/REPO"),
					httpmock.StringResponse("{}"),
				)
			},
			repo:       ghrepo.New("OWNER", "REPO"),
			existCheck: true,
			wantErrMsg: "",
		},
		{
			name: "repo does not exists",
			httpStub: func(r *httpmock.Registry) {
				r.Register(
					httpmock.REST("HEAD", "repos/OWNER/REPO"),
					httpmock.StatusStringResponse(404, "Not Found"),
				)
			},
			repo:       ghrepo.New("OWNER", "REPO"),
			existCheck: false,
			wantErrMsg: "",
		},
		{
			name: "http error",
			httpStub: func(r *httpmock.Registry) {
				r.Register(
					httpmock.REST("HEAD", "repos/OWNER/REPO"),
					httpmock.StatusStringResponse(500, "Internal Server Error"),
				)
			},
			repo:       ghrepo.New("OWNER", "REPO"),
			existCheck: false,
			wantErrMsg: "HTTP 500 (https://api.github.com/repos/OWNER/REPO)",
		},
	}
	for _, tt := range tests {
		reg := &httpmock.Registry{}
		if tt.httpStub != nil {
			tt.httpStub(reg)
		}
		client := newTestClient(reg)
		t.Run(tt.name, func(t *testing.T) {
			// Bug fix: previously the call hard-coded ghrepo.New("OWNER", "REPO")
			// and ignored tt.repo, making the table's repo field dead weight.
			exist, err := RepoExists(client, tt.repo)
			if tt.wantErrMsg != "" {
				assert.Equal(t, tt.wantErrMsg, err.Error())
			} else {
				assert.NoError(t, err)
			}
			if exist != tt.existCheck {
				t.Errorf("RepoExists() returns %v, expected %v", exist, tt.existCheck)
				return
			}
		})
	}
}
func TestForkRepoReturnsErrorWhenForkIsNotPossible(t *testing.T) {
// Given our API returns 202 with a Fork that is the same as
// the repo we provided
repoName := "test-repo"
ownerLogin := "test-owner"
stubbedForkResponse := repositoryV3{
Name: repoName,
Owner: struct{ Login string }{
Login: ownerLogin,
},
}
reg := &httpmock.Registry{}
reg.Register(
httpmock.REST("POST", fmt.Sprintf("repos/%s/%s/forks", ownerLogin, repoName)),
httpmock.StatusJSONResponse(202, stubbedForkResponse),
)
client := newTestClient(reg)
// When we fork the repo
_, err := ForkRepo(client, ghrepo.New(ownerLogin, repoName), ownerLogin, "", false)
// Then it provides a useful error message
require.Equal(t, fmt.Errorf("%s/%s cannot be forked. A single user account cannot own both a parent and fork.", ownerLogin, repoName), err)
}
// TestListLicenseTemplatesReturnsLicenses verifies that RepoLicenses decodes
// the REST /licenses payload into the expected []License slice.
func TestListLicenseTemplatesReturnsLicenses(t *testing.T) {
	hostname := "api.github.com"
	reg := &httpmock.Registry{}
	defer reg.Verify(t)
	reg.Register(
		httpmock.REST("GET", "licenses"),
		httpmock.StringResponse(`[
{
"key": "mit",
"name": "MIT License",
"spdx_id": "MIT",
"url": "https://api.github.com/licenses/mit",
"node_id": "MDc6TGljZW5zZW1pdA=="
},
{
"key": "lgpl-3.0",
"name": "GNU Lesser General Public License v3.0",
"spdx_id": "LGPL-3.0",
"url": "https://api.github.com/licenses/lgpl-3.0",
"node_id": "MDc6TGljZW5zZW1pdA=="
},
{
"key": "mpl-2.0",
"name": "Mozilla Public License 2.0",
"spdx_id": "MPL-2.0",
"url": "https://api.github.com/licenses/mpl-2.0",
"node_id": "MDc6TGljZW5zZW1pdA=="
},
{
"key": "agpl-3.0",
"name": "GNU Affero General Public License v3.0",
"spdx_id": "AGPL-3.0",
"url": "https://api.github.com/licenses/agpl-3.0",
"node_id": "MDc6TGljZW5zZW1pdA=="
},
{
"key": "unlicense",
"name": "The Unlicense",
"spdx_id": "Unlicense",
"url": "https://api.github.com/licenses/unlicense",
"node_id": "MDc6TGljZW5zZW1pdA=="
},
{
"key": "apache-2.0",
"name": "Apache License 2.0",
"spdx_id": "Apache-2.0",
"url": "https://api.github.com/licenses/apache-2.0",
"node_id": "MDc6TGljZW5zZW1pdA=="
},
{
"key": "gpl-3.0",
"name": "GNU General Public License v3.0",
"spdx_id": "GPL-3.0",
"url": "https://api.github.com/licenses/gpl-3.0",
"node_id": "MDc6TGljZW5zZW1pdA=="
}
]`,
		))
	// The payload only carries key/name/spdx_id/url/node_id, so every other
	// License field is expected to stay at its zero value.
	wantLicenses := []License{
		{Key: "mit", Name: "MIT License", SPDXID: "MIT", URL: "https://api.github.com/licenses/mit", NodeID: "MDc6TGljZW5zZW1pdA=="},
		{Key: "lgpl-3.0", Name: "GNU Lesser General Public License v3.0", SPDXID: "LGPL-3.0", URL: "https://api.github.com/licenses/lgpl-3.0", NodeID: "MDc6TGljZW5zZW1pdA=="},
		{Key: "mpl-2.0", Name: "Mozilla Public License 2.0", SPDXID: "MPL-2.0", URL: "https://api.github.com/licenses/mpl-2.0", NodeID: "MDc6TGljZW5zZW1pdA=="},
		{Key: "agpl-3.0", Name: "GNU Affero General Public License v3.0", SPDXID: "AGPL-3.0", URL: "https://api.github.com/licenses/agpl-3.0", NodeID: "MDc6TGljZW5zZW1pdA=="},
		{Key: "unlicense", Name: "The Unlicense", SPDXID: "Unlicense", URL: "https://api.github.com/licenses/unlicense", NodeID: "MDc6TGljZW5zZW1pdA=="},
		{Key: "apache-2.0", Name: "Apache License 2.0", SPDXID: "Apache-2.0", URL: "https://api.github.com/licenses/apache-2.0", NodeID: "MDc6TGljZW5zZW1pdA=="},
		{Key: "gpl-3.0", Name: "GNU General Public License v3.0", SPDXID: "GPL-3.0", URL: "https://api.github.com/licenses/gpl-3.0", NodeID: "MDc6TGljZW5zZW1pdA=="},
	}
	client := &http.Client{Transport: reg}
	gotLicenses, err := RepoLicenses(client, hostname)
	assert.NoError(t, err, "Expected no error while fetching /licenses")
	assert.Equal(t, wantLicenses, gotLicenses, "Licenses fetched is not as expected")
}
// TestLicenseTemplateReturnsLicense verifies that RepoLicense decodes the full
// REST payload for a single license template, including permissions,
// conditions, limitations, body text, and the featured flag.
func TestLicenseTemplateReturnsLicense(t *testing.T) {
	const licenseTemplateName = "mit"
	const hostname = "api.github.com"
	reg := &httpmock.Registry{}
	defer reg.Verify(t)
	reg.Register(
		httpmock.REST("GET", fmt.Sprintf("licenses/%v", licenseTemplateName)),
		httpmock.StringResponse(`{
"key": "mit",
"name": "MIT License",
"spdx_id": "MIT",
"url": "https://api.github.com/licenses/mit",
"node_id": "MDc6TGljZW5zZTEz",
"html_url": "http://choosealicense.com/licenses/mit/",
"description": "A short and simple permissive license with conditions only requiring preservation of copyright and license notices. Licensed works, modifications, and larger works may be distributed under different terms and without source code.",
"implementation": "Create a text file (typically named LICENSE or LICENSE.txt) in the root of your source code and copy the text of the license into the file. Replace [year] with the current year and [fullname] with the name (or names) of the copyright holders.",
"permissions": [
"commercial-use",
"modifications",
"distribution",
"private-use"
],
"conditions": [
"include-copyright"
],
"limitations": [
"liability",
"warranty"
],
"body": "MIT License\n\nCopyright (c) [year] [fullname]\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n",
"featured": true
}`,
		))
	wantLicense := &License{
		Key:            "mit",
		Name:           "MIT License",
		SPDXID:         "MIT",
		URL:            "https://api.github.com/licenses/mit",
		NodeID:         "MDc6TGljZW5zZTEz",
		HTMLURL:        "http://choosealicense.com/licenses/mit/",
		Description:    "A short and simple permissive license with conditions only requiring preservation of copyright and license notices. Licensed works, modifications, and larger works may be distributed under different terms and without source code.",
		Implementation: "Create a text file (typically named LICENSE or LICENSE.txt) in the root of your source code and copy the text of the license into the file. Replace [year] with the current year and [fullname] with the name (or names) of the copyright holders.",
		Permissions: []string{
			"commercial-use",
			"modifications",
			"distribution",
			"private-use",
		},
		Conditions: []string{
			"include-copyright",
		},
		Limitations: []string{
			"liability",
			"warranty",
		},
		Body:     "MIT License\n\nCopyright (c) [year] [fullname]\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n",
		Featured: true,
	}
	client := &http.Client{Transport: reg}
	gotLicenseTemplate, err := RepoLicense(client, hostname, licenseTemplateName)
	assert.NoError(t, err, fmt.Sprintf("Expected no error while fetching /licenses/%v", licenseTemplateName))
	assert.Equal(t, wantLicense, gotLicenseTemplate, fmt.Sprintf("License \"%v\" fetched is not as expected", licenseTemplateName))
}
// TestLicenseTemplateReturnsErrorWhenLicenseTemplateNotFound ensures that a
// 404 from the licenses endpoint is propagated as an error by RepoLicense.
func TestLicenseTemplateReturnsErrorWhenLicenseTemplateNotFound(t *testing.T) {
	const licenseTemplateName = "invalid-license"
	const hostname = "api.github.com"
	reg := &httpmock.Registry{}
	defer reg.Verify(t)
	reg.Register(
		httpmock.REST("GET", fmt.Sprintf("licenses/%v", licenseTemplateName)),
		httpmock.StatusStringResponse(404, heredoc.Doc(`
{
"message": "Not Found",
"documentation_url": "https://docs.github.com/rest/licenses/licenses#get-a-license",
"status": "404"
}`)),
	)
	client := &http.Client{Transport: reg}
	_, err := RepoLicense(client, hostname, licenseTemplateName)
	assert.Error(t, err, fmt.Sprintf("Expected error while fetching /licenses/%v", licenseTemplateName))
}
// TestListGitIgnoreTemplatesReturnsGitIgnoreTemplates verifies that
// RepoGitIgnoreTemplates decodes the REST /gitignore/templates payload into
// the expected list of template names.
func TestListGitIgnoreTemplatesReturnsGitIgnoreTemplates(t *testing.T) {
	const hostname = "api.github.com"
	reg := &httpmock.Registry{}
	defer reg.Verify(t)
	reg.Register(
		httpmock.REST("GET", "gitignore/templates"),
		httpmock.StringResponse(`[
"AL",
"Actionscript",
"Ada",
"Agda",
"Android",
"AppEngine",
"AppceleratorTitanium",
"ArchLinuxPackages",
"Autotools",
"Ballerina",
"C",
"C++",
"CFWheels",
"CMake",
"CUDA",
"CakePHP",
"ChefCookbook",
"Clojure",
"CodeIgniter",
"CommonLisp",
"Composer",
"Concrete5",
"Coq",
"CraftCMS",
"D"
]`,
		))
	wantGitIgnoreTemplates := []string{
		"AL", "Actionscript", "Ada", "Agda", "Android",
		"AppEngine", "AppceleratorTitanium", "ArchLinuxPackages", "Autotools", "Ballerina",
		"C", "C++", "CFWheels", "CMake", "CUDA",
		"CakePHP", "ChefCookbook", "Clojure", "CodeIgniter", "CommonLisp",
		"Composer", "Concrete5", "Coq", "CraftCMS", "D",
	}
	client := &http.Client{Transport: reg}
	gotGitIgnoreTemplates, err := RepoGitIgnoreTemplates(client, hostname)
	assert.NoError(t, err, "Expected no error while fetching /gitignore/templates")
	assert.Equal(t, wantGitIgnoreTemplates, gotGitIgnoreTemplates, "GitIgnore templates fetched is not as expected")
}
func TestGitIgnoreTemplateReturnsGitIgnoreTemplate(t *testing.T) {
gitIgnoreTemplateName := "Go"
httpStubs := func(reg *httpmock.Registry) {
reg.Register(
httpmock.REST("GET", fmt.Sprintf("gitignore/templates/%v", gitIgnoreTemplateName)),
httpmock.StringResponse(`{
"name": "Go",
"source": "# If you prefer the allow list template instead of the deny list, see community template:\n# https://github.com/github/gitignore/blob/main/community/Golang/Go.AllowList.gitignore\n#\n# Binaries for programs and plugins\n*.exe\n*.exe~\n*.dll\n*.so\n*.dylib\n\n# Test binary, built with go test -c\n*.test\n\n# Output of the go coverage tool, specifically when used with LiteIDE\n*.out\n\n# Dependency directories (remove the comment below to include it)\n# vendor/\n\n# Go workspace file\ngo.work\ngo.work.sum\n\n# env file\n.env\n"
}`,
))
}
wantGitIgnoreTemplate := &GitIgnore{
Name: "Go",
Source: "# If you prefer the allow list template instead of the deny list, see community template:\n# https://github.com/github/gitignore/blob/main/community/Golang/Go.AllowList.gitignore\n#\n# Binaries for programs and plugins\n*.exe\n*.exe~\n*.dll\n*.so\n*.dylib\n\n# Test binary, built with go test -c\n*.test\n\n# Output of the go coverage tool, specifically when used with LiteIDE\n*.out\n\n# Dependency directories (remove the comment below to include it)\n# vendor/\n\n# Go workspace file\ngo.work\ngo.work.sum\n\n# env file\n.env\n",
}
reg := &httpmock.Registry{}
httpStubs(reg)
httpClient := func() (*http.Client, error) {
return &http.Client{Transport: reg}, nil
}
client, _ := httpClient()
defer reg.Verify(t)
gotGitIgnoreTemplate, err := RepoGitIgnoreTemplate(client, "api.github.com", gitIgnoreTemplateName)
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | true |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/git/command_test.go | git/command_test.go | package git
import (
"fmt"
"os"
"os/exec"
"path/filepath"
"runtime"
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// TestOutput exercises Command.Output against a freshly compiled mock
// executable, checking both the success path and that a non-zero exit is
// converted into a *GitError carrying the exit code and stderr contents.
func TestOutput(t *testing.T) {
	tests := []struct {
		name     string
		exitCode int
		stdout   string
		stderr   string
		wantErr  *GitError
	}{
		{
			name:     "successful command",
			stdout:   "hello world",
			stderr:   "",
			exitCode: 0,
			wantErr:  nil,
		},
		{
			name:     "not a repo failure",
			stdout:   "",
			stderr:   "fatal: not a git repository (or any of the parent directories): .git",
			exitCode: 128,
			wantErr: &GitError{
				ExitCode: 128,
				Stderr:   "fatal: not a git repository (or any of the parent directories): .git",
				err:      &exec.ExitError{},
			},
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			// The mock binary prints the configured stdout/stderr and exits
			// with the configured code, standing in for a real git invocation.
			cmd := Command{
				&exec.Cmd{
					Path: createMockExecutable(t, tt.stdout, tt.stderr, tt.exitCode),
				},
			}
			out, err := cmd.Output()
			if tt.wantErr != nil {
				require.Error(t, err)
				var gitError *GitError
				require.ErrorAs(t, err, &gitError)
				assert.Equal(t, tt.wantErr.ExitCode, gitError.ExitCode)
				assert.Equal(t, tt.wantErr.Stderr, gitError.Stderr)
				assert.Equal(t, tt.wantErr.Error(), gitError.Error())
			} else {
				require.NoError(t, err)
			}
			// Captured stdout is compared regardless of success or failure.
			assert.Equal(t, tt.stdout, string(out))
		})
	}
}
// createMockExecutable compiles a throwaway Go binary into t.TempDir() that
// prints the given stdout/stderr and terminates with exitCode, returning the
// path to the compiled binary.
func createMockExecutable(t *testing.T, stdout string, stderr string, exitCode int) string {
	dir := t.TempDir()
	srcPath := filepath.Join(dir, "main.go")
	binPath := filepath.Join(dir, "mockexec")
	if runtime.GOOS == "windows" {
		binPath += ".exe"
	}
	// Render and write the program source.
	if err := os.WriteFile(srcPath, []byte(buildCommandSourceCode(exitCode, stdout, stderr)), 0600); err != nil {
		t.Fatal(err)
	}
	// Compile it with the host toolchain.
	if out, err := exec.Command("go", "build", "-o", binPath, srcPath).CombinedOutput(); err != nil {
		t.Fatalf("failed to compile: %v\n%s", err, out)
	}
	return binPath
}
// buildCommandSourceCode renders the source text of a tiny Go program that
// writes the given stdout/stderr contents verbatim (via %q quoting) and
// terminates with the given exit code.
func buildCommandSourceCode(exitCode int, stdout, stderr string) string {
	const tmpl = `package main
import (
"fmt"
"os"
)
func main() {
fmt.Printf(%q)
fmt.Fprintf(os.Stderr, %q)
os.Exit(%d)
}`
	return fmt.Sprintf(tmpl, stdout, stderr, exitCode)
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/git/client.go | git/client.go | package git
import (
"bytes"
"context"
"errors"
"fmt"
"io"
"net/url"
"os/exec"
"path"
"regexp"
"runtime"
"slices"
"sort"
"strings"
"sync"
"github.com/cli/cli/v2/internal/ghinstance"
"github.com/cli/safeexec"
)
// MergeBaseConfig is the configuration setting to keep track of the PR target branch.
const MergeBaseConfig = "gh-merge-base"

// remoteRE parses one line of `git remote -v` output:
// "<name> <url> (fetch|push)".
var remoteRE = regexp.MustCompile(`(.+)\s+(.+)\s+\((push|fetch)\)`)

// This regexp exists to match lines of the following form:
// 6a6872b918c601a0e730710ad8473938a7516d30\u0000title 1\u0000Body 1\u0000\n
// 7a6872b918c601a0e730710ad8473938a7516d31\u0000title 2\u0000Body 2\u0000
//
// This is the format we use when collecting commit information,
// with null bytes as separators. Using null bytes this way allows for us
// to easily maintain newlines that might be in the body.
//
// The ?m modifier is the multi-line modifier, meaning that ^ and $
// match the beginning and end of lines, respectively.
//
// The [\S\s] matches any whitespace or non-whitespace character,
// which is different from .* because it allows for newlines as well.
//
// The ? following .* and [\S\s] is a lazy modifier, meaning that it will
// match as few characters as possible while still satisfying the rest of the regexp.
// This is important because it allows us to match the first null byte after the title and body,
// rather than the last null byte in the entire string.
var commitLogRE = regexp.MustCompile(`(?m)^[0-9a-fA-F]{7,40}\x00.*?\x00[\S\s]*?\x00$`)

// errWithExitCode is satisfied by errors (such as *exec.ExitError) that
// expose the exit status of the underlying process.
type errWithExitCode interface {
	ExitCode() int
}
// Client invokes git commands, optionally scoped to a repository directory,
// with configurable executable paths and standard streams.
type Client struct {
	GhPath  string // path to the gh executable (used as a git credential helper)
	RepoDir string // when set, commands are run with `git -C <RepoDir>`
	GitPath string // path to the git executable; resolved lazily by Command
	Stderr  io.Writer
	Stdin   io.Reader
	Stdout  io.Writer

	commandContext commandCtx // test seam for stubbing exec.CommandContext
	mu             sync.Mutex // guards the lazy one-time resolution of GitPath
}
// Copy returns a new Client sharing this client's configuration and streams.
// The mutex is deliberately not copied: the new client starts with its own
// zero-valued lock, which keeps the sync.Mutex no-copy rule intact.
// NOTE(review): fields are read here without holding mu — presumably Copy is
// not called concurrently with Command's lazy GitPath resolution; confirm.
func (c *Client) Copy() *Client {
	return &Client{
		GhPath:  c.GhPath,
		RepoDir: c.RepoDir,
		GitPath: c.GitPath,
		Stderr:  c.Stderr,
		Stdin:   c.Stdin,
		Stdout:  c.Stdout,

		commandContext: c.commandContext,
	}
}
// Command builds a git invocation with the given arguments, wiring up the
// client's configured streams and, when RepoDir is set, prefixing the
// arguments with `-C <RepoDir>`. The git binary path is resolved lazily on
// first use and cached on the client.
func (c *Client) Command(ctx context.Context, args ...string) (*Command, error) {
	if c.RepoDir != "" {
		args = append([]string{"-C", c.RepoDir}, args...)
	}
	// Allow tests to substitute command construction via c.commandContext.
	commandContext := exec.CommandContext
	if c.commandContext != nil {
		commandContext = c.commandContext
	}
	// Resolve and cache the git path under the mutex so concurrent callers
	// perform the lookup at most once.
	var err error
	c.mu.Lock()
	if c.GitPath == "" {
		c.GitPath, err = resolveGitPath()
	}
	c.mu.Unlock()
	if err != nil {
		return nil, err
	}
	cmd := commandContext(ctx, c.GitPath, args...)
	cmd.Stderr = c.Stderr
	cmd.Stdin = c.Stdin
	cmd.Stdout = c.Stdout
	return &Command{cmd}, nil
}
// CredentialPattern is used to inform AuthenticatedCommand which patterns Git should match
// against when trying to find credentials. It is a little over-engineered as a type because we
// want AuthenticatedCommand to have a clear compilation error when this is not provided,
// as opposed to using a string which might compile with `client.AuthenticatedCommand(ctx, "fetch")`.
//
// It is only usable when constructed by another function in the package because the empty pattern,
// without allMatching set to true, will result in an error in AuthenticatedCommand.
//
// Callers can currently opt-in to a slightly less secure mode for backwards compatibility by using
// AllMatchingCredentialsPattern.
type CredentialPattern struct {
	allMatching bool // should only be constructable via AllMatchingCredentialsPattern
	pattern     string
}

// AllMatchingCredentialsPattern allows for setting gh as credential helper for all hosts.
// However, we should endeavour to remove it as it's less secure.
var AllMatchingCredentialsPattern = CredentialPattern{allMatching: true, pattern: ""}

// disallowedCredentialPattern is the zero value of CredentialPattern;
// AuthenticatedCommand rejects it so an empty pattern is never configured by accident.
var disallowedCredentialPattern = CredentialPattern{allMatching: false, pattern: ""}
// CredentialPatternFromGitURL takes a git remote URL e.g. "https://github.com/cli/cli.git" or
// "git@github.com:cli/cli.git" and returns the credential pattern that should be used for it.
func CredentialPatternFromGitURL(gitURL string) (CredentialPattern, error) {
	parsed, err := ParseURL(gitURL)
	if err != nil {
		return CredentialPattern{}, fmt.Errorf("failed to parse remote URL: %w", err)
	}
	return CredentialPatternFromHost(parsed.Host), nil
}
// CredentialPatternFromHost expects host to be in the form "github.com" and returns
// the credential pattern that should be used for it.
// It does not perform any canonicalisation e.g. "api.github.com" will not work as expected.
func CredentialPatternFromHost(host string) CredentialPattern {
	// HostPrefix yields e.g. "https://github.com/"; drop the trailing slash.
	prefix := strings.TrimSuffix(ghinstance.HostPrefix(host), "/")
	return CredentialPattern{pattern: prefix}
}
// AuthenticatedCommand is a wrapper around Command that includes configuration
// to use gh as the credential helper for git. The supplied pattern controls
// which hosts the helper is registered for; the zero-valued pattern is
// rejected outright.
func (c *Client) AuthenticatedCommand(ctx context.Context, credentialPattern CredentialPattern, args ...string) (*Command, error) {
	if c.GhPath == "" {
		// Assumes that gh is in PATH.
		c.GhPath = "gh"
	}
	credHelper := fmt.Sprintf("!%q auth git-credential", c.GhPath)
	// Each branch first clears any previously configured helper for the
	// matched scope, then installs gh as the helper for that scope.
	var preArgs []string
	switch credentialPattern {
	case disallowedCredentialPattern:
		return nil, fmt.Errorf("empty credential pattern is not allowed unless provided explicitly")
	case AllMatchingCredentialsPattern:
		preArgs = []string{
			"-c", "credential.helper=",
			"-c", fmt.Sprintf("credential.helper=%s", credHelper),
		}
	default:
		preArgs = []string{
			"-c", fmt.Sprintf("credential.%s.helper=", credentialPattern.pattern),
			"-c", fmt.Sprintf("credential.%s.helper=%s", credentialPattern.pattern, credHelper),
		}
	}
	return c.Command(ctx, append(preArgs, args...)...)
}
// Remotes lists the repository's git remotes from `git remote -v`, merges in
// any `remote.*.gh-resolved` configuration, and returns them sorted.
func (c *Client) Remotes(ctx context.Context) (RemoteSet, error) {
	remoteArgs := []string{"remote", "-v"}
	remoteCmd, err := c.Command(ctx, remoteArgs...)
	if err != nil {
		return nil, err
	}
	remoteOut, remoteErr := remoteCmd.Output()
	if remoteErr != nil {
		return nil, remoteErr
	}
	// Look up gh-resolved markers recorded by SetRemoteResolution.
	configArgs := []string{"config", "--get-regexp", `^remote\..*\.gh-resolved$`}
	configCmd, err := c.Command(ctx, configArgs...)
	if err != nil {
		return nil, err
	}
	configOut, configErr := configCmd.Output()
	if configErr != nil {
		// Ignore exit code 1 as it means there are no resolved remotes.
		var gitErr *GitError
		if ok := errors.As(configErr, &gitErr); ok && gitErr.ExitCode != 1 {
			return nil, gitErr
		}
	}
	remotes := parseRemotes(outputLines(remoteOut))
	populateResolvedRemotes(remotes, outputLines(configOut))
	sort.Sort(remotes)
	return remotes, nil
}
// UpdateRemoteURL points the named remote at a new URL.
func (c *Client) UpdateRemoteURL(ctx context.Context, name, url string) error {
	cmd, err := c.Command(ctx, "remote", "set-url", name, url)
	if err != nil {
		return err
	}
	_, err = cmd.Output()
	return err
}
// SetRemoteResolution records a gh-resolved value for the named remote in git config.
func (c *Client) SetRemoteResolution(ctx context.Context, name, resolution string) error {
	key := fmt.Sprintf("remote.%s.gh-resolved", name)
	cmd, err := c.Command(ctx, "config", "--add", key, resolution)
	if err != nil {
		return err
	}
	_, err = cmd.Output()
	return err
}
// CurrentBranch reads the checked-out branch for the git repository.
// In detached HEAD state it returns a GitError wrapping ErrNotOnAnyBranch.
func (c *Client) CurrentBranch(ctx context.Context) (string, error) {
	cmd, err := c.Command(ctx, "symbolic-ref", "--quiet", "HEAD")
	if err != nil {
		return "", err
	}
	out, err := cmd.Output()
	if err == nil {
		return strings.TrimPrefix(firstLine(out), "refs/heads/"), nil
	}
	// With --quiet, git exits non-zero and prints nothing when HEAD is detached.
	var gitErr *GitError
	if errors.As(err, &gitErr) && len(gitErr.Stderr) == 0 {
		gitErr.err = ErrNotOnAnyBranch
		gitErr.Stderr = "not on any branch"
		return "", gitErr
	}
	return "", err
}
// ShowRefs resolves fully-qualified refs to commit hashes.
// It returns every ref it could verify alongside any git error, since
// `git show-ref --verify` still prints the valid refs when some fail.
func (c *Client) ShowRefs(ctx context.Context, refs []string) ([]Ref, error) {
	cmd, err := c.Command(ctx, append([]string{"show-ref", "--verify", "--"}, refs...)...)
	if err != nil {
		return nil, err
	}
	// This functionality relies on parsing output from the git command despite
	// an error status being returned from git.
	out, err := cmd.Output()
	var verified []Ref
	for _, line := range outputLines(out) {
		hash, name, ok := strings.Cut(line, " ")
		if !ok {
			continue
		}
		verified = append(verified, Ref{Hash: hash, Name: name})
	}
	return verified, err
}
// Config reads a single git configuration value, returning its first line.
// An unset key is reported as a GitError with exit code 1 and a descriptive message.
func (c *Client) Config(ctx context.Context, name string) (string, error) {
	cmd, err := c.Command(ctx, "config", name)
	if err != nil {
		return "", err
	}
	out, err := cmd.Output()
	if err == nil {
		return firstLine(out), nil
	}
	// Exit status 1 from `git config` means the key does not exist.
	var gitErr *GitError
	if errors.As(err, &gitErr) && gitErr.ExitCode == 1 {
		gitErr.Stderr = fmt.Sprintf("unknown config key %s", name)
		return "", gitErr
	}
	return "", err
}
// UncommittedChangeCount reports how many entries `git status --porcelain`
// prints, i.e. the number of changed/untracked paths in the working tree.
func (c *Client) UncommittedChangeCount(ctx context.Context) (int, error) {
	cmd, err := c.Command(ctx, "status", "--porcelain")
	if err != nil {
		return 0, err
	}
	out, err := cmd.Output()
	if err != nil {
		return 0, err
	}
	count := 0
	for _, line := range strings.Split(string(out), "\n") {
		if line != "" {
			count++
		}
	}
	return count, nil
}
// Commits lists the commits reachable from headRef but not baseRef (via
// `git log --cherry base...head`), parsed into sha/title/body triples.
// It returns an error when the range contains no commits.
func (c *Client) Commits(ctx context.Context, baseRef, headRef string) ([]*Commit, error) {
	// The formatting directive %x00 indicates that git should include the null byte as a separator.
	// We use this because it is not a valid character to include in a commit message. Previously,
	// commas were used here but when we Split on them, we would get incorrect results if commit titles
	// happened to contain them.
	// https://git-scm.com/docs/pretty-formats#Documentation/pretty-formats.txt-emx00em
	args := []string{"-c", "log.ShowSignature=false", "log", "--pretty=format:%H%x00%s%x00%b%x00", "--cherry", fmt.Sprintf("%s...%s", baseRef, headRef)}
	cmd, err := c.Command(ctx, args...)
	if err != nil {
		return nil, err
	}
	out, err := cmd.Output()
	if err != nil {
		return nil, err
	}

	commits := []*Commit{}
	commitLogs := commitLogRE.FindAllString(string(out), -1)
	for _, commitLog := range commitLogs {
		// Each line looks like this:
		// 6a6872b918c601a0e730710ad8473938a7516d30\u0000title 1\u0000Body 1\u0000\n

		// Or with an optional body:
		// 6a6872b918c601a0e730710ad8473938a7516d30\u0000title 1\u0000\u0000\n

		// Therefore after splitting we will have:
		// ["6a6872b918c601a0e730710ad8473938a7516d30", "title 1", "Body 1", ""]

		// Or with an optional body:
		// ["6a6872b918c601a0e730710ad8473938a7516d30", "title 1", "", ""]
		commitLogParts := strings.Split(commitLog, "\u0000")
		commits = append(commits, &Commit{
			Sha:   commitLogParts[0],
			Title: commitLogParts[1],
			Body:  commitLogParts[2],
		})
	}

	if len(commits) == 0 {
		return nil, fmt.Errorf("could not find any commits between %s and %s", baseRef, headRef)
	}

	return commits, nil
}
// LastCommit returns the sha and subject line of the commit at HEAD.
//
// The previous implementation indexed the output at the position of the first
// comma without checking IndexByte's return value, which would panic with a
// slice-bounds error on malformed output (no comma). bytes.Cut handles that
// case gracefully: the whole output becomes the sha and the title is empty.
func (c *Client) LastCommit(ctx context.Context) (*Commit, error) {
	output, err := c.lookupCommit(ctx, "HEAD", "%H,%s")
	if err != nil {
		return nil, err
	}
	sha, title, _ := bytes.Cut(output, []byte(","))
	return &Commit{
		Sha:   string(sha),
		Title: strings.TrimSpace(string(title)),
	}, nil
}
// CommitBody returns the body (message without the subject line) of the given commit.
func (c *Client) CommitBody(ctx context.Context, sha string) (string, error) {
	raw, err := c.lookupCommit(ctx, sha, "%b")
	return string(raw), err
}
// lookupCommit runs `git show -s` for sha with the given pretty-format
// directive and returns the raw output bytes.
func (c *Client) lookupCommit(ctx context.Context, sha, format string) ([]byte, error) {
	cmd, err := c.Command(ctx, "-c", "log.ShowSignature=false", "show", "-s", "--pretty=format:"+format, sha)
	if err != nil {
		return nil, err
	}
	return cmd.Output()
}
// ReadBranchConfig parses the `branch.BRANCH.(remote|merge|pushremote|gh-merge-base)` part of git config.
// If no branch config is found or there is an error in the command, it returns an empty BranchConfig.
// Downstream consumers of ReadBranchConfig should consider the behavior they desire if this errors,
// as an empty config is not necessarily breaking.
func (c *Client) ReadBranchConfig(ctx context.Context, branch string) (BranchConfig, error) {
	prefix := regexp.QuoteMeta(fmt.Sprintf("branch.%s.", branch))
	pattern := fmt.Sprintf("^%s(remote|merge|pushremote|%s)$", prefix, MergeBaseConfig)
	cmd, err := c.Command(ctx, "config", "--get-regexp", pattern)
	if err != nil {
		return BranchConfig{}, err
	}
	out, err := cmd.Output()
	if err != nil {
		// This is the error we expect if the git command does not run successfully.
		// If the ExitCode is 1, then we just didn't find any config for the branch.
		var gitError *GitError
		if errors.As(err, &gitError) && gitError.ExitCode != 1 {
			return BranchConfig{}, err
		}
		return BranchConfig{}, nil
	}
	return parseBranchConfig(outputLines(out)), nil
}
// parseBranchConfig folds `git config --get-regexp` output lines for a single
// branch into a BranchConfig, keyed by the last segment of each config key.
func parseBranchConfig(branchConfigLines []string) BranchConfig {
	var cfg BranchConfig
	for _, line := range branchConfigLines {
		key, value, ok := strings.Cut(line, " ")
		if !ok {
			continue
		}
		segments := strings.Split(key, ".")
		switch segments[len(segments)-1] {
		case "remote":
			cfg.RemoteURL, cfg.RemoteName = parseRemoteURLOrName(value)
		case "pushremote":
			cfg.PushRemoteURL, cfg.PushRemoteName = parseRemoteURLOrName(value)
		case "merge":
			cfg.MergeRef = value
		case MergeBaseConfig:
			cfg.MergeBase = value
		}
	}
	return cfg
}
// SetBranchConfig sets the named value on the given branch.
func (c *Client) SetBranchConfig(ctx context.Context, branch, name, value string) error {
	key := fmt.Sprintf("branch.%s.%s", branch, name)
	cmd, err := c.Command(ctx, "config", key, value)
	if err != nil {
		return err
	}
	// No output expected but check for any printed git error.
	_, err = cmd.Output()
	return err
}
// PushDefault defines the action git push should take if no refspec is given.
// See: https://git-scm.com/docs/git-config#Documentation/git-config.txt-pushdefault
type PushDefault string

const (
	PushDefaultNothing  PushDefault = "nothing"
	PushDefaultCurrent  PushDefault = "current"
	PushDefaultUpstream PushDefault = "upstream"
	PushDefaultTracking PushDefault = "tracking"
	PushDefaultSimple   PushDefault = "simple"
	PushDefaultMatching PushDefault = "matching"
)

// ParsePushDefault validates s against the known push.default values,
// returning an error for anything unrecognized.
func ParsePushDefault(s string) (PushDefault, error) {
	switch p := PushDefault(s); p {
	case PushDefaultNothing, PushDefaultCurrent, PushDefaultUpstream,
		PushDefaultTracking, PushDefaultSimple, PushDefaultMatching:
		return p, nil
	}
	return "", fmt.Errorf("unknown push.default value: %s", s)
}
// PushDefault returns the value of push.default in the config. If the value
// is not set, it returns "simple" (the default git value). See
// https://git-scm.com/docs/git-config#Documentation/git-config.txt-pushdefault
func (c *Client) PushDefault(ctx context.Context) (PushDefault, error) {
	value, err := c.Config(ctx, "push.default")
	if err == nil {
		return ParsePushDefault(value)
	}
	// Exit code 1 means the key is unset; git has defaulted to "simple" since 2.0.
	var gitError *GitError
	if errors.As(err, &gitError) && gitError.ExitCode == 1 {
		return PushDefaultSimple, nil
	}
	return "", err
}
// RemotePushDefault returns the value of remote.pushDefault in the config. If
// the value is not set, it returns an empty string.
func (c *Client) RemotePushDefault(ctx context.Context) (string, error) {
	value, err := c.Config(ctx, "remote.pushDefault")
	if err == nil {
		return value, nil
	}
	// Exit code 1 means the key is simply unset.
	var gitError *GitError
	if errors.As(err, &gitError) && gitError.ExitCode == 1 {
		return "", nil
	}
	return "", err
}
// RemoteTrackingRef is the structured form of the string "refs/remotes/<remote>/<branch>".
// For example, the @{push} revision syntax could report "refs/remotes/origin/main" which would
// be parsed into RemoteTrackingRef{Remote: "origin", Branch: "main"}.
type RemoteTrackingRef struct {
	Remote string
	Branch string
}

// String reassembles the ref into its "refs/remotes/<remote>/<branch>" form.
func (r RemoteTrackingRef) String() string {
	return fmt.Sprintf("refs/remotes/%s/%s", r.Remote, r.Branch)
}

// ParseRemoteTrackingRef parses a string of the form "refs/remotes/<remote>/<branch>" into
// a RemoteTrackingBranch struct. If the string does not match this format, an error is returned.
//
// For now, we assume that refnames are of the format "<remote>/<branch>", where
// the remote is a single path component, and branch may have many path components e.g.
// "origin/my/branch" is valid as: {Remote: "origin", Branch: "my/branch"}
// but "my/origin/branch" would parse incorrectly as: {Remote: "my", Branch: "origin/branch"}
// I don't believe there is a way to fix this without providing the list of remotes to this function.
//
// It becomes particularly confusing if you have something like:
//
// ```
// [remote "foo"]
//     url = https://github.com/williammartin/test-repo.git
//     fetch = +refs/heads/*:refs/remotes/foo/*
// [remote "foo/bar"]
//     url = https://github.com/williammartin/test-repo.git
//     fetch = +refs/heads/*:refs/remotes/foo/bar/*
// [branch "bar/baz"]
//     remote = foo
//     merge = refs/heads/bar/baz
// [branch "baz"]
//     remote = foo/bar
//     merge = refs/heads/baz
// ```
//
// These @{push} refs would resolve identically:
//
// ```
// ➜ git rev-parse --symbolic-full-name baz@{push}
// refs/remotes/foo/bar/baz
// ➜ git rev-parse --symbolic-full-name bar/baz@{push}
// refs/remotes/foo/bar/baz
// ```
//
// When using this ref, git assumes it means `remote: foo` `branch: bar/baz`.
func ParseRemoteTrackingRef(s string) (RemoteTrackingRef, error) {
	refName, hadPrefix := strings.CutPrefix(s, "refs/remotes/")
	if !hadPrefix {
		return RemoteTrackingRef{}, fmt.Errorf("remote tracking branch must have format refs/remotes/<remote>/<branch> but was: %s", s)
	}
	remote, branch, ok := strings.Cut(refName, "/")
	if !ok {
		return RemoteTrackingRef{}, fmt.Errorf("remote tracking branch must have format refs/remotes/<remote>/<branch> but was: %s", s)
	}
	return RemoteTrackingRef{
		Remote: remote,
		Branch: branch,
	}, nil
}
// PushRevision gets the value of the @{push} revision syntax
// An error here doesn't necessarily mean something is broken, but may mean that the @{push}
// revision syntax couldn't be resolved, such as in non-centralized workflows with
// push.default = simple. Downstream consumers should consider how to handle this error.
func (c *Client) PushRevision(ctx context.Context, branch string) (RemoteTrackingRef, error) {
	out, err := c.revParse(ctx, "--symbolic-full-name", branch+"@{push}")
	if err != nil {
		return RemoteTrackingRef{}, err
	}
	ref, parseErr := ParseRemoteTrackingRef(firstLine(out))
	if parseErr != nil {
		return RemoteTrackingRef{}, fmt.Errorf("could not parse push revision: %v", parseErr)
	}
	return ref, nil
}
// DeleteLocalTag removes the given tag from the local repository.
func (c *Client) DeleteLocalTag(ctx context.Context, tag string) error {
	cmd, err := c.Command(ctx, "tag", "-d", tag)
	if err != nil {
		return err
	}
	_, err = cmd.Output()
	return err
}
// DeleteLocalBranch force-deletes the given local branch.
func (c *Client) DeleteLocalBranch(ctx context.Context, branch string) error {
	cmd, err := c.Command(ctx, "branch", "-D", branch)
	if err != nil {
		return err
	}
	_, err = cmd.Output()
	return err
}
// CheckoutBranch checks out an existing branch.
func (c *Client) CheckoutBranch(ctx context.Context, branch string) error {
	cmd, err := c.Command(ctx, "checkout", branch)
	if err != nil {
		return err
	}
	_, err = cmd.Output()
	return err
}
// CheckoutNewBranch creates branch tracking remoteName/branch and checks it out.
func (c *Client) CheckoutNewBranch(ctx context.Context, remoteName, branch string) error {
	upstream := remoteName + "/" + branch
	cmd, err := c.Command(ctx, "checkout", "-b", branch, "--track", upstream)
	if err != nil {
		return err
	}
	_, err = cmd.Output()
	return err
}
// HasLocalBranch reports whether refs/heads/<branch> exists locally.
func (c *Client) HasLocalBranch(ctx context.Context, branch string) bool {
	if _, err := c.revParse(ctx, "--verify", "refs/heads/"+branch); err != nil {
		return false
	}
	return true
}
// TrackingBranchNames lists remote-tracking branch names (without the
// "refs/remotes/<remote>/" prefix), optionally filtered by prefix.
// Any error is swallowed and reported as a nil slice.
func (c *Client) TrackingBranchNames(ctx context.Context, prefix string) []string {
	args := []string{"branch", "-r", "--format", "%(refname:strip=3)"}
	if prefix != "" {
		// Escape glob metacharacters so the prefix is matched literally.
		args = append(args, "--list", fmt.Sprintf("*/%s*", escapeGlob(prefix)))
	}
	cmd, err := c.Command(ctx, args...)
	if err != nil {
		return nil
	}
	out, err := cmd.Output()
	if err != nil {
		return nil
	}
	return strings.Split(string(out), "\n")
}
// ToplevelDir returns the top-level directory path of the current repository.
func (c *Client) ToplevelDir(ctx context.Context) (string, error) {
	out, err := c.revParse(ctx, "--show-toplevel")
	if err != nil {
		return "", err
	}
	return firstLine(out), nil
}
// GitDir returns the path to the repository's .git directory.
func (c *Client) GitDir(ctx context.Context) (string, error) {
	out, err := c.revParse(ctx, "--git-dir")
	if err != nil {
		return "", err
	}
	return firstLine(out), nil
}
// PathFromRoot shows the current directory relative to the top-level directory
// of the repository, or "" at the root or on error.
func (c *Client) PathFromRoot(ctx context.Context) string {
	out, err := c.revParse(ctx, "--show-prefix")
	if err != nil {
		return ""
	}
	prefix := firstLine(out)
	if prefix == "" {
		return ""
	}
	// git appends a trailing separator to the prefix; drop it.
	return prefix[:len(prefix)-1]
}
// revParse runs `git rev-parse` with the given arguments and returns its raw output.
func (c *Client) revParse(ctx context.Context, args ...string) ([]byte, error) {
	cmd, err := c.Command(ctx, append([]string{"rev-parse"}, args...)...)
	if err != nil {
		return nil, err
	}
	return cmd.Output()
}
// IsLocalGitRepo reports whether the working directory is inside a git
// repository; exit code 128 from git means "not a repository" and is not
// treated as an error.
func (c *Client) IsLocalGitRepo(ctx context.Context) (bool, error) {
	_, err := c.GitDir(ctx)
	if err == nil {
		return true, nil
	}
	var execError errWithExitCode
	if errors.As(err, &execError) && execError.ExitCode() == 128 {
		return false, nil
	}
	return false, err
}
// UnsetRemoteResolution removes the gh-resolved value for the named remote.
func (c *Client) UnsetRemoteResolution(ctx context.Context, name string) error {
	key := fmt.Sprintf("remote.%s.gh-resolved", name)
	cmd, err := c.Command(ctx, "config", "--unset", key)
	if err != nil {
		return err
	}
	_, err = cmd.Output()
	return err
}
// SetRemoteBranches restricts the set of branches fetched from the remote.
func (c *Client) SetRemoteBranches(ctx context.Context, remote string, refspec string) error {
	cmd, err := c.Command(ctx, "remote", "set-branches", remote, refspec)
	if err != nil {
		return err
	}
	_, err = cmd.Output()
	return err
}
// AddRemote registers a new remote, optionally limited to the given tracking
// branches, and returns a Remote whose fetch and push URLs are both the
// parsed form of urlStr.
func (c *Client) AddRemote(ctx context.Context, name, urlStr string, trackingBranches []string) (*Remote, error) {
	args := make([]string, 0, 4+2*len(trackingBranches))
	args = append(args, "remote", "add")
	for _, branch := range trackingBranches {
		args = append(args, "-t", branch)
	}
	args = append(args, name, urlStr)

	cmd, err := c.Command(ctx, args...)
	if err != nil {
		return nil, err
	}
	if _, err := cmd.Output(); err != nil {
		return nil, err
	}

	// HTTPS URLs parse with the stdlib; anything else (SSH shorthand etc.)
	// goes through the project's git-URL parser.
	var parsed *url.URL
	if strings.HasPrefix(urlStr, "https") {
		parsed, err = url.Parse(urlStr)
	} else {
		parsed, err = ParseURL(urlStr)
	}
	if err != nil {
		return nil, err
	}

	return &Remote{
		Name:     name,
		FetchURL: parsed,
		PushURL:  parsed,
	}, nil
}
// Below are commands that make network calls and need authentication credentials supplied from gh.

// Fetch downloads objects and refs from the remote, authenticating through gh.
func (c *Client) Fetch(ctx context.Context, remote string, refspec string, mods ...CommandModifier) error {
	args := []string{"fetch", remote}
	if refspec != "" {
		args = append(args, refspec)
	}
	cmd, err := c.AuthenticatedCommand(ctx, AllMatchingCredentialsPattern, args...)
	if err != nil {
		return err
	}
	for _, apply := range mods {
		apply(cmd)
	}
	return cmd.Run()
}
// Pull fast-forwards the current branch from the remote, authenticating
// through gh. Remote and branch are only passed when both are non-empty.
func (c *Client) Pull(ctx context.Context, remote, branch string, mods ...CommandModifier) error {
	args := []string{"pull", "--ff-only"}
	if remote != "" && branch != "" {
		args = append(args, remote, branch)
	}
	cmd, err := c.AuthenticatedCommand(ctx, AllMatchingCredentialsPattern, args...)
	if err != nil {
		return err
	}
	for _, apply := range mods {
		apply(cmd)
	}
	return cmd.Run()
}
// Push uploads ref to the remote with --set-upstream, authenticating through gh.
func (c *Client) Push(ctx context.Context, remote string, ref string, mods ...CommandModifier) error {
	cmd, err := c.AuthenticatedCommand(ctx, AllMatchingCredentialsPattern, "push", "--set-upstream", remote, ref)
	if err != nil {
		return err
	}
	for _, apply := range mods {
		apply(cmd)
	}
	return cmd.Run()
}
// Clone clones cloneURL, authenticating through gh, and returns the directory
// the repository was cloned into — either the target passed in args or one
// derived from the URL.
func (c *Client) Clone(ctx context.Context, cloneURL string, args []string, mods ...CommandModifier) (string, error) {
	// Note that even if this is an SSH clone URL, we are setting the pattern anyway.
	// We could write some code to prevent this, but it also doesn't seem harmful.
	pattern, err := CredentialPatternFromGitURL(cloneURL)
	if err != nil {
		return "", err
	}

	cloneArgs, target := parseCloneArgs(args)
	cloneArgs = append(cloneArgs, cloneURL)
	// If the args contain an explicit target, pass it to clone otherwise,
	// parse the URL to determine where git cloned it to so we can return it.
	if target == "" {
		target = path.Base(strings.TrimSuffix(cloneURL, ".git"))
		if slices.Contains(cloneArgs, "--bare") {
			target += ".git"
		}
	} else {
		cloneArgs = append(cloneArgs, target)
	}

	cmd, err := c.AuthenticatedCommand(ctx, pattern, append([]string{"clone"}, cloneArgs...)...)
	if err != nil {
		return "", err
	}
	for _, apply := range mods {
		apply(cmd)
	}
	if err := cmd.Run(); err != nil {
		return "", err
	}
	return target, nil
}
// resolveGitPath locates the git executable on PATH, translating a missing
// binary into a NotInstalled error with a platform-appropriate hint.
func resolveGitPath() (string, error) {
	gitPath, err := safeexec.LookPath("git")
	if err == nil {
		return gitPath, nil
	}
	if errors.Is(err, exec.ErrNotFound) {
		programName := "git"
		if runtime.GOOS == "windows" {
			programName = "Git for Windows"
		}
		return "", &NotInstalled{
			message: fmt.Sprintf("unable to find git executable in PATH; please install %s before retrying", programName),
			err:     err,
		}
	}
	return "", err
}
// isFilesystemPath reports whether p looks like a local path ("." or an
// explicitly relative/absolute path) rather than a remote name.
func isFilesystemPath(p string) bool {
	switch {
	case p == ".":
		return true
	case strings.HasPrefix(p, "./"), strings.HasPrefix(p, "/"):
		return true
	default:
		return false
	}
}
// outputLines splits command output into lines, dropping a single trailing
// newline so clean output does not yield a spurious empty final element.
func outputLines(output []byte) []string {
	text := strings.TrimSuffix(string(output), "\n")
	return strings.Split(text, "\n")
}
// firstLine returns everything before the first newline of output
// (or all of it if there is no newline).
//
// The original used bytes.IndexAny with a one-character set — IndexByte is
// the direct idiom — and converted the whole buffer to a string before
// slicing (`string(output)[0:i]`), allocating a copy of the entire output
// just to keep its first line. Slicing the byte slice first converts only
// the line we return.
func firstLine(output []byte) string {
	if i := bytes.IndexByte(output, '\n'); i >= 0 {
		return string(output[:i])
	}
	return string(output)
}
// parseCloneArgs separates an optional leading target directory from the
// remaining clone flags: the first argument is the target unless it starts
// with "-".
func parseCloneArgs(extraArgs []string) (args []string, target string) {
	args = extraArgs
	if len(args) > 0 && !strings.HasPrefix(args[0], "-") {
		target = args[0]
		args = args[1:]
	}
	return args, target
}
// parseRemotes turns the output lines of `git remote -v` into a RemoteSet.
// Consecutive lines for the same remote name are merged into a single Remote
// with fetch and push URLs recorded separately; this relies on `git remote -v`
// grouping a remote's lines together. Lines that do not match remoteRE, or
// whose URL fails to parse, are skipped.
func parseRemotes(remotesStr []string) RemoteSet {
	remotes := RemoteSet{}
	for _, r := range remotesStr {
		match := remoteRE.FindStringSubmatch(r)
		if match == nil {
			continue
		}
		name := strings.TrimSpace(match[1])
		urlStr := strings.TrimSpace(match[2])
		urlType := strings.TrimSpace(match[3])

		url, err := ParseURL(urlStr)
		if err != nil {
			continue
		}

		// Reuse the last Remote only when this line continues the same name.
		var rem *Remote
		if len(remotes) > 0 {
			rem = remotes[len(remotes)-1]
			if name != rem.Name {
				rem = nil
			}
		}
		if rem == nil {
			rem = &Remote{Name: name}
			remotes = append(remotes, rem)
		}

		switch urlType {
		case "fetch":
			rem.FetchURL = url
		case "push":
			rem.PushURL = url
		}
	}
	return remotes
}
// parseRemoteURLOrName interprets a branch config value as either a remote
// URL (contains ":" and parses) or a remote name (anything that is not a
// filesystem path). Values that are neither yield (nil, "").
func parseRemoteURLOrName(value string) (*url.URL, string) {
	switch {
	case strings.Contains(value, ":"):
		if u, err := ParseURL(value); err == nil {
			return u, ""
		}
	case !isFilesystemPath(value):
		return nil, value
	}
	return nil, ""
}
// populateResolvedRemotes copies `remote.<name>.gh-resolved <value>` config
// lines onto the matching Remote entries in remotes.
func populateResolvedRemotes(remotes RemoteSet, resolved []string) {
	for _, line := range resolved {
		key, value, ok := strings.Cut(line, " ")
		if !ok {
			continue
		}
		keyParts := strings.SplitN(key, ".", 3)
		if len(keyParts) < 2 {
			continue
		}
		name := keyParts[1]
		for _, r := range remotes {
			if r.Name == name {
				r.Resolved = value
				break
			}
		}
	}
}
// globReplacer escapes the glob metacharacters recognized by git's pattern
// matching (e.g. `branch --list`) so user-supplied text matches literally.
var globReplacer = strings.NewReplacer(
	"*", `\*`,
	"?", `\?`,
	"[", `\[`,
	"]", `\]`,
	"{", `\{`,
	"}", `\}`,
)

// escapeGlob returns p with all glob metacharacters backslash-escaped.
func escapeGlob(p string) string {
	return globReplacer.Replace(p)
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/git/errors.go | git/errors.go | package git
import (
"errors"
"fmt"
)
// ErrNotOnAnyBranch indicates that the user is in detached HEAD state.
var ErrNotOnAnyBranch = errors.New("git: not on any branch")

// NotInstalled is returned when no git executable can be found on PATH.
// It wraps the underlying lookup error.
type NotInstalled struct {
	message string
	err     error
}

// Error returns the human-readable installation hint.
func (e *NotInstalled) Error() string {
	return e.message
}

// Unwrap exposes the underlying lookup error for errors.Is/As.
func (e *NotInstalled) Unwrap() error {
	return e.err
}
type GitError struct {
ExitCode int
Stderr string
err error
}
func (ge *GitError) Error() string {
if ge.Stderr == "" {
return fmt.Sprintf("failed to run git: %v", ge.err)
}
return fmt.Sprintf("failed to run git: %s", ge.Stderr)
}
func (ge *GitError) Unwrap() error {
return ge.err
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/git/client_test.go | git/client_test.go | package git
import (
"bytes"
"context"
"encoding/json"
"errors"
"fmt"
"net/url"
"os"
"os/exec"
"path/filepath"
"strconv"
"strings"
"testing"
"github.com/MakeNowJust/heredoc"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// TestClientCommand verifies that Command builds the git argv — including the
// optional `-C <repoDir>` flag — and attaches the client's stdio streams to
// the returned command.
func TestClientCommand(t *testing.T) {
	tests := []struct {
		name     string
		repoDir  string
		gitPath  string
		wantExe  string
		wantArgs []string
	}{
		{
			name:     "creates command",
			gitPath:  "path/to/git",
			wantExe:  "path/to/git",
			wantArgs: []string{"path/to/git", "ref-log"},
		},
		{
			name:     "adds repo directory configuration",
			repoDir:  "path/to/repo",
			gitPath:  "path/to/git",
			wantExe:  "path/to/git",
			wantArgs: []string{"path/to/git", "-C", "path/to/repo", "ref-log"},
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			in, out, errOut := &bytes.Buffer{}, &bytes.Buffer{}, &bytes.Buffer{}
			client := Client{
				Stdin:   in,
				Stdout:  out,
				Stderr:  errOut,
				RepoDir: tt.repoDir,
				GitPath: tt.gitPath,
			}
			cmd, err := client.Command(context.Background(), "ref-log")
			assert.NoError(t, err)
			assert.Equal(t, tt.wantExe, cmd.Path)
			assert.Equal(t, tt.wantArgs, cmd.Args)
			assert.Equal(t, in, cmd.Stdin)
			assert.Equal(t, out, cmd.Stdout)
			assert.Equal(t, errOut, cmd.Stderr)
		})
	}
}
// TestClientAuthenticatedCommand verifies the credential-helper `-c` flags
// produced for each CredentialPattern variant, the fallback to "gh" when
// GhPath is unset, and rejection of an implicit empty pattern.
func TestClientAuthenticatedCommand(t *testing.T) {
	tests := []struct {
		name     string
		path     string
		pattern  CredentialPattern
		wantArgs []string
		wantErr  error
	}{
		{
			name:     "when credential pattern allows for anything, credential helper matches everything",
			path:     "path/to/gh",
			pattern:  AllMatchingCredentialsPattern,
			wantArgs: []string{"path/to/git", "-c", "credential.helper=", "-c", `credential.helper=!"path/to/gh" auth git-credential`, "fetch"},
		},
		{
			name:     "when credential pattern is set, credential helper only matches that pattern",
			path:     "path/to/gh",
			pattern:  CredentialPattern{pattern: "https://github.com"},
			wantArgs: []string{"path/to/git", "-c", "credential.https://github.com.helper=", "-c", `credential.https://github.com.helper=!"path/to/gh" auth git-credential`, "fetch"},
		},
		{
			name:     "fallback when GhPath is not set",
			pattern:  AllMatchingCredentialsPattern,
			wantArgs: []string{"path/to/git", "-c", "credential.helper=", "-c", `credential.helper=!"gh" auth git-credential`, "fetch"},
		},
		{
			name:    "errors when attempting to use an empty pattern that isn't marked all matching",
			pattern: CredentialPattern{allMatching: false, pattern: ""},
			wantErr: fmt.Errorf("empty credential pattern is not allowed unless provided explicitly"),
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			client := Client{
				GhPath:  tt.path,
				GitPath: "path/to/git",
			}
			cmd, err := client.AuthenticatedCommand(context.Background(), tt.pattern, "fetch")
			if tt.wantErr != nil {
				require.Equal(t, tt.wantErr, err)
				return
			}
			require.Equal(t, tt.wantArgs, cmd.Args)
		})
	}
}
// TestClientRemotes appends remote definitions (including gh-resolved keys)
// to a temp repository's .git/config and verifies Remotes returns them
// sorted: resolved remotes first, then by remote name precedence.
func TestClientRemotes(t *testing.T) {
	tempDir := t.TempDir()
	initRepo(t, tempDir)
	gitDir := filepath.Join(tempDir, ".git")
	remoteFile := filepath.Join(gitDir, "config")
	remotes := `
[remote "origin"]
	url = git@example.com:monalisa/origin.git
[remote "test"]
	url = git://github.com/hubot/test.git
	gh-resolved = other
[remote "upstream"]
	url = https://github.com/monalisa/upstream.git
	gh-resolved = base
[remote "github"]
	url = git@github.com:hubot/github.git
`
	f, err := os.OpenFile(remoteFile, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0755)
	assert.NoError(t, err)
	_, err = f.Write([]byte(remotes))
	assert.NoError(t, err)
	err = f.Close()
	assert.NoError(t, err)
	client := Client{
		RepoDir: tempDir,
	}
	rs, err := client.Remotes(context.Background())
	assert.NoError(t, err)
	assert.Equal(t, 4, len(rs))
	assert.Equal(t, "upstream", rs[0].Name)
	assert.Equal(t, "base", rs[0].Resolved)
	assert.Equal(t, "github", rs[1].Name)
	assert.Equal(t, "", rs[1].Resolved)
	assert.Equal(t, "origin", rs[2].Name)
	assert.Equal(t, "", rs[2].Resolved)
	assert.Equal(t, "test", rs[3].Name)
	assert.Equal(t, "other", rs[3].Resolved)
}
// TestClientRemotes_no_resolved_remote covers the same flow without any
// gh-resolved keys: the `git config --get-regexp` exit code 1 must be
// tolerated and remotes still returned in sorted order.
func TestClientRemotes_no_resolved_remote(t *testing.T) {
	tempDir := t.TempDir()
	initRepo(t, tempDir)
	gitDir := filepath.Join(tempDir, ".git")
	remoteFile := filepath.Join(gitDir, "config")
	remotes := `
[remote "origin"]
	url = git@example.com:monalisa/origin.git
[remote "test"]
	url = git://github.com/hubot/test.git
[remote "upstream"]
	url = https://github.com/monalisa/upstream.git
[remote "github"]
	url = git@github.com:hubot/github.git
`
	f, err := os.OpenFile(remoteFile, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0755)
	assert.NoError(t, err)
	_, err = f.Write([]byte(remotes))
	assert.NoError(t, err)
	err = f.Close()
	assert.NoError(t, err)
	client := Client{
		RepoDir: tempDir,
	}
	rs, err := client.Remotes(context.Background())
	assert.NoError(t, err)
	assert.Equal(t, 4, len(rs))
	assert.Equal(t, "upstream", rs[0].Name)
	assert.Equal(t, "github", rs[1].Name)
	assert.Equal(t, "origin", rs[2].Name)
	assert.Equal(t, "", rs[2].Resolved)
	assert.Equal(t, "test", rs[3].Name)
}
// TestParseRemotes exercises parseRemotes against representative
// `git remote -v` output: fetch-only, push-only, differing fetch/push URLs
// for one name, and SSH/HTTPS/git-protocol URL forms.
func TestParseRemotes(t *testing.T) {
	remoteList := []string{
		"mona\tgit@github.com:monalisa/myfork.git (fetch)",
		"origin\thttps://github.com/monalisa/octo-cat.git (fetch)",
		"origin\thttps://github.com/monalisa/octo-cat-push.git (push)",
		"upstream\thttps://example.com/nowhere.git (fetch)",
		"upstream\thttps://github.com/hubot/tools (push)",
		"zardoz\thttps://example.com/zed.git (push)",
		"koke\tgit://github.com/koke/grit.git (fetch)",
		"koke\tgit://github.com/koke/grit.git (push)",
	}
	r := parseRemotes(remoteList)
	assert.Equal(t, 5, len(r))
	assert.Equal(t, "mona", r[0].Name)
	assert.Equal(t, "ssh://git@github.com/monalisa/myfork.git", r[0].FetchURL.String())
	assert.Nil(t, r[0].PushURL)
	assert.Equal(t, "origin", r[1].Name)
	assert.Equal(t, "/monalisa/octo-cat.git", r[1].FetchURL.Path)
	assert.Equal(t, "/monalisa/octo-cat-push.git", r[1].PushURL.Path)
	assert.Equal(t, "upstream", r[2].Name)
	assert.Equal(t, "example.com", r[2].FetchURL.Host)
	assert.Equal(t, "github.com", r[2].PushURL.Host)
	assert.Equal(t, "zardoz", r[3].Name)
	assert.Nil(t, r[3].FetchURL)
	assert.Equal(t, "https://example.com/zed.git", r[3].PushURL.String())
	assert.Equal(t, "koke", r[4].Name)
	assert.Equal(t, "/koke/grit.git", r[4].FetchURL.Path)
	assert.Equal(t, "/koke/grit.git", r[4].PushURL.Path)
}
// TestClientUpdateRemoteURL checks the argv built for `remote set-url` using
// a stubbed command context, and that a git failure surfaces as a GitError
// message.
func TestClientUpdateRemoteURL(t *testing.T) {
	tests := []struct {
		name          string
		cmdExitStatus int
		cmdStdout     string
		cmdStderr     string
		wantCmdArgs   string
		wantErrorMsg  string
	}{
		{
			name:        "update remote url",
			wantCmdArgs: `path/to/git remote set-url test https://test.com`,
		},
		{
			name:          "git error",
			cmdExitStatus: 1,
			cmdStderr:     "git error message",
			wantCmdArgs:   `path/to/git remote set-url test https://test.com`,
			wantErrorMsg:  "failed to run git: git error message",
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			cmd, cmdCtx := createCommandContext(t, tt.cmdExitStatus, tt.cmdStdout, tt.cmdStderr)
			client := Client{
				GitPath:        "path/to/git",
				commandContext: cmdCtx,
			}
			err := client.UpdateRemoteURL(context.Background(), "test", "https://test.com")
			assert.Equal(t, tt.wantCmdArgs, strings.Join(cmd.Args[3:], " "))
			if tt.wantErrorMsg == "" {
				assert.NoError(t, err)
			} else {
				assert.EqualError(t, err, tt.wantErrorMsg)
			}
		})
	}
}
// TestClientSetRemoteResolution checks the argv built for
// `config --add remote.<name>.gh-resolved`, and GitError propagation on
// failure.
func TestClientSetRemoteResolution(t *testing.T) {
	tests := []struct {
		name          string
		cmdExitStatus int
		cmdStdout     string
		cmdStderr     string
		wantCmdArgs   string
		wantErrorMsg  string
	}{
		{
			name:        "set remote resolution",
			wantCmdArgs: `path/to/git config --add remote.origin.gh-resolved base`,
		},
		{
			name:          "git error",
			cmdExitStatus: 1,
			cmdStderr:     "git error message",
			wantCmdArgs:   `path/to/git config --add remote.origin.gh-resolved base`,
			wantErrorMsg:  "failed to run git: git error message",
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			cmd, cmdCtx := createCommandContext(t, tt.cmdExitStatus, tt.cmdStdout, tt.cmdStderr)
			client := Client{
				GitPath:        "path/to/git",
				commandContext: cmdCtx,
			}
			err := client.SetRemoteResolution(context.Background(), "origin", "base")
			assert.Equal(t, tt.wantCmdArgs, strings.Join(cmd.Args[3:], " "))
			if tt.wantErrorMsg == "" {
				assert.NoError(t, err)
			} else {
				assert.EqualError(t, err, tt.wantErrorMsg)
			}
		})
	}
}
// TestClientCurrentBranch covers symbolic-ref parsing: bare branch names,
// fully-qualified refs (prefix stripped), unusual characters preserved, and
// the detached-HEAD error path (exit 1, empty stderr).
func TestClientCurrentBranch(t *testing.T) {
	tests := []struct {
		name          string
		cmdExitStatus int
		cmdStdout     string
		cmdStderr     string
		wantCmdArgs   string
		wantErrorMsg  string
		wantBranch    string
	}{
		{
			name:        "branch name",
			cmdStdout:   "branch-name\n",
			wantCmdArgs: `path/to/git symbolic-ref --quiet HEAD`,
			wantBranch:  "branch-name",
		},
		{
			name:        "ref",
			cmdStdout:   "refs/heads/branch-name\n",
			wantCmdArgs: `path/to/git symbolic-ref --quiet HEAD`,
			wantBranch:  "branch-name",
		},
		{
			name:        "escaped ref",
			cmdStdout:   "refs/heads/branch\u00A0with\u00A0non\u00A0breaking\u00A0space\n",
			wantCmdArgs: `path/to/git symbolic-ref --quiet HEAD`,
			wantBranch:  "branch\u00A0with\u00A0non\u00A0breaking\u00A0space",
		},
		{
			name:          "detached head",
			cmdExitStatus: 1,
			wantCmdArgs:   `path/to/git symbolic-ref --quiet HEAD`,
			wantErrorMsg:  "failed to run git: not on any branch",
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			cmd, cmdCtx := createCommandContext(t, tt.cmdExitStatus, tt.cmdStdout, tt.cmdStderr)
			client := Client{
				GitPath:        "path/to/git",
				commandContext: cmdCtx,
			}
			branch, err := client.CurrentBranch(context.Background())
			assert.Equal(t, tt.wantCmdArgs, strings.Join(cmd.Args[3:], " "))
			if tt.wantErrorMsg == "" {
				assert.NoError(t, err)
			} else {
				assert.EqualError(t, err, tt.wantErrorMsg)
			}
			assert.Equal(t, tt.wantBranch, branch)
		})
	}
}
// TestClientShowRefs verifies that ShowRefs still returns the refs that were
// successfully resolved on stdout even when `git show-ref --verify` exits
// non-zero because one of the requested refs is invalid.
func TestClientShowRefs(t *testing.T) {
	tests := []struct {
		name          string
		cmdExitStatus int
		cmdStdout     string
		cmdStderr     string
		wantCmdArgs   string
		wantRefs      []Ref
		wantErrorMsg  string
	}{
		{
			name:          "show refs with one valid ref and one invalid ref",
			cmdExitStatus: 128,
			cmdStdout:     "9ea76237a557015e73446d33268569a114c0649c refs/heads/valid",
			cmdStderr:     "fatal: 'refs/heads/invalid' - not a valid ref",
			wantCmdArgs:   `path/to/git show-ref --verify -- refs/heads/valid refs/heads/invalid`,
			wantRefs: []Ref{{
				Hash: "9ea76237a557015e73446d33268569a114c0649c",
				Name: "refs/heads/valid",
			}},
			wantErrorMsg: "failed to run git: fatal: 'refs/heads/invalid' - not a valid ref",
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			cmd, cmdCtx := createCommandContext(t, tt.cmdExitStatus, tt.cmdStdout, tt.cmdStderr)
			client := Client{
				GitPath:        "path/to/git",
				commandContext: cmdCtx,
			}
			refs, err := client.ShowRefs(context.Background(), []string{"refs/heads/valid", "refs/heads/invalid"})
			assert.Equal(t, tt.wantCmdArgs, strings.Join(cmd.Args[3:], " "))
			// Both the error and the partial result are asserted: the single
			// table case always expects an error alongside the parsed refs.
			assert.EqualError(t, err, tt.wantErrorMsg)
			assert.Equal(t, tt.wantRefs, refs)
		})
	}
}
// TestClientConfig verifies Config's three outcomes for `git config <key>`:
// success returns the value, exit status 1 is mapped to an "unknown config
// key" error, and any other failure propagates git's stderr.
func TestClientConfig(t *testing.T) {
	tests := []struct {
		name          string
		cmdExitStatus int
		cmdStdout     string
		cmdStderr     string
		wantCmdArgs   string
		wantOut       string
		wantErrorMsg  string
	}{
		{
			name:        "get config key",
			cmdStdout:   "test",
			wantCmdArgs: `path/to/git config credential.helper`,
			wantOut:     "test",
		},
		{
			// git exits 1 when the key is unset; stderr is ignored in favor
			// of a friendlier message naming the key.
			name:          "get unknown config key",
			cmdExitStatus: 1,
			cmdStderr:     "git error message",
			wantCmdArgs:   `path/to/git config credential.helper`,
			wantErrorMsg:  "failed to run git: unknown config key credential.helper",
		},
		{
			name:          "git error",
			cmdExitStatus: 2,
			cmdStderr:     "git error message",
			wantCmdArgs:   `path/to/git config credential.helper`,
			wantErrorMsg:  "failed to run git: git error message",
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			cmd, cmdCtx := createCommandContext(t, tt.cmdExitStatus, tt.cmdStdout, tt.cmdStderr)
			client := Client{
				GitPath:        "path/to/git",
				commandContext: cmdCtx,
			}
			out, err := client.Config(context.Background(), "credential.helper")
			assert.Equal(t, tt.wantCmdArgs, strings.Join(cmd.Args[3:], " "))
			if tt.wantErrorMsg == "" {
				assert.NoError(t, err)
			} else {
				assert.EqualError(t, err, tt.wantErrorMsg)
			}
			assert.Equal(t, tt.wantOut, out)
		})
	}
}
// TestClientUncommittedChangeCount verifies that UncommittedChangeCount counts
// lines of `git status --porcelain` output, including untracked ("??") files.
func TestClientUncommittedChangeCount(t *testing.T) {
	tests := []struct {
		name            string
		cmdExitStatus   int
		cmdStdout       string
		cmdStderr       string
		wantCmdArgs     string
		wantChangeCount int
	}{
		{
			name:            "no changes",
			wantCmdArgs:     `path/to/git status --porcelain`,
			wantChangeCount: 0,
		},
		{
			name:            "one change",
			cmdStdout:       " M poem.txt",
			wantCmdArgs:     `path/to/git status --porcelain`,
			wantChangeCount: 1,
		},
		{
			name:            "untracked file",
			cmdStdout:       " M poem.txt\n?? new.txt",
			wantCmdArgs:     `path/to/git status --porcelain`,
			wantChangeCount: 2,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			cmd, cmdCtx := createCommandContext(t, tt.cmdExitStatus, tt.cmdStdout, tt.cmdStderr)
			client := Client{
				GitPath:        "path/to/git",
				commandContext: cmdCtx,
			}
			ucc, err := client.UncommittedChangeCount(context.Background())
			assert.Equal(t, tt.wantCmdArgs, strings.Join(cmd.Args[3:], " "))
			assert.NoError(t, err)
			assert.Equal(t, tt.wantChangeCount, ucc)
		})
	}
}
// stubbedCommit is one fake commit emitted by the commits helper process.
type stubbedCommit struct {
	Sha   string
	Title string
	Body  string
}

// stubbedCommitsCommandData is JSON-serialized into the helper process's
// environment to script its stdout/stderr and exit status.
type stubbedCommitsCommandData struct {
	ExitStatus int
	ErrMsg     string
	Commits    []stubbedCommit
}
// TestClientCommits verifies that Commits parses the NUL-delimited
// `git log --pretty=format:%H%x00%s%x00%b%x00` output into Commit values,
// preserving multi-line bodies, and that it errors when the range is empty
// or when git itself fails.
func TestClientCommits(t *testing.T) {
	tests := []struct {
		name         string
		testData     stubbedCommitsCommandData
		wantCmdArgs  string
		wantCommits  []*Commit
		wantErrorMsg string
	}{
		{
			name: "single commit no body",
			testData: stubbedCommitsCommandData{
				Commits: []stubbedCommit{
					{
						Sha:   "6a6872b918c601a0e730710ad8473938a7516d30",
						Title: "testing testability test",
						Body:  "",
					},
				},
			},
			wantCmdArgs: `path/to/git -c log.ShowSignature=false log --pretty=format:%H%x00%s%x00%b%x00 --cherry SHA1...SHA2`,
			wantCommits: []*Commit{{
				Sha:   "6a6872b918c601a0e730710ad8473938a7516d30",
				Title: "testing testability test",
			}},
		},
		{
			name: "single commit with body",
			testData: stubbedCommitsCommandData{
				Commits: []stubbedCommit{
					{
						Sha:   "6a6872b918c601a0e730710ad8473938a7516d30",
						Title: "testing testability test",
						Body:  "This is the body",
					},
				},
			},
			wantCmdArgs: `path/to/git -c log.ShowSignature=false log --pretty=format:%H%x00%s%x00%b%x00 --cherry SHA1...SHA2`,
			wantCommits: []*Commit{{
				Sha:   "6a6872b918c601a0e730710ad8473938a7516d30",
				Title: "testing testability test",
				Body:  "This is the body",
			}},
		},
		{
			name: "multiple commits with bodies",
			testData: stubbedCommitsCommandData{
				Commits: []stubbedCommit{
					{
						Sha:   "6a6872b918c601a0e730710ad8473938a7516d30",
						Title: "testing testability test",
						Body:  "This is the body",
					},
					{
						Sha:   "7a6872b918c601a0e730710ad8473938a7516d31",
						Title: "testing testability test 2",
						Body:  "This is the body 2",
					},
				},
			},
			wantCmdArgs: `path/to/git -c log.ShowSignature=false log --pretty=format:%H%x00%s%x00%b%x00 --cherry SHA1...SHA2`,
			wantCommits: []*Commit{
				{
					Sha:   "6a6872b918c601a0e730710ad8473938a7516d30",
					Title: "testing testability test",
					Body:  "This is the body",
				},
				{
					Sha:   "7a6872b918c601a0e730710ad8473938a7516d31",
					Title: "testing testability test 2",
					Body:  "This is the body 2",
				},
			},
		},
		{
			name: "multiple commits mixed bodies",
			testData: stubbedCommitsCommandData{
				Commits: []stubbedCommit{
					{
						Sha:   "6a6872b918c601a0e730710ad8473938a7516d30",
						Title: "testing testability test",
					},
					{
						Sha:   "7a6872b918c601a0e730710ad8473938a7516d31",
						Title: "testing testability test 2",
						Body:  "This is the body 2",
					},
				},
			},
			wantCmdArgs: `path/to/git -c log.ShowSignature=false log --pretty=format:%H%x00%s%x00%b%x00 --cherry SHA1...SHA2`,
			wantCommits: []*Commit{
				{
					Sha:   "6a6872b918c601a0e730710ad8473938a7516d30",
					Title: "testing testability test",
				},
				{
					Sha:   "7a6872b918c601a0e730710ad8473938a7516d31",
					Title: "testing testability test 2",
					Body:  "This is the body 2",
				},
			},
		},
		{
			// Newlines inside a body must not be confused with the
			// newline that separates commits in the log output.
			name: "multiple commits newlines in bodies",
			testData: stubbedCommitsCommandData{
				Commits: []stubbedCommit{
					{
						Sha:   "6a6872b918c601a0e730710ad8473938a7516d30",
						Title: "testing testability test",
						Body:  "This is the body\nwith a newline",
					},
					{
						Sha:   "7a6872b918c601a0e730710ad8473938a7516d31",
						Title: "testing testability test 2",
						Body:  "This is the body 2",
					},
				},
			},
			wantCmdArgs: `path/to/git -c log.ShowSignature=false log --pretty=format:%H%x00%s%x00%b%x00 --cherry SHA1...SHA2`,
			wantCommits: []*Commit{
				{
					Sha:   "6a6872b918c601a0e730710ad8473938a7516d30",
					Title: "testing testability test",
					Body:  "This is the body\nwith a newline",
				},
				{
					Sha:   "7a6872b918c601a0e730710ad8473938a7516d31",
					Title: "testing testability test 2",
					Body:  "This is the body 2",
				},
			},
		},
		{
			name: "no commits between SHAs",
			testData: stubbedCommitsCommandData{
				Commits: []stubbedCommit{},
			},
			wantCmdArgs:  `path/to/git -c log.ShowSignature=false log --pretty=format:%H%x00%s%x00%b%x00 --cherry SHA1...SHA2`,
			wantErrorMsg: "could not find any commits between SHA1 and SHA2",
		},
		{
			name: "git error",
			testData: stubbedCommitsCommandData{
				ErrMsg:     "git error message",
				ExitStatus: 1,
			},
			wantCmdArgs:  `path/to/git -c log.ShowSignature=false log --pretty=format:%H%x00%s%x00%b%x00 --cherry SHA1...SHA2`,
			wantErrorMsg: "failed to run git: git error message",
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			cmd, cmdCtx := createCommitsCommandContext(t, tt.testData)
			client := Client{
				GitPath:        "path/to/git",
				commandContext: cmdCtx,
			}
			commits, err := client.Commits(context.Background(), "SHA1", "SHA2")
			require.Equal(t, tt.wantCmdArgs, strings.Join(cmd.Args[3:], " "))
			if tt.wantErrorMsg != "" {
				require.EqualError(t, err, tt.wantErrorMsg)
			} else {
				require.NoError(t, err)
			}
			require.Equal(t, tt.wantCommits, commits)
		})
	}
}
// TestCommitsHelperProcess is not a real test: it is re-executed as a
// subprocess by createCommitsCommandContext to impersonate `git log`. It
// replays the commits encoded in GH_COMMITS_TEST_DATA as NUL-delimited
// records, or the scripted error message, then exits with the given status.
func TestCommitsHelperProcess(t *testing.T) {
	// Bail out immediately when run as part of the normal test suite.
	if os.Getenv("GH_WANT_HELPER_PROCESS") != "1" {
		return
	}
	var td stubbedCommitsCommandData
	_ = json.Unmarshal([]byte(os.Getenv("GH_COMMITS_TEST_DATA")), &td)
	if td.ErrMsg != "" {
		fmt.Fprint(os.Stderr, td.ErrMsg)
	} else {
		var sb strings.Builder
		for _, commit := range td.Commits {
			// Mirrors --pretty=format:%H%x00%s%x00%b%x00 output: each field
			// is NUL-terminated and each record ends with a newline.
			sb.WriteString(commit.Sha)
			sb.WriteString("\u0000")
			sb.WriteString(commit.Title)
			sb.WriteString("\u0000")
			sb.WriteString(commit.Body)
			sb.WriteString("\u0000")
			sb.WriteString("\n")
		}
		fmt.Fprint(os.Stdout, sb.String())
	}
	os.Exit(td.ExitStatus)
}
// createCommitsCommandContext builds a commandCtx that reroutes any git
// invocation to this test binary's TestCommitsHelperProcess, passing the
// scripted testData through the environment. It returns the underlying
// *exec.Cmd so callers can inspect the arguments that were appended.
func createCommitsCommandContext(t *testing.T, testData stubbedCommitsCommandData) (*exec.Cmd, commandCtx) {
	t.Helper()
	b, err := json.Marshal(testData)
	require.NoError(t, err)
	// "--" separates the go-test flags from the fake git arguments.
	cmd := exec.CommandContext(context.Background(), os.Args[0], "-test.run=TestCommitsHelperProcess", "--")
	cmd.Env = []string{
		"GH_WANT_HELPER_PROCESS=1",
		"GH_COMMITS_TEST_DATA=" + string(b),
	}
	return cmd, func(ctx context.Context, exe string, args ...string) *exec.Cmd {
		cmd.Args = append(cmd.Args, exe)
		cmd.Args = append(cmd.Args, args...)
		return cmd
	}
}
// TestClientLastCommit reads the checked-in fixture repository and verifies
// LastCommit returns its HEAD commit's SHA and title.
func TestClientLastCommit(t *testing.T) {
	client := Client{
		RepoDir: "./fixtures/simple.git",
	}
	c, err := client.LastCommit(context.Background())
	assert.NoError(t, err)
	assert.Equal(t, "6f1a2405cace1633d89a79c74c65f22fe78f9659", c.Sha)
	assert.Equal(t, "Second commit", c.Title)
}
// TestClientCommitBody verifies CommitBody returns the body (trailing newline
// included) of a known commit in the fixture repository.
func TestClientCommitBody(t *testing.T) {
	client := Client{
		RepoDir: "./fixtures/simple.git",
	}
	body, err := client.CommitBody(context.Background(), "6f1a2405cace1633d89a79c74c65f22fe78f9659")
	assert.NoError(t, err)
	assert.Equal(t, "I'm starting to get the hang of things\n", body)
}
// TestClientReadBranchConfig verifies ReadBranchConfig's handling of
// `git config --get-regexp` over the branch.<name>.* keys: an empty result
// (exit 1) yields a zero BranchConfig with no error, other failures surface
// a *GitError, and matching lines are parsed into the config fields.
func TestClientReadBranchConfig(t *testing.T) {
	tests := []struct {
		name             string
		cmds             mockedCommands
		branch           string
		wantBranchConfig BranchConfig
		wantError        *GitError
	}{
		{
			name: "when the git config has no (remote|merge|pushremote|gh-merge-base) keys, it should return an empty BranchConfig and no error",
			cmds: mockedCommands{
				`path/to/git config --get-regexp ^branch\.trunk\.(remote|merge|pushremote|gh-merge-base)$`: {
					ExitStatus: 1,
				},
			},
			branch:           "trunk",
			wantBranchConfig: BranchConfig{},
			wantError:        nil,
		},
		{
			name: "when the git fails to read the config, it should return an empty BranchConfig and the error",
			cmds: mockedCommands{
				`path/to/git config --get-regexp ^branch\.trunk\.(remote|merge|pushremote|gh-merge-base)$`: {
					ExitStatus: 2,
					Stderr:     "git error",
				},
			},
			branch:           "trunk",
			wantBranchConfig: BranchConfig{},
			wantError: &GitError{
				ExitCode: 2,
				Stderr:   "git error",
			},
		},
		{
			name: "when the config is read, it should return the correct BranchConfig",
			cmds: mockedCommands{
				`path/to/git config --get-regexp ^branch\.trunk\.(remote|merge|pushremote|gh-merge-base)$`: {
					Stdout: heredoc.Doc(`
						branch.trunk.remote upstream
						branch.trunk.merge refs/heads/trunk
						branch.trunk.pushremote origin
						branch.trunk.gh-merge-base gh-merge-base
					`),
				},
			},
			branch: "trunk",
			wantBranchConfig: BranchConfig{
				RemoteName:     "upstream",
				PushRemoteName: "origin",
				MergeRef:       "refs/heads/trunk",
				MergeBase:      "gh-merge-base",
			},
			wantError: nil,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			cmdCtx := createMockedCommandContext(t, tt.cmds)
			client := Client{
				GitPath:        "path/to/git",
				commandContext: cmdCtx,
			}
			branchConfig, err := client.ReadBranchConfig(context.Background(), tt.branch)
			if tt.wantError != nil {
				// Only exit code and stderr are compared; the wrapped error
				// value itself is implementation detail.
				var gitError *GitError
				require.ErrorAs(t, err, &gitError)
				assert.Equal(t, tt.wantError.ExitCode, gitError.ExitCode)
				assert.Equal(t, tt.wantError.Stderr, gitError.Stderr)
			} else {
				require.NoError(t, err)
			}
			assert.Equal(t, tt.wantBranchConfig, branchConfig)
		})
	}
}
// Test_parseBranchConfig verifies that parseBranchConfig maps individual
// "branch.<name>.<key> <value>" lines onto BranchConfig fields, including
// distinguishing remote names from remote URLs for remote/pushremote.
func Test_parseBranchConfig(t *testing.T) {
	tests := []struct {
		name             string
		configLines      []string
		wantBranchConfig BranchConfig
	}{
		{
			name:        "remote branch",
			configLines: []string{"branch.trunk.remote origin"},
			wantBranchConfig: BranchConfig{
				RemoteName: "origin",
			},
		},
		{
			name:        "merge ref",
			configLines: []string{"branch.trunk.merge refs/heads/trunk"},
			wantBranchConfig: BranchConfig{
				MergeRef: "refs/heads/trunk",
			},
		},
		{
			name:        "merge base",
			configLines: []string{"branch.trunk.gh-merge-base gh-merge-base"},
			wantBranchConfig: BranchConfig{
				MergeBase: "gh-merge-base",
			},
		},
		{
			name:        "pushremote",
			configLines: []string{"branch.trunk.pushremote pushremote"},
			wantBranchConfig: BranchConfig{
				PushRemoteName: "pushremote",
			},
		},
		{
			name: "remote and pushremote are specified by name",
			configLines: []string{
				"branch.trunk.remote upstream",
				"branch.trunk.pushremote origin",
			},
			wantBranchConfig: BranchConfig{
				RemoteName:     "upstream",
				PushRemoteName: "origin",
			},
		},
		{
			// scp-like values are parsed into ssh URLs instead of names.
			name: "remote and pushremote are specified by url",
			configLines: []string{
				"branch.trunk.remote git@github.com:UPSTREAMOWNER/REPO.git",
				"branch.trunk.pushremote git@github.com:ORIGINOWNER/REPO.git",
			},
			wantBranchConfig: BranchConfig{
				RemoteURL: &url.URL{
					Scheme: "ssh",
					User:   url.User("git"),
					Host:   "github.com",
					Path:   "/UPSTREAMOWNER/REPO.git",
				},
				PushRemoteURL: &url.URL{
					Scheme: "ssh",
					User:   url.User("git"),
					Host:   "github.com",
					Path:   "/ORIGINOWNER/REPO.git",
				},
			},
		},
		{
			name: "remote, pushremote, gh-merge-base, and merge ref all specified",
			configLines: []string{
				"branch.trunk.remote remote",
				"branch.trunk.pushremote pushremote",
				"branch.trunk.gh-merge-base gh-merge-base",
				"branch.trunk.merge refs/heads/trunk",
			},
			wantBranchConfig: BranchConfig{
				RemoteName:     "remote",
				PushRemoteName: "pushremote",
				MergeBase:      "gh-merge-base",
				MergeRef:       "refs/heads/trunk",
			},
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			branchConfig := parseBranchConfig(tt.configLines)
			assert.Equalf(t, tt.wantBranchConfig.RemoteName, branchConfig.RemoteName, "unexpected RemoteName")
			assert.Equalf(t, tt.wantBranchConfig.MergeRef, branchConfig.MergeRef, "unexpected MergeRef")
			assert.Equalf(t, tt.wantBranchConfig.MergeBase, branchConfig.MergeBase, "unexpected MergeBase")
			assert.Equalf(t, tt.wantBranchConfig.PushRemoteName, branchConfig.PushRemoteName, "unexpected PushRemoteName")
			// URL fields are compared by string form, and only when expected,
			// to avoid nil-pointer dereferences on absent URLs.
			if tt.wantBranchConfig.RemoteURL != nil {
				assert.Equalf(t, tt.wantBranchConfig.RemoteURL.String(), branchConfig.RemoteURL.String(), "unexpected RemoteURL")
			}
			if tt.wantBranchConfig.PushRemoteURL != nil {
				assert.Equalf(t, tt.wantBranchConfig.PushRemoteURL.String(), branchConfig.PushRemoteURL.String(), "unexpected PushRemoteURL")
			}
		})
	}
}
// Test_parseRemoteURLOrName verifies that parseRemoteURLOrName classifies a
// git remote value as either a parsed URL, a bare remote name, or neither
// (empty value or a filesystem path).
func Test_parseRemoteURLOrName(t *testing.T) {
	tests := []struct {
		name           string
		value          string
		wantRemoteURL  *url.URL
		wantRemoteName string
	}{
		{
			name:           "empty value",
			value:          "",
			wantRemoteURL:  nil,
			wantRemoteName: "",
		},
		{
			name:  "remote URL",
			value: "git@github.com:foo/bar.git",
			wantRemoteURL: &url.URL{
				Scheme: "ssh",
				User:   url.User("git"),
				Host:   "github.com",
				Path:   "/foo/bar.git",
			},
			wantRemoteName: "",
		},
		{
			name:           "remote name",
			value:          "origin",
			wantRemoteURL:  nil,
			wantRemoteName: "origin",
		},
		{
			// A local path is neither a URL nor a usable remote name.
			name:           "remote name is from filesystem",
			value:          "./path/to/repo",
			wantRemoteURL:  nil,
			wantRemoteName: "",
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			remoteURL, remoteName := parseRemoteURLOrName(tt.value)
			assert.Equal(t, tt.wantRemoteURL, remoteURL)
			assert.Equal(t, tt.wantRemoteName, remoteName)
		})
	}
}
// TestClientPushDefault verifies PushDefault's reading of `git config
// push.default`: an unset key (exit 1) falls back to PushDefaultSimple,
// a set value is returned as-is, and other git failures become *GitError.
func TestClientPushDefault(t *testing.T) {
	tests := []struct {
		name            string
		commandResult   commandResult
		wantPushDefault PushDefault
		wantError       *GitError
	}{
		{
			name: "push default is not set",
			commandResult: commandResult{
				ExitStatus: 1,
				Stderr:     "error: key does not contain a section: remote.pushDefault",
			},
			wantPushDefault: PushDefaultSimple,
			wantError:       nil,
		},
		{
			name: "push default is set to current",
			commandResult: commandResult{
				ExitStatus: 0,
				Stdout:     "current",
			},
			wantPushDefault: PushDefaultCurrent,
			wantError:       nil,
		},
		{
			name: "push default errors",
			commandResult: commandResult{
				ExitStatus: 128,
				Stderr:     "fatal: git error",
			},
			wantPushDefault: "",
			wantError: &GitError{
				ExitCode: 128,
				Stderr:   "fatal: git error",
			},
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			cmdCtx := createMockedCommandContext(t, mockedCommands{
				`path/to/git config push.default`: tt.commandResult,
			},
			)
			client := Client{
				GitPath:        "path/to/git",
				commandContext: cmdCtx,
			}
			pushDefault, err := client.PushDefault(context.Background())
			if tt.wantError != nil {
				var gitError *GitError
				require.ErrorAs(t, err, &gitError)
				assert.Equal(t, tt.wantError.ExitCode, gitError.ExitCode)
				assert.Equal(t, tt.wantError.Stderr, gitError.Stderr)
			} else {
				require.NoError(t, err)
			}
			assert.Equal(t, tt.wantPushDefault, pushDefault)
		})
	}
}
// TestClientRemotePushDefault verifies RemotePushDefault's reading of
// `git config remote.pushDefault`: unset (exit 1) yields an empty string
// with no error, a set value is returned, and other failures surface a
// *GitError with git's exit code and stderr.
func TestClientRemotePushDefault(t *testing.T) {
	tests := []struct {
		name                  string
		commandResult         commandResult
		wantRemotePushDefault string
		wantError             *GitError
	}{
		{
			name: "remote.pushDefault is not set",
			commandResult: commandResult{
				ExitStatus: 1,
				Stderr:     "error: key does not contain a section: remote.pushDefault",
			},
			wantRemotePushDefault: "",
			wantError:             nil,
		},
		{
			name: "remote.pushDefault is set to origin",
			commandResult: commandResult{
				ExitStatus: 0,
				Stdout:     "origin",
			},
			wantRemotePushDefault: "origin",
			wantError:             nil,
		},
		{
			name: "remote.pushDefault errors",
			commandResult: commandResult{
				ExitStatus: 128,
				Stderr:     "fatal: git error",
			},
			wantRemotePushDefault: "",
			wantError: &GitError{
				ExitCode: 128,
				Stderr:   "fatal: git error",
			},
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			cmdCtx := createMockedCommandContext(t, mockedCommands{
				`path/to/git config remote.pushDefault`: tt.commandResult,
			},
			)
			client := Client{
				GitPath:        "path/to/git",
				commandContext: cmdCtx,
			}
			pushDefault, err := client.RemotePushDefault(context.Background())
			if tt.wantError != nil {
				var gitError *GitError
				require.ErrorAs(t, err, &gitError)
				assert.Equal(t, tt.wantError.ExitCode, gitError.ExitCode)
				assert.Equal(t, tt.wantError.Stderr, gitError.Stderr)
			} else {
				require.NoError(t, err)
			}
			assert.Equal(t, tt.wantRemotePushDefault, pushDefault)
		})
	}
}
// TestClientParsePushRevision verifies PushRevision's resolution of the
// @{push} revision: a refs/remotes/<remote>/<branch> result is parsed into a
// RemoteTrackingRef, a git failure propagates as *GitError, and an
// unexpectedly-shaped ref produces a descriptive parse error.
func TestClientParsePushRevision(t *testing.T) {
	tests := []struct {
		name                   string
		branch                 string
		commandResult          commandResult
		wantParsedPushRevision RemoteTrackingRef
		wantError              error
	}{
		{
			name:   "@{push} resolves to refs/remotes/origin/branchName",
			branch: "branchName",
			commandResult: commandResult{
				ExitStatus: 0,
				Stdout:     "refs/remotes/origin/branchName",
			},
			wantParsedPushRevision: RemoteTrackingRef{Remote: "origin", Branch: "branchName"},
		},
		{
			name: "@{push} doesn't resolve",
			commandResult: commandResult{
				ExitStatus: 128,
				Stderr:     "fatal: git error",
			},
			wantParsedPushRevision: RemoteTrackingRef{},
			wantError: &GitError{
				ExitCode: 128,
				Stderr:   "fatal: git error",
			},
		},
		{
			name: "@{push} resolves to something surprising",
			commandResult: commandResult{
				ExitStatus: 0,
				Stdout:     "not/a/valid/remote/ref",
			},
			wantParsedPushRevision: RemoteTrackingRef{},
			wantError:              fmt.Errorf("could not parse push revision: remote tracking branch must have format refs/remotes/<remote>/<branch> but was: not/a/valid/remote/ref"),
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			cmd := fmt.Sprintf("path/to/git rev-parse --symbolic-full-name %s@{push}", tt.branch)
			cmdCtx := createMockedCommandContext(t, mockedCommands{
				args(cmd): tt.commandResult,
			})
			client := Client{
				GitPath:        "path/to/git",
				commandContext: cmdCtx,
			}
			trackingRef, err := client.PushRevision(context.Background(), tt.branch)
			if tt.wantError != nil {
				// GitError expectations are compared field-by-field; other
				// errors are compared by value.
				// NOTE(review): the errors.As check followed by require.ErrorAs
				// re-extracts the same error twice — looks redundant; confirm
				// before simplifying.
				var wantErrorAsGit *GitError
				if errors.As(err, &wantErrorAsGit) {
					var gitError *GitError
					require.ErrorAs(t, err, &gitError)
					assert.Equal(t, wantErrorAsGit.ExitCode, gitError.ExitCode)
					assert.Equal(t, wantErrorAsGit.Stderr, gitError.Stderr)
				} else {
					assert.Equal(t, err, tt.wantError)
				}
			} else {
				require.NoError(t, err)
			}
			assert.Equal(t, tt.wantParsedPushRevision, trackingRef)
		})
	}
}
func TestRemoteTrackingRef(t *testing.T) {
t.Run("parsing", func(t *testing.T) {
t.Parallel()
tests := []struct {
name string
remoteTrackingRef string
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | true |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/git/url.go | git/url.go | package git
import (
"net/url"
"strings"
)
// IsURL reports whether u looks like a git remote URL: either scp-like
// syntax ("git@host:path") or one of the supported URL schemes.
func IsURL(u string) bool {
	if strings.HasPrefix(u, "git@") {
		return true
	}
	return isSupportedProtocol(u)
}
// isSupportedProtocol reports whether u begins with one of the URL schemes
// that this package knows how to normalize.
func isSupportedProtocol(u string) bool {
	supported := []string{"ssh:", "git+ssh:", "git:", "http:", "git+https:", "https:"}
	for _, scheme := range supported {
		if strings.HasPrefix(u, scheme) {
			return true
		}
	}
	return false
}
// isPossibleProtocol reports whether u carries any recognizable URL scheme,
// including ones (ftp, ftps, file) that are not supported for remotes but
// must not be mistaken for scp-like syntax.
func isPossibleProtocol(u string) bool {
	if isSupportedProtocol(u) {
		return true
	}
	for _, scheme := range []string{"ftp:", "ftps:", "file:"} {
		if strings.HasPrefix(u, scheme) {
			return true
		}
	}
	return false
}
// ParseURL normalizes git remote urls: scp-like "host:path" values are
// rewritten as ssh:// URLs, the git+https/git+ssh schemes are collapsed to
// https/ssh, and ssh URLs have a doubled leading path slash and an explicit
// port stripped.
func ParseURL(rawURL string) (*url.URL, error) {
	scpLike := !isPossibleProtocol(rawURL) &&
		strings.ContainsRune(rawURL, ':') &&
		// a backslash suggests a Windows path, not an scp-like remote
		!strings.ContainsRune(rawURL, '\\')
	if scpLike {
		// turn "host:path" into "ssh://host/path"
		rawURL = "ssh://" + strings.Replace(rawURL, ":", "/", 1)
	}

	u, err := url.Parse(rawURL)
	if err != nil {
		return nil, err
	}

	switch u.Scheme {
	case "git+https":
		u.Scheme = "https"
	case "git+ssh":
		u.Scheme = "ssh"
	}

	if u.Scheme != "ssh" {
		return u, nil
	}
	// "git@host:/path" parses with a doubled slash; collapse it to one.
	if strings.HasPrefix(u.Path, "//") {
		u.Path = strings.TrimPrefix(u.Path, "/")
	}
	// drop an explicit port, e.g. "example.com:443" -> "example.com"
	u.Host = strings.TrimSuffix(u.Host, ":"+u.Port())
	return u, nil
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/git/objects.go | git/objects.go | package git
import (
"net/url"
"strings"
)
// RemoteSet is a slice of git remotes.
type RemoteSet []*Remote

// sort.Interface implementation: remotes sort by descending name score, so
// "upstream" comes before "github", then "origin", then everything else.
func (r RemoteSet) Len() int      { return len(r) }
func (r RemoteSet) Swap(i, j int) { r[i], r[j] = r[j], r[i] }
func (r RemoteSet) Less(i, j int) bool {
	return remoteNameSortScore(r[i].Name) > remoteNameSortScore(r[j].Name)
}
// remoteNameSortScore ranks a remote by the conventional priority of its
// name (case-insensitive): "upstream" highest, then "github", then
// "origin"; any other name scores zero.
func remoteNameSortScore(name string) int {
	scores := map[string]int{
		"upstream": 3,
		"github":   2,
		"origin":   1,
	}
	// unknown names fall through to the map's zero value
	return scores[strings.ToLower(name)]
}
// Remote is a parsed git remote.
type Remote struct {
	Name     string   // remote name, e.g. "origin"
	Resolved string   // gh-resolved config value, if any
	FetchURL *url.URL // URL used for fetches
	PushURL  *url.URL // URL used for pushes
}

// String returns the remote's name.
func (r *Remote) String() string {
	return r.Name
}
// NewRemote builds a Remote whose fetch and push URLs are both parsed from
// u. A malformed u leaves the URL fields nil rather than returning an error.
func NewRemote(name string, u string) *Remote {
	parsed, _ := url.Parse(u)
	remote := &Remote{Name: name}
	remote.FetchURL = parsed
	remote.PushURL = parsed
	return remote
}
// Ref represents a git commit reference.
type Ref struct {
	Hash string // commit SHA
	Name string // fully-qualified ref name, e.g. "refs/heads/main"
}

// Commit is a parsed git commit: SHA, subject line, and message body.
type Commit struct {
	Sha   string
	Title string
	Body  string
}

// These are the keys we read from the git branch.<name> config.
type BranchConfig struct {
	RemoteName     string   // .remote if string
	RemoteURL      *url.URL // .remote if url
	MergeRef       string   // .merge
	PushRemoteName string   // .pushremote if string
	PushRemoteURL  *url.URL // .pushremote if url
	// MergeBase is the optional base branch to target in a new PR if `--base` is not specified.
	MergeBase string
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/git/url_test.go | git/url_test.go | package git
import (
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// TestIsURL verifies IsURL's classification of remote strings: scp-like
// values with a "git@" user and all supported schemes are URLs; bare
// host/path strings are not.
func TestIsURL(t *testing.T) {
	tests := []struct {
		name string
		url  string
		want bool
	}{
		{
			name: "scp-like",
			url:  "git@example.com:owner/repo",
			want: true,
		},
		{
			// scp-like syntax is only recognized with an explicit "git@" user.
			name: "scp-like with no user",
			url:  "example.com:owner/repo",
			want: false,
		},
		{
			name: "ssh",
			url:  "ssh://git@example.com/owner/repo",
			want: true,
		},
		{
			name: "git",
			url:  "git://example.com/owner/repo",
			want: true,
		},
		{
			name: "git with extension",
			url:  "git://example.com/owner/repo.git",
			want: true,
		},
		{
			name: "git+ssh",
			url:  "git+ssh://git@example.com/owner/repo.git",
			want: true,
		},
		{
			name: "https",
			url:  "https://example.com/owner/repo.git",
			want: true,
		},
		{
			name: "git+https",
			url:  "git+https://example.com/owner/repo.git",
			want: true,
		},
		{
			name: "no protocol",
			url:  "example.com/owner/repo",
			want: false,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			assert.Equal(t, tt.want, IsURL(tt.url))
		})
	}
}
// TestParseURL verifies ParseURL's normalization across every supported
// input form: plain schemes pass through, git+https/git+ssh collapse to
// https/ssh, scp-like syntax becomes ssh://, ssh ports are stripped
// (including for IPv6 hosts), and unparseable input returns an error.
func TestParseURL(t *testing.T) {
	// Local comparison struct so expectations can be written literally
	// without constructing url.Userinfo values.
	type url struct {
		Scheme string
		User   string
		Host   string
		Path   string
	}
	tests := []struct {
		name    string
		url     string
		want    url
		wantErr bool
	}{
		{
			name: "HTTPS",
			url:  "https://example.com/owner/repo.git",
			want: url{
				Scheme: "https",
				User:   "",
				Host:   "example.com",
				Path:   "/owner/repo.git",
			},
		},
		{
			name: "HTTP",
			url:  "http://example.com/owner/repo.git",
			want: url{
				Scheme: "http",
				User:   "",
				Host:   "example.com",
				Path:   "/owner/repo.git",
			},
		},
		{
			name: "git",
			url:  "git://example.com/owner/repo.git",
			want: url{
				Scheme: "git",
				User:   "",
				Host:   "example.com",
				Path:   "/owner/repo.git",
			},
		},
		{
			name: "ssh",
			url:  "ssh://git@example.com/owner/repo.git",
			want: url{
				Scheme: "ssh",
				User:   "git",
				Host:   "example.com",
				Path:   "/owner/repo.git",
			},
		},
		{
			// explicit ssh port is dropped from the host
			name: "ssh with port",
			url:  "ssh://git@example.com:443/owner/repo.git",
			want: url{
				Scheme: "ssh",
				User:   "git",
				Host:   "example.com",
				Path:   "/owner/repo.git",
			},
		},
		{
			name: "ssh, ipv6",
			url:  "ssh://git@[::1]/owner/repo.git",
			want: url{
				Scheme: "ssh",
				User:   "git",
				Host:   "[::1]",
				Path:   "/owner/repo.git",
			},
		},
		{
			name: "ssh with port, ipv6",
			url:  "ssh://git@[::1]:22/owner/repo.git",
			want: url{
				Scheme: "ssh",
				User:   "git",
				Host:   "[::1]",
				Path:   "/owner/repo.git",
			},
		},
		{
			name: "git+ssh",
			url:  "git+ssh://example.com/owner/repo.git",
			want: url{
				Scheme: "ssh",
				User:   "",
				Host:   "example.com",
				Path:   "/owner/repo.git",
			},
		},
		{
			name: "git+https",
			url:  "git+https://example.com/owner/repo.git",
			want: url{
				Scheme: "https",
				User:   "",
				Host:   "example.com",
				Path:   "/owner/repo.git",
			},
		},
		{
			name: "scp-like",
			url:  "git@example.com:owner/repo.git",
			want: url{
				Scheme: "ssh",
				User:   "git",
				Host:   "example.com",
				Path:   "/owner/repo.git",
			},
		},
		{
			// the doubled slash from "host:/path" is collapsed
			name: "scp-like, leading slash",
			url:  "git@example.com:/owner/repo.git",
			want: url{
				Scheme: "ssh",
				User:   "git",
				Host:   "example.com",
				Path:   "/owner/repo.git",
			},
		},
		{
			name: "file protocol",
			url:  "file:///example.com/owner/repo.git",
			want: url{
				Scheme: "file",
				User:   "",
				Host:   "",
				Path:   "/example.com/owner/repo.git",
			},
		},
		{
			name: "file path",
			url:  "/example.com/owner/repo.git",
			want: url{
				Scheme: "",
				User:   "",
				Host:   "",
				Path:   "/example.com/owner/repo.git",
			},
		},
		{
			// the drive letter is (mis)read as a scheme; documented behavior
			name: "Windows file path",
			url:  "C:\\example.com\\owner\\repo.git",
			want: url{
				Scheme: "c",
				User:   "",
				Host:   "",
				Path:   "",
			},
		},
		{
			name:    "fails to parse",
			url:     "ssh://git@[/tmp/git-repo",
			wantErr: true,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			u, err := ParseURL(tt.url)
			if tt.wantErr {
				require.Error(t, err)
				return
			}
			assert.Equal(t, u.Scheme, tt.want.Scheme)
			assert.Equal(t, u.User.Username(), tt.want.User)
			assert.Equal(t, u.Host, tt.want.Host)
			assert.Equal(t, u.Path, tt.want.Path)
		})
	}
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/git/command.go | git/command.go | package git
import (
"bytes"
"context"
"errors"
"io"
"os/exec"
"github.com/cli/cli/v2/internal/run"
)
// commandCtx mirrors exec.CommandContext's signature so tests can inject a
// fake command constructor.
type commandCtx = func(ctx context.Context, name string, args ...string) *exec.Cmd

// Command wraps exec.Cmd with git-specific error handling (see Run/Output).
type Command struct {
	*exec.Cmd
}
// Run executes the git command, translating any failure into a *GitError
// carrying the captured stderr and the process exit code. Returns nil on
// success.
func (gc *Command) Run() error {
	stderr := &bytes.Buffer{}
	// Capture stderr ourselves only when the caller has not wired a writer;
	// otherwise GitError.Stderr will be empty.
	if gc.Cmd.Stderr == nil {
		gc.Cmd.Stderr = stderr
	}
	// This is a hack in order to not break the hundreds of
	// existing tests that rely on `run.PrepareCmd` to be invoked.
	err := run.PrepareCmd(gc.Cmd).Run()
	if err != nil {
		ge := GitError{err: err, Stderr: stderr.String()}
		var exitError *exec.ExitError
		if errors.As(err, &exitError) {
			ge.ExitCode = exitError.ExitCode()
		}
		return &ge
	}
	return nil
}
// Output runs the git command and returns its stdout bytes, discarding any
// caller-configured stdout/stderr writers (exec's Output requires Stdout to
// be unset). Failures are returned as *GitError.
func (gc *Command) Output() ([]byte, error) {
	gc.Stdout = nil
	gc.Stderr = nil
	// This is a hack in order to not break the hundreds of
	// existing tests that rely on `run.PrepareCmd` to be invoked.
	out, err := run.PrepareCmd(gc.Cmd).Output()
	if err != nil {
		ge := GitError{err: err}
		// In real implementation, this should be an exec.ExitError, as below,
		// but the tests use a different type because exec.ExitError are difficult
		// to create. We want to get the exit code and stderr, but stderr
		// is not a method and so tests can't access it.
		// THIS MEANS THAT TESTS WILL NOT CORRECTLY HAVE STDERR SET,
		// but at least tests can get the exit code.
		var exitErrorWithExitCode errWithExitCode
		if errors.As(err, &exitErrorWithExitCode) {
			ge.ExitCode = exitErrorWithExitCode.ExitCode()
		}
		var exitError *exec.ExitError
		if errors.As(err, &exitError) {
			ge.Stderr = string(exitError.Stderr)
		}
		err = &ge
	}
	return out, err
}
// setRepoDir injects "-C <repoDir>" into the git argument list, or replaces
// the directory of an existing -C flag.
func (gc *Command) setRepoDir(repoDir string) {
	// If -C is already present, just overwrite its argument.
	for i, arg := range gc.Args {
		if arg == "-C" {
			gc.Args[i+1] = repoDir
			return
		}
	}
	// Handle "--" invocations for testing purposes.
	// index ends up just past the LAST "--" (0 when there is none), so the
	// flag lands after the helper-process separator in tests and right after
	// the executable in normal use.
	var index int
	for i, arg := range gc.Args {
		if arg == "--" {
			index = i + 1
		}
	}
	// Grow the slice by two slots and shift the tail right, then write
	// "-C" and the directory into the gap.
	gc.Args = append(gc.Args[:index+3], gc.Args[index+1:]...)
	gc.Args[index+1] = "-C"
	gc.Args[index+2] = repoDir
}
// Allow individual commands to be modified from the default client options.
type CommandModifier func(*Command)

// WithStderr redirects the command's stderr to the given writer.
func WithStderr(stderr io.Writer) CommandModifier {
	return func(gc *Command) {
		gc.Stderr = stderr
	}
}

// WithStdout redirects the command's stdout to the given writer.
func WithStdout(stdout io.Writer) CommandModifier {
	return func(gc *Command) {
		gc.Stdout = stdout
	}
}

// WithStdin feeds the command's stdin from the given reader.
func WithStdin(stdin io.Reader) CommandModifier {
	return func(gc *Command) {
		gc.Stdin = stdin
	}
}

// WithRepoDir makes the command run against the given repository directory
// by injecting a "-C" flag (see setRepoDir).
func WithRepoDir(repoDir string) CommandModifier {
	return func(gc *Command) {
		gc.setRepoDir(repoDir)
	}
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/hashicorp/golang-lru/v2/2q_test.go | third-party/github.com/hashicorp/golang-lru/v2/2q_test.go | // Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: MPL-2.0
package lru
import (
"testing"
)
// Benchmark2Q_Rand measures 2Q cache throughput under a uniformly random
// key trace: adds and gets alternate, and the hit ratio is logged.
func Benchmark2Q_Rand(b *testing.B) {
	l, err := New2Q[int64, int64](8192)
	if err != nil {
		b.Fatalf("err: %v", err)
	}
	// Key space (32768) is 4x the cache size, so misses are expected.
	trace := make([]int64, b.N*2)
	for i := 0; i < b.N*2; i++ {
		trace[i] = getRand(b) % 32768
	}
	b.ResetTimer()
	var hit, miss int
	for i := 0; i < 2*b.N; i++ {
		if i%2 == 0 {
			l.Add(trace[i], trace[i])
		} else {
			if _, ok := l.Get(trace[i]); ok {
				hit++
			} else {
				miss++
			}
		}
	}
	b.Logf("hit: %d miss: %d ratio: %f", hit, miss, float64(hit)/float64(hit+miss))
}
// Benchmark2Q_Freq measures 2Q cache throughput with a skewed trace: even
// slots draw from a smaller key space (more frequent keys), so the hit
// ratio should beat the uniform benchmark's.
func Benchmark2Q_Freq(b *testing.B) {
	l, err := New2Q[int64, int64](8192)
	if err != nil {
		b.Fatalf("err: %v", err)
	}
	trace := make([]int64, b.N*2)
	for i := 0; i < b.N*2; i++ {
		if i%2 == 0 {
			trace[i] = getRand(b) % 16384
		} else {
			trace[i] = getRand(b) % 32768
		}
	}
	b.ResetTimer()
	// Phase 1: populate; Phase 2: probe and count hits.
	for i := 0; i < b.N; i++ {
		l.Add(trace[i], trace[i])
	}
	var hit, miss int
	for i := 0; i < b.N; i++ {
		if _, ok := l.Get(trace[i]); ok {
			hit++
		} else {
			miss++
		}
	}
	b.Logf("hit: %d miss: %d ratio: %f", hit, miss, float64(hit)/float64(hit+miss))
}
// Test2Q_RandomOps fuzzes the 2Q cache with random Add/Get/Remove operations
// and checks the size invariant: recent + frequent never exceeds capacity.
func Test2Q_RandomOps(t *testing.T) {
	size := 128
	l, err := New2Q[int64, int64](128)
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	n := 200000
	for i := 0; i < n; i++ {
		key := getRand(t) % 512
		r := getRand(t)
		switch r % 3 {
		case 0:
			l.Add(key, key)
		case 1:
			l.Get(key)
		case 2:
			l.Remove(key)
		}
		if l.recent.Len()+l.frequent.Len() > size {
			t.Fatalf("bad: recent: %d freq: %d",
				l.recent.Len(), l.frequent.Len())
		}
	}
}
// Test2Q_Get_RecentToFrequent verifies the 2Q promotion rule for Get:
// entries start in the recent queue, a Get moves them to the frequent
// queue, and further Gets keep them there.
func Test2Q_Get_RecentToFrequent(t *testing.T) {
	l, err := New2Q[int, int](128)
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	// Touch all the entries, should be in t1
	for i := 0; i < 128; i++ {
		l.Add(i, i)
	}
	if n := l.recent.Len(); n != 128 {
		t.Fatalf("bad: %d", n)
	}
	if n := l.frequent.Len(); n != 0 {
		t.Fatalf("bad: %d", n)
	}
	// Get should upgrade to t2
	for i := 0; i < 128; i++ {
		if _, ok := l.Get(i); !ok {
			t.Fatalf("missing: %d", i)
		}
	}
	if n := l.recent.Len(); n != 0 {
		t.Fatalf("bad: %d", n)
	}
	if n := l.frequent.Len(); n != 128 {
		t.Fatalf("bad: %d", n)
	}
	// Get be from t2
	for i := 0; i < 128; i++ {
		if _, ok := l.Get(i); !ok {
			t.Fatalf("missing: %d", i)
		}
	}
	if n := l.recent.Len(); n != 0 {
		t.Fatalf("bad: %d", n)
	}
	if n := l.frequent.Len(); n != 128 {
		t.Fatalf("bad: %d", n)
	}
}
// Test2Q_Add_RecentToFrequent verifies the 2Q promotion rule for Add:
// a first Add lands in the recent queue, a repeated Add promotes the entry
// to the frequent queue, and further Adds keep it there.
func Test2Q_Add_RecentToFrequent(t *testing.T) {
	l, err := New2Q[int, int](128)
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	// Add initially to recent
	l.Add(1, 1)
	if n := l.recent.Len(); n != 1 {
		t.Fatalf("bad: %d", n)
	}
	if n := l.frequent.Len(); n != 0 {
		t.Fatalf("bad: %d", n)
	}
	// Add should upgrade to frequent
	l.Add(1, 1)
	if n := l.recent.Len(); n != 0 {
		t.Fatalf("bad: %d", n)
	}
	if n := l.frequent.Len(); n != 1 {
		t.Fatalf("bad: %d", n)
	}
	// Add should remain in frequent
	l.Add(1, 1)
	if n := l.recent.Len(); n != 0 {
		t.Fatalf("bad: %d", n)
	}
	if n := l.frequent.Len(); n != 1 {
		t.Fatalf("bad: %d", n)
	}
}
func Test2Q_Add_RecentEvict(t *testing.T) {
l, err := New2Q[int, int](4)
if err != nil {
t.Fatalf("err: %v", err)
}
// Add 1,2,3,4,5 -> Evict 1
l.Add(1, 1)
l.Add(2, 2)
l.Add(3, 3)
l.Add(4, 4)
l.Add(5, 5)
if n := l.recent.Len(); n != 4 {
t.Fatalf("bad: %d", n)
}
if n := l.recentEvict.Len(); n != 1 {
t.Fatalf("bad: %d", n)
}
if n := l.frequent.Len(); n != 0 {
t.Fatalf("bad: %d", n)
}
// Pull in the recently evicted
l.Add(1, 1)
if n := l.recent.Len(); n != 3 {
t.Fatalf("bad: %d", n)
}
if n := l.recentEvict.Len(); n != 1 {
t.Fatalf("bad: %d", n)
}
if n := l.frequent.Len(); n != 1 {
t.Fatalf("bad: %d", n)
}
// Add 6, should cause another recent evict
l.Add(6, 6)
if n := l.recent.Len(); n != 3 {
t.Fatalf("bad: %d", n)
}
if n := l.recentEvict.Len(); n != 2 {
t.Fatalf("bad: %d", n)
}
if n := l.frequent.Len(); n != 1 {
t.Fatalf("bad: %d", n)
}
}
func Test2Q_Resize(t *testing.T) {
l, err := New2Q[int, int](100)
if err != nil {
t.Fatalf("err: %v", err)
}
// Touch all the entries, should be in t1
for i := 0; i < 100; i++ {
l.Add(i, i)
}
evicted := l.Resize(50)
if evicted != 50 {
t.Fatalf("bad: %d", evicted)
}
if n := l.recent.Len(); n != 50 {
t.Fatalf("bad: %d", n)
}
if n := l.frequent.Len(); n != 0 {
t.Fatalf("bad: %d", n)
}
l, err = New2Q[int, int](100)
if err != nil {
t.Fatalf("err: %v", err)
}
for i := 0; i < 100; i++ {
l.Add(i, i)
}
for i := 0; i < 50; i++ {
l.Add(i, i)
}
evicted = l.Resize(50)
if evicted != 50 {
t.Fatalf("bad: %d", evicted)
}
if n := l.recent.Len(); n != 12 {
t.Fatalf("bad: %d", n)
}
if n := l.frequent.Len(); n != 38 {
t.Fatalf("bad: %d", n)
}
l, err = New2Q[int, int](100)
if err != nil {
t.Fatalf("err: %v", err)
}
for i := 0; i < 100; i++ {
l.Add(i, i)
l.Add(i, i)
}
evicted = l.Resize(50)
if evicted != 50 {
t.Fatalf("bad: %d", evicted)
}
if n := l.recent.Len(); n != 0 {
t.Fatalf("bad: %d", n)
}
if n := l.frequent.Len(); n != 50 {
t.Fatalf("bad: %d", n)
}
}
func Test2Q(t *testing.T) {
l, err := New2Q[int, int](128)
if err != nil {
t.Fatalf("err: %v", err)
}
for i := 0; i < 256; i++ {
l.Add(i, i)
}
if l.Len() != 128 {
t.Fatalf("bad len: %v", l.Len())
}
for i, k := range l.Keys() {
if v, ok := l.Get(k); !ok || v != k || v != i+128 {
t.Fatalf("bad key: %v", k)
}
}
for i, v := range l.Values() {
if v != i+128 {
t.Fatalf("bad key: %v", v)
}
}
for i := 0; i < 128; i++ {
if _, ok := l.Get(i); ok {
t.Fatalf("should be evicted")
}
}
for i := 128; i < 256; i++ {
if _, ok := l.Get(i); !ok {
t.Fatalf("should not be evicted")
}
}
for i := 128; i < 192; i++ {
l.Remove(i)
if _, ok := l.Get(i); ok {
t.Fatalf("should be deleted")
}
}
l.Purge()
if l.Len() != 0 {
t.Fatalf("bad len: %v", l.Len())
}
if _, ok := l.Get(200); ok {
t.Fatalf("should contain nothing")
}
}
// Test that Contains doesn't update recent-ness
func Test2Q_Contains(t *testing.T) {
l, err := New2Q[int, int](2)
if err != nil {
t.Fatalf("err: %v", err)
}
l.Add(1, 1)
l.Add(2, 2)
if !l.Contains(1) {
t.Errorf("1 should be contained")
}
l.Add(3, 3)
if l.Contains(1) {
t.Errorf("Contains should not have updated recent-ness of 1")
}
}
// Test that Peek doesn't update recent-ness
func Test2Q_Peek(t *testing.T) {
l, err := New2Q[int, int](2)
if err != nil {
t.Fatalf("err: %v", err)
}
l.Add(1, 1)
l.Add(2, 2)
if v, ok := l.Peek(1); !ok || v != 1 {
t.Errorf("1 should be set to 1: %v, %v", v, ok)
}
l.Add(3, 3)
if l.Contains(1) {
t.Errorf("should not have updated recent-ness of 1")
}
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/hashicorp/golang-lru/v2/testing_test.go | third-party/github.com/hashicorp/golang-lru/v2/testing_test.go | // Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: MPL-2.0
package lru
import (
"crypto/rand"
"math"
"math/big"
"testing"
)
func getRand(tb testing.TB) int64 {
out, err := rand.Int(rand.Reader, big.NewInt(math.MaxInt64))
if err != nil {
tb.Fatal(err)
}
return out.Int64()
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/hashicorp/golang-lru/v2/lru_test.go | third-party/github.com/hashicorp/golang-lru/v2/lru_test.go | // Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: MPL-2.0
package lru
import (
"reflect"
"testing"
)
func BenchmarkLRU_Rand(b *testing.B) {
l, err := New[int64, int64](8192)
if err != nil {
b.Fatalf("err: %v", err)
}
trace := make([]int64, b.N*2)
for i := 0; i < b.N*2; i++ {
trace[i] = getRand(b) % 32768
}
b.ResetTimer()
var hit, miss int
for i := 0; i < 2*b.N; i++ {
if i%2 == 0 {
l.Add(trace[i], trace[i])
} else {
if _, ok := l.Get(trace[i]); ok {
hit++
} else {
miss++
}
}
}
b.Logf("hit: %d miss: %d ratio: %f", hit, miss, float64(hit)/float64(hit+miss))
}
func BenchmarkLRU_Freq(b *testing.B) {
l, err := New[int64, int64](8192)
if err != nil {
b.Fatalf("err: %v", err)
}
trace := make([]int64, b.N*2)
for i := 0; i < b.N*2; i++ {
if i%2 == 0 {
trace[i] = getRand(b) % 16384
} else {
trace[i] = getRand(b) % 32768
}
}
b.ResetTimer()
for i := 0; i < b.N; i++ {
l.Add(trace[i], trace[i])
}
var hit, miss int
for i := 0; i < b.N; i++ {
if _, ok := l.Get(trace[i]); ok {
hit++
} else {
miss++
}
}
b.Logf("hit: %d miss: %d ratio: %f", hit, miss, float64(hit)/float64(hit+miss))
}
func TestLRU(t *testing.T) {
evictCounter := 0
onEvicted := func(k int, v int) {
if k != v {
t.Fatalf("Evict values not equal (%v!=%v)", k, v)
}
evictCounter++
}
l, err := NewWithEvict(128, onEvicted)
if err != nil {
t.Fatalf("err: %v", err)
}
for i := 0; i < 256; i++ {
l.Add(i, i)
}
if l.Len() != 128 {
t.Fatalf("bad len: %v", l.Len())
}
if evictCounter != 128 {
t.Fatalf("bad evict count: %v", evictCounter)
}
for i, k := range l.Keys() {
if v, ok := l.Get(k); !ok || v != k || v != i+128 {
t.Fatalf("bad key: %v", k)
}
}
for i, v := range l.Values() {
if v != i+128 {
t.Fatalf("bad value: %v", v)
}
}
for i := 0; i < 128; i++ {
if _, ok := l.Get(i); ok {
t.Fatalf("should be evicted")
}
}
for i := 128; i < 256; i++ {
if _, ok := l.Get(i); !ok {
t.Fatalf("should not be evicted")
}
}
for i := 128; i < 192; i++ {
l.Remove(i)
if _, ok := l.Get(i); ok {
t.Fatalf("should be deleted")
}
}
l.Get(192) // expect 192 to be last key in l.Keys()
for i, k := range l.Keys() {
if (i < 63 && k != i+193) || (i == 63 && k != 192) {
t.Fatalf("out of order key: %v", k)
}
}
l.Purge()
if l.Len() != 0 {
t.Fatalf("bad len: %v", l.Len())
}
if _, ok := l.Get(200); ok {
t.Fatalf("should contain nothing")
}
}
// test that Add returns true/false if an eviction occurred
func TestLRUAdd(t *testing.T) {
evictCounter := 0
onEvicted := func(k int, v int) {
evictCounter++
}
l, err := NewWithEvict(1, onEvicted)
if err != nil {
t.Fatalf("err: %v", err)
}
if l.Add(1, 1) == true || evictCounter != 0 {
t.Errorf("should not have an eviction")
}
if l.Add(2, 2) == false || evictCounter != 1 {
t.Errorf("should have an eviction")
}
}
// test that Contains doesn't update recent-ness
func TestLRUContains(t *testing.T) {
l, err := New[int, int](2)
if err != nil {
t.Fatalf("err: %v", err)
}
l.Add(1, 1)
l.Add(2, 2)
if !l.Contains(1) {
t.Errorf("1 should be contained")
}
l.Add(3, 3)
if l.Contains(1) {
t.Errorf("Contains should not have updated recent-ness of 1")
}
}
// test that ContainsOrAdd doesn't update recent-ness
func TestLRUContainsOrAdd(t *testing.T) {
l, err := New[int, int](2)
if err != nil {
t.Fatalf("err: %v", err)
}
l.Add(1, 1)
l.Add(2, 2)
contains, evict := l.ContainsOrAdd(1, 1)
if !contains {
t.Errorf("1 should be contained")
}
if evict {
t.Errorf("nothing should be evicted here")
}
l.Add(3, 3)
contains, evict = l.ContainsOrAdd(1, 1)
if contains {
t.Errorf("1 should not have been contained")
}
if !evict {
t.Errorf("an eviction should have occurred")
}
if !l.Contains(1) {
t.Errorf("now 1 should be contained")
}
}
// test that PeekOrAdd doesn't update recent-ness
func TestLRUPeekOrAdd(t *testing.T) {
l, err := New[int, int](2)
if err != nil {
t.Fatalf("err: %v", err)
}
l.Add(1, 1)
l.Add(2, 2)
previous, contains, evict := l.PeekOrAdd(1, 1)
if !contains {
t.Errorf("1 should be contained")
}
if evict {
t.Errorf("nothing should be evicted here")
}
if previous != 1 {
t.Errorf("previous is not equal to 1")
}
l.Add(3, 3)
contains, evict = l.ContainsOrAdd(1, 1)
if contains {
t.Errorf("1 should not have been contained")
}
if !evict {
t.Errorf("an eviction should have occurred")
}
if !l.Contains(1) {
t.Errorf("now 1 should be contained")
}
}
// test that Peek doesn't update recent-ness
func TestLRUPeek(t *testing.T) {
l, err := New[int, int](2)
if err != nil {
t.Fatalf("err: %v", err)
}
l.Add(1, 1)
l.Add(2, 2)
if v, ok := l.Peek(1); !ok || v != 1 {
t.Errorf("1 should be set to 1: %v, %v", v, ok)
}
l.Add(3, 3)
if l.Contains(1) {
t.Errorf("should not have updated recent-ness of 1")
}
}
// test that Resize can upsize and downsize
func TestLRUResize(t *testing.T) {
onEvictCounter := 0
onEvicted := func(k int, v int) {
onEvictCounter++
}
l, err := NewWithEvict(2, onEvicted)
if err != nil {
t.Fatalf("err: %v", err)
}
// Downsize
l.Add(1, 1)
l.Add(2, 2)
evicted := l.Resize(1)
if evicted != 1 {
t.Errorf("1 element should have been evicted: %v", evicted)
}
if onEvictCounter != 1 {
t.Errorf("onEvicted should have been called 1 time: %v", onEvictCounter)
}
l.Add(3, 3)
if l.Contains(1) {
t.Errorf("Element 1 should have been evicted")
}
// Upsize
evicted = l.Resize(2)
if evicted != 0 {
t.Errorf("0 elements should have been evicted: %v", evicted)
}
l.Add(4, 4)
if !l.Contains(3) || !l.Contains(4) {
t.Errorf("Cache should have contained 2 elements")
}
}
func (c *Cache[K, V]) wantKeys(t *testing.T, want []K) {
t.Helper()
got := c.Keys()
if !reflect.DeepEqual(got, want) {
t.Errorf("wrong keys got: %v, want: %v ", got, want)
}
}
func TestCache_EvictionSameKey(t *testing.T) {
t.Run("Add", func(t *testing.T) {
var evictedKeys []int
cache, _ := NewWithEvict(
2,
func(key int, _ struct{}) {
evictedKeys = append(evictedKeys, key)
})
if evicted := cache.Add(1, struct{}{}); evicted {
t.Error("First 1: got unexpected eviction")
}
cache.wantKeys(t, []int{1})
if evicted := cache.Add(2, struct{}{}); evicted {
t.Error("2: got unexpected eviction")
}
cache.wantKeys(t, []int{1, 2})
if evicted := cache.Add(1, struct{}{}); evicted {
t.Error("Second 1: got unexpected eviction")
}
cache.wantKeys(t, []int{2, 1})
if evicted := cache.Add(3, struct{}{}); !evicted {
t.Error("3: did not get expected eviction")
}
cache.wantKeys(t, []int{1, 3})
want := []int{2}
if !reflect.DeepEqual(evictedKeys, want) {
t.Errorf("evictedKeys got: %v want: %v", evictedKeys, want)
}
})
t.Run("ContainsOrAdd", func(t *testing.T) {
var evictedKeys []int
cache, _ := NewWithEvict(
2,
func(key int, _ struct{}) {
evictedKeys = append(evictedKeys, key)
})
contained, evicted := cache.ContainsOrAdd(1, struct{}{})
if contained {
t.Error("First 1: got unexpected contained")
}
if evicted {
t.Error("First 1: got unexpected eviction")
}
cache.wantKeys(t, []int{1})
contained, evicted = cache.ContainsOrAdd(2, struct{}{})
if contained {
t.Error("2: got unexpected contained")
}
if evicted {
t.Error("2: got unexpected eviction")
}
cache.wantKeys(t, []int{1, 2})
contained, evicted = cache.ContainsOrAdd(1, struct{}{})
if !contained {
t.Error("Second 1: did not get expected contained")
}
if evicted {
t.Error("Second 1: got unexpected eviction")
}
cache.wantKeys(t, []int{1, 2})
contained, evicted = cache.ContainsOrAdd(3, struct{}{})
if contained {
t.Error("3: got unexpected contained")
}
if !evicted {
t.Error("3: did not get expected eviction")
}
cache.wantKeys(t, []int{2, 3})
want := []int{1}
if !reflect.DeepEqual(evictedKeys, want) {
t.Errorf("evictedKeys got: %v want: %v", evictedKeys, want)
}
})
t.Run("PeekOrAdd", func(t *testing.T) {
var evictedKeys []int
cache, _ := NewWithEvict(
2,
func(key int, _ struct{}) {
evictedKeys = append(evictedKeys, key)
})
_, contained, evicted := cache.PeekOrAdd(1, struct{}{})
if contained {
t.Error("First 1: got unexpected contained")
}
if evicted {
t.Error("First 1: got unexpected eviction")
}
cache.wantKeys(t, []int{1})
_, contained, evicted = cache.PeekOrAdd(2, struct{}{})
if contained {
t.Error("2: got unexpected contained")
}
if evicted {
t.Error("2: got unexpected eviction")
}
cache.wantKeys(t, []int{1, 2})
_, contained, evicted = cache.PeekOrAdd(1, struct{}{})
if !contained {
t.Error("Second 1: did not get expected contained")
}
if evicted {
t.Error("Second 1: got unexpected eviction")
}
cache.wantKeys(t, []int{1, 2})
_, contained, evicted = cache.PeekOrAdd(3, struct{}{})
if contained {
t.Error("3: got unexpected contained")
}
if !evicted {
t.Error("3: did not get expected eviction")
}
cache.wantKeys(t, []int{2, 3})
want := []int{1}
if !reflect.DeepEqual(evictedKeys, want) {
t.Errorf("evictedKeys got: %v want: %v", evictedKeys, want)
}
})
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/hashicorp/golang-lru/v2/doc.go | third-party/github.com/hashicorp/golang-lru/v2/doc.go | // Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: MPL-2.0
// Package lru provides three different LRU caches of varying sophistication.
//
// Cache is a simple LRU cache. It is based on the LRU implementation in
// groupcache: https://github.com/golang/groupcache/tree/master/lru
//
// TwoQueueCache tracks frequently used and recently used entries separately.
// This avoids a burst of accesses from taking out frequently used entries, at
// the cost of about 2x computational overhead and some extra bookkeeping.
//
// ARCCache is an adaptive replacement cache. It tracks recent evictions as well
// as recent usage in both the frequent and recent caches. Its computational
// overhead is comparable to TwoQueueCache, but the memory overhead is linear
// with the size of the cache.
//
// ARC has been patented by IBM, so do not use it if that is problematic for
// your program. For this reason, it is in a separate go module contained within
// this repository.
//
// All caches in this package take locks while operating, and are therefore
// thread-safe for consumers.
package lru
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/hashicorp/golang-lru/v2/2q.go | third-party/github.com/hashicorp/golang-lru/v2/2q.go | // Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: MPL-2.0
package lru
import (
"errors"
"sync"
"github.com/hashicorp/golang-lru/v2/simplelru"
)
const (
// Default2QRecentRatio is the ratio of the 2Q cache dedicated
// to recently added entries that have only been accessed once.
Default2QRecentRatio = 0.25
// Default2QGhostEntries is the default ratio of ghost
// entries kept to track entries recently evicted
Default2QGhostEntries = 0.50
)
// TwoQueueCache is a thread-safe fixed size 2Q cache.
// 2Q is an enhancement over the standard LRU cache
// in that it tracks both frequently and recently used
// entries separately. This avoids a burst in access to new
// entries from evicting frequently used entries. It adds some
// additional tracking overhead to the standard LRU cache, and is
// computationally about 2x the cost, and adds some metadata over
// head. The ARCCache is similar, but does not require setting any
// parameters.
type TwoQueueCache[K comparable, V any] struct {
size int
recentSize int
recentRatio float64
ghostRatio float64
recent simplelru.LRUCache[K, V]
frequent simplelru.LRUCache[K, V]
recentEvict simplelru.LRUCache[K, struct{}]
lock sync.RWMutex
}
// New2Q creates a new TwoQueueCache using the default
// values for the parameters.
func New2Q[K comparable, V any](size int) (*TwoQueueCache[K, V], error) {
return New2QParams[K, V](size, Default2QRecentRatio, Default2QGhostEntries)
}
// New2QParams creates a new TwoQueueCache using the provided
// parameter values.
func New2QParams[K comparable, V any](size int, recentRatio, ghostRatio float64) (*TwoQueueCache[K, V], error) {
if size <= 0 {
return nil, errors.New("invalid size")
}
if recentRatio < 0.0 || recentRatio > 1.0 {
return nil, errors.New("invalid recent ratio")
}
if ghostRatio < 0.0 || ghostRatio > 1.0 {
return nil, errors.New("invalid ghost ratio")
}
// Determine the sub-sizes
recentSize := int(float64(size) * recentRatio)
evictSize := int(float64(size) * ghostRatio)
// Allocate the LRUs
recent, err := simplelru.NewLRU[K, V](size, nil)
if err != nil {
return nil, err
}
frequent, err := simplelru.NewLRU[K, V](size, nil)
if err != nil {
return nil, err
}
recentEvict, err := simplelru.NewLRU[K, struct{}](evictSize, nil)
if err != nil {
return nil, err
}
// Initialize the cache
c := &TwoQueueCache[K, V]{
size: size,
recentSize: recentSize,
recentRatio: recentRatio,
ghostRatio: ghostRatio,
recent: recent,
frequent: frequent,
recentEvict: recentEvict,
}
return c, nil
}
// Get looks up a key's value from the cache.
func (c *TwoQueueCache[K, V]) Get(key K) (value V, ok bool) {
c.lock.Lock()
defer c.lock.Unlock()
// Check if this is a frequent value
if val, ok := c.frequent.Get(key); ok {
return val, ok
}
// If the value is contained in recent, then we
// promote it to frequent
if val, ok := c.recent.Peek(key); ok {
c.recent.Remove(key)
c.frequent.Add(key, val)
return val, ok
}
// No hit
return
}
// Add adds a value to the cache.
func (c *TwoQueueCache[K, V]) Add(key K, value V) {
c.lock.Lock()
defer c.lock.Unlock()
// Check if the value is frequently used already,
// and just update the value
if c.frequent.Contains(key) {
c.frequent.Add(key, value)
return
}
// Check if the value is recently used, and promote
// the value into the frequent list
if c.recent.Contains(key) {
c.recent.Remove(key)
c.frequent.Add(key, value)
return
}
// If the value was recently evicted, add it to the
// frequently used list
if c.recentEvict.Contains(key) {
c.ensureSpace(true)
c.recentEvict.Remove(key)
c.frequent.Add(key, value)
return
}
// Add to the recently seen list
c.ensureSpace(false)
c.recent.Add(key, value)
}
// ensureSpace is used to ensure we have space in the cache
func (c *TwoQueueCache[K, V]) ensureSpace(recentEvict bool) {
// If we have space, nothing to do
recentLen := c.recent.Len()
freqLen := c.frequent.Len()
if recentLen+freqLen < c.size {
return
}
// If the recent buffer is larger than
// the target, evict from there
if recentLen > 0 && (recentLen > c.recentSize || (recentLen == c.recentSize && !recentEvict)) {
k, _, _ := c.recent.RemoveOldest()
c.recentEvict.Add(k, struct{}{})
return
}
// Remove from the frequent list otherwise
c.frequent.RemoveOldest()
}
// Len returns the number of items in the cache.
func (c *TwoQueueCache[K, V]) Len() int {
c.lock.RLock()
defer c.lock.RUnlock()
return c.recent.Len() + c.frequent.Len()
}
// Resize changes the cache size.
func (c *TwoQueueCache[K, V]) Resize(size int) (evicted int) {
c.lock.Lock()
defer c.lock.Unlock()
// Recalculate the sub-sizes
recentSize := int(float64(size) * c.recentRatio)
evictSize := int(float64(size) * c.ghostRatio)
c.size = size
c.recentSize = recentSize
// ensureSpace
diff := c.recent.Len() + c.frequent.Len() - size
if diff < 0 {
diff = 0
}
for i := 0; i < diff; i++ {
c.ensureSpace(true)
}
// Reallocate the LRUs
c.recent.Resize(size)
c.frequent.Resize(size)
c.recentEvict.Resize(evictSize)
return diff
}
// Keys returns a slice of the keys in the cache.
// The frequently used keys are first in the returned slice.
func (c *TwoQueueCache[K, V]) Keys() []K {
c.lock.RLock()
defer c.lock.RUnlock()
k1 := c.frequent.Keys()
k2 := c.recent.Keys()
return append(k1, k2...)
}
// Values returns a slice of the values in the cache.
// The frequently used values are first in the returned slice.
func (c *TwoQueueCache[K, V]) Values() []V {
c.lock.RLock()
defer c.lock.RUnlock()
v1 := c.frequent.Values()
v2 := c.recent.Values()
return append(v1, v2...)
}
// Remove removes the provided key from the cache.
func (c *TwoQueueCache[K, V]) Remove(key K) {
c.lock.Lock()
defer c.lock.Unlock()
if c.frequent.Remove(key) {
return
}
if c.recent.Remove(key) {
return
}
if c.recentEvict.Remove(key) {
return
}
}
// Purge is used to completely clear the cache.
func (c *TwoQueueCache[K, V]) Purge() {
c.lock.Lock()
defer c.lock.Unlock()
c.recent.Purge()
c.frequent.Purge()
c.recentEvict.Purge()
}
// Contains is used to check if the cache contains a key
// without updating recency or frequency.
func (c *TwoQueueCache[K, V]) Contains(key K) bool {
c.lock.RLock()
defer c.lock.RUnlock()
return c.frequent.Contains(key) || c.recent.Contains(key)
}
// Peek is used to inspect the cache value of a key
// without updating recency or frequency.
func (c *TwoQueueCache[K, V]) Peek(key K) (value V, ok bool) {
c.lock.RLock()
defer c.lock.RUnlock()
if val, ok := c.frequent.Peek(key); ok {
return val, ok
}
return c.recent.Peek(key)
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/hashicorp/golang-lru/v2/lru.go | third-party/github.com/hashicorp/golang-lru/v2/lru.go | // Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: MPL-2.0
package lru
import (
"sync"
"github.com/hashicorp/golang-lru/v2/simplelru"
)
const (
// DefaultEvictedBufferSize defines the default buffer size to store evicted key/val
DefaultEvictedBufferSize = 16
)
// Cache is a thread-safe fixed size LRU cache.
type Cache[K comparable, V any] struct {
lru *simplelru.LRU[K, V]
evictedKeys []K
evictedVals []V
onEvictedCB func(k K, v V)
lock sync.RWMutex
}
// New creates an LRU of the given size.
func New[K comparable, V any](size int) (*Cache[K, V], error) {
return NewWithEvict[K, V](size, nil)
}
// NewWithEvict constructs a fixed size cache with the given eviction
// callback.
func NewWithEvict[K comparable, V any](size int, onEvicted func(key K, value V)) (c *Cache[K, V], err error) {
// create a cache with default settings
c = &Cache[K, V]{
onEvictedCB: onEvicted,
}
if onEvicted != nil {
c.initEvictBuffers()
onEvicted = c.onEvicted
}
c.lru, err = simplelru.NewLRU(size, onEvicted)
return
}
func (c *Cache[K, V]) initEvictBuffers() {
c.evictedKeys = make([]K, 0, DefaultEvictedBufferSize)
c.evictedVals = make([]V, 0, DefaultEvictedBufferSize)
}
// onEvicted save evicted key/val and sent in externally registered callback
// outside of critical section
func (c *Cache[K, V]) onEvicted(k K, v V) {
c.evictedKeys = append(c.evictedKeys, k)
c.evictedVals = append(c.evictedVals, v)
}
// Purge is used to completely clear the cache.
func (c *Cache[K, V]) Purge() {
var ks []K
var vs []V
c.lock.Lock()
c.lru.Purge()
if c.onEvictedCB != nil && len(c.evictedKeys) > 0 {
ks, vs = c.evictedKeys, c.evictedVals
c.initEvictBuffers()
}
c.lock.Unlock()
// invoke callback outside of critical section
if c.onEvictedCB != nil {
for i := 0; i < len(ks); i++ {
c.onEvictedCB(ks[i], vs[i])
}
}
}
// Add adds a value to the cache. Returns true if an eviction occurred.
func (c *Cache[K, V]) Add(key K, value V) (evicted bool) {
var k K
var v V
c.lock.Lock()
evicted = c.lru.Add(key, value)
if c.onEvictedCB != nil && evicted {
k, v = c.evictedKeys[0], c.evictedVals[0]
c.evictedKeys, c.evictedVals = c.evictedKeys[:0], c.evictedVals[:0]
}
c.lock.Unlock()
if c.onEvictedCB != nil && evicted {
c.onEvictedCB(k, v)
}
return
}
// Get looks up a key's value from the cache.
func (c *Cache[K, V]) Get(key K) (value V, ok bool) {
c.lock.Lock()
value, ok = c.lru.Get(key)
c.lock.Unlock()
return value, ok
}
// Contains checks if a key is in the cache, without updating the
// recent-ness or deleting it for being stale.
func (c *Cache[K, V]) Contains(key K) bool {
c.lock.RLock()
containKey := c.lru.Contains(key)
c.lock.RUnlock()
return containKey
}
// Peek returns the key value (or undefined if not found) without updating
// the "recently used"-ness of the key.
func (c *Cache[K, V]) Peek(key K) (value V, ok bool) {
c.lock.RLock()
value, ok = c.lru.Peek(key)
c.lock.RUnlock()
return value, ok
}
// ContainsOrAdd checks if a key is in the cache without updating the
// recent-ness or deleting it for being stale, and if not, adds the value.
// Returns whether found and whether an eviction occurred.
func (c *Cache[K, V]) ContainsOrAdd(key K, value V) (ok, evicted bool) {
var k K
var v V
c.lock.Lock()
if c.lru.Contains(key) {
c.lock.Unlock()
return true, false
}
evicted = c.lru.Add(key, value)
if c.onEvictedCB != nil && evicted {
k, v = c.evictedKeys[0], c.evictedVals[0]
c.evictedKeys, c.evictedVals = c.evictedKeys[:0], c.evictedVals[:0]
}
c.lock.Unlock()
if c.onEvictedCB != nil && evicted {
c.onEvictedCB(k, v)
}
return false, evicted
}
// PeekOrAdd checks if a key is in the cache without updating the
// recent-ness or deleting it for being stale, and if not, adds the value.
// Returns whether found and whether an eviction occurred.
func (c *Cache[K, V]) PeekOrAdd(key K, value V) (previous V, ok, evicted bool) {
var k K
var v V
c.lock.Lock()
previous, ok = c.lru.Peek(key)
if ok {
c.lock.Unlock()
return previous, true, false
}
evicted = c.lru.Add(key, value)
if c.onEvictedCB != nil && evicted {
k, v = c.evictedKeys[0], c.evictedVals[0]
c.evictedKeys, c.evictedVals = c.evictedKeys[:0], c.evictedVals[:0]
}
c.lock.Unlock()
if c.onEvictedCB != nil && evicted {
c.onEvictedCB(k, v)
}
return
}
// Remove removes the provided key from the cache.
func (c *Cache[K, V]) Remove(key K) (present bool) {
var k K
var v V
c.lock.Lock()
present = c.lru.Remove(key)
if c.onEvictedCB != nil && present {
k, v = c.evictedKeys[0], c.evictedVals[0]
c.evictedKeys, c.evictedVals = c.evictedKeys[:0], c.evictedVals[:0]
}
c.lock.Unlock()
if c.onEvictedCB != nil && present {
c.onEvictedCB(k, v)
}
return
}
// Resize changes the cache size.
func (c *Cache[K, V]) Resize(size int) (evicted int) {
var ks []K
var vs []V
c.lock.Lock()
evicted = c.lru.Resize(size)
if c.onEvictedCB != nil && evicted > 0 {
ks, vs = c.evictedKeys, c.evictedVals
c.initEvictBuffers()
}
c.lock.Unlock()
if c.onEvictedCB != nil && evicted > 0 {
for i := 0; i < len(ks); i++ {
c.onEvictedCB(ks[i], vs[i])
}
}
return evicted
}
// RemoveOldest removes the oldest item from the cache.
func (c *Cache[K, V]) RemoveOldest() (key K, value V, ok bool) {
var k K
var v V
c.lock.Lock()
key, value, ok = c.lru.RemoveOldest()
if c.onEvictedCB != nil && ok {
k, v = c.evictedKeys[0], c.evictedVals[0]
c.evictedKeys, c.evictedVals = c.evictedKeys[:0], c.evictedVals[:0]
}
c.lock.Unlock()
if c.onEvictedCB != nil && ok {
c.onEvictedCB(k, v)
}
return
}
// GetOldest returns the oldest entry
func (c *Cache[K, V]) GetOldest() (key K, value V, ok bool) {
c.lock.RLock()
key, value, ok = c.lru.GetOldest()
c.lock.RUnlock()
return
}
// Keys returns a slice of the keys in the cache, from oldest to newest.
func (c *Cache[K, V]) Keys() []K {
c.lock.RLock()
keys := c.lru.Keys()
c.lock.RUnlock()
return keys
}
// Values returns a slice of the values in the cache, from oldest to newest.
func (c *Cache[K, V]) Values() []V {
c.lock.RLock()
values := c.lru.Values()
c.lock.RUnlock()
return values
}
// Len returns the number of items in the cache.
func (c *Cache[K, V]) Len() int {
c.lock.RLock()
length := c.lru.Len()
c.lock.RUnlock()
return length
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/hashicorp/golang-lru/v2/expirable/expirable_lru_test.go | third-party/github.com/hashicorp/golang-lru/v2/expirable/expirable_lru_test.go | // Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: MPL-2.0
package expirable
import (
"crypto/rand"
"fmt"
"math"
"math/big"
"reflect"
"sync"
"testing"
"time"
"github.com/hashicorp/golang-lru/v2/simplelru"
)
func BenchmarkLRU_Rand_NoExpire(b *testing.B) {
l := NewLRU[int64, int64](8192, nil, 0)
trace := make([]int64, b.N*2)
for i := 0; i < b.N*2; i++ {
trace[i] = getRand(b) % 32768
}
b.ResetTimer()
var hit, miss int
for i := 0; i < 2*b.N; i++ {
if i%2 == 0 {
l.Add(trace[i], trace[i])
} else {
if _, ok := l.Get(trace[i]); ok {
hit++
} else {
miss++
}
}
}
b.Logf("hit: %d miss: %d ratio: %f", hit, miss, float64(hit)/float64(hit+miss))
}
func BenchmarkLRU_Freq_NoExpire(b *testing.B) {
l := NewLRU[int64, int64](8192, nil, 0)
trace := make([]int64, b.N*2)
for i := 0; i < b.N*2; i++ {
if i%2 == 0 {
trace[i] = getRand(b) % 16384
} else {
trace[i] = getRand(b) % 32768
}
}
b.ResetTimer()
for i := 0; i < b.N; i++ {
l.Add(trace[i], trace[i])
}
var hit, miss int
for i := 0; i < b.N; i++ {
if _, ok := l.Get(trace[i]); ok {
hit++
} else {
miss++
}
}
b.Logf("hit: %d miss: %d ratio: %f", hit, miss, float64(hit)/float64(hit+miss))
}
func BenchmarkLRU_Rand_WithExpire(b *testing.B) {
l := NewLRU[int64, int64](8192, nil, time.Millisecond*10)
trace := make([]int64, b.N*2)
for i := 0; i < b.N*2; i++ {
trace[i] = getRand(b) % 32768
}
b.ResetTimer()
var hit, miss int
for i := 0; i < 2*b.N; i++ {
if i%2 == 0 {
l.Add(trace[i], trace[i])
} else {
if _, ok := l.Get(trace[i]); ok {
hit++
} else {
miss++
}
}
}
b.Logf("hit: %d miss: %d ratio: %f", hit, miss, float64(hit)/float64(hit+miss))
}
func BenchmarkLRU_Freq_WithExpire(b *testing.B) {
l := NewLRU[int64, int64](8192, nil, time.Millisecond*10)
trace := make([]int64, b.N*2)
for i := 0; i < b.N*2; i++ {
if i%2 == 0 {
trace[i] = getRand(b) % 16384
} else {
trace[i] = getRand(b) % 32768
}
}
b.ResetTimer()
for i := 0; i < b.N; i++ {
l.Add(trace[i], trace[i])
}
var hit, miss int
for i := 0; i < b.N; i++ {
if _, ok := l.Get(trace[i]); ok {
hit++
} else {
miss++
}
}
b.Logf("hit: %d miss: %d ratio: %f", hit, miss, float64(hit)/float64(hit+miss))
}
func TestLRUInterface(_ *testing.T) {
var _ simplelru.LRUCache[int, int] = &LRU[int, int]{}
}
func TestLRUNoPurge(t *testing.T) {
lc := NewLRU[string, string](10, nil, 0)
lc.Add("key1", "val1")
if lc.Len() != 1 {
t.Fatalf("length differs from expected")
}
v, ok := lc.Peek("key1")
if v != "val1" {
t.Fatalf("value differs from expected")
}
if !ok {
t.Fatalf("should be true")
}
if !lc.Contains("key1") {
t.Fatalf("should contain key1")
}
if lc.Contains("key2") {
t.Fatalf("should not contain key2")
}
v, ok = lc.Peek("key2")
if v != "" {
t.Fatalf("should be empty")
}
if ok {
t.Fatalf("should be false")
}
if !reflect.DeepEqual(lc.Keys(), []string{"key1"}) {
t.Fatalf("value differs from expected")
}
if lc.Resize(0) != 0 {
t.Fatalf("evicted count differs from expected")
}
if lc.Resize(2) != 0 {
t.Fatalf("evicted count differs from expected")
}
lc.Add("key2", "val2")
if lc.Resize(1) != 1 {
t.Fatalf("evicted count differs from expected")
}
}
func TestLRUEdgeCases(t *testing.T) {
lc := NewLRU[string, *string](2, nil, 0)
// Adding a nil value
lc.Add("key1", nil)
value, exists := lc.Get("key1")
if value != nil || !exists {
t.Fatalf("unexpected value or existence flag for key1: value=%v, exists=%v", value, exists)
}
// Adding an entry with the same key but different value
newVal := "val1"
lc.Add("key1", &newVal)
value, exists = lc.Get("key1")
if value != &newVal || !exists {
t.Fatalf("unexpected value or existence flag for key1: value=%v, exists=%v", value, exists)
}
}
func TestLRU_Values(t *testing.T) {
lc := NewLRU[string, string](3, nil, 0)
lc.Add("key1", "val1")
lc.Add("key2", "val2")
lc.Add("key3", "val3")
values := lc.Values()
if !reflect.DeepEqual(values, []string{"val1", "val2", "val3"}) {
t.Fatalf("values differs from expected")
}
}
// func TestExpirableMultipleClose(_ *testing.T) {
// lc := NewLRU[string, string](10, nil, 0)
// lc.Close()
// // should not panic
// lc.Close()
// }
// TestLRUWithPurge exercises TTL expiry (150ms), the eviction callback,
// deleteExpired, and Purge. Timing-sensitive: relies on real sleeps being
// shorter/longer than the TTL.
func TestLRUWithPurge(t *testing.T) {
	var evicted []string
	// The callback records key and value pairs in eviction order.
	lc := NewLRU(10, func(key string, value string) { evicted = append(evicted, key, value) }, 150*time.Millisecond)

	// GetOldest on an empty cache returns zero values and false.
	k, v, ok := lc.GetOldest()
	if k != "" {
		t.Fatalf("should be empty")
	}
	if v != "" {
		t.Fatalf("should be empty")
	}
	if ok {
		t.Fatalf("should be false")
	}

	lc.Add("key1", "val1")

	time.Sleep(100 * time.Millisecond) // not enough to expire
	if lc.Len() != 1 {
		t.Fatalf("length differs from expected")
	}

	v, ok = lc.Get("key1")
	if v != "val1" {
		t.Fatalf("value differs from expected")
	}
	if !ok {
		t.Fatalf("should be true")
	}

	time.Sleep(200 * time.Millisecond) // expire
	// Get on an expired key behaves as a miss even before the reaper runs.
	v, ok = lc.Get("key1")
	if ok {
		t.Fatalf("should be false")
	}
	if v != "" {
		t.Fatalf("should be nil")
	}

	if lc.Len() != 0 {
		t.Fatalf("length differs from expected")
	}
	// The background reaper must have fired onEvict for key1 by now.
	if !reflect.DeepEqual(evicted, []string{"key1", "val1"}) {
		t.Fatalf("value differs from expected")
	}

	// add new entry
	lc.Add("key2", "val2")
	if lc.Len() != 1 {
		t.Fatalf("length differs from expected")
	}
	k, v, ok = lc.GetOldest()
	if k != "key2" {
		t.Fatalf("value differs from expected")
	}
	if v != "val2" {
		t.Fatalf("value differs from expected")
	}
	if !ok {
		t.Fatalf("should be true")
	}

	// DeleteExpired, nothing deleted
	lc.deleteExpired()
	if lc.Len() != 1 {
		t.Fatalf("length differs from expected")
	}
	if !reflect.DeepEqual(evicted, []string{"key1", "val1"}) {
		t.Fatalf("value differs from expected")
	}

	// Purge, cache should be clean
	lc.Purge()
	if lc.Len() != 0 {
		t.Fatalf("length differs from expected")
	}
	if !reflect.DeepEqual(evicted, []string{"key1", "val1", "key2", "val2"}) {
		t.Fatalf("value differs from expected")
	}
}
// TestLRUWithPurgeEnforcedBySize verifies the size limit is enforced even
// when the TTL (one hour) never expires anything during the test.
func TestLRUWithPurgeEnforcedBySize(t *testing.T) {
	lc := NewLRU[string, string](10, nil, time.Hour)
	for i := 0; i < 100; i++ {
		// Build the key/value once per iteration instead of three times.
		// (The previous `i := i` shadow was redundant: nothing closes
		// over the loop variable here.)
		key := fmt.Sprintf("key%d", i)
		want := fmt.Sprintf("val%d", i)
		lc.Add(key, want)
		v, ok := lc.Get(key)
		if v != want {
			t.Fatalf("value differs from expected")
		}
		if !ok {
			t.Fatalf("should be true")
		}
		// The cache must never balloon past its configured capacity.
		if lc.Len() > 20 {
			t.Fatalf("length should be less than 20")
		}
	}

	if lc.Len() != 10 {
		t.Fatalf("length differs from expected")
	}
}
// TestLRUConcurrency hammers Add from 1000 goroutines (10 writers per key)
// to exercise the cache's internal locking; most useful under -race.
func TestLRUConcurrency(t *testing.T) {
	lc := NewLRU[string, string](0, nil, 0)
	wg := sync.WaitGroup{}
	wg.Add(1000)
	for i := 0; i < 1000; i++ {
		go func(i int) {
			lc.Add(fmt.Sprintf("key-%d", i/10), fmt.Sprintf("val-%d", i/10))
			wg.Done()
		}(i)
	}
	wg.Wait()
	// 1000 writes collapse onto exactly 100 distinct keys.
	if lc.Len() != 100 {
		t.Fatalf("length differs from expected")
	}
}
// TestLRUInvalidateAndEvict checks that Remove triggers the eviction
// callback exactly once and that Get never does. Negative size is
// normalized to unlimited by NewLRU.
func TestLRUInvalidateAndEvict(t *testing.T) {
	var evicted int
	lc := NewLRU(-1, func(_, _ string) { evicted++ }, 0)

	lc.Add("key1", "val1")
	lc.Add("key2", "val2")

	val, ok := lc.Get("key1")
	if !ok {
		t.Fatalf("should be true")
	}
	if val != "val1" {
		t.Fatalf("value differs from expected")
	}
	// Plain reads must not fire the callback.
	if evicted != 0 {
		t.Fatalf("value differs from expected")
	}

	lc.Remove("key1")
	if evicted != 1 {
		t.Fatalf("value differs from expected")
	}
	val, ok = lc.Get("key1")
	if val != "" {
		t.Fatalf("should be empty")
	}
	if ok {
		t.Fatalf("should be false")
	}
}
// TestLoadingExpired spins on Get until a 5ms-TTL entry expires, then
// verifies both Get and Peek report it absent once the reaper has run.
// Timing-sensitive by design.
func TestLoadingExpired(t *testing.T) {
	lc := NewLRU[string, string](0, nil, time.Millisecond*5)

	lc.Add("key1", "val1")
	if lc.Len() != 1 {
		t.Fatalf("length differs from expected")
	}

	v, ok := lc.Peek("key1")
	if v != "val1" {
		t.Fatalf("value differs from expected")
	}
	if !ok {
		t.Fatalf("should be true")
	}

	v, ok = lc.Get("key1")
	if v != "val1" {
		t.Fatalf("value differs from expected")
	}
	if !ok {
		t.Fatalf("should be true")
	}

	// Poll until expiry: while the entry lives, Get must never return
	// ok together with an empty value.
	for {
		result, ok := lc.Get("key1")
		if ok && result == "" {
			t.Fatalf("ok should return a result")
		}
		if !ok {
			break
		}
	}

	time.Sleep(time.Millisecond * 100) // wait for expiration reaper
	if lc.Len() != 0 {
		t.Fatalf("length differs from expected")
	}

	v, ok = lc.Peek("key1")
	if v != "" {
		t.Fatalf("should be empty")
	}
	if ok {
		t.Fatalf("should be false")
	}

	v, ok = lc.Get("key1")
	if v != "" {
		t.Fatalf("should be empty")
	}
	if ok {
		t.Fatalf("should be false")
	}
}
// TestLRURemoveOldest covers RemoveOldest on empty and populated caches,
// Remove of a missing key, and that Keys stays ordered oldest-to-newest.
func TestLRURemoveOldest(t *testing.T) {
	lc := NewLRU[string, string](2, nil, 0)

	// Empty cache: zero values, ok == false.
	k, v, ok := lc.RemoveOldest()
	if k != "" {
		t.Fatalf("should be empty")
	}
	if v != "" {
		t.Fatalf("should be empty")
	}
	if ok {
		t.Fatalf("should be false")
	}

	ok = lc.Remove("non_existent")
	if ok {
		t.Fatalf("should be false")
	}

	lc.Add("key1", "val1")
	if lc.Len() != 1 {
		t.Fatalf("length differs from expected")
	}

	v, ok = lc.Get("key1")
	if !ok {
		t.Fatalf("should be true")
	}
	if v != "val1" {
		t.Fatalf("value differs from expected")
	}

	if !reflect.DeepEqual(lc.Keys(), []string{"key1"}) {
		t.Fatalf("value differs from expected")
	}
	if lc.Len() != 1 {
		t.Fatalf("length differs from expected")
	}

	lc.Add("key2", "val2")
	if !reflect.DeepEqual(lc.Keys(), []string{"key1", "key2"}) {
		t.Fatalf("value differs from expected")
	}
	if lc.Len() != 2 {
		t.Fatalf("length differs from expected")
	}

	// key1 was added first, so it is the oldest.
	k, v, ok = lc.RemoveOldest()
	if k != "key1" {
		t.Fatalf("value differs from expected")
	}
	if v != "val1" {
		t.Fatalf("value differs from expected")
	}
	if !ok {
		t.Fatalf("should be true")
	}

	if !reflect.DeepEqual(lc.Keys(), []string{"key2"}) {
		t.Fatalf("value differs from expected")
	}
	if lc.Len() != 1 {
		t.Fatalf("length differs from expected")
	}
}
// ExampleLRU demonstrates creating an expirable cache, observing a hit
// before the TTL elapses and a miss afterwards. The Output block below is
// checked by the test runner and must match exactly.
func ExampleLRU() {
	// make cache with 10ms TTL and 5 max keys
	cache := NewLRU[string, string](5, nil, time.Millisecond*10)

	// set value under key1.
	cache.Add("key1", "val1")

	// get value under key1
	r, ok := cache.Get("key1")

	// check for OK value
	if ok {
		fmt.Printf("value before expiration is found: %v, value: %q\n", ok, r)
	}

	// wait for cache to expire
	time.Sleep(time.Millisecond * 100)

	// get value under key1 after key expiration
	r, ok = cache.Get("key1")
	fmt.Printf("value after expiration is found: %v, value: %q\n", ok, r)

	// set value under key2, would evict old entry because it is already expired.
	cache.Add("key2", "val2")

	fmt.Printf("Cache len: %d\n", cache.Len())
	// Output:
	// value before expiration is found: true, value: "val1"
	// value after expiration is found: false, value: ""
	// Cache len: 1
}
func getRand(tb testing.TB) int64 {
out, err := rand.Int(rand.Reader, big.NewInt(math.MaxInt64))
if err != nil {
tb.Fatal(err)
}
return out.Int64()
}
// wantKeys asserts that the cache's keys, ordered oldest to newest, equal
// want. Test helper; reports via t.Errorf so the caller keeps running.
func (c *LRU[K, V]) wantKeys(t *testing.T, want []K) {
	t.Helper()
	got := c.Keys()
	if !reflect.DeepEqual(got, want) {
		t.Errorf("wrong keys got: %v, want: %v ", got, want)
	}
}
// TestCache_EvictionSameKey verifies that re-adding an existing key only
// refreshes its recency (no eviction) and that the callback fires for the
// key actually displaced when capacity overflows.
func TestCache_EvictionSameKey(t *testing.T) {
	var evictedKeys []int

	cache := NewLRU[int, struct{}](
		2,
		func(key int, _ struct{}) {
			evictedKeys = append(evictedKeys, key)
		},
		0)

	if evicted := cache.Add(1, struct{}{}); evicted {
		t.Error("First 1: got unexpected eviction")
	}
	cache.wantKeys(t, []int{1})

	if evicted := cache.Add(2, struct{}{}); evicted {
		t.Error("2: got unexpected eviction")
	}
	cache.wantKeys(t, []int{1, 2})

	// Re-adding key 1 moves it to most-recently-used; nothing is evicted.
	if evicted := cache.Add(1, struct{}{}); evicted {
		t.Error("Second 1: got unexpected eviction")
	}
	cache.wantKeys(t, []int{2, 1})

	// Capacity 2 is full; adding 3 must evict the LRU entry (2).
	if evicted := cache.Add(3, struct{}{}); !evicted {
		t.Error("3: did not get expected eviction")
	}
	cache.wantKeys(t, []int{1, 3})

	want := []int{2}
	if !reflect.DeepEqual(evictedKeys, want) {
		t.Errorf("evictedKeys got: %v want: %v", evictedKeys, want)
	}
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/hashicorp/golang-lru/v2/expirable/expirable_lru.go | third-party/github.com/hashicorp/golang-lru/v2/expirable/expirable_lru.go | // Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: MPL-2.0
package expirable
import (
"sync"
"time"
"github.com/hashicorp/golang-lru/v2/internal"
)
// EvictCallback is used to get a callback when a cache entry is evicted
type EvictCallback[K comparable, V any] func(key K, value V)

// LRU implements a thread-safe LRU with expirable entries.
type LRU[K comparable, V any] struct {
	size      int                          // max entries; 0 means unlimited
	evictList *internal.LruList[K, V]      // recency-ordered doubly linked list
	items     map[K]*internal.Entry[K, V]  // key -> list entry
	onEvict   EvictCallback[K, V]          // optional eviction callback

	// expirable options
	mu   sync.Mutex
	ttl  time.Duration
	done chan struct{} // signals the cleanup goroutine to stop

	// buckets for expiration
	buckets []bucket[K, V]
	// uint8 because it's number between 0 and numBuckets
	nextCleanupBucket uint8
}
// bucket is a container for holding entries to be expired together.
// newestEntry is the latest expiry time of any entry in the bucket, so the
// reaper can wait for the whole bucket to be past-due before sweeping it.
type bucket[K comparable, V any] struct {
	entries     map[K]*internal.Entry[K, V]
	newestEntry time.Time
}
// noEvictionTTL - very long ttl to prevent eviction (effectively "never"
// for any realistic process lifetime).
const noEvictionTTL = time.Hour * 24 * 365 * 10

// because of uint8 usage for nextCleanupBucket, should not exceed 256.
// casting it as uint8 explicitly requires type conversions in multiple places
const numBuckets = 100
// NewLRU returns a new thread-safe cache with expirable entries.
//
// Size parameter set to 0 makes cache of unlimited size, e.g. turns LRU mechanism off.
//
// Providing 0 TTL turns expiring off.
//
// Delete expired entries every 1/100th of ttl value. Goroutine which deletes expired entries runs indefinitely.
func NewLRU[K comparable, V any](size int, onEvict EvictCallback[K, V], ttl time.Duration) *LRU[K, V] {
	// Negative sizes are normalized to "unlimited".
	if size < 0 {
		size = 0
	}
	// ttl <= 0 means "never expire": use a sentinel TTL so the rest of the
	// code can treat expiry uniformly without nil checks.
	if ttl <= 0 {
		ttl = noEvictionTTL
	}

	res := LRU[K, V]{
		ttl:       ttl,
		size:      size,
		evictList: internal.NewList[K, V](),
		items:     make(map[K]*internal.Entry[K, V]),
		onEvict:   onEvict,
		done:      make(chan struct{}),
	}

	// initialize the buckets
	res.buckets = make([]bucket[K, V], numBuckets)
	for i := 0; i < numBuckets; i++ {
		res.buckets[i] = bucket[K, V]{entries: make(map[K]*internal.Entry[K, V])}
	}

	// enable deleteExpired() running in separate goroutine for cache with non-zero TTL
	//
	// Important: done channel is never closed, so deleteExpired() goroutine will never exit,
	// it's decided to add functionality to close it in the version later than v2.
	if res.ttl != noEvictionTTL {
		go func(done <-chan struct{}) {
			// One tick per bucket: the reaper visits each of the
			// numBuckets buckets once per full TTL period.
			ticker := time.NewTicker(res.ttl / numBuckets)
			defer ticker.Stop()
			for {
				select {
				case <-done:
					return
				case <-ticker.C:
					res.deleteExpired()
				}
			}
		}(res.done)
	}
	return &res
}
// Purge clears the cache completely.
// onEvict is called for each evicted key.
func (c *LRU[K, V]) Purge() {
	c.mu.Lock()
	defer c.mu.Unlock()
	for k, v := range c.items {
		if c.onEvict != nil {
			c.onEvict(k, v.Value)
		}
		delete(c.items, k)
	}
	// Empty every expiry bucket as well; deleting while ranging over a
	// map is well-defined in Go.
	for _, b := range c.buckets {
		for _, ent := range b.entries {
			delete(b.entries, ent.Key)
		}
	}
	c.evictList.Init()
}
// Add adds a value to the cache. Returns true if an eviction occurred.
// Returns false if there was no eviction: the item was already in the cache,
// or the size was not exceeded.
func (c *LRU[K, V]) Add(key K, value V) (evicted bool) {
	c.mu.Lock()
	defer c.mu.Unlock()
	now := time.Now()

	// Check for existing item
	if ent, ok := c.items[key]; ok {
		c.evictList.MoveToFront(ent)
		c.removeFromBucket(ent) // remove the entry from its current bucket as expiresAt is renewed
		ent.Value = value
		ent.ExpiresAt = now.Add(c.ttl)
		c.addToBucket(ent)
		return false
	}

	// Add new item
	ent := c.evictList.PushFrontExpirable(key, value, now.Add(c.ttl))
	c.items[key] = ent
	c.addToBucket(ent) // adds the entry to the appropriate bucket and sets entry.expireBucket

	// size == 0 means unlimited, so the list can only overflow when a
	// positive cap is configured.
	evict := c.size > 0 && c.evictList.Length() > c.size
	// Verify size not exceeded
	if evict {
		c.removeOldest()
	}
	return evict
}
// Get looks up a key's value from the cache, promoting the entry to
// most-recently-used. An entry past its expiry time is reported as a miss.
func (c *LRU[K, V]) Get(key K) (value V, ok bool) {
	c.mu.Lock()
	defer c.mu.Unlock()
	ent, found := c.items[key]
	if !found {
		return value, false
	}
	// Present but expired: treat as absent (the reaper will collect it).
	if time.Now().After(ent.ExpiresAt) {
		return value, false
	}
	c.evictList.MoveToFront(ent)
	return ent.Value, true
}
// Contains checks if a key is in the cache, without updating the recent-ness
// or deleting it for being stale.
func (c *LRU[K, V]) Contains(key K) (ok bool) {
	c.mu.Lock()
	_, ok = c.items[key]
	c.mu.Unlock()
	return
}
// Peek returns the key value (or the zero value if not found) without
// updating the "recently used"-ness of the key. Expired entries are misses.
func (c *LRU[K, V]) Peek(key K) (value V, ok bool) {
	c.mu.Lock()
	defer c.mu.Unlock()
	ent, found := c.items[key]
	if !found || time.Now().After(ent.ExpiresAt) {
		return value, false
	}
	return ent.Value, true
}
// Remove removes the provided key from the cache, returning whether the
// key was present.
func (c *LRU[K, V]) Remove(key K) bool {
	c.mu.Lock()
	defer c.mu.Unlock()
	ent, found := c.items[key]
	if !found {
		return false
	}
	c.removeElement(ent)
	return true
}
// RemoveOldest removes and returns the least-recently-used item, or zero
// values and false when the cache is empty.
func (c *LRU[K, V]) RemoveOldest() (key K, value V, ok bool) {
	c.mu.Lock()
	defer c.mu.Unlock()
	ent := c.evictList.Back()
	if ent == nil {
		return
	}
	c.removeElement(ent)
	return ent.Key, ent.Value, true
}
// GetOldest returns the oldest entry without removing it, or zero values
// and false when the cache is empty.
func (c *LRU[K, V]) GetOldest() (key K, value V, ok bool) {
	c.mu.Lock()
	defer c.mu.Unlock()
	ent := c.evictList.Back()
	if ent == nil {
		return
	}
	return ent.Key, ent.Value, true
}
// Keys returns a slice of the keys in the cache, from oldest to newest.
func (c *LRU[K, V]) Keys() []K {
	c.mu.Lock()
	defer c.mu.Unlock()
	out := make([]K, 0, len(c.items))
	// Walk backwards from the tail (least recently used) of the list.
	for e := c.evictList.Back(); e != nil; e = e.PrevEntry() {
		out = append(out, e.Key)
	}
	return out
}
// Values returns a slice of the values in the cache, from oldest to newest.
// Expired entries are filtered out.
//
// Fix: the previous implementation allocated a slice of length len(c.items)
// and then skipped expired entries with `continue`, leaving zero-value
// elements at the tail of the returned slice. Appending to a zero-length
// slice (and thereby returning only the filled prefix) removes the spurious
// zero values.
func (c *LRU[K, V]) Values() []V {
	c.mu.Lock()
	defer c.mu.Unlock()
	values := make([]V, 0, len(c.items))
	now := time.Now()
	for ent := c.evictList.Back(); ent != nil; ent = ent.PrevEntry() {
		if now.After(ent.ExpiresAt) {
			// Expired but not yet reaped: must not appear in the output.
			continue
		}
		values = append(values, ent.Value)
	}
	return values
}
// Len returns the number of items in the cache, including entries that
// have expired but have not been reaped yet.
func (c *LRU[K, V]) Len() int {
	c.mu.Lock()
	n := c.evictList.Length()
	c.mu.Unlock()
	return n
}
// Resize changes the cache size. Size of 0 (or less) means unlimited and
// evicts nothing. Returns the number of entries evicted while shrinking.
func (c *LRU[K, V]) Resize(size int) (evicted int) {
	c.mu.Lock()
	defer c.mu.Unlock()
	if size <= 0 {
		c.size = 0
		return 0
	}
	// Drop LRU entries until the cache fits the new cap.
	for c.evictList.Length() > size {
		c.removeOldest()
		evicted++
	}
	c.size = size
	return evicted
}
// Close destroys cleanup goroutine. To clean up the cache, run Purge() before Close().
// func (c *LRU[K, V]) Close() {
// c.mu.Lock()
// defer c.mu.Unlock()
// select {
// case <-c.done:
// return
// default:
// }
// close(c.done)
// }
// removeOldest removes the oldest item from the cache. Has to be called with lock!
func (c *LRU[K, V]) removeOldest() {
	ent := c.evictList.Back()
	if ent == nil {
		return
	}
	c.removeElement(ent)
}
// removeElement is used to remove a given list element from the cache. Has to be called with lock!
// Unlinks the entry from the recency list, the key map, and its expiry
// bucket before firing onEvict.
func (c *LRU[K, V]) removeElement(e *internal.Entry[K, V]) {
	c.evictList.Remove(e)
	delete(c.items, e.Key)
	c.removeFromBucket(e)
	// NOTE: the callback runs while c.mu is held, so callbacks must not
	// call back into this cache or they will deadlock.
	if c.onEvict != nil {
		c.onEvict(e.Key, e.Value)
	}
}
// deleteExpired deletes expired records from the oldest bucket, waiting for the newest entry
// in it to expire first.
func (c *LRU[K, V]) deleteExpired() {
	c.mu.Lock()
	bucketIdx := c.nextCleanupBucket
	timeToExpire := time.Until(c.buckets[bucketIdx].newestEntry)
	// wait for newest entry to expire before cleanup without holding lock
	// (other goroutines may Add/Get while we sleep; the bucket index is
	// re-read under the reacquired lock via bucketIdx below).
	if timeToExpire > 0 {
		c.mu.Unlock()
		time.Sleep(timeToExpire)
		c.mu.Lock()
	}
	// Everything still in the bucket is at or past its expiry time.
	for _, ent := range c.buckets[bucketIdx].entries {
		c.removeElement(ent)
	}
	// Advance the cleanup cursor, wrapping at numBuckets.
	c.nextCleanupBucket = (c.nextCleanupBucket + 1) % numBuckets
	c.mu.Unlock()
}
// addToBucket adds entry to expire bucket so that it will be cleaned up when the time comes. Has to be called with lock!
// New entries expire a full TTL from now, so they go into the bucket just
// behind the cleanup cursor — the one that will be swept last.
func (c *LRU[K, V]) addToBucket(e *internal.Entry[K, V]) {
	bucketID := (numBuckets + c.nextCleanupBucket - 1) % numBuckets
	e.ExpireBucket = bucketID
	c.buckets[bucketID].entries[e.Key] = e
	// Track the bucket's latest expiry so the reaper knows how long to wait.
	if c.buckets[bucketID].newestEntry.Before(e.ExpiresAt) {
		c.buckets[bucketID].newestEntry = e.ExpiresAt
	}
}
// removeFromBucket removes the entry from its corresponding bucket. Has to be called with lock!
// Deleting a key that is absent from the bucket map is a harmless no-op.
func (c *LRU[K, V]) removeFromBucket(e *internal.Entry[K, V]) {
	delete(c.buckets[e.ExpireBucket].entries, e.Key)
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/hashicorp/golang-lru/v2/simplelru/lru_test.go | third-party/github.com/hashicorp/golang-lru/v2/simplelru/lru_test.go | // Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: MPL-2.0
package simplelru
import (
"reflect"
"testing"
)
// TestLRU is the broad happy-path test for the plain (non-expirable) LRU:
// capacity enforcement, eviction callback bookkeeping, key ordering,
// Remove, and Purge.
func TestLRU(t *testing.T) {
	evictCounter := 0
	onEvicted := func(k int, v int) {
		// Entries are always added with k == v, so evictions must agree.
		if k != v {
			t.Fatalf("Evict values not equal (%v!=%v)", k, v)
		}
		evictCounter++
	}
	l, err := NewLRU(128, onEvicted)
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	// Adding 256 entries into a 128-cap cache evicts the first 128.
	for i := 0; i < 256; i++ {
		l.Add(i, i)
	}
	if l.Len() != 128 {
		t.Fatalf("bad len: %v", l.Len())
	}

	if evictCounter != 128 {
		t.Fatalf("bad evict count: %v", evictCounter)
	}

	// Survivors are 128..255, ordered oldest to newest.
	for i, k := range l.Keys() {
		if v, ok := l.Get(k); !ok || v != k || v != i+128 {
			t.Fatalf("bad key: %v", k)
		}
	}
	for i, v := range l.Values() {
		if v != i+128 {
			t.Fatalf("bad value: %v", v)
		}
	}
	for i := 0; i < 128; i++ {
		if _, ok := l.Get(i); ok {
			t.Fatalf("should be evicted")
		}
	}
	for i := 128; i < 256; i++ {
		if _, ok := l.Get(i); !ok {
			t.Fatalf("should not be evicted")
		}
	}

	// Remove is idempotent: second call on the same key reports absent.
	for i := 128; i < 192; i++ {
		if ok := l.Remove(i); !ok {
			t.Fatalf("should be contained")
		}
		if ok := l.Remove(i); ok {
			t.Fatalf("should not be contained")
		}
		if _, ok := l.Get(i); ok {
			t.Fatalf("should be deleted")
		}
	}

	l.Get(192) // expect 192 to be last key in l.Keys()

	for i, k := range l.Keys() {
		if (i < 63 && k != i+193) || (i == 63 && k != 192) {
			t.Fatalf("out of order key: %v", k)
		}
	}

	l.Purge()
	if l.Len() != 0 {
		t.Fatalf("bad len: %v", l.Len())
	}
	if _, ok := l.Get(200); ok {
		t.Fatalf("should contain nothing")
	}
}
// TestLRU_GetOldest_RemoveOldest checks that after overflowing the cache,
// the oldest surviving entry is reported and removed in insertion order.
func TestLRU_GetOldest_RemoveOldest(t *testing.T) {
	l, err := NewLRU[int, int](128, nil)
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	for i := 0; i < 256; i++ {
		l.Add(i, i)
	}
	// Keys 0..127 were evicted, so 128 is the oldest survivor.
	k, _, ok := l.GetOldest()
	if !ok {
		t.Fatalf("missing")
	}
	if k != 128 {
		t.Fatalf("bad: %v", k)
	}

	k, _, ok = l.RemoveOldest()
	if !ok {
		t.Fatalf("missing")
	}
	if k != 128 {
		t.Fatalf("bad: %v", k)
	}

	// With 128 gone, 129 becomes the oldest.
	k, _, ok = l.RemoveOldest()
	if !ok {
		t.Fatalf("missing")
	}
	if k != 129 {
		t.Fatalf("bad: %v", k)
	}
}
// TestLRU_Add verifies that Add reports whether an eviction occurred and
// that the eviction callback fires exactly once per eviction.
func TestLRU_Add(t *testing.T) {
	evictCounter := 0
	// The callback's key/value were previously named k, v but never read;
	// blank parameters make the intent explicit.
	onEvicted := func(int, int) {
		evictCounter++
	}

	l, err := NewLRU(1, onEvicted)
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	if l.Add(1, 1) == true || evictCounter != 0 {
		t.Errorf("should not have an eviction")
	}
	if l.Add(2, 2) == false || evictCounter != 1 {
		t.Errorf("should have an eviction")
	}
}
// Test that Contains doesn't update recent-ness: after Contains(1), key 1
// must still be the LRU entry and get evicted by the next Add.
func TestLRU_Contains(t *testing.T) {
	l, err := NewLRU[int, int](2, nil)
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	l.Add(1, 1)
	l.Add(2, 2)
	if !l.Contains(1) {
		t.Errorf("1 should be contained")
	}

	l.Add(3, 3)
	if l.Contains(1) {
		t.Errorf("Contains should not have updated recent-ness of 1")
	}
}
// Test that Peek doesn't update recent-ness: after Peek(1), key 1 must
// still be the LRU entry and get evicted by the next Add.
func TestLRU_Peek(t *testing.T) {
	l, err := NewLRU[int, int](2, nil)
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	l.Add(1, 1)
	l.Add(2, 2)
	if v, ok := l.Peek(1); !ok || v != 1 {
		t.Errorf("1 should be set to 1: %v, %v", v, ok)
	}

	l.Add(3, 3)
	if l.Contains(1) {
		t.Errorf("should not have updated recent-ness of 1")
	}
}
// TestLRU_Resize exercises both shrinking (which must evict overflow and
// fire the callback) and growing (which must evict nothing).
func TestLRU_Resize(t *testing.T) {
	onEvictCounter := 0
	// The callback's key/value were previously named k, v but never read;
	// blank parameters make the intent explicit.
	onEvicted := func(int, int) {
		onEvictCounter++
	}
	l, err := NewLRU(2, onEvicted)
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	// Downsize
	l.Add(1, 1)
	l.Add(2, 2)
	evicted := l.Resize(1)
	if evicted != 1 {
		t.Errorf("1 element should have been evicted: %v", evicted)
	}
	if onEvictCounter != 1 {
		t.Errorf("onEvicted should have been called 1 time: %v", onEvictCounter)
	}

	l.Add(3, 3)
	if l.Contains(1) {
		t.Errorf("Element 1 should have been evicted")
	}

	// Upsize
	evicted = l.Resize(2)
	if evicted != 0 {
		t.Errorf("0 elements should have been evicted: %v", evicted)
	}

	l.Add(4, 4)
	if !l.Contains(3) || !l.Contains(4) {
		t.Errorf("Cache should have contained 2 elements")
	}
}
// wantKeys asserts that the cache's keys, ordered oldest to newest, equal
// want. Test helper; reports via t.Errorf so the caller keeps running.
func (c *LRU[K, V]) wantKeys(t *testing.T, want []K) {
	t.Helper()
	got := c.Keys()
	if !reflect.DeepEqual(got, want) {
		t.Errorf("wrong keys got: %v, want: %v ", got, want)
	}
}
// TestCache_EvictionSameKey verifies that re-adding an existing key only
// refreshes its recency (no eviction) and that the callback fires for the
// key actually displaced when capacity overflows.
func TestCache_EvictionSameKey(t *testing.T) {
	var evictedKeys []int

	cache, _ := NewLRU(
		2,
		func(key int, _ struct{}) {
			evictedKeys = append(evictedKeys, key)
		})

	if evicted := cache.Add(1, struct{}{}); evicted {
		t.Error("First 1: got unexpected eviction")
	}
	cache.wantKeys(t, []int{1})

	if evicted := cache.Add(2, struct{}{}); evicted {
		t.Error("2: got unexpected eviction")
	}
	cache.wantKeys(t, []int{1, 2})

	// Re-adding key 1 moves it to most-recently-used; nothing is evicted.
	if evicted := cache.Add(1, struct{}{}); evicted {
		t.Error("Second 1: got unexpected eviction")
	}
	cache.wantKeys(t, []int{2, 1})

	// Capacity 2 is full; adding 3 must evict the LRU entry (2).
	if evicted := cache.Add(3, struct{}{}); !evicted {
		t.Error("3: did not get expected eviction")
	}
	cache.wantKeys(t, []int{1, 3})

	want := []int{2}
	if !reflect.DeepEqual(evictedKeys, want) {
		t.Errorf("evictedKeys got: %v want: %v", evictedKeys, want)
	}
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/hashicorp/golang-lru/v2/simplelru/lru_interface.go | third-party/github.com/hashicorp/golang-lru/v2/simplelru/lru_interface.go | // Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: MPL-2.0
// Package simplelru provides a simple LRU implementation based on the built-in container/list.
package simplelru
// LRUCache is the interface for simple LRU cache.
type LRUCache[K comparable, V any] interface {
	// Add adds a value to the cache, returns true if an eviction occurred and
	// updates the "recently used"-ness of the key.
	Add(key K, value V) bool

	// Get returns key's value from the cache and
	// updates the "recently used"-ness of the key. #value, isFound
	Get(key K) (value V, ok bool)

	// Contains checks if a key exists in cache without updating the recent-ness.
	Contains(key K) (ok bool)

	// Peek returns key's value without updating the "recently used"-ness of the key.
	Peek(key K) (value V, ok bool)

	// Remove removes a key from the cache, reporting whether it was present.
	Remove(key K) bool

	// RemoveOldest removes the oldest entry from cache.
	RemoveOldest() (K, V, bool)

	// GetOldest returns the oldest entry from the cache. #key, value, isFound
	GetOldest() (K, V, bool)

	// Keys returns a slice of the keys in the cache, from oldest to newest.
	Keys() []K

	// Values returns a slice of the values in the cache, from oldest to newest.
	Values() []V

	// Len returns the number of items in the cache.
	Len() int

	// Purge clears all cache entries.
	Purge()

	// Resize resizes cache, returning number evicted
	Resize(int) int
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/hashicorp/golang-lru/v2/simplelru/lru.go | third-party/github.com/hashicorp/golang-lru/v2/simplelru/lru.go | // Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: MPL-2.0
package simplelru
import (
"errors"
"github.com/hashicorp/golang-lru/v2/internal"
)
// EvictCallback is used to get a callback when a cache entry is evicted
type EvictCallback[K comparable, V any] func(key K, value V)

// LRU implements a non-thread safe fixed size LRU cache
type LRU[K comparable, V any] struct {
	size      int                         // maximum number of entries; always > 0
	evictList *internal.LruList[K, V]     // recency-ordered doubly linked list
	items     map[K]*internal.Entry[K, V] // key -> list entry
	onEvict   EvictCallback[K, V]         // optional eviction callback
}
// NewLRU constructs an LRU of the given size. The size must be positive;
// otherwise an error is returned.
func NewLRU[K comparable, V any](size int, onEvict EvictCallback[K, V]) (*LRU[K, V], error) {
	if size <= 0 {
		return nil, errors.New("must provide a positive size")
	}
	lru := LRU[K, V]{
		size:      size,
		evictList: internal.NewList[K, V](),
		items:     map[K]*internal.Entry[K, V]{},
		onEvict:   onEvict,
	}
	return &lru, nil
}
// Purge is used to completely clear the cache, invoking the eviction
// callback (if any) for every entry.
func (c *LRU[K, V]) Purge() {
	for key, ent := range c.items {
		if c.onEvict != nil {
			c.onEvict(key, ent.Value)
		}
		delete(c.items, key)
	}
	c.evictList.Init()
}
// Add adds a value to the cache. Returns true if an eviction occurred.
func (c *LRU[K, V]) Add(key K, value V) (evicted bool) {
	// Updating an existing key refreshes its recency and never evicts.
	if ent, found := c.items[key]; found {
		ent.Value = value
		c.evictList.MoveToFront(ent)
		return false
	}

	// New key: push to the front and evict the back if over capacity.
	c.items[key] = c.evictList.PushFront(key, value)
	if c.evictList.Length() <= c.size {
		return false
	}
	c.removeOldest()
	return true
}
// Get looks up a key's value from the cache, promoting the entry to
// most-recently-used on a hit.
func (c *LRU[K, V]) Get(key K) (value V, ok bool) {
	ent, found := c.items[key]
	if !found {
		return
	}
	c.evictList.MoveToFront(ent)
	return ent.Value, true
}
// Contains checks if a key is in the cache, without updating the recent-ness
// or deleting it for being stale.
func (c *LRU[K, V]) Contains(key K) (ok bool) {
	_, ok = c.items[key]
	return
}
// Peek returns the key value (or the zero value if not found) without
// updating the "recently used"-ness of the key.
func (c *LRU[K, V]) Peek(key K) (value V, ok bool) {
	if ent, found := c.items[key]; found {
		return ent.Value, true
	}
	return
}
// Remove removes the provided key from the cache, returning whether the
// key was contained.
func (c *LRU[K, V]) Remove(key K) (present bool) {
	var ent *internal.Entry[K, V]
	if ent, present = c.items[key]; present {
		c.removeElement(ent)
	}
	return
}
// RemoveOldest removes and returns the least-recently-used item, or zero
// values and false when the cache is empty.
func (c *LRU[K, V]) RemoveOldest() (key K, value V, ok bool) {
	ent := c.evictList.Back()
	if ent == nil {
		return
	}
	c.removeElement(ent)
	return ent.Key, ent.Value, true
}
// GetOldest returns the oldest entry without removing it, or zero values
// and false when the cache is empty.
func (c *LRU[K, V]) GetOldest() (key K, value V, ok bool) {
	ent := c.evictList.Back()
	if ent == nil {
		return
	}
	return ent.Key, ent.Value, true
}
// Keys returns a slice of the keys in the cache, from oldest to newest.
func (c *LRU[K, V]) Keys() []K {
	out := make([]K, 0, c.evictList.Length())
	// Walk from the tail (least recently used) toward the head.
	for e := c.evictList.Back(); e != nil; e = e.PrevEntry() {
		out = append(out, e.Key)
	}
	return out
}
// Values returns a slice of the values in the cache, from oldest to newest.
func (c *LRU[K, V]) Values() []V {
	out := make([]V, 0, len(c.items))
	// Walk from the tail (least recently used) toward the head.
	for e := c.evictList.Back(); e != nil; e = e.PrevEntry() {
		out = append(out, e.Value)
	}
	return out
}
// Len returns the number of items currently held in the cache.
func (c *LRU[K, V]) Len() int { return c.evictList.Length() }
// Resize changes the cache size, evicting least-recently-used entries as
// needed to fit. Returns the number of entries evicted.
func (c *LRU[K, V]) Resize(size int) (evicted int) {
	overflow := c.Len() - size
	for i := 0; i < overflow; i++ {
		c.removeOldest()
		evicted++
	}
	c.size = size
	return
}
// removeOldest removes the oldest item from the cache; no-op when empty.
func (c *LRU[K, V]) removeOldest() {
	ent := c.evictList.Back()
	if ent == nil {
		return
	}
	c.removeElement(ent)
}
// removeElement unlinks e from the recency list and the key map, then
// fires the eviction callback if one is registered.
func (c *LRU[K, V]) removeElement(e *internal.Entry[K, V]) {
	c.evictList.Remove(e)
	delete(c.items, e.Key)
	if cb := c.onEvict; cb != nil {
		cb(e.Key, e.Value)
	}
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/hashicorp/golang-lru/v2/internal/list.go | third-party/github.com/hashicorp/golang-lru/v2/internal/list.go | // Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE_list file.
package internal
import "time"
// Entry is an LRU Entry
type Entry[K comparable, V any] struct {
	// Next and previous pointers in the doubly-linked list of elements.
	// To simplify the implementation, internally a list l is implemented
	// as a ring, such that &l.root is both the next element of the last
	// list element (l.Back()) and the previous element of the first list
	// element (l.Front()).
	next, prev *Entry[K, V]

	// The list to which this element belongs; nil once removed.
	list *LruList[K, V]

	// The LRU Key of this element.
	Key K

	// The Value stored with this element.
	Value V

	// The time this element would be cleaned up, optional
	ExpiresAt time.Time

	// The expiry bucket item was put in, optional
	ExpireBucket uint8
}
// PrevEntry returns the previous list element or nil.
func (e *Entry[K, V]) PrevEntry() *Entry[K, V] {
	// A detached entry (list == nil) has no predecessor, and walking past
	// the root sentinel means we reached the front of the list.
	p := e.prev
	if e.list == nil || p == &e.list.root {
		return nil
	}
	return p
}
// LruList represents a doubly linked list.
// The zero Value for LruList is an empty list ready to use.
type LruList[K comparable, V any] struct {
	root Entry[K, V] // sentinel list element, only &root, root.prev, and root.next are used
	len  int         // current list Length excluding (this) sentinel element
}
// Init initializes or clears list l, returning l for chaining.
func (l *LruList[K, V]) Init() *LruList[K, V] {
	// An empty ring points the sentinel at itself in both directions.
	l.root.next, l.root.prev = &l.root, &l.root
	l.len = 0
	return l
}
// NewList returns an initialized, empty list.
func NewList[K comparable, V any]() *LruList[K, V] {
	l := &LruList[K, V]{}
	return l.Init()
}
// Length returns the number of elements of list l.
// The complexity is O(1).
func (l *LruList[K, V]) Length() int {
	return l.len
}
// Back returns the last element of list l or nil if the list is empty.
func (l *LruList[K, V]) Back() *Entry[K, V] {
	if l.len > 0 {
		return l.root.prev
	}
	return nil
}
// lazyInit lazily initializes a zero List Value on first use.
func (l *LruList[K, V]) lazyInit() {
	if l.root.next != nil {
		return
	}
	l.Init()
}
// insert inserts e after at, increments l.len, and returns e.
func (l *LruList[K, V]) insert(e, at *Entry[K, V]) *Entry[K, V] {
	// Wire e's own pointers first, then splice the neighbors to point
	// back at e (e.prev.next is `at`, e.next.prev is at's old successor).
	e.prev = at
	e.next = at.next
	e.prev.next = e
	e.next.prev = e
	e.list = l
	l.len++
	return e
}
// insertValue is a convenience wrapper for insert(&Entry{Value: v, ExpiresAt: ExpiresAt}, at).
// A zero expiresAt marks an entry that never expires.
func (l *LruList[K, V]) insertValue(k K, v V, expiresAt time.Time, at *Entry[K, V]) *Entry[K, V] {
	return l.insert(&Entry[K, V]{Value: v, Key: k, ExpiresAt: expiresAt}, at)
}
// Remove removes e from its list, decrements l.len, and returns the
// removed entry's value.
func (l *LruList[K, V]) Remove(e *Entry[K, V]) V {
	e.prev.next = e.next
	e.next.prev = e.prev
	e.next = nil // avoid memory leaks
	e.prev = nil // avoid memory leaks
	e.list = nil
	l.len--

	return e.Value
}
// move moves e to next to at (unlink e, then re-splice it after at).
func (l *LruList[K, V]) move(e, at *Entry[K, V]) {
	if e == at {
		return
	}
	// Unlink e from its current position...
	e.prev.next = e.next
	e.next.prev = e.prev

	// ...then insert it directly after at.
	e.prev = at
	e.next = at.next
	e.prev.next = e
	e.next.prev = e
}
// PushFront inserts a new element e with value v at the front of list l and returns e.
// The entry carries a zero expiry time, meaning it never expires.
func (l *LruList[K, V]) PushFront(k K, v V) *Entry[K, V] {
	l.lazyInit()
	var never time.Time
	return l.insertValue(k, v, never, &l.root)
}
// PushFrontExpirable inserts a new expirable element e with Value v and the
// given expiry time at the front of list l and returns e.
func (l *LruList[K, V]) PushFrontExpirable(k K, v V, expiresAt time.Time) *Entry[K, V] {
	l.lazyInit()
	front := &l.root
	return l.insertValue(k, v, expiresAt, front)
}
// MoveToFront moves element e to the front of list l.
// If e is not an element of l, the list is not modified.
// The element must not be nil.
func (l *LruList[K, V]) MoveToFront(e *Entry[K, V]) {
	if e.list != l {
		return // not our element; leave the list untouched
	}
	if l.root.next == e {
		return // already at the front
	}
	// see comment in List.Remove about initialization of l
	l.move(e, &l.root)
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/hashicorp/go-version/constraint_test.go | third-party/github.com/hashicorp/go-version/constraint_test.go | // Copyright IBM Corp. 2014, 2025
// SPDX-License-Identifier: MPL-2.0
package version
import (
"fmt"
"reflect"
"sort"
"testing"
)
// TestNewConstraint checks parsing of constraint strings: the number of
// parsed constraints and whether parsing is expected to fail (wildcards and
// out-of-range numbers are rejected).
func TestNewConstraint(t *testing.T) {
	cases := []struct {
		input string
		count int
		err   bool
	}{
		{">= 1.2", 1, false},
		{"1.0", 1, false},
		{">= 1.x", 0, true},
		// Contradictory ranges still parse; satisfiability is not checked here.
		{">= 1.2, < 1.0", 2, false},

		// Out of bounds
		{"11387778780781445675529500000000000000000", 0, true},
	}

	for _, tc := range cases {
		v, err := NewConstraint(tc.input)
		if tc.err && err == nil {
			t.Fatalf("expected error for input: %s", tc.input)
		} else if !tc.err && err != nil {
			t.Fatalf("error for input %s: %s", tc.input, err)
		}

		if len(v) != tc.count {
			t.Fatalf("input: %s\nexpected len: %d\nactual: %d",
				tc.input, tc.count, len(v))
		}
	}
}
// TestConstraintCheck exercises Constraints.Check across the supported
// operators (=, <, <=, >, >=, ~>), including pessimistic ("~>") matching
// and prerelease-version handling.
func TestConstraintCheck(t *testing.T) {
	cases := []struct {
		constraint string
		version    string
		check      bool
	}{
		{">= 1.0, < 1.2", "1.1.5", true},
		{"< 1.0, < 1.2", "1.1.5", false},
		{"= 1.0", "1.1.5", false},
		{"= 1.0", "1.0.0", true},
		{"1.0", "1.0.0", true},
		{"~> 1.0", "2.0", false},
		{"~> 1.0", "1.1", true},
		{"~> 1.0", "1.2.3", true},
		{"~> 1.0.0", "1.2.3", false},
		{"~> 1.0.0", "1.0.7", true},
		{"~> 1.0.0", "1.1.0", false},
		{"~> 1.0.7", "1.0.4", false},
		{"~> 1.0.7", "1.0.7", true},
		{"~> 1.0.7", "1.0.8", true},
		{"~> 1.0.7", "1.0.7.5", true},
		{"~> 1.0.7", "1.0.6.99", false},
		{"~> 1.0.7", "1.0.8.0", true},
		{"~> 1.0.9.5", "1.0.9.5", true},
		{"~> 1.0.9.5", "1.0.9.4", false},
		{"~> 1.0.9.5", "1.0.9.6", true},
		{"~> 1.0.9.5", "1.0.9.5.0", true},
		{"~> 1.0.9.5", "1.0.9.5.1", true},
		{"~> 2.0", "2.1.0-beta", false},
		{"~> 2.1.0-a", "2.2.0", false},
		{"~> 2.1.0-a", "2.1.0", false},
		{"~> 2.1.0-a", "2.1.0-beta", true},
		{"~> 2.1.0-a", "2.2.0-alpha", false},
		{"> 2.0", "2.1.0-beta", false},
		{">= 2.1.0-a", "2.1.0-beta", true},
		{">= 2.1.0-a", "2.1.1-beta", false},
		{">= 2.0.0", "2.1.0-beta", false},
		{">= 2.1.0-a", "2.1.1", true},
		{">= 2.1.0-a", "2.1.1-beta", false},
		{">= 2.1.0-a", "2.1.0", true},
		{"<= 2.1.0-a", "2.0.0", true},
	}
	for _, tc := range cases {
		c, err := NewConstraint(tc.constraint)
		if err != nil {
			t.Fatalf("err: %s", err)
		}
		v, err := NewVersion(tc.version)
		if err != nil {
			t.Fatalf("err: %s", err)
		}
		actual := c.Check(v)
		expected := tc.check
		if actual != expected {
			// Report the actual result too, so a failure is diagnosable
			// straight from the test output.
			t.Fatalf("Version: %s\nConstraint: %s\nExpected: %#v\nActual: %#v",
				tc.version, tc.constraint, expected, actual)
		}
	}
}
// TestConstraintPrerelease verifies Constraint.Prerelease reports whether a
// single parsed constraint's check-version carries prerelease information.
func TestConstraintPrerelease(t *testing.T) {
	cases := []struct {
		constraint string
		prerelease bool
	}{
		{"= 1.0", false},
		{"= 1.0-beta", true},
		{"~> 2.1.0", false},
		{"~> 2.1.0-dev", true},
		{"> 2.0", false},
		{">= 2.1.0-a", true},
	}
	for _, tc := range cases {
		c, err := parseSingle(tc.constraint)
		if err != nil {
			t.Fatalf("err: %s", err)
		}
		actual := c.Prerelease()
		expected := tc.prerelease
		if actual != expected {
			// Include the actual result for diagnosability.
			t.Fatalf("Constraint: %s\nExpected: %#v\nActual: %#v",
				tc.constraint, expected, actual)
		}
	}
}
// TestConstraintEqual verifies Constraints.Equals: whitespace, an implied
// "=" operator, and constraint ordering must not affect equality, while
// differing versions or operators must.
func TestConstraintEqual(t *testing.T) {
	cases := []struct {
		leftConstraint  string
		rightConstraint string
		expectedEqual   bool
	}{
		{
			"0.0.1",
			"0.0.1",
			true,
		},
		{ // whitespaces
			" 0.0.1 ",
			"0.0.1",
			true,
		},
		{ // equal op implied
			"=0.0.1 ",
			"0.0.1",
			true,
		},
		{ // version difference
			"=0.0.1",
			"=0.0.2",
			false,
		},
		{ // operator difference
			">0.0.1",
			"=0.0.1",
			false,
		},
		{ // different order
			">0.1.0, <=1.0.0",
			"<=1.0.0, >0.1.0",
			true,
		},
	}
	for _, tc := range cases {
		leftCon, err := NewConstraint(tc.leftConstraint)
		if err != nil {
			t.Fatalf("err: %s", err)
		}
		rightCon, err := NewConstraint(tc.rightConstraint)
		if err != nil {
			t.Fatalf("err: %s", err)
		}
		actual := leftCon.Equals(rightCon)
		if actual != tc.expectedEqual {
			t.Fatalf("Constraints: %s vs %s\nExpected: %t\nActual: %t",
				tc.leftConstraint, tc.rightConstraint, tc.expectedEqual, actual)
		}
	}
}
// TestConstraint_sort verifies that sort.Sort over Constraints produces a
// deterministic order, checked via the String rendering.
func TestConstraint_sort(t *testing.T) {
	cases := []struct {
		constraint          string
		expectedConstraints string
	}{
		{
			">= 0.1.0,< 1.12",
			"< 1.12,>= 0.1.0",
		},
		{
			"< 1.12,>= 0.1.0",
			"< 1.12,>= 0.1.0",
		},
		{
			"< 1.12,>= 0.1.0,0.2.0",
			"< 1.12,0.2.0,>= 0.1.0",
		},
		{
			">1.0,>0.1.0,>0.3.0,>0.2.0",
			">0.1.0,>0.2.0,>0.3.0,>1.0",
		},
	}
	for i, tc := range cases {
		t.Run(fmt.Sprintf("%d", i), func(t *testing.T) {
			c, err := NewConstraint(tc.constraint)
			if err != nil {
				t.Fatalf("err: %s", err)
			}
			sort.Sort(c)
			actual := c.String()
			if !reflect.DeepEqual(actual, tc.expectedConstraints) {
				t.Fatalf("unexpected order\nexpected: %#v\nactual: %#v",
					tc.expectedConstraints, actual)
			}
		})
	}
}
// TestConstraintsString verifies Constraints.String round-trips the input;
// an empty "result" means the output is expected to equal the input string.
func TestConstraintsString(t *testing.T) {
	cases := []struct {
		constraint string
		result     string
	}{
		{">= 1.0, < 1.2", ""},
		{"~> 1.0.7", ""},
	}
	for _, tc := range cases {
		c, err := NewConstraint(tc.constraint)
		if err != nil {
			t.Fatalf("err: %s", err)
		}
		actual := c.String()
		expected := tc.result
		if expected == "" {
			expected = tc.constraint
		}
		if actual != expected {
			t.Fatalf("Constraint: %s\nExpected: %#v\nActual: %s",
				tc.constraint, expected, actual)
		}
	}
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/hashicorp/go-version/version_collection_test.go | third-party/github.com/hashicorp/go-version/version_collection_test.go | // Copyright IBM Corp. 2014, 2025
// SPDX-License-Identifier: MPL-2.0
package version
import (
"reflect"
"sort"
"testing"
)
// TestCollection verifies that sort.Sort over a Collection orders versions
// ascending, with short versions canonicalized ("1.0" -> "1.0.0").
func TestCollection(t *testing.T) {
	versionsRaw := []string{
		"1.1.1",
		"1.0",
		"1.2",
		"2",
		"0.7.1",
	}
	versions := make([]*Version, len(versionsRaw))
	for i, raw := range versionsRaw {
		v, err := NewVersion(raw)
		if err != nil {
			t.Fatalf("err: %s", err)
		}
		versions[i] = v
	}
	sort.Sort(Collection(versions))
	actual := make([]string, len(versions))
	for i, v := range versions {
		actual[i] = v.String()
	}
	expected := []string{
		"0.7.1",
		"1.0.0",
		"1.1.1",
		"1.2.0",
		"2.0.0",
	}
	if !reflect.DeepEqual(actual, expected) {
		t.Fatalf("bad: %#v", actual)
	}
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/hashicorp/go-version/version_test.go | third-party/github.com/hashicorp/go-version/version_test.go | // Copyright IBM Corp. 2014, 2025
// SPDX-License-Identifier: MPL-2.0
package version
import (
"encoding/json"
"fmt"
"reflect"
"testing"
)
// TestNewVersion verifies which version strings NewVersion accepts (loose
// parsing: "v" prefix, 4+ segments, fused prereleases like "1.7rc2") and
// which it rejects.
func TestNewVersion(t *testing.T) {
	cases := []struct {
		version string
		err     bool
	}{
		{"", true},
		{"1.2.3", false},
		{"1.0", false},
		{"1", false},
		{"1.2.beta", true},
		{"1.21.beta", true},
		{"foo", true},
		{"1.2-5", false},
		{"1.2-beta.5", false},
		{"\n1.2", true},
		{"1.2.0-x.Y.0+metadata", false},
		{"1.2.0-x.Y.0+metadata-width-hyphen", false},
		{"1.2.3-rc1-with-hyphen", false},
		{"1.2.3.4", false},
		{"1.2.0.4-x.Y.0+metadata", false},
		{"1.2.0.4-x.Y.0+metadata-width-hyphen", false},
		{"1.2.0-X-1.2.0+metadata~dist", false},
		{"1.2.3.4-rc1-with-hyphen", false},
		{"1.2.3.4", false},
		{"v1.2.3", false},
		{"foo1.2.3", true},
		{"1.7rc2", false},
		{"v1.7rc2", false},
		{"1.0-", false},
	}
	for _, tc := range cases {
		_, err := NewVersion(tc.version)
		if tc.err && err == nil {
			t.Fatalf("expected error for version: %q", tc.version)
		} else if !tc.err && err != nil {
			t.Fatalf("error for version %q: %s", tc.version, err)
		}
	}
}
// TestNewSemver mirrors TestNewVersion but for the strict SemVer parser:
// fused prereleases ("1.7rc2") and a trailing dash ("1.0-") must fail here.
func TestNewSemver(t *testing.T) {
	cases := []struct {
		version string
		err     bool
	}{
		{"", true},
		{"1.2.3", false},
		{"1.0", false},
		{"1", false},
		{"1.2.beta", true},
		{"1.21.beta", true},
		{"foo", true},
		{"1.2-5", false},
		{"1.2-beta.5", false},
		{"\n1.2", true},
		{"1.2.0-x.Y.0+metadata", false},
		{"1.2.0-x.Y.0+metadata-width-hyphen", false},
		{"1.2.3-rc1-with-hyphen", false},
		{"1.2.3.4", false},
		{"1.2.0.4-x.Y.0+metadata", false},
		{"1.2.0.4-x.Y.0+metadata-width-hyphen", false},
		{"1.2.0-X-1.2.0+metadata~dist", false},
		{"1.2.3.4-rc1-with-hyphen", false},
		{"1.2.3.4", false},
		{"v1.2.3", false},
		{"foo1.2.3", true},
		{"1.7rc2", true},
		{"v1.7rc2", true},
		{"1.0-", true},
	}
	for _, tc := range cases {
		_, err := NewSemver(tc.version)
		if tc.err && err == nil {
			t.Fatalf("expected error for version: %q", tc.version)
		} else if !tc.err && err != nil {
			t.Fatalf("error for version %q: %s", tc.version, err)
		}
	}
}
// TestCore verifies Version.Core strips prerelease, metadata, and any
// segments beyond MAJOR.MINOR.PATCH.
func TestCore(t *testing.T) {
	cases := []struct {
		v1 string
		v2 string
	}{
		{"1.2.3", "1.2.3"},
		{"2.3.4-alpha1", "2.3.4"},
		{"3.4.5alpha1", "3.4.5"},
		{"1.2.3-2", "1.2.3"},
		{"4.5.6-beta1+meta", "4.5.6"},
		{"5.6.7.1.2.3", "5.6.7"},
	}
	for _, tc := range cases {
		v1, err := NewVersion(tc.v1)
		if err != nil {
			t.Fatalf("error for version %q: %s", tc.v1, err)
		}
		v2, err := NewVersion(tc.v2)
		if err != nil {
			t.Fatalf("error for version %q: %s", tc.v2, err)
		}
		actual := v1.Core()
		expected := v2
		if !reflect.DeepEqual(actual, expected) {
			t.Fatalf("expected: %s\nactual: %s", expected, actual)
		}
	}
}
// TestVersionCompare verifies Compare's -1/0/1 contract, including metadata
// being ignored, prereleases sorting below releases, and jagged segment
// counts ("v1.2" vs "v1.2.0.0.1").
func TestVersionCompare(t *testing.T) {
	cases := []struct {
		v1       string
		v2       string
		expected int
	}{
		{"1.2.3", "1.4.5", -1},
		{"1.2-beta", "1.2-beta", 0},
		{"1.2", "1.1.4", 1},
		{"1.2", "1.2-beta", 1},
		{"1.2+foo", "1.2+beta", 0},
		{"v1.2", "v1.2-beta", 1},
		{"v1.2+foo", "v1.2+beta", 0},
		{"v1.2.3.4", "v1.2.3.4", 0},
		{"v1.2.0.0", "v1.2", 0},
		{"v1.2.0.0.1", "v1.2", 1},
		{"v1.2", "v1.2.0.0", 0},
		{"v1.2", "v1.2.0.0.1", -1},
		{"v1.2.0.0", "v1.2.0.0.1", -1},
		{"v1.2.3.0", "v1.2.3.4", -1},
		{"1.7rc2", "1.7rc1", 1},
		{"1.7rc2", "1.7", -1},
		{"1.2.0", "1.2.0-X-1.2.0+metadata~dist", 1},
	}
	for _, tc := range cases {
		v1, err := NewVersion(tc.v1)
		if err != nil {
			t.Fatalf("err: %s", err)
		}
		v2, err := NewVersion(tc.v2)
		if err != nil {
			t.Fatalf("err: %s", err)
		}
		actual := v1.Compare(v2)
		expected := tc.expected
		if actual != expected {
			t.Fatalf(
				"%s <=> %s\nexpected: %d\nactual: %d",
				tc.v1, tc.v2,
				expected, actual)
		}
	}
}
// TestVersionCompare_versionAndSemver verifies that a loosely-parsed Version
// and a strictly-parsed SemVer value compare consistently with each other.
func TestVersionCompare_versionAndSemver(t *testing.T) {
	cases := []struct {
		versionRaw string
		semverRaw  string
		expected   int
	}{
		{"0.0.2", "0.0.2", 0},
		{"1.0.2alpha", "1.0.2-alpha", 0},
		{"v1.2+foo", "v1.2+beta", 0},
		{"v1.2", "v1.2+meta", 0},
		{"1.2", "1.2-beta", 1},
		{"v1.2", "v1.2-beta", 1},
		{"1.2.3", "1.4.5", -1},
		{"v1.2", "v1.2.0.0.1", -1},
		{"v1.0.3-", "v1.0.3", -1},
	}
	for _, tc := range cases {
		ver, err := NewVersion(tc.versionRaw)
		if err != nil {
			t.Fatalf("err: %s", err)
		}
		semver, err := NewSemver(tc.semverRaw)
		if err != nil {
			t.Fatalf("err: %s", err)
		}
		actual := ver.Compare(semver)
		if actual != tc.expected {
			t.Fatalf(
				"%s <=> %s\nexpected: %d\n actual: %d",
				tc.versionRaw, tc.semverRaw, tc.expected, actual,
			)
		}
	}
}
// TestVersionEqual_nil verifies Equal's nil handling: nil equals only nil,
// and a non-nil version never equals nil.
func TestVersionEqual_nil(t *testing.T) {
	// mustVersion parses v or fails the test immediately.
	mustVersion := func(v string) *Version {
		ver, err := NewVersion(v)
		if err != nil {
			t.Fatal(err)
		}
		return ver
	}
	cases := []struct {
		leftVersion  *Version
		rightVersion *Version
		expected     bool
	}{
		{mustVersion("1.0.0"), nil, false},
		{nil, mustVersion("1.0.0"), false},
		{nil, nil, true},
	}
	for _, tc := range cases {
		given := tc.leftVersion.Equal(tc.rightVersion)
		if given != tc.expected {
			t.Fatalf("expected Equal to nil to be %t", tc.expected)
		}
	}
}
// TestComparePreReleases verifies SemVer-style prerelease precedence:
// numeric identifiers compare numerically and rank below alphanumeric ones,
// and a longer prerelease outranks its prefix ("alpha.1" > "alpha").
func TestComparePreReleases(t *testing.T) {
	cases := []struct {
		v1       string
		v2       string
		expected int
	}{
		{"1.2-beta.2", "1.2-beta.2", 0},
		{"1.2-beta.1", "1.2-beta.2", -1},
		{"1.2-beta.2", "1.2-beta.11", -1},
		{"3.2-alpha.1", "3.2-alpha", 1},
		{"1.2-beta.2", "1.2-beta.1", 1},
		{"1.2-beta.11", "1.2-beta.2", 1},
		{"1.2-beta", "1.2-beta.3", -1},
		{"1.2-alpha", "1.2-beta.3", -1},
		{"1.2-beta", "1.2-alpha.3", 1},
		{"3.0-alpha.3", "3.0-rc.1", -1},
		{"3.0-alpha3", "3.0-rc1", -1},
		{"3.0-alpha.1", "3.0-alpha.beta", -1},
		{"5.4-alpha", "5.4-alpha.beta", 1},
		{"v1.2-beta.2", "v1.2-beta.2", 0},
		{"v1.2-beta.1", "v1.2-beta.2", -1},
		{"v3.2-alpha.1", "v3.2-alpha", 1},
		{"v3.2-rc.1-1-g123", "v3.2-rc.2", 1},
	}
	for _, tc := range cases {
		v1, err := NewVersion(tc.v1)
		if err != nil {
			t.Fatalf("err: %s", err)
		}
		v2, err := NewVersion(tc.v2)
		if err != nil {
			t.Fatalf("err: %s", err)
		}
		actual := v1.Compare(v2)
		expected := tc.expected
		if actual != expected {
			t.Fatalf(
				"%s <=> %s\nexpected: %d\nactual: %d",
				tc.v1, tc.v2,
				expected, actual)
		}
	}
}
// TestVersionMetadata verifies Metadata returns the text after "+", or the
// empty string when none is present.
func TestVersionMetadata(t *testing.T) {
	cases := []struct {
		version  string
		expected string
	}{
		{"1.2.3", ""},
		{"1.2-beta", ""},
		{"1.2.0-x.Y.0", ""},
		{"1.2.0-x.Y.0+metadata", "metadata"},
		{"1.2.0-metadata-1.2.0+metadata~dist", "metadata~dist"},
	}
	for _, tc := range cases {
		v, err := NewVersion(tc.version)
		if err != nil {
			t.Fatalf("err: %s", err)
		}
		actual := v.Metadata()
		expected := tc.expected
		if actual != expected {
			t.Fatalf("expected: %s\nactual: %s", expected, actual)
		}
	}
}
// TestVersionPrerelease verifies Prerelease returns the text between "-"
// and any "+metadata", or the empty string when none is present.
func TestVersionPrerelease(t *testing.T) {
	cases := []struct {
		version  string
		expected string
	}{
		{"1.2.3", ""},
		{"1.2-beta", "beta"},
		{"1.2.0-x.Y.0", "x.Y.0"},
		{"1.2.0-7.Y.0", "7.Y.0"},
		{"1.2.0-x.Y.0+metadata", "x.Y.0"},
		{"1.2.0-metadata-1.2.0+metadata~dist", "metadata-1.2.0"},
		{"17.03.0-ce", "ce"}, // zero-padded fields
	}
	for _, tc := range cases {
		v, err := NewVersion(tc.version)
		if err != nil {
			t.Fatalf("err: %s", err)
		}
		actual := v.Prerelease()
		expected := tc.expected
		if actual != expected {
			t.Fatalf("expected: %s\nactual: %s", expected, actual)
		}
	}
}
// TestVersionSegments verifies Segments returns the numeric parts as ints,
// zero-padded to at least three segments.
func TestVersionSegments(t *testing.T) {
	cases := []struct {
		version  string
		expected []int
	}{
		{"1.2.3", []int{1, 2, 3}},
		{"1.2-beta", []int{1, 2, 0}},
		{"1-x.Y.0", []int{1, 0, 0}},
		{"1.2.0-x.Y.0+metadata", []int{1, 2, 0}},
		{"1.2.0-metadata-1.2.0+metadata~dist", []int{1, 2, 0}},
		{"17.03.0-ce", []int{17, 3, 0}}, // zero-padded fields
	}
	for _, tc := range cases {
		v, err := NewVersion(tc.version)
		if err != nil {
			t.Fatalf("err: %s", err)
		}
		actual := v.Segments()
		expected := tc.expected
		if !reflect.DeepEqual(actual, expected) {
			t.Fatalf("expected: %#v\nactual: %#v", expected, actual)
		}
	}
}
// TestVersionSegments64 verifies Segments64 returns the numeric parts as
// int64s, and that the returned slice is a copy (mutating it must not
// affect subsequent calls).
func TestVersionSegments64(t *testing.T) {
	cases := []struct {
		version  string
		expected []int64
	}{
		{"1.2.3", []int64{1, 2, 3}},
		{"1.2-beta", []int64{1, 2, 0}},
		{"1-x.Y.0", []int64{1, 0, 0}},
		{"1.2.0-x.Y.0+metadata", []int64{1, 2, 0}},
		{"1.4.9223372036854775807", []int64{1, 4, 9223372036854775807}},
	}
	for _, tc := range cases {
		v, err := NewVersion(tc.version)
		if err != nil {
			t.Fatalf("err: %s", err)
		}
		actual := v.Segments64()
		expected := tc.expected
		if !reflect.DeepEqual(actual, expected) {
			t.Fatalf("expected: %#v\nactual: %#v", expected, actual)
		}
		{
			// Mutate the returned slice, then re-fetch: the stored
			// segments must be unaffected.
			expected := actual[0]
			actual[0]++
			actual = v.Segments64()
			if actual[0] != expected {
				t.Fatalf("Segments64 is mutable")
			}
		}
	}
}
// TestJsonMarshal verifies json.Marshal of a Version produces the original
// quoted string. Note the err field is false for every case here; it only
// gates the final comparison.
func TestJsonMarshal(t *testing.T) {
	cases := []struct {
		version string
		err     bool
	}{
		{"1.2.3", false},
		{"1.2.0-x.Y.0+metadata", false},
		{"1.2.0-x.Y.0+metadata-width-hyphen", false},
		{"1.2.3-rc1-with-hyphen", false},
		{"1.2.3.4", false},
		{"1.2.0.4-x.Y.0+metadata", false},
		{"1.2.0.4-x.Y.0+metadata-width-hyphen", false},
		{"1.2.0-X-1.2.0+metadata~dist", false},
		{"1.2.3.4-rc1-with-hyphen", false},
		{"1.2.3.4", false},
	}
	for _, tc := range cases {
		v, err1 := NewVersion(tc.version)
		if err1 != nil {
			t.Fatalf("error for version %q: %s", tc.version, err1)
		}
		parsed, err2 := json.Marshal(v)
		if err2 != nil {
			t.Fatalf("error marshaling version %q: %s", tc.version, err2)
		}
		result := string(parsed)
		expected := fmt.Sprintf("%q", tc.version)
		if result != expected && !tc.err {
			t.Fatalf("Error marshaling unexpected marshaled content: result=%q expected=%q", result, expected)
		}
	}
}
// TestJsonUnmarshal verifies json.Unmarshal of a quoted version string
// reproduces the same Version as parsing it directly with NewVersion.
func TestJsonUnmarshal(t *testing.T) {
	cases := []struct {
		version string
		err     bool
	}{
		{"1.2.3", false},
		{"1.2.0-x.Y.0+metadata", false},
		{"1.2.0-x.Y.0+metadata-width-hyphen", false},
		{"1.2.3-rc1-with-hyphen", false},
		{"1.2.3.4", false},
		{"1.2.0.4-x.Y.0+metadata", false},
		{"1.2.0.4-x.Y.0+metadata-width-hyphen", false},
		{"1.2.0-X-1.2.0+metadata~dist", false},
		{"1.2.3.4-rc1-with-hyphen", false},
		{"1.2.3.4", false},
	}
	for _, tc := range cases {
		expected, err1 := NewVersion(tc.version)
		if err1 != nil {
			t.Fatalf("err: %s", err1)
		}
		actual := &Version{}
		err2 := json.Unmarshal([]byte(fmt.Sprintf("%q", tc.version)), actual)
		if err2 != nil {
			t.Fatalf("error unmarshaling version: %s", err2)
		}
		if !reflect.DeepEqual(actual, expected) {
			t.Fatalf("error unmarshaling, unexpected object content: actual=%q expected=%q", actual, expected)
		}
	}
}
// TestVersionString verifies String canonicalizes versions (zero-padding,
// dropped leading zeroes) while Original preserves the exact input.
func TestVersionString(t *testing.T) {
	cases := [][]string{
		{"1.2.3", "1.2.3"},
		{"1.2-beta", "1.2.0-beta"},
		{"1.2.0-x.Y.0", "1.2.0-x.Y.0"},
		{"1.2.0-x.Y.0+metadata", "1.2.0-x.Y.0+metadata"},
		{"1.2.0-metadata-1.2.0+metadata~dist", "1.2.0-metadata-1.2.0+metadata~dist"},
		{"17.03.0-ce", "17.3.0-ce"}, // zero-padded fields
	}
	for _, tc := range cases {
		v, err := NewVersion(tc[0])
		if err != nil {
			t.Fatalf("err: %s", err)
		}
		actual := v.String()
		expected := tc[1]
		if actual != expected {
			t.Fatalf("expected: %s\nactual: %s", expected, actual)
		}
		if actual := v.Original(); actual != tc[0] {
			t.Fatalf("expected original: %q\nactual: %q", tc[0], actual)
		}
	}
}
// TestEqual verifies the Equal convenience wrapper over Compare; build
// metadata is ignored, trailing zero segments are insignificant.
func TestEqual(t *testing.T) {
	cases := []struct {
		v1       string
		v2       string
		expected bool
	}{
		{"1.2.3", "1.4.5", false},
		{"1.2-beta", "1.2-beta", true},
		{"1.2", "1.1.4", false},
		{"1.2", "1.2-beta", false},
		{"1.2+foo", "1.2+beta", true},
		{"v1.2", "v1.2-beta", false},
		{"v1.2+foo", "v1.2+beta", true},
		{"v1.2.3.4", "v1.2.3.4", true},
		{"v1.2.0.0", "v1.2", true},
		{"v1.2.0.0.1", "v1.2", false},
		{"v1.2", "v1.2.0.0", true},
		{"v1.2", "v1.2.0.0.1", false},
		{"v1.2.0.0", "v1.2.0.0.1", false},
		{"v1.2.3.0", "v1.2.3.4", false},
		{"1.7rc2", "1.7rc1", false},
		{"1.7rc2", "1.7", false},
		{"1.2.0", "1.2.0-X-1.2.0+metadata~dist", false},
	}
	for _, tc := range cases {
		v1, err := NewVersion(tc.v1)
		if err != nil {
			t.Fatalf("err: %s", err)
		}
		v2, err := NewVersion(tc.v2)
		if err != nil {
			t.Fatalf("err: %s", err)
		}
		actual := v1.Equal(v2)
		expected := tc.expected
		if actual != expected {
			t.Fatalf(
				"%s <=> %s\nexpected: %t\nactual: %t",
				tc.v1, tc.v2,
				expected, actual)
		}
	}
}
// TestGreaterThan verifies the GreaterThan convenience wrapper over Compare.
func TestGreaterThan(t *testing.T) {
	cases := []struct {
		v1       string
		v2       string
		expected bool
	}{
		{"1.2.3", "1.4.5", false},
		{"1.2-beta", "1.2-beta", false},
		{"1.2", "1.1.4", true},
		{"1.2", "1.2-beta", true},
		{"1.2+foo", "1.2+beta", false},
		{"v1.2", "v1.2-beta", true},
		{"v1.2+foo", "v1.2+beta", false},
		{"v1.2.3.4", "v1.2.3.4", false},
		{"v1.2.0.0", "v1.2", false},
		{"v1.2.0.0.1", "v1.2", true},
		{"v1.2", "v1.2.0.0", false},
		{"v1.2", "v1.2.0.0.1", false},
		{"v1.2.0.0", "v1.2.0.0.1", false},
		{"v1.2.3.0", "v1.2.3.4", false},
		{"1.7rc2", "1.7rc1", true},
		{"1.7rc2", "1.7", false},
		{"1.2.0", "1.2.0-X-1.2.0+metadata~dist", true},
	}
	for _, tc := range cases {
		v1, err := NewVersion(tc.v1)
		if err != nil {
			t.Fatalf("err: %s", err)
		}
		v2, err := NewVersion(tc.v2)
		if err != nil {
			t.Fatalf("err: %s", err)
		}
		actual := v1.GreaterThan(v2)
		expected := tc.expected
		if actual != expected {
			t.Fatalf(
				"%s > %s\nexpected: %t\nactual: %t",
				tc.v1, tc.v2,
				expected, actual)
		}
	}
}
// TestLessThan verifies the LessThan convenience wrapper over Compare.
func TestLessThan(t *testing.T) {
	cases := []struct {
		v1       string
		v2       string
		expected bool
	}{
		{"1.2.3", "1.4.5", true},
		{"1.2-beta", "1.2-beta", false},
		{"1.2", "1.1.4", false},
		{"1.2", "1.2-beta", false},
		{"1.2+foo", "1.2+beta", false},
		{"v1.2", "v1.2-beta", false},
		{"v1.2+foo", "v1.2+beta", false},
		{"v1.2.3.4", "v1.2.3.4", false},
		{"v1.2.0.0", "v1.2", false},
		{"v1.2.0.0.1", "v1.2", false},
		{"v1.2", "v1.2.0.0", false},
		{"v1.2", "v1.2.0.0.1", true},
		{"v1.2.0.0", "v1.2.0.0.1", true},
		{"v1.2.3.0", "v1.2.3.4", true},
		{"1.7rc2", "1.7rc1", false},
		{"1.7rc2", "1.7", true},
		{"1.2.0", "1.2.0-X-1.2.0+metadata~dist", false},
	}
	for _, tc := range cases {
		v1, err := NewVersion(tc.v1)
		if err != nil {
			t.Fatalf("err: %s", err)
		}
		v2, err := NewVersion(tc.v2)
		if err != nil {
			t.Fatalf("err: %s", err)
		}
		actual := v1.LessThan(v2)
		expected := tc.expected
		if actual != expected {
			t.Fatalf(
				"%s < %s\nexpected: %t\nactual: %t",
				tc.v1, tc.v2,
				expected, actual)
		}
	}
}
// TestGreaterThanOrEqual verifies the GreaterThanOrEqual wrapper over Compare.
func TestGreaterThanOrEqual(t *testing.T) {
	cases := []struct {
		v1       string
		v2       string
		expected bool
	}{
		{"1.2.3", "1.4.5", false},
		{"1.2-beta", "1.2-beta", true},
		{"1.2", "1.1.4", true},
		{"1.2", "1.2-beta", true},
		{"1.2+foo", "1.2+beta", true},
		{"v1.2", "v1.2-beta", true},
		{"v1.2+foo", "v1.2+beta", true},
		{"v1.2.3.4", "v1.2.3.4", true},
		{"v1.2.0.0", "v1.2", true},
		{"v1.2.0.0.1", "v1.2", true},
		{"v1.2", "v1.2.0.0", true},
		{"v1.2", "v1.2.0.0.1", false},
		{"v1.2.0.0", "v1.2.0.0.1", false},
		{"v1.2.3.0", "v1.2.3.4", false},
		{"1.7rc2", "1.7rc1", true},
		{"1.7rc2", "1.7", false},
		{"1.2.0", "1.2.0-X-1.2.0+metadata~dist", true},
	}
	for _, tc := range cases {
		v1, err := NewVersion(tc.v1)
		if err != nil {
			t.Fatalf("err: %s", err)
		}
		v2, err := NewVersion(tc.v2)
		if err != nil {
			t.Fatalf("err: %s", err)
		}
		actual := v1.GreaterThanOrEqual(v2)
		expected := tc.expected
		if actual != expected {
			t.Fatalf(
				"%s >= %s\nexpected: %t\nactual: %t",
				tc.v1, tc.v2,
				expected, actual)
		}
	}
}
// TestLessThanOrEqual verifies the LessThanOrEqual wrapper over Compare.
func TestLessThanOrEqual(t *testing.T) {
	cases := []struct {
		v1       string
		v2       string
		expected bool
	}{
		{"1.2.3", "1.4.5", true},
		{"1.2-beta", "1.2-beta", true},
		{"1.2", "1.1.4", false},
		{"1.2", "1.2-beta", false},
		{"1.2+foo", "1.2+beta", true},
		{"v1.2", "v1.2-beta", false},
		{"v1.2+foo", "v1.2+beta", true},
		{"v1.2.3.4", "v1.2.3.4", true},
		{"v1.2.0.0", "v1.2", true},
		{"v1.2.0.0.1", "v1.2", false},
		{"v1.2", "v1.2.0.0", true},
		{"v1.2", "v1.2.0.0.1", true},
		{"v1.2.0.0", "v1.2.0.0.1", true},
		{"v1.2.3.0", "v1.2.3.4", true},
		{"1.7rc2", "1.7rc1", false},
		{"1.7rc2", "1.7", true},
		{"1.2.0", "1.2.0-X-1.2.0+metadata~dist", false},
	}
	for _, tc := range cases {
		v1, err := NewVersion(tc.v1)
		if err != nil {
			t.Fatalf("err: %s", err)
		}
		v2, err := NewVersion(tc.v2)
		if err != nil {
			t.Fatalf("err: %s", err)
		}
		actual := v1.LessThanOrEqual(v2)
		expected := tc.expected
		if actual != expected {
			t.Fatalf(
				"%s <= %s\nexpected: %t\nactual: %t",
				tc.v1, tc.v2,
				expected, actual)
		}
	}
}
// BenchmarkVersionString measures Version.String; one warm-up call runs
// before the timer is reset so setup cost is excluded.
func BenchmarkVersionString(b *testing.B) {
	v, _ := NewVersion("3.4.5-rc1+meta")
	_ = v.String()
	b.ResetTimer()
	b.ReportAllocs()
	for i := 0; i < b.N; i++ {
		_ = v.String()
	}
}
// BenchmarkCompareVersionV1 measures Compare on identical versions (the
// fast equality path).
func BenchmarkCompareVersionV1(b *testing.B) {
	v, _ := NewVersion("3.4.5")
	b.ResetTimer()
	b.ReportAllocs()
	for i := 0; i < b.N; i++ {
		v.Compare(v)
	}
}
// BenchmarkVersionCompareV2 measures Compare on versions with differing
// segment counts (the jagged-comparison path).
func BenchmarkVersionCompareV2(b *testing.B) {
	v, _ := NewVersion("1.2.3")
	o, _ := NewVersion("v1.2.3.4")
	b.ResetTimer()
	b.ReportAllocs()
	for i := 0; i < b.N; i++ {
		v.Compare(o)
	}
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/hashicorp/go-version/version.go | third-party/github.com/hashicorp/go-version/version.go | // Copyright IBM Corp. 2014, 2025
// SPDX-License-Identifier: MPL-2.0
package version
import (
"database/sql/driver"
"fmt"
"regexp"
"strconv"
"strings"
"sync"
)
// The compiled regular expression used to test the validity of a version.
// Compilation is deferred until first use via sync.Once (see the getter
// functions below) so importing the package does not pay the regexp cost.
var (
	versionRegexp     *regexp.Regexp
	versionRegexpOnce sync.Once
	semverRegexp      *regexp.Regexp
	semverRegexpOnce  sync.Once
)
// getVersionRegexp lazily compiles the loose version regexp (anchored with
// ^...$) exactly once and returns the cached instance. Safe for concurrent
// use via sync.Once.
func getVersionRegexp() *regexp.Regexp {
	versionRegexpOnce.Do(func() {
		versionRegexp = regexp.MustCompile("^" + VersionRegexpRaw + "$")
	})
	return versionRegexp
}
// getSemverRegexp lazily compiles the strict SemVer regexp (anchored with
// ^...$) exactly once and returns the cached instance. Safe for concurrent
// use via sync.Once.
func getSemverRegexp() *regexp.Regexp {
	semverRegexpOnce.Do(func() {
		semverRegexp = regexp.MustCompile("^" + SemverRegexpRaw + "$")
	})
	return semverRegexp
}
// The raw regular expression string used for testing the validity
// of a version. Structure: optional "v" prefix, dotted numeric segments,
// an optional prerelease (the first alternative accepts a digit-led
// prerelease after "-"; the second accepts a letter-led one with an
// optional separator, permitting fused forms like "1.7rc2"), and optional
// "+metadata". NOTE(review): the trailing `?` appears to make the
// preceding optional group non-greedy; confirm against upstream intent
// before changing.
const (
	VersionRegexpRaw string = `v?([0-9]+(\.[0-9]+)*?)` +
		`(-([0-9]+[0-9A-Za-z\-~]*(\.[0-9A-Za-z\-~]+)*)|(-?([A-Za-z\-~]+[0-9A-Za-z\-~]*(\.[0-9A-Za-z\-~]+)*)))?` +
		`(\+([0-9A-Za-z\-~]+(\.[0-9A-Za-z\-~]+)*))?` +
		`?`
	// SemverRegexpRaw requires a separator between version and prerelease
	SemverRegexpRaw string = `v?([0-9]+(\.[0-9]+)*?)` +
		`(-([0-9]+[0-9A-Za-z\-~]*(\.[0-9A-Za-z\-~]+)*)|(-([A-Za-z\-~]+[0-9A-Za-z\-~]*(\.[0-9A-Za-z\-~]+)*)))?` +
		`(\+([0-9A-Za-z\-~]+(\.[0-9A-Za-z\-~]+)*))?` +
		`?`
)
// Version represents a single version.
type Version struct {
	metadata string  // build metadata after "+" (ignored for precedence)
	pre      string  // prerelease text after "-", "" if none
	segments []int64 // numeric segments, zero-padded to at least 3
	si       int     // segment count as written in the original string
	original string  // exact input string, returned by Original()
}
// NewVersion parses the given version and returns a new
// Version. It uses the loose grammar (VersionRegexpRaw), which accepts
// "v" prefixes, 4+ segments, and fused prereleases like "1.7rc2".
func NewVersion(v string) (*Version, error) {
	return newVersion(v, getVersionRegexp())
}
// NewSemver parses the given version and returns a new
// Version that adheres strictly to SemVer specs
// https://semver.org/
// Unlike NewVersion, a prerelease must be separated by "-".
func NewSemver(v string) (*Version, error) {
	return newVersion(v, getSemverRegexp())
}
// newVersion parses v against the supplied (anchored) pattern and builds a
// Version from its capture groups. Shared by NewVersion and NewSemver.
func newVersion(v string, pattern *regexp.Regexp) (*Version, error) {
	m := pattern.FindStringSubmatch(v)
	if m == nil {
		return nil, fmt.Errorf("malformed version: %s", v)
	}
	// The first capture group holds the dotted numeric part ("1.2.3").
	parts := strings.Split(m[1], ".")
	nums := make([]int64, 0, len(parts))
	for _, part := range parts {
		n, err := strconv.ParseInt(part, 10, 64)
		if err != nil {
			return nil, fmt.Errorf(
				"error parsing version: %s", err)
		}
		nums = append(nums, n)
	}
	// Even though we could support more than three segments, if we
	// got less than three, pad it with 0s. This is to cover the basic
	// default usecase of semver, which is MAJOR.MINOR.PATCH at the minimum
	for len(nums) < 3 {
		nums = append(nums, 0)
	}
	// The prerelease may land in either alternative of the regexp;
	// prefer the letter-led group, falling back to the digit-led one.
	pre := m[7]
	if pre == "" {
		pre = m[4]
	}
	return &Version{
		metadata: m[10],
		pre:      pre,
		segments: nums,
		si:       len(parts),
		original: v,
	}, nil
}
// Must is a helper that wraps a call to a function returning (*Version, error)
// and panics if error is non-nil.
func Must(v *Version, err error) *Version {
	if err == nil {
		return v
	}
	panic(err)
}
// Compare compares this version to another version. This
// returns -1, 0, or 1 if this version is smaller, equal,
// or larger than the other version, respectively.
//
// If you want boolean results, use the LessThan, Equal,
// GreaterThan, GreaterThanOrEqual or LessThanOrEqual methods.
func (v *Version) Compare(other *Version) int {
	// A quick, efficient equality check
	if v.String() == other.String() {
		return 0
	}
	// If the segments are the same, we must compare on prerelease info
	if v.equalSegments(other) {
		preSelf := v.Prerelease()
		preOther := other.Prerelease()
		if preSelf == "" && preOther == "" {
			return 0
		}
		// A release always outranks a prerelease of the same segments.
		if preSelf == "" {
			return 1
		}
		if preOther == "" {
			return -1
		}
		return comparePrereleases(preSelf, preOther)
	}
	segmentsSelf := v.Segments64()
	segmentsOther := other.Segments64()
	// Get the highest specificity (hS), or if they're equal, just use segmentSelf length
	lenSelf := len(segmentsSelf)
	lenOther := len(segmentsOther)
	hS := lenSelf
	if lenSelf < lenOther {
		hS = lenOther
	}
	// Compare the segments
	// Because a constraint could have more/less specificity than the version it's
	// checking, we need to account for a lopsided or jagged comparison
	for i := 0; i < hS; i++ {
		if i > lenSelf-1 {
			// This means Self had the lower specificity
			// Check to see if the remaining segments in Other are all zeros
			if !allZero(segmentsOther[i:]) {
				// if not, it means that Other has to be greater than Self
				return -1
			}
			break
		} else if i > lenOther-1 {
			// this means Other had the lower specificity
			// Check to see if the remaining segments in Self are all zeros -
			if !allZero(segmentsSelf[i:]) {
				// if not, it means that Self has to be greater than Other
				return 1
			}
			break
		}
		lhs := segmentsSelf[i]
		rhs := segmentsOther[i]
		if lhs == rhs {
			continue
		} else if lhs < rhs {
			return -1
		}
		// Otherwise, rhs was > lhs, they're not equal
		return 1
	}
	// if we got this far, they're equal
	return 0
}
// equalSegments reports whether both versions have identical numeric
// segment slices (same length, same values; copies obtained via Segments64).
func (v *Version) equalSegments(other *Version) bool {
	mine, theirs := v.Segments64(), other.Segments64()
	if len(mine) != len(theirs) {
		return false
	}
	for i := range mine {
		if mine[i] != theirs[i] {
			return false
		}
	}
	return true
}
// allZero reports whether every segment in segs is zero.
// A nil or empty slice is vacuously all-zero.
func allZero(segs []int64) bool {
	for i := range segs {
		if segs[i] != 0 {
			return false
		}
	}
	return true
}
// comparePart compares one dot-separated prerelease identifier from each
// version. Numeric identifiers compare numerically and rank below
// alphanumeric ones; an empty part means that side ran out of identifiers.
func comparePart(preSelf string, preOther string) int {
	if preSelf == preOther {
		return 0
	}
	selfInt, selfErr := strconv.ParseInt(preSelf, 10, 64)
	selfNumeric := selfErr == nil
	otherInt, otherErr := strconv.ParseInt(preOther, 10, 64)
	otherNumeric := otherErr == nil
	// if a part is empty, we use the other to decide
	if preSelf == "" {
		if otherNumeric {
			return -1
		}
		return 1
	}
	if preOther == "" {
		if selfNumeric {
			return 1
		}
		return -1
	}
	switch {
	case selfNumeric && !otherNumeric:
		// numeric identifiers rank below alphanumeric ones
		return -1
	case !selfNumeric && otherNumeric:
		return 1
	case !selfNumeric && !otherNumeric:
		// both alphanumeric: plain lexicographic order
		if preSelf > preOther {
			return 1
		}
		return -1
	case selfInt > otherInt:
		return 1
	}
	return -1
}
// comparePrereleases compares two full prerelease strings part-by-part
// (split on "."), returning the first non-zero comparePart result, or 0
// when all parts agree. A side that runs out of parts contributes "".
func comparePrereleases(v string, other string) int {
	// identical strings are trivially equal
	if v == other {
		return 0
	}
	selfParts := strings.Split(v, ".")
	otherParts := strings.Split(other, ".")
	limit := len(otherParts)
	if len(selfParts) > limit {
		limit = len(selfParts)
	}
	for i := 0; i < limit; i++ {
		var a, b string
		if i < len(selfParts) {
			a = selfParts[i]
		}
		if i < len(otherParts) {
			b = otherParts[i]
		}
		if result := comparePart(a, b); result != 0 {
			return result
		}
	}
	return 0
}
// Core returns a new version constructed from only the MAJOR.MINOR.PATCH
// segments of the version, without prerelease or metadata.
func (v *Version) Core() *Version {
	segs := v.Segments64()
	core := fmt.Sprintf("%d.%d.%d", segs[0], segs[1], segs[2])
	// Re-parsing the formatted core cannot fail, so Must is safe here.
	return Must(NewVersion(core))
}
// Equal tests if two versions are equal. Two nil versions are equal;
// a nil and a non-nil version are not.
func (v *Version) Equal(o *Version) bool {
	switch {
	case v == nil && o == nil:
		return true
	case v == nil || o == nil:
		return false
	}
	return v.Compare(o) == 0
}
// GreaterThan tests if this version is greater than another version.
// Note: unlike Equal, these comparison wrappers do not guard against nil.
func (v *Version) GreaterThan(o *Version) bool {
	return v.Compare(o) > 0
}
// GreaterThanOrEqual tests if this version is greater than or equal to another version.
func (v *Version) GreaterThanOrEqual(o *Version) bool {
	return v.Compare(o) >= 0
}
// LessThan tests if this version is less than another version.
func (v *Version) LessThan(o *Version) bool {
	return v.Compare(o) < 0
}
// LessThanOrEqual tests if this version is less than or equal to another version.
func (v *Version) LessThanOrEqual(o *Version) bool {
	return v.Compare(o) <= 0
}
// Metadata returns any metadata that was part of the version
// string.
//
// Metadata is anything that comes after the "+" in the version.
// For example, with "1.2.3+beta", the metadata is "beta".
// It returns "" when the version carried no metadata.
func (v *Version) Metadata() string {
	return v.metadata
}
// Prerelease returns any prerelease data that is part of the version,
// or blank if there is no prerelease data.
//
// Prerelease information is anything that comes after the "-" in the
// version (but before any metadata). For example, with "1.2.3-beta",
// the prerelease information is "beta".
func (v *Version) Prerelease() string {
	return v.pre
}
// Segments returns the numeric segments of the version as a slice of ints.
//
// This excludes any metadata or pre-release information. For example,
// for a version "1.2.3-beta", segments will return a slice of
// 1, 2, 3. The returned slice is a fresh copy each call.
func (v *Version) Segments() []int {
	out := make([]int, len(v.segments))
	for i := range v.segments {
		out[i] = int(v.segments[i])
	}
	return out
}
// Segments64 returns the numeric segments of the version as a slice of int64s.
//
// This excludes any metadata or pre-release information. For example,
// for a version "1.2.3-beta", segments will return a slice of
// 1, 2, 3. A defensive copy is returned so callers cannot mutate the
// stored segments.
func (v *Version) Segments64() []int64 {
	return append(make([]int64, 0, len(v.segments)), v.segments...)
}
// String returns the full version string included pre-release
// and metadata information.
//
// This value is rebuilt according to the parsed segments and other
// information. Therefore, ambiguities in the version string such as
// prefixed zeroes (1.04.0 => 1.4.0), `v` prefix (v1.0.0 => 1.0.0), and
// missing parts (1.0 => 1.0.0) will be made into a canonicalized form
// as shown in the parenthesized examples.
func (v *Version) String() string {
	return string(v.bytes())
}
// bytes renders the canonical form ("MAJOR.MINOR.PATCH[-pre][+meta]")
// into a freshly built byte slice; String wraps it.
func (v *Version) bytes() []byte {
	var out []byte
	for i, seg := range v.segments {
		if i != 0 {
			out = append(out, '.')
		}
		// AppendInt avoids an intermediate string per segment.
		out = strconv.AppendInt(out, seg, 10)
	}
	if len(v.pre) > 0 {
		out = append(out, '-')
		out = append(out, v.pre...)
	}
	if len(v.metadata) > 0 {
		out = append(out, '+')
		out = append(out, v.metadata...)
	}
	return out
}
// Original returns the original parsed version as-is, including any
// potential whitespace, `v` prefix, etc. Contrast with String, which
// returns the canonicalized form.
func (v *Version) Original() string {
	return v.original
}
// UnmarshalText implements encoding.TextUnmarshaler interface.
// It parses b with NewVersion and, on success, overwrites the receiver.
// On parse failure the receiver is left unmodified.
func (v *Version) UnmarshalText(b []byte) error {
	parsed, err := NewVersion(string(b))
	if err != nil {
		return err
	}
	*v = *parsed
	return nil
}
// MarshalText implements encoding.TextMarshaler interface.
// The canonical String form is emitted, not the original input.
func (v *Version) MarshalText() ([]byte, error) {
	return []byte(v.String()), nil
}
// Scan implements the sql.Scanner interface. A string value is parsed via
// UnmarshalText, a SQL NULL is a no-op, and any other type is rejected.
func (v *Version) Scan(src interface{}) error {
	switch value := src.(type) {
	case string:
		return v.UnmarshalText([]byte(value))
	case nil:
		// NULL leaves the receiver untouched.
		return nil
	default:
		return fmt.Errorf("cannot scan %T as Version", value)
	}
}
// Value implements the driver.Valuer interface.
// The version is stored in its canonical String form.
func (v *Version) Value() (driver.Value, error) {
	return v.String(), nil
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/hashicorp/go-version/constraint.go | third-party/github.com/hashicorp/go-version/constraint.go | // Copyright IBM Corp. 2014, 2025
// SPDX-License-Identifier: MPL-2.0
package version
import (
"fmt"
"regexp"
"sort"
"strings"
"sync"
)
var (
	// constraintRegexp matches a single constraint (optional operator plus
	// version). It is compiled lazily by getConstraintRegexp.
	constraintRegexp *regexp.Regexp
	// constraintRegexpOnce guards the one-time compilation above.
	constraintRegexpOnce sync.Once
)
// getConstraintRegexp lazily compiles and returns the regular expression
// used to parse a single constraint: an optional comparison operator
// (empty means "="), followed by a version. Compilation happens exactly
// once, on first use.
func getConstraintRegexp() *regexp.Regexp {
	constraintRegexpOnce.Do(func() {
		// This heavy lifting only happens the first time this function is called
		pattern := fmt.Sprintf(
			`^\s*(%s)\s*(%s)\s*$`,
			`<=|>=|!=|~>|<|>|=|`,
			VersionRegexpRaw,
		)
		constraintRegexp = regexp.MustCompile(pattern)
	})
	return constraintRegexp
}
// Constraint represents a single constraint for a version, such as
// ">= 1.0".
type Constraint struct {
	f        constraintFunc // predicate implementing the operator
	op       operator       // parsed operator tag, used for sorting/equality
	check    *Version       // version the operator compares against
	original string         // constraint exactly as written by the caller
}
// Equals reports whether c and con use the same operator and an equal
// check version. The original source strings are not compared.
func (c *Constraint) Equals(con *Constraint) bool {
	if c.op != con.op {
		return false
	}
	return c.check.Equal(con.check)
}
// Constraints is a slice of constraints. We make a custom type so that
// we can add methods to it.
type Constraints []*Constraint

// constraintFunc evaluates whether version v satisfies the operator
// relative to the constraint's check version c.
type constraintFunc func(v, c *Version) bool

// constraintOperation pairs an operator tag with the predicate that
// implements it.
type constraintOperation struct {
	op operator
	f  constraintFunc
}
// NewConstraint will parse one or more constraints from the given
// constraint string. The string must be a comma-separated list of
// constraints.
func NewConstraint(v string) (Constraints, error) {
	parts := strings.Split(v, ",")
	out := make(Constraints, 0, len(parts))
	for _, raw := range parts {
		parsed, err := parseSingle(raw)
		if err != nil {
			return nil, err
		}
		out = append(out, parsed)
	}

	return out, nil
}
// MustConstraints is a helper that wraps a call to a function
// returning (Constraints, error) and panics if error is non-nil.
func MustConstraints(c Constraints, err error) Constraints {
	if err == nil {
		return c
	}
	panic(err)
}
// Check tests if a version satisfies all the constraints (logical AND).
func (cs Constraints) Check(v *Version) bool {
	for _, single := range cs {
		if !single.Check(v) {
			return false
		}
	}
	return true
}
// Equals compares Constraints with other Constraints
// for equality. This may not represent logical equivalence
// of compared constraints.
// e.g. even though '>0.1,>0.2' is logically equivalent
// to '>0.2' it is *NOT* treated as equal.
//
// Missing operator is treated as equal to '=', whitespaces
// are ignored and constraints are sorted before comparison.
func (cs Constraints) Equals(c Constraints) bool {
	if len(cs) != len(c) {
		return false
	}

	// Sort copies, so the callers' slices keep their original order.
	ours := make(Constraints, len(cs))
	copy(ours, cs)
	sort.Stable(ours)

	theirs := make(Constraints, len(c))
	copy(theirs, c)
	sort.Stable(theirs)

	// Element-wise comparison of the sorted slices.
	for i := range ours {
		if !ours[i].Equals(theirs[i]) {
			return false
		}
	}

	return true
}
// Len implements sort.Interface.
func (cs Constraints) Len() int {
	return len(cs)
}

// Less implements sort.Interface: constraints order first by operator,
// then by their check version.
func (cs Constraints) Less(i, j int) bool {
	switch {
	case cs[i].op < cs[j].op:
		return true
	case cs[i].op > cs[j].op:
		return false
	default:
		return cs[i].check.LessThan(cs[j].check)
	}
}

// Swap implements sort.Interface.
func (cs Constraints) Swap(i, j int) {
	cs[i], cs[j] = cs[j], cs[i]
}
// String returns the comma-joined string format of the constraints.
func (cs Constraints) String() string {
	parts := make([]string, 0, len(cs))
	for _, c := range cs {
		parts = append(parts, c.String())
	}
	return strings.Join(parts, ",")
}
// Check tests if a constraint is validated by the given version, by
// invoking the operator's predicate against the constraint's check
// version.
func (c *Constraint) Check(v *Version) bool {
	return c.f(v, c.check)
}

// Prerelease returns true if the version underlying this constraint
// contains a prerelease field.
func (c *Constraint) Prerelease() bool {
	return len(c.check.Prerelease()) > 0
}

// String returns the constraint exactly as it was originally written,
// including any operator and surrounding whitespace.
func (c *Constraint) String() string {
	return c.original
}
// parseSingle parses one constraint such as ">= 1.0". A missing operator
// is treated as "=".
func parseSingle(v string) (*Constraint, error) {
	groups := getConstraintRegexp().FindStringSubmatch(v)
	if groups == nil {
		return nil, fmt.Errorf("malformed constraint: %s", v)
	}

	check, err := NewVersion(groups[2])
	if err != nil {
		return nil, err
	}

	// Default to equality; an empty operator match means "=".
	op := equal
	fn := constraintEqual
	switch groups[1] {
	case "!=":
		op, fn = notEqual, constraintNotEqual
	case ">":
		op, fn = greaterThan, constraintGreaterThan
	case "<":
		op, fn = lessThan, constraintLessThan
	case ">=":
		op, fn = greaterThanEqual, constraintGreaterThanEqual
	case "<=":
		op, fn = lessThanEqual, constraintLessThanEqual
	case "~>":
		op, fn = pessimistic, constraintPessimistic
	}

	return &Constraint{
		f:        fn,
		op:       op,
		check:    check,
		original: v,
	}, nil
}
// prereleaseCheck decides whether comparing version v against the
// constraint's version c is permissible given their pre-release fields.
func prereleaseCheck(v, c *Version) bool {
	vPre := v.Prerelease() != ""
	cPre := c.Prerelease() != ""

	if cPre && vPre {
		// A constraint with a pre-release can only match a pre-release version
		// with the same base segments.
		return v.equalSegments(c)
	}
	if !cPre && vPre {
		// A constraint without a pre-release can only match a version without a
		// pre-release.
		return false
	}

	// cPre && !vPre: OK, except with the pessimistic operator.
	// !cPre && !vPre: OK.
	return true
}
//-------------------------------------------------------------------
// Constraint functions
//-------------------------------------------------------------------
// operator tags a constraint's comparison kind. The rune values are only
// used internally for ordering and equality of constraints, never for
// display (String returns the original source text).
type operator rune

const (
	equal            operator = '='
	notEqual         operator = '≠'
	greaterThan      operator = '>'
	lessThan         operator = '<'
	greaterThanEqual operator = '≥'
	lessThanEqual    operator = '≤'
	pessimistic      operator = '~'
)
// constraintEqual reports whether v is exactly equal to c.
func constraintEqual(v, c *Version) bool {
	return v.Equal(c)
}

// constraintNotEqual reports whether v differs from c.
func constraintNotEqual(v, c *Version) bool {
	return !v.Equal(c)
}

// constraintGreaterThan reports whether v > c, subject to prereleaseCheck.
func constraintGreaterThan(v, c *Version) bool {
	return prereleaseCheck(v, c) && v.Compare(c) == 1
}

// constraintLessThan reports whether v < c, subject to prereleaseCheck.
func constraintLessThan(v, c *Version) bool {
	return prereleaseCheck(v, c) && v.Compare(c) == -1
}

// constraintGreaterThanEqual reports whether v >= c, subject to prereleaseCheck.
func constraintGreaterThanEqual(v, c *Version) bool {
	return prereleaseCheck(v, c) && v.Compare(c) >= 0
}

// constraintLessThanEqual reports whether v <= c, subject to prereleaseCheck.
func constraintLessThanEqual(v, c *Version) bool {
	return prereleaseCheck(v, c) && v.Compare(c) <= 0
}
// constraintPessimistic implements the "~>" operator: v must be >= c,
// share all but the last of c's segments, and be at least c in the last
// segment.
func constraintPessimistic(v, c *Version) bool {
	// Using a pessimistic constraint with a pre-release, restricts versions to pre-releases
	if !prereleaseCheck(v, c) || (c.Prerelease() != "" && v.Prerelease() == "") {
		return false
	}

	// If the version being checked is naturally less than the constraint, then there
	// is no way for the version to be valid against the constraint
	if v.LessThan(c) {
		return false
	}

	// We'll use this more than once, so grab the length now so it's a little cleaner
	// to write the later checks
	cs := len(c.segments)

	// If the version being checked has less specificity than the constraint, then there
	// is no way for the version to be valid against the constraint
	if cs > len(v.segments) {
		return false
	}

	// Check the segments in the constraint against those in the version. If the version
	// being checked, at any point, does not have the same values in each index of the
	// constraints segments, then it cannot be valid against the constraint.
	// NOTE(review): this loop bounds on c.si (segment count from the original
	// constraint string) rather than cs — presumably si <= len(segments);
	// confirm against the Version parser before changing.
	for i := 0; i < c.si-1; i++ {
		if v.segments[i] != c.segments[i] {
			return false
		}
	}

	// Check the last part of the segment in the constraint. If the version segment at
	// this index is less than the constraints segment at this index, then it cannot
	// be valid against the constraint
	if c.segments[cs-1] > v.segments[cs-1] {
		return false
	}

	// If nothing has rejected the version by now, it's valid
	return true
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/hashicorp/go-version/version_collection.go | third-party/github.com/hashicorp/go-version/version_collection.go | // Copyright IBM Corp. 2014, 2025
// SPDX-License-Identifier: MPL-2.0
package version
// Collection is a slice of versions implementing sort.Interface, so a set
// of versions can be sorted with the standard sort package.
type Collection []*Version

// Len implements sort.Interface.
func (v Collection) Len() int {
	return len(v)
}

// Less implements sort.Interface, ordering by version precedence.
func (v Collection) Less(i, j int) bool {
	return v[i].LessThan(v[j])
}

// Swap implements sort.Interface.
func (v Collection) Swap(i, j int) {
	v[i], v[j] = v[j], v[i]
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/letsencrypt/boulder/allowlist/main_test.go | third-party/github.com/letsencrypt/boulder/allowlist/main_test.go | package allowlist
import (
"testing"
)
// TestNewFromYAML exercises allowlist construction from YAML, covering
// valid, empty, and malformed inputs.
func TestNewFromYAML(t *testing.T) {
	t.Parallel()
	testCases := []struct {
		name          string
		yamlData      string
		check         []string
		expectAnswers []bool
		expectErr     bool
	}{
		{
			name:          "valid YAML",
			yamlData:      "- oak\n- maple\n- cherry",
			check:         []string{"oak", "walnut", "maple", "cherry"},
			expectAnswers: []bool{true, false, true, true},
			expectErr:     false,
		},
		{
			name:          "empty YAML",
			yamlData:      "",
			check:         []string{"oak", "walnut", "maple", "cherry"},
			expectAnswers: []bool{false, false, false, false},
			expectErr:     false,
		},
		{
			name:          "invalid YAML",
			yamlData:      "{ invalid_yaml",
			check:         []string{},
			expectAnswers: []bool{},
			expectErr:     true,
		},
	}

	for _, tc := range testCases {
		t.Run(tc.name, func(t *testing.T) {
			t.Parallel()
			list, err := NewFromYAML[string]([]byte(tc.yamlData))
			gotErr := err != nil
			if gotErr != tc.expectErr {
				t.Fatalf("NewFromYAML() error = %v, expectErr = %v", err, tc.expectErr)
			}
			if err != nil {
				return
			}
			for i, item := range tc.check {
				got := list.Contains(item)
				if got != tc.expectAnswers[i] {
					t.Errorf("Contains(%q) got %v, want %v", item, got, tc.expectAnswers[i])
				}
			}
		})
	}
}
// TestNewList exercises allowlist construction from slices, including
// duplicate, nil, and empty member lists.
func TestNewList(t *testing.T) {
	t.Parallel()
	testCases := []struct {
		name          string
		members       []string
		check         []string
		expectAnswers []bool
	}{
		{
			name:          "unique members",
			members:       []string{"oak", "maple", "cherry"},
			check:         []string{"oak", "walnut", "maple", "cherry"},
			expectAnswers: []bool{true, false, true, true},
		},
		{
			name:          "duplicate members",
			members:       []string{"oak", "maple", "cherry", "oak"},
			check:         []string{"oak", "walnut", "maple", "cherry"},
			expectAnswers: []bool{true, false, true, true},
		},
		{
			name:          "nil list",
			members:       nil,
			check:         []string{"oak", "walnut", "maple", "cherry"},
			expectAnswers: []bool{false, false, false, false},
		},
		{
			name:          "empty list",
			members:       []string{},
			check:         []string{"oak", "walnut", "maple", "cherry"},
			expectAnswers: []bool{false, false, false, false},
		},
	}

	for _, tc := range testCases {
		t.Run(tc.name, func(t *testing.T) {
			t.Parallel()
			list := NewList[string](tc.members)
			for i, item := range tc.check {
				got := list.Contains(item)
				if got != tc.expectAnswers[i] {
					t.Errorf("Contains(%q) got %v, want %v", item, got, tc.expectAnswers[i])
				}
			}
		})
	}
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/letsencrypt/boulder/allowlist/main.go | third-party/github.com/letsencrypt/boulder/allowlist/main.go | package allowlist
import (
"github.com/letsencrypt/boulder/strictyaml"
)
// List holds a unique collection of items of type T. Membership can be checked
// by calling the Contains method.
type List[T comparable] struct {
	// members is the de-duplicated set; the empty struct value carries no data.
	members map[T]struct{}
}
// NewList returns a *List[T] populated with the provided members of type T. All
// duplicate entries are ignored, ensuring uniqueness.
func NewList[T comparable](members []T) *List[T] {
	set := make(map[T]struct{}, len(members))
	for _, member := range members {
		set[member] = struct{}{}
	}
	return &List[T]{members: set}
}
// NewFromYAML reads a YAML sequence of values of type T and returns a *List[T]
// containing those values. If data is empty, an empty (deny all) list is
// returned. If data cannot be parsed, an error is returned.
func NewFromYAML[T comparable](data []byte) (*List[T], error) {
	if len(data) == 0 {
		return NewList([]T{}), nil
	}

	var members []T
	if err := strictyaml.Unmarshal(data, &members); err != nil {
		return nil, err
	}
	return NewList(members), nil
}
// Contains reports whether the provided entry is a member of the list.
func (l *List[T]) Contains(entry T) bool {
	_, present := l.members[entry]
	return present
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/letsencrypt/boulder/bdns/mocks.go | third-party/github.com/letsencrypt/boulder/bdns/mocks.go | package bdns
import (
"context"
"errors"
"fmt"
"net"
"net/netip"
"os"
"github.com/miekg/dns"
blog "github.com/letsencrypt/boulder/log"
)
// MockClient is a mock DNS client for tests; its lookup methods return
// canned answers keyed on the queried hostname.
type MockClient struct {
	// Log receives any DNS error logging the mock performs.
	Log blog.Logger
}
// LookupTXT is a mock. It returns canned TXT answers keyed on the
// hostname; every response reports "MockClient" as the resolver address.
func (mock *MockClient) LookupTXT(_ context.Context, hostname string) ([]string, ResolverAddrs, error) {
	resolvers := ResolverAddrs{"MockClient"}
	switch hostname {
	case "_acme-challenge.servfail.com":
		return nil, resolvers, fmt.Errorf("SERVFAIL")
	case "_acme-challenge.good-dns01.com", "_acme-challenge.no-authority-dns01.com":
		// base64(sha256("LoqXcYV8q5ONbJQxbmR7SCTNo3tiAXDfowyjxAjEuX0"
		// + "." + "9jg46WB3rR_AHD-EBXdN7cBkH1WOu0tA3M9fm21mqTI"))
		// expected token + test account jwk thumbprint
		return []string{"LPsIwTo7o8BoG0-vjCyGQGBWSVIPxI-i_X336eUOQZo"}, resolvers, nil
	case "_acme-challenge.wrong-dns01.com":
		return []string{"a"}, resolvers, nil
	case "_acme-challenge.wrong-many-dns01.com":
		return []string{"a", "b", "c", "d", "e"}, resolvers, nil
	case "_acme-challenge.long-dns01.com":
		return []string{"aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"}, resolvers, nil
	case "_acme-challenge.empty-txts.com":
		// empty-txts.com always returns zero TXT records
		return []string{}, resolvers, nil
	default:
		return []string{"hostname"}, resolvers, nil
	}
}
// makeTimeoutError returns a net.OpError for which Timeout() returns true.
func makeTimeoutError() *net.OpError {
	return &net.OpError{
		Err: os.NewSyscallError("ugh timeout", timeoutError{}),
	}
}

// timeoutError is a trivial error whose Timeout method always reports
// true, letting tests simulate network timeouts.
type timeoutError struct{}

// Error implements the error interface.
func (t timeoutError) Error() string {
	return "so sloooow"
}

// Timeout reports that this error represents a timeout (always true).
func (t timeoutError) Timeout() bool {
	return true
}
// LookupHost is a mock. It returns canned A/AAAA answers (or canned
// failures) keyed on the hostname; every response reports "MockClient"
// as the resolver address.
func (mock *MockClient) LookupHost(_ context.Context, hostname string) ([]netip.Addr, ResolverAddrs, error) {
	// Hostnames that resolve to zero addresses without error.
	if hostname == "always.invalid" ||
		hostname == "invalid.invalid" {
		return []netip.Addr{}, ResolverAddrs{"MockClient"}, nil
	}
	// Simulates a network timeout.
	if hostname == "always.timeout" {
		return []netip.Addr{}, ResolverAddrs{"MockClient"}, &Error{dns.TypeA, "always.timeout", makeTimeoutError(), -1, nil}
	}
	// Simulates a generic UDP read error (and exercises logDNSError).
	if hostname == "always.error" {
		err := &net.OpError{
			Op:  "read",
			Net: "udp",
			Err: errors.New("some net error"),
		}
		m := new(dns.Msg)
		m.SetQuestion(dns.Fqdn(hostname), dns.TypeA)
		m.AuthenticatedData = true
		m.SetEdns0(4096, false)
		logDNSError(mock.Log, "mock.server", hostname, m, nil, err)
		return []netip.Addr{}, ResolverAddrs{"MockClient"}, &Error{dns.TypeA, hostname, err, -1, nil}
	}
	// Simulates a DNS message ID mismatch between query and response.
	if hostname == "id.mismatch" {
		err := dns.ErrId
		m := new(dns.Msg)
		m.SetQuestion(dns.Fqdn(hostname), dns.TypeA)
		m.AuthenticatedData = true
		m.SetEdns0(4096, false)
		r := new(dns.Msg)
		record := new(dns.A)
		record.Hdr = dns.RR_Header{Name: dns.Fqdn(hostname), Rrtype: dns.TypeA, Class: dns.ClassINET, Ttl: 0}
		record.A = net.ParseIP("127.0.0.1")
		r.Answer = append(r.Answer, record)
		logDNSError(mock.Log, "mock.server", hostname, m, r, err)
		return []netip.Addr{}, ResolverAddrs{"MockClient"}, &Error{dns.TypeA, hostname, err, -1, nil}
	}
	// dual-homed host with an IPv6 and an IPv4 address
	if hostname == "ipv4.and.ipv6.localhost" {
		return []netip.Addr{
			netip.MustParseAddr("::1"),
			netip.MustParseAddr("127.0.0.1"),
		}, ResolverAddrs{"MockClient"}, nil
	}
	// IPv6-only host.
	if hostname == "ipv6.localhost" {
		return []netip.Addr{
			netip.MustParseAddr("::1"),
		}, ResolverAddrs{"MockClient"}, nil
	}
	// Default: everything else resolves to IPv4 localhost.
	return []netip.Addr{netip.MustParseAddr("127.0.0.1")}, ResolverAddrs{"MockClient"}, nil
}
// LookupCAA returns mock records for use in tests. It currently reports
// zero CAA records (and no error) for every domain.
func (mock *MockClient) LookupCAA(_ context.Context, domain string) ([]*dns.CAA, string, ResolverAddrs, error) {
	return nil, "", ResolverAddrs{"MockClient"}, nil
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/letsencrypt/boulder/bdns/dns_test.go | third-party/github.com/letsencrypt/boulder/bdns/dns_test.go | package bdns
import (
"context"
"crypto/tls"
"crypto/x509"
"errors"
"fmt"
"io"
"log"
"net"
"net/http"
"net/netip"
"net/url"
"os"
"regexp"
"slices"
"strings"
"sync"
"testing"
"time"
"github.com/jmhodges/clock"
"github.com/miekg/dns"
"github.com/prometheus/client_golang/prometheus"
blog "github.com/letsencrypt/boulder/log"
"github.com/letsencrypt/boulder/metrics"
"github.com/letsencrypt/boulder/test"
)
const dnsLoopbackAddr = "127.0.0.1:4053"
// mockDNSQuery is the HTTP handler backing the mock DoH resolver used by
// these tests. It validates the DoH request framing, unpacks the DNS
// query, and answers with canned records keyed on the question name/type.
//
// Fix: each rejection path now returns immediately after writing the 400
// status. Previously the handler fell through and kept processing the
// rejected request, which could call WriteHeader a second time and append
// a DNS body to an error response.
func mockDNSQuery(w http.ResponseWriter, httpReq *http.Request) {
	// Both sides of a DoH exchange must use the DNS wire-format media type.
	if httpReq.Header.Get("Content-Type") != "application/dns-message" {
		w.WriteHeader(http.StatusBadRequest)
		fmt.Fprintf(w, "client didn't send Content-Type: application/dns-message")
		return
	}
	if httpReq.Header.Get("Accept") != "application/dns-message" {
		w.WriteHeader(http.StatusBadRequest)
		fmt.Fprintf(w, "client didn't accept Content-Type: application/dns-message")
		return
	}
	requestBody, err := io.ReadAll(httpReq.Body)
	if err != nil {
		w.WriteHeader(http.StatusBadRequest)
		fmt.Fprintf(w, "reading body: %s", err)
		return
	}
	httpReq.Body.Close()

	r := new(dns.Msg)
	err = r.Unpack(requestBody)
	if err != nil {
		w.WriteHeader(http.StatusBadRequest)
		fmt.Fprintf(w, "unpacking request: %s", err)
		return
	}

	m := new(dns.Msg)
	m.SetReply(r)
	m.Compress = false

	appendAnswer := func(rr dns.RR) {
		m.Answer = append(m.Answer, rr)
	}
	for _, q := range r.Question {
		q.Name = strings.ToLower(q.Name)
		// NOTE(review): q.Name is fully qualified (trailing dot), so the
		// "servfailexception.example.com" comparison (no trailing dot) can
		// never match — confirm whether the dot was dropped accidentally.
		if q.Name == "servfail.com." || q.Name == "servfailexception.example.com" {
			m.Rcode = dns.RcodeServerFailure
			break
		}
		switch q.Qtype {
		case dns.TypeSOA:
			record := new(dns.SOA)
			record.Hdr = dns.RR_Header{Name: "letsencrypt.org.", Rrtype: dns.TypeSOA, Class: dns.ClassINET, Ttl: 0}
			record.Ns = "ns.letsencrypt.org."
			record.Mbox = "master.letsencrypt.org."
			record.Serial = 1
			record.Refresh = 1
			record.Retry = 1
			record.Expire = 1
			record.Minttl = 1
			appendAnswer(record)
		case dns.TypeAAAA:
			if q.Name == "v6.letsencrypt.org." {
				record := new(dns.AAAA)
				record.Hdr = dns.RR_Header{Name: "v6.letsencrypt.org.", Rrtype: dns.TypeAAAA, Class: dns.ClassINET, Ttl: 0}
				record.AAAA = net.ParseIP("2602:80a:6000:abad:cafe::1")
				appendAnswer(record)
			}
			if q.Name == "dualstack.letsencrypt.org." {
				record := new(dns.AAAA)
				record.Hdr = dns.RR_Header{Name: "dualstack.letsencrypt.org.", Rrtype: dns.TypeAAAA, Class: dns.ClassINET, Ttl: 0}
				record.AAAA = net.ParseIP("2602:80a:6000:abad:cafe::1")
				appendAnswer(record)
			}
			if q.Name == "v4error.letsencrypt.org." {
				record := new(dns.AAAA)
				record.Hdr = dns.RR_Header{Name: "v4error.letsencrypt.org.", Rrtype: dns.TypeAAAA, Class: dns.ClassINET, Ttl: 0}
				record.AAAA = net.ParseIP("2602:80a:6000:abad:cafe::1")
				appendAnswer(record)
			}
			if q.Name == "v6error.letsencrypt.org." {
				m.SetRcode(r, dns.RcodeNotImplemented)
			}
			if q.Name == "nxdomain.letsencrypt.org." {
				m.SetRcode(r, dns.RcodeNameError)
			}
			if q.Name == "dualstackerror.letsencrypt.org." {
				m.SetRcode(r, dns.RcodeNotImplemented)
			}
		case dns.TypeA:
			if q.Name == "cps.letsencrypt.org." {
				record := new(dns.A)
				record.Hdr = dns.RR_Header{Name: "cps.letsencrypt.org.", Rrtype: dns.TypeA, Class: dns.ClassINET, Ttl: 0}
				record.A = net.ParseIP("64.112.117.1")
				appendAnswer(record)
			}
			if q.Name == "dualstack.letsencrypt.org." {
				record := new(dns.A)
				record.Hdr = dns.RR_Header{Name: "dualstack.letsencrypt.org.", Rrtype: dns.TypeA, Class: dns.ClassINET, Ttl: 0}
				record.A = net.ParseIP("64.112.117.1")
				appendAnswer(record)
			}
			if q.Name == "v6error.letsencrypt.org." {
				record := new(dns.A)
				record.Hdr = dns.RR_Header{Name: "dualstack.letsencrypt.org.", Rrtype: dns.TypeA, Class: dns.ClassINET, Ttl: 0}
				record.A = net.ParseIP("64.112.117.1")
				appendAnswer(record)
			}
			if q.Name == "v4error.letsencrypt.org." {
				m.SetRcode(r, dns.RcodeNotImplemented)
			}
			if q.Name == "nxdomain.letsencrypt.org." {
				m.SetRcode(r, dns.RcodeNameError)
			}
			if q.Name == "dualstackerror.letsencrypt.org." {
				m.SetRcode(r, dns.RcodeRefused)
			}
		case dns.TypeCNAME:
			if q.Name == "cname.letsencrypt.org." {
				record := new(dns.CNAME)
				record.Hdr = dns.RR_Header{Name: "cname.letsencrypt.org.", Rrtype: dns.TypeCNAME, Class: dns.ClassINET, Ttl: 30}
				record.Target = "cps.letsencrypt.org."
				appendAnswer(record)
			}
			if q.Name == "cname.example.com." {
				record := new(dns.CNAME)
				record.Hdr = dns.RR_Header{Name: "cname.example.com.", Rrtype: dns.TypeCNAME, Class: dns.ClassINET, Ttl: 30}
				record.Target = "CAA.example.com."
				appendAnswer(record)
			}
		case dns.TypeDNAME:
			if q.Name == "dname.letsencrypt.org." {
				record := new(dns.DNAME)
				record.Hdr = dns.RR_Header{Name: "dname.letsencrypt.org.", Rrtype: dns.TypeDNAME, Class: dns.ClassINET, Ttl: 30}
				record.Target = "cps.letsencrypt.org."
				appendAnswer(record)
			}
		case dns.TypeCAA:
			if q.Name == "bracewel.net." || q.Name == "caa.example.com." {
				record := new(dns.CAA)
				record.Hdr = dns.RR_Header{Name: q.Name, Rrtype: dns.TypeCAA, Class: dns.ClassINET, Ttl: 0}
				record.Tag = "issue"
				record.Value = "letsencrypt.org"
				record.Flag = 1
				appendAnswer(record)
			}
			if q.Name == "cname.example.com." {
				record := new(dns.CAA)
				record.Hdr = dns.RR_Header{Name: "caa.example.com.", Rrtype: dns.TypeCAA, Class: dns.ClassINET, Ttl: 0}
				record.Tag = "issue"
				record.Value = "letsencrypt.org"
				record.Flag = 1
				appendAnswer(record)
			}
			if q.Name == "gonetld." {
				m.SetRcode(r, dns.RcodeNameError)
			}
		case dns.TypeTXT:
			if q.Name == "split-txt.letsencrypt.org." {
				record := new(dns.TXT)
				record.Hdr = dns.RR_Header{Name: "split-txt.letsencrypt.org.", Rrtype: dns.TypeTXT, Class: dns.ClassINET, Ttl: 0}
				record.Txt = []string{"a", "b", "c"}
				appendAnswer(record)
			} else {
				// No answer: return an SOA in the authority section instead.
				auth := new(dns.SOA)
				auth.Hdr = dns.RR_Header{Name: "letsencrypt.org.", Rrtype: dns.TypeSOA, Class: dns.ClassINET, Ttl: 0}
				auth.Ns = "ns.letsencrypt.org."
				auth.Mbox = "master.letsencrypt.org."
				auth.Serial = 1
				auth.Refresh = 1
				auth.Retry = 1
				auth.Expire = 1
				auth.Minttl = 1
				m.Ns = append(m.Ns, auth)
			}
			if q.Name == "nxdomain.letsencrypt.org." {
				m.SetRcode(r, dns.RcodeNameError)
			}
		}
	}

	body, err := m.Pack()
	if err != nil {
		fmt.Fprintf(os.Stderr, "packing reply: %s\n", err)
	}
	w.Header().Set("Content-Type", "application/dns-message")
	_, err = w.Write(body)
	if err != nil {
		panic(err) // running tests, so panic is OK
	}
}
// serveLoopResolver starts the mock DoH server on dnsLoopbackAddr in a
// background goroutine and shuts it down when a value arrives on stopChan.
func serveLoopResolver(stopChan chan bool) {
	m := http.NewServeMux()
	m.HandleFunc("/dns-query", mockDNSQuery)
	httpServer := &http.Server{
		Addr:         dnsLoopbackAddr,
		Handler:      m,
		ReadTimeout:  time.Second,
		WriteTimeout: time.Second,
	}
	go func() {
		// Serve with the test IPKI certificate pair trusted by tlsConfig.
		cert := "../test/certs/ipki/localhost/cert.pem"
		key := "../test/certs/ipki/localhost/key.pem"
		err := httpServer.ListenAndServeTLS(cert, key)
		if err != nil {
			fmt.Println(err)
		}
	}()
	go func() {
		// Graceful shutdown once the test suite signals completion.
		<-stopChan
		err := httpServer.Shutdown(context.Background())
		if err != nil {
			log.Fatal(err)
		}
	}()
}
// pollServer blocks until the mock DoH server is accepting UDP dials on
// dnsLoopbackAddr, retrying every 200ms and exiting the process if the
// server has not come up within 5 seconds.
func pollServer() {
	backoff := 200 * time.Millisecond
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()
	ticker := time.NewTicker(backoff)
	// Fix: stop the ticker on return so its resources are released once
	// the server is up (previously it was never stopped).
	defer ticker.Stop()
	for {
		select {
		case <-ctx.Done():
			fmt.Fprintln(os.Stderr, "Timeout reached while testing for the dns server to come up")
			os.Exit(1)
		case <-ticker.C:
			// A successful dial means the server socket exists.
			conn, _ := dns.DialTimeout("udp", dnsLoopbackAddr, backoff)
			if conn != nil {
				_ = conn.Close()
				return
			}
		}
	}
}
// tlsConfig is used for the TLS config of client instances that talk to the
// DoH server set up in TestMain.
var tlsConfig *tls.Config

// TestMain boots the mock DoH resolver, waits for it to come up, runs the
// test suite, then stops the resolver and exits with the suite's status.
func TestMain(m *testing.M) {
	// Trust the test CA so clients can verify the mock server's certificate.
	root, err := os.ReadFile("../test/certs/ipki/minica.pem")
	if err != nil {
		log.Fatal(err)
	}
	pool := x509.NewCertPool()
	pool.AppendCertsFromPEM(root)
	tlsConfig = &tls.Config{
		RootCAs: pool,
	}
	stop := make(chan bool, 1)
	serveLoopResolver(stop)
	pollServer()
	ret := m.Run()
	stop <- true
	os.Exit(ret)
}
// TestDNSNoServers verifies that every lookup fails cleanly when the
// client is configured with an empty resolver list.
func TestDNSNoServers(t *testing.T) {
	provider, err := NewStaticProvider([]string{})
	test.AssertNotError(t, err, "Got error creating StaticProvider")
	client := New(time.Hour, provider, metrics.NoopRegisterer, clock.NewFake(), 1, "", blog.UseMock(), tlsConfig)

	_, resolvers, err := client.LookupHost(context.Background(), "letsencrypt.org")
	test.AssertEquals(t, len(resolvers), 0)
	test.AssertError(t, err, "No servers")

	_, _, err = client.LookupTXT(context.Background(), "letsencrypt.org")
	test.AssertError(t, err, "No servers")

	_, _, _, err = client.LookupCAA(context.Background(), "letsencrypt.org")
	test.AssertError(t, err, "No servers")
}
// TestDNSOneServer verifies a basic lookup against a single resolver.
func TestDNSOneServer(t *testing.T) {
	provider, err := NewStaticProvider([]string{dnsLoopbackAddr})
	test.AssertNotError(t, err, "Got error creating StaticProvider")
	client := New(time.Second*10, provider, metrics.NoopRegisterer, clock.NewFake(), 1, "", blog.UseMock(), tlsConfig)

	_, resolvers, err := client.LookupHost(context.Background(), "cps.letsencrypt.org")
	test.AssertEquals(t, len(resolvers), 2)
	slices.Sort(resolvers)
	test.AssertDeepEquals(t, resolvers, ResolverAddrs{"A:127.0.0.1:4053", "AAAA:127.0.0.1:4053"})
	test.AssertNotError(t, err, "No message")
}
// TestDNSDuplicateServers verifies that listing the same resolver twice
// behaves identically to listing it once.
func TestDNSDuplicateServers(t *testing.T) {
	provider, err := NewStaticProvider([]string{dnsLoopbackAddr, dnsLoopbackAddr})
	test.AssertNotError(t, err, "Got error creating StaticProvider")
	client := New(time.Second*10, provider, metrics.NoopRegisterer, clock.NewFake(), 1, "", blog.UseMock(), tlsConfig)

	_, resolvers, err := client.LookupHost(context.Background(), "cps.letsencrypt.org")
	test.AssertEquals(t, len(resolvers), 2)
	slices.Sort(resolvers)
	test.AssertDeepEquals(t, resolvers, ResolverAddrs{"A:127.0.0.1:4053", "AAAA:127.0.0.1:4053"})
	test.AssertNotError(t, err, "No message")
}
// TestDNSServFail verifies that a SERVFAIL response surfaces as an error
// from every lookup method.
func TestDNSServFail(t *testing.T) {
	provider, err := NewStaticProvider([]string{dnsLoopbackAddr})
	test.AssertNotError(t, err, "Got error creating StaticProvider")
	client := New(time.Second*10, provider, metrics.NoopRegisterer, clock.NewFake(), 1, "", blog.UseMock(), tlsConfig)
	servFailHost := "servfail.com"

	_, _, err = client.LookupTXT(context.Background(), servFailHost)
	test.AssertError(t, err, "LookupTXT didn't return an error")

	_, _, err = client.LookupHost(context.Background(), servFailHost)
	test.AssertError(t, err, "LookupHost didn't return an error")

	emptyCaa, _, _, err := client.LookupCAA(context.Background(), servFailHost)
	test.Assert(t, len(emptyCaa) == 0, "Query returned non-empty list of CAA records")
	test.AssertError(t, err, "LookupCAA should have returned an error")
}
// TestDNSLookupTXT verifies TXT lookups, including that multi-string TXT
// records are concatenated into a single answer.
func TestDNSLookupTXT(t *testing.T) {
	provider, err := NewStaticProvider([]string{dnsLoopbackAddr})
	test.AssertNotError(t, err, "Got error creating StaticProvider")
	client := New(time.Second*10, provider, metrics.NoopRegisterer, clock.NewFake(), 1, "", blog.UseMock(), tlsConfig)

	records, _, err := client.LookupTXT(context.Background(), "letsencrypt.org")
	t.Logf("A: %v", records)
	test.AssertNotError(t, err, "No message")

	records, _, err = client.LookupTXT(context.Background(), "split-txt.letsencrypt.org")
	t.Logf("A: %v ", records)
	test.AssertNotError(t, err, "No message")
	test.AssertEquals(t, len(records), 1)
	test.AssertEquals(t, records[0], "abc")
}
// TODO(#8213): Convert this to a table test.

// TestDNSLookupHost walks through the mock resolver's canned hostnames,
// covering server failure, empty answers, single/dual-stack answers, and
// per-family error combinations.
func TestDNSLookupHost(t *testing.T) {
	staticProvider, err := NewStaticProvider([]string{dnsLoopbackAddr})
	test.AssertNotError(t, err, "Got error creating StaticProvider")
	obj := New(time.Second*10, staticProvider, metrics.NoopRegisterer, clock.NewFake(), 1, "", blog.UseMock(), tlsConfig)

	// SERVFAIL: error, no addresses.
	ip, resolvers, err := obj.LookupHost(context.Background(), "servfail.com")
	t.Logf("servfail.com - IP: %s, Err: %s", ip, err)
	test.AssertError(t, err, "Server failure")
	test.Assert(t, len(ip) == 0, "Should not have IPs")
	slices.Sort(resolvers)
	test.AssertDeepEquals(t, resolvers, ResolverAddrs{"A:127.0.0.1:4053", "AAAA:127.0.0.1:4053"})

	// NOERROR with zero records: also an error, no addresses.
	ip, resolvers, err = obj.LookupHost(context.Background(), "nonexistent.letsencrypt.org")
	t.Logf("nonexistent.letsencrypt.org - IP: %s, Err: %s", ip, err)
	test.AssertError(t, err, "No valid A or AAAA records should error")
	test.Assert(t, len(ip) == 0, "Should not have IPs")
	slices.Sort(resolvers)
	test.AssertDeepEquals(t, resolvers, ResolverAddrs{"A:127.0.0.1:4053", "AAAA:127.0.0.1:4053"})

	// Single IPv4 address
	ip, resolvers, err = obj.LookupHost(context.Background(), "cps.letsencrypt.org")
	t.Logf("cps.letsencrypt.org - IP: %s, Err: %s", ip, err)
	test.AssertNotError(t, err, "Not an error to exist")
	test.Assert(t, len(ip) == 1, "Should have IP")
	slices.Sort(resolvers)
	test.AssertDeepEquals(t, resolvers, ResolverAddrs{"A:127.0.0.1:4053", "AAAA:127.0.0.1:4053"})
	// Repeat of the same query; expected to behave identically.
	ip, resolvers, err = obj.LookupHost(context.Background(), "cps.letsencrypt.org")
	t.Logf("cps.letsencrypt.org - IP: %s, Err: %s", ip, err)
	test.AssertNotError(t, err, "Not an error to exist")
	test.Assert(t, len(ip) == 1, "Should have IP")
	slices.Sort(resolvers)
	test.AssertDeepEquals(t, resolvers, ResolverAddrs{"A:127.0.0.1:4053", "AAAA:127.0.0.1:4053"})

	// Single IPv6 address
	ip, resolvers, err = obj.LookupHost(context.Background(), "v6.letsencrypt.org")
	t.Logf("v6.letsencrypt.org - IP: %s, Err: %s", ip, err)
	test.AssertNotError(t, err, "Not an error to exist")
	test.Assert(t, len(ip) == 1, "Should not have IPs")
	slices.Sort(resolvers)
	test.AssertDeepEquals(t, resolvers, ResolverAddrs{"A:127.0.0.1:4053", "AAAA:127.0.0.1:4053"})

	// Both IPv6 and IPv4 address; IPv4 is expected to sort first.
	ip, resolvers, err = obj.LookupHost(context.Background(), "dualstack.letsencrypt.org")
	t.Logf("dualstack.letsencrypt.org - IP: %s, Err: %s", ip, err)
	test.AssertNotError(t, err, "Not an error to exist")
	test.Assert(t, len(ip) == 2, "Should have 2 IPs")
	expected := netip.MustParseAddr("64.112.117.1")
	test.Assert(t, ip[0] == expected, "wrong ipv4 address")
	expected = netip.MustParseAddr("2602:80a:6000:abad:cafe::1")
	test.Assert(t, ip[1] == expected, "wrong ipv6 address")
	slices.Sort(resolvers)
	test.AssertDeepEquals(t, resolvers, ResolverAddrs{"A:127.0.0.1:4053", "AAAA:127.0.0.1:4053"})

	// IPv6 error, IPv4 success
	ip, resolvers, err = obj.LookupHost(context.Background(), "v6error.letsencrypt.org")
	t.Logf("v6error.letsencrypt.org - IP: %s, Err: %s", ip, err)
	test.AssertNotError(t, err, "Not an error to exist")
	test.Assert(t, len(ip) == 1, "Should have 1 IP")
	expected = netip.MustParseAddr("64.112.117.1")
	test.Assert(t, ip[0] == expected, "wrong ipv4 address")
	slices.Sort(resolvers)
	test.AssertDeepEquals(t, resolvers, ResolverAddrs{"A:127.0.0.1:4053", "AAAA:127.0.0.1:4053"})

	// IPv6 success, IPv4 error
	ip, resolvers, err = obj.LookupHost(context.Background(), "v4error.letsencrypt.org")
	t.Logf("v4error.letsencrypt.org - IP: %s, Err: %s", ip, err)
	test.AssertNotError(t, err, "Not an error to exist")
	test.Assert(t, len(ip) == 1, "Should have 1 IP")
	expected = netip.MustParseAddr("2602:80a:6000:abad:cafe::1")
	test.Assert(t, ip[0] == expected, "wrong ipv6 address")
	slices.Sort(resolvers)
	test.AssertDeepEquals(t, resolvers, ResolverAddrs{"A:127.0.0.1:4053", "AAAA:127.0.0.1:4053"})

	// IPv6 error, IPv4 error
	// Should return both the IPv4 error (Refused) and the IPv6 error (NotImplemented)
	hostname := "dualstackerror.letsencrypt.org"
	ip, resolvers, err = obj.LookupHost(context.Background(), hostname)
	t.Logf("%s - IP: %s, Err: %s", hostname, ip, err)
	test.AssertError(t, err, "Should be an error")
	test.AssertContains(t, err.Error(), "REFUSED looking up A for")
	test.AssertContains(t, err.Error(), "NOTIMP looking up AAAA for")
	slices.Sort(resolvers)
	test.AssertDeepEquals(t, resolvers, ResolverAddrs{"A:127.0.0.1:4053", "AAAA:127.0.0.1:4053"})
}
// TestDNSNXDOMAIN verifies that NXDOMAIN responses surface as errors: as a
// combined A/AAAA message from LookupHost, and as the structured Error type
// (carrying dns.RcodeNameError) from LookupTXT.
func TestDNSNXDOMAIN(t *testing.T) {
	staticProvider, err := NewStaticProvider([]string{dnsLoopbackAddr})
	test.AssertNotError(t, err, "Got error creating StaticProvider")
	obj := New(time.Second*10, staticProvider, metrics.NoopRegisterer, clock.NewFake(), 1, "", blog.UseMock(), tlsConfig)
	hostname := "nxdomain.letsencrypt.org"
	// LookupHost reports NXDOMAIN for both address families in one error.
	_, _, err = obj.LookupHost(context.Background(), hostname)
	test.AssertContains(t, err.Error(), "NXDOMAIN looking up A for")
	test.AssertContains(t, err.Error(), "NXDOMAIN looking up AAAA for")
	// LookupTXT returns the typed Error with the NXDOMAIN rcode.
	_, _, err = obj.LookupTXT(context.Background(), hostname)
	expected := Error{dns.TypeTXT, hostname, nil, dns.RcodeNameError, nil}
	test.AssertDeepEquals(t, err, expected)
}
// TestDNSLookupCAA covers the CAA lookup paths: a normal answer with records,
// an empty NOERROR answer, NXDOMAIN treated as an empty result for non-TLD
// names, CNAME chasing performed by the upstream resolver, and a hard
// failure when a bare TLD is NXDOMAIN.
func TestDNSLookupCAA(t *testing.T) {
	staticProvider, err := NewStaticProvider([]string{dnsLoopbackAddr})
	test.AssertNotError(t, err, "Got error creating StaticProvider")
	obj := New(time.Second*10, staticProvider, metrics.NoopRegisterer, clock.NewFake(), 1, "", blog.UseMock(), tlsConfig)
	// The dig-style response embeds a random query ID; normalize it so the
	// expected string below can be compared verbatim.
	removeIDExp := regexp.MustCompile(" id: [[:digit:]]+")
	// Case: name with CAA records present.
	caas, resp, resolvers, err := obj.LookupCAA(context.Background(), "bracewel.net")
	test.AssertNotError(t, err, "CAA lookup failed")
	test.Assert(t, len(caas) > 0, "Should have CAA records")
	test.AssertEquals(t, len(resolvers), 1)
	test.AssertDeepEquals(t, resolvers, ResolverAddrs{"127.0.0.1:4053"})
	expectedResp := `;; opcode: QUERY, status: NOERROR, id: XXXX
;; flags: qr rd; QUERY: 1, ANSWER: 1, AUTHORITY: 0, ADDITIONAL: 0
;; QUESTION SECTION:
;bracewel.net. IN CAA
;; ANSWER SECTION:
bracewel.net. 0 IN CAA 1 issue "letsencrypt.org"
`
	test.AssertEquals(t, removeIDExp.ReplaceAllString(resp, " id: XXXX"), expectedResp)
	// Case: name exists but has no CAA records; resp text stays empty.
	caas, resp, resolvers, err = obj.LookupCAA(context.Background(), "nonexistent.letsencrypt.org")
	test.AssertNotError(t, err, "CAA lookup failed")
	test.Assert(t, len(caas) == 0, "Shouldn't have CAA records")
	test.AssertEquals(t, resolvers[0], "127.0.0.1:4053")
	expectedResp = ""
	test.AssertEquals(t, resp, expectedResp)
	// Case: NXDOMAIN on a non-TLD name is treated as an empty CAA set.
	caas, resp, resolvers, err = obj.LookupCAA(context.Background(), "nxdomain.letsencrypt.org")
	slices.Sort(resolvers)
	test.AssertNotError(t, err, "CAA lookup failed")
	test.Assert(t, len(caas) == 0, "Shouldn't have CAA records")
	test.AssertEquals(t, resolvers[0], "127.0.0.1:4053")
	expectedResp = ""
	test.AssertEquals(t, resp, expectedResp)
	// Case: the upstream resolver follows a CNAME to the CAA records.
	caas, resp, resolvers, err = obj.LookupCAA(context.Background(), "cname.example.com")
	test.AssertNotError(t, err, "CAA lookup failed")
	test.Assert(t, len(caas) > 0, "Should follow CNAME to find CAA")
	test.AssertEquals(t, resolvers[0], "127.0.0.1:4053")
	expectedResp = `;; opcode: QUERY, status: NOERROR, id: XXXX
;; flags: qr rd; QUERY: 1, ANSWER: 1, AUTHORITY: 0, ADDITIONAL: 0
;; QUESTION SECTION:
;cname.example.com. IN CAA
;; ANSWER SECTION:
caa.example.com. 0 IN CAA 1 issue "letsencrypt.org"
`
	test.AssertEquals(t, removeIDExp.ReplaceAllString(resp, " id: XXXX"), expectedResp)
	// Case: NXDOMAIN on a bare TLD must remain an error (no issuance for
	// gTLDs removed by ICANN).
	_, _, resolvers, err = obj.LookupCAA(context.Background(), "gonetld")
	test.AssertError(t, err, "should fail for TLD NXDOMAIN")
	test.AssertContains(t, err.Error(), "NXDOMAIN")
	test.AssertEquals(t, resolvers[0], "127.0.0.1:4053")
}
// testExchanger is a scripted exchanger mock: each Exchange call consumes the
// next entry of errs. The embedded mutex guards count/errs because LookupHost
// issues concurrent A and AAAA queries against the same exchanger.
type testExchanger struct {
	sync.Mutex
	count int     // number of Exchange calls made so far
	errs  []error // scripted per-call errors; nil means a successful exchange
}

// errTooManyRequests is returned once the testExchanger's script is
// exhausted, signaling that the code under test issued more queries than the
// test case expected.
var errTooManyRequests = errors.New("too many requests")
// Exchange hands back the next scripted error from te.errs together with a
// minimal NOERROR response and a fixed 2ms round-trip time. Once the script
// is exhausted it fails with errTooManyRequests so tests can detect
// unexpected extra queries.
func (te *testExchanger) Exchange(m *dns.Msg, a string) (*dns.Msg, time.Duration, error) {
	te.Lock()
	defer te.Unlock()
	if te.count >= len(te.errs) {
		return nil, 0, errTooManyRequests
	}
	scripted := te.errs[te.count]
	te.count++
	resp := &dns.Msg{MsgHdr: dns.MsgHdr{Rcode: dns.RcodeSuccess}}
	return resp, 2 * time.Millisecond, scripted
}
// TestRetry exercises the retry loop in exchangeOne via LookupTXT: temporary
// *url.Error values are retried up to maxTries, non-temporary or non-url
// errors stop immediately, and exhausting retries increments the
// "out of retries" timeout metric. A trailing section checks that canceled
// and already-expired contexts produce the corresponding timeout errors and
// metrics.
func TestRetry(t *testing.T) {
	isTempErr := &url.Error{Op: "read", Err: tempError(true)}
	nonTempErr := &url.Error{Op: "read", Err: tempError(false)}
	servFailError := errors.New("DNS problem: server failure at resolver looking up TXT for example.com")
	type testCase struct {
		name              string
		maxTries          int
		te                *testExchanger
		expected          error
		expectedCount     int     // how many Exchange calls the case should make
		metricsAllRetries float64 // expected "out of retries" counter value, if > 0
	}
	tests := []*testCase{
		// The success on first try case
		{
			name:     "success",
			maxTries: 3,
			te: &testExchanger{
				errs: []error{nil},
			},
			expected:      nil,
			expectedCount: 1,
		},
		// Immediate non-OpError, error returns immediately
		{
			name:     "non-operror",
			maxTries: 3,
			te: &testExchanger{
				errs: []error{errors.New("nope")},
			},
			expected:      servFailError,
			expectedCount: 1,
		},
		// Temporary err, then non-OpError stops at two tries
		{
			name:     "err-then-non-operror",
			maxTries: 3,
			te: &testExchanger{
				errs: []error{isTempErr, errors.New("nope")},
			},
			expected:      servFailError,
			expectedCount: 2,
		},
		// Temporary error given always
		{
			name:     "persistent-temp-error",
			maxTries: 3,
			te: &testExchanger{
				errs: []error{
					isTempErr,
					isTempErr,
					isTempErr,
				},
			},
			expected:          servFailError,
			expectedCount:     3,
			metricsAllRetries: 1,
		},
		// Even with maxTries at 0, we should still let a single request go
		// through
		{
			name:     "zero-maxtries",
			maxTries: 0,
			te: &testExchanger{
				errs: []error{nil},
			},
			expected:      nil,
			expectedCount: 1,
		},
		// Temporary error given just once causes two tries
		{
			name:     "single-temp-error",
			maxTries: 3,
			te: &testExchanger{
				errs: []error{
					isTempErr,
					nil,
				},
			},
			expected:      nil,
			expectedCount: 2,
		},
		// Temporary error given twice causes three tries
		{
			name:     "double-temp-error",
			maxTries: 3,
			te: &testExchanger{
				errs: []error{
					isTempErr,
					isTempErr,
					nil,
				},
			},
			expected:      nil,
			expectedCount: 3,
		},
		// Temporary error given thrice causes three tries and fails
		{
			name:     "triple-temp-error",
			maxTries: 3,
			te: &testExchanger{
				errs: []error{
					isTempErr,
					isTempErr,
					isTempErr,
				},
			},
			expected:          servFailError,
			expectedCount:     3,
			metricsAllRetries: 1,
		},
		// temporary then non-Temporary error causes two retries
		{
			name:     "temp-nontemp-error",
			maxTries: 3,
			te: &testExchanger{
				errs: []error{
					isTempErr,
					nonTempErr,
				},
			},
			expected:      servFailError,
			expectedCount: 2,
		},
	}
	for i, tc := range tests {
		t.Run(tc.name, func(t *testing.T) {
			staticProvider, err := NewStaticProvider([]string{dnsLoopbackAddr})
			test.AssertNotError(t, err, "Got error creating StaticProvider")
			testClient := New(time.Second*10, staticProvider, metrics.NoopRegisterer, clock.NewFake(), tc.maxTries, "", blog.UseMock(), tlsConfig)
			dr := testClient.(*impl)
			dr.dnsClient = tc.te
			_, _, err = dr.LookupTXT(context.Background(), "example.com")
			// NOTE(review): this sentinel comparison uses ==; if LookupTXT
			// wraps the underlying error, errors.Is would be needed for the
			// guard to ever fire — TODO confirm.
			if err == errTooManyRequests {
				t.Errorf("#%d, sent more requests than the test case handles", i)
			}
			// Compare by message text, since the returned error is a wrapped
			// DNS problem rather than the original sentinel.
			expectedErr := tc.expected
			if (expectedErr == nil && err != nil) ||
				(expectedErr != nil && err == nil) ||
				(expectedErr != nil && expectedErr.Error() != err.Error()) {
				t.Errorf("#%d, error, expected %v, got %v", i, expectedErr, err)
			}
			if tc.expectedCount != tc.te.count {
				t.Errorf("#%d, error, expectedCount %v, got %v", i, tc.expectedCount, tc.te.count)
			}
			if tc.metricsAllRetries > 0 {
				test.AssertMetricWithLabelsEquals(
					t, dr.timeoutCounter, prometheus.Labels{
						"qtype":    "TXT",
						"type":     "out of retries",
						"resolver": "127.0.0.1",
						"isTLD":    "false",
					}, tc.metricsAllRetries)
			}
		})
	}
	// Context-cancellation section: an already-canceled context yields the
	// "canceled" error/metric, and an already-expired deadline yields the
	// "timed out" error and the "deadline exceeded" metric (twice).
	staticProvider, err := NewStaticProvider([]string{dnsLoopbackAddr})
	test.AssertNotError(t, err, "Got error creating StaticProvider")
	testClient := New(time.Second*10, staticProvider, metrics.NoopRegisterer, clock.NewFake(), 3, "", blog.UseMock(), tlsConfig)
	dr := testClient.(*impl)
	dr.dnsClient = &testExchanger{errs: []error{isTempErr, isTempErr, nil}}
	ctx, cancel := context.WithCancel(context.Background())
	cancel()
	_, _, err = dr.LookupTXT(ctx, "example.com")
	if err == nil ||
		err.Error() != "DNS problem: query timed out (and was canceled) looking up TXT for example.com" {
		t.Errorf("expected %s, got %s", context.Canceled, err)
	}
	dr.dnsClient = &testExchanger{errs: []error{isTempErr, isTempErr, nil}}
	ctx, cancel = context.WithTimeout(context.Background(), -10*time.Hour)
	defer cancel()
	_, _, err = dr.LookupTXT(ctx, "example.com")
	if err == nil ||
		err.Error() != "DNS problem: query timed out looking up TXT for example.com" {
		t.Errorf("expected %s, got %s", context.DeadlineExceeded, err)
	}
	dr.dnsClient = &testExchanger{errs: []error{isTempErr, isTempErr, nil}}
	ctx, deadlineCancel := context.WithTimeout(context.Background(), -10*time.Hour)
	deadlineCancel()
	_, _, err = dr.LookupTXT(ctx, "example.com")
	if err == nil ||
		err.Error() != "DNS problem: query timed out looking up TXT for example.com" {
		t.Errorf("expected %s, got %s", context.DeadlineExceeded, err)
	}
	test.AssertMetricWithLabelsEquals(
		t, dr.timeoutCounter, prometheus.Labels{
			"qtype":    "TXT",
			"type":     "canceled",
			"resolver": "127.0.0.1",
		}, 1)
	test.AssertMetricWithLabelsEquals(
		t, dr.timeoutCounter, prometheus.Labels{
			"qtype":    "TXT",
			"type":     "deadline exceeded",
			"resolver": "127.0.0.1",
		}, 2)
}
// TestIsTLD exercises the isTLD helper, which reports "true"/"false" as
// strings (for use as a Prometheus label value) based on whether the name
// contains a dot.
func TestIsTLD(t *testing.T) {
	if isTLD("com") != "true" {
		t.Errorf("expected 'com' to be a TLD, got %q", isTLD("com"))
	}
	// Fixed grammar in the failure message ("to not a TLD" -> "to not be a TLD").
	if isTLD("example.com") != "false" {
		t.Errorf("expected 'example.com' to not be a TLD, got %q", isTLD("example.com"))
	}
}
// tempError is a test helper error whose boolean value controls whether it
// reports itself as temporary (i.e. retryable by the DNS client).
type tempError bool

// Temporary reports whether the error should be treated as retryable.
func (te tempError) Temporary() bool { return bool(te) }

// Error renders the error as "Temporary: true" or "Temporary: false".
func (te tempError) Error() string { return fmt.Sprintf("Temporary: %v", bool(te)) }
// rotateFailureExchanger is a dns.Exchange implementation that tracks a count
// of the number of calls to `Exchange` for a given address in the `lookups`
// map. For all addresses in the `brokenAddresses` map, a retryable error is
// returned from `Exchange`. This mock is used by `TestRotateServerOnErr`.
type rotateFailureExchanger struct {
	sync.Mutex
	lookups         map[string]int  // per-server count of Exchange calls observed
	brokenAddresses map[string]bool // servers that always fail with a retryable error
}
// Exchange for rotateFailureExchanger records the `a` argument in `lookups`
// and, if `a` is listed in `brokenAddresses`, returns a temporary (retryable)
// *url.Error so the caller rotates to the next server in its list.
func (e *rotateFailureExchanger) Exchange(m *dns.Msg, a string) (*dns.Msg, time.Duration, error) {
	e.Lock()
	defer e.Unlock()
	// Record the attempt against this server address.
	e.lookups[a]++
	// If it's a broken server, answer with a retryable error.
	if e.brokenAddresses[a] {
		return nil, 2 * time.Millisecond, &url.Error{Op: "read", Err: tempError(true)}
	}
	// Working servers simply echo the request message back.
	return m, 2 * time.Millisecond, nil
}
// TestRotateServerOnErr ensures that a retryable error returned from a DNS
// server will result in the retry being performed against the next server in
// the list.
func TestRotateServerOnErr(t *testing.T) {
	// Configure three DNS servers
	dnsServers := []string{
		"a:53", "b:53", "[2606:4700:4700::1111]:53",
	}
	// Set up a DNS client using these servers that will retry queries up to
	// a maximum of 5 times. It's important to choose a maxTries value >= the
	// number of dnsServers to ensure we always get around to trying the one
	// working server
	staticProvider, err := NewStaticProvider(dnsServers)
	test.AssertNotError(t, err, "Got error creating StaticProvider")
	maxTries := 5
	client := New(time.Second*10, staticProvider, metrics.NoopRegisterer, clock.NewFake(), maxTries, "", blog.UseMock(), tlsConfig)
	// Configure a mock exchanger that will always return a retryable error for
	// servers A and B. This will force server "[2606:4700:4700::1111]:53" to do
	// all the work once retries reach it.
	mock := &rotateFailureExchanger{
		brokenAddresses: map[string]bool{
			"a:53": true,
			"b:53": true,
		},
		lookups: make(map[string]int),
	}
	client.(*impl).dnsClient = mock
	// Perform a bunch of lookups. We choose the initial server randomly. Any time
	// A or B is chosen there should be an error and a retry using the next server
	// in the list. Since we configured maxTries to be larger than the number of
	// servers *all* queries should eventually succeed by being retried against
	// server "[2606:4700:4700::1111]:53". (exchangeOne rotates via
	// index+1 mod len(servers) on each retryable failure.)
	for range maxTries * 2 {
		_, resolvers, err := client.LookupTXT(context.Background(), "example.com")
		test.AssertEquals(t, len(resolvers), 1)
		test.AssertEquals(t, resolvers[0], "[2606:4700:4700::1111]:53")
		// Any errors are unexpected - server "[2606:4700:4700::1111]:53" should
		// have responded without error.
		test.AssertNotError(t, err, "Expected no error from eventual retry with functional server")
	}
	// We expect that the A and B servers had a non-zero number of lookups
	// attempted.
	test.Assert(t, mock.lookups["a:53"] > 0, "Expected A server to have non-zero lookup attempts")
	test.Assert(t, mock.lookups["b:53"] > 0, "Expected B server to have non-zero lookup attempts")
	// We expect that the server "[2606:4700:4700::1111]:53" eventually served
	// all of the lookups attempted.
	test.AssertEquals(t, mock.lookups["[2606:4700:4700::1111]:53"], maxTries*2)
}
// mockTempURLError is a stand-in for a temporary (retryable) but non-timeout
// network error, suitable for wrapping in a *url.Error.
type mockTempURLError struct{}

// Temporary marks the error as retryable.
func (e *mockTempURLError) Temporary() bool { return true }

// Timeout reports that this error is not a timeout.
func (e *mockTempURLError) Timeout() bool { return false }

// Error returns the fixed error message.
func (e *mockTempURLError) Error() string { return "whoops, oh gosh" }
// dohAlwaysRetryExchanger is an exchanger mock whose Exchange always returns
// a temporary *url.Error, driving the retry loop to exhaustion.
type dohAlwaysRetryExchanger struct {
	sync.Mutex
	// err is set by tests (see TestDOHMetric) but never read by Exchange,
	// which constructs its own temporary error — presumably vestigial;
	// TODO confirm.
	err error
}
// Exchange unconditionally fails with a temporary *url.Error (wrapping
// mockTempURLError) and a fixed one-second duration, regardless of the
// query or server address. Note it ignores the struct's err field.
func (dohE *dohAlwaysRetryExchanger) Exchange(m *dns.Msg, a string) (*dns.Msg, time.Duration, error) {
	dohE.Lock()
	defer dohE.Unlock()
	return nil, time.Second, &url.Error{
		Op:  "GET",
		URL: "https://example.com",
		Err: &mockTempURLError{},
	}
}
// TestDOHMetric checks that a temporary DoH error, with no retries left
// (maxTries=0 still allows the single initial attempt), increments the
// "out of retries" timeout counter exactly once.
func TestDOHMetric(t *testing.T) {
	staticProvider, err := NewStaticProvider([]string{dnsLoopbackAddr})
	test.AssertNotError(t, err, "Got error creating StaticProvider")
	testClient := New(time.Second*11, staticProvider, metrics.NoopRegisterer, clock.NewFake(), 0, "", blog.UseMock(), tlsConfig)
	resolver := testClient.(*impl)
	resolver.dnsClient = &dohAlwaysRetryExchanger{err: &url.Error{Op: "read", Err: tempError(true)}}
	// Starting out, we should count 0 "out of retries" errors.
	test.AssertMetricWithLabelsEquals(t, resolver.timeoutCounter, prometheus.Labels{"qtype": "None", "type": "out of retries", "resolver": "127.0.0.1", "isTLD": "false"}, 0)
	// Trigger the error.
	_, _, _ = resolver.exchangeOne(context.Background(), "example.com", 0)
	// Now, we should count 1 "out of retries" errors.
	test.AssertMetricWithLabelsEquals(t, resolver.timeoutCounter, prometheus.Labels{"qtype": "None", "type": "out of retries", "resolver": "127.0.0.1", "isTLD": "false"}, 1)
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/letsencrypt/boulder/bdns/dns.go | third-party/github.com/letsencrypt/boulder/bdns/dns.go | package bdns
import (
"context"
"crypto/tls"
"encoding/base64"
"errors"
"fmt"
"io"
"net"
"net/http"
"net/netip"
"net/url"
"slices"
"strconv"
"strings"
"sync"
"time"
"github.com/jmhodges/clock"
"github.com/miekg/dns"
"github.com/prometheus/client_golang/prometheus"
"github.com/letsencrypt/boulder/iana"
blog "github.com/letsencrypt/boulder/log"
"github.com/letsencrypt/boulder/metrics"
)
// ResolverAddrs contains DNS resolver(s) that were chosen to perform a
// validation request or CAA recheck. A ResolverAddr will be in the form of
// host:port, A:host:port, or AAAA:host:port depending on which type of lookup
// was done.
type ResolverAddrs []string

// Client queries for DNS records
type Client interface {
	// LookupTXT returns the joined TXT record values for a hostname.
	LookupTXT(context.Context, string) (txts []string, resolver ResolverAddrs, err error)
	// LookupHost returns the combined A/AAAA addresses for a hostname.
	LookupHost(context.Context, string) ([]netip.Addr, ResolverAddrs, error)
	// LookupCAA returns CAA records plus a dig-style text dump of the response.
	LookupCAA(context.Context, string) ([]*dns.CAA, string, ResolverAddrs, error)
}

// impl represents a client that talks to an external resolver
type impl struct {
	dnsClient                exchanger      // transport used for queries (DoH in production)
	servers                  ServerProvider // source of resolver addresses
	allowRestrictedAddresses bool           // permit reserved/loopback IPs (set only by NewTest)
	maxTries                 int            // total attempts per query, including the first
	clk                      clock.Clock
	log                      blog.Logger
	queryTime                *prometheus.HistogramVec
	totalLookupTime          *prometheus.HistogramVec
	timeoutCounter           *prometheus.CounterVec
	idMismatchCounter        *prometheus.CounterVec
}

// Compile-time check that impl satisfies Client.
var _ Client = &impl{}

// exchanger abstracts the DNS transport so tests can substitute mocks.
type exchanger interface {
	Exchange(m *dns.Msg, a string) (*dns.Msg, time.Duration, error)
}
// New constructs a new DNS resolver object that utilizes the
// provided list of DNS servers for resolution.
//
// `tlsConfig` is the configuration used for outbound DoH queries,
// if applicable.
//
// The returned client always speaks DNS-over-HTTPS, registers its four
// Prometheus metrics on `stats`, and rejects reserved/restricted IP
// addresses in lookup results (see NewTest for the permissive variant).
func New(
	readTimeout time.Duration,
	servers ServerProvider,
	stats prometheus.Registerer,
	clk clock.Clock,
	maxTries int,
	userAgent string,
	log blog.Logger,
	tlsConfig *tls.Config,
) Client {
	var client exchanger
	// Clone the default transport because it comes with various settings
	// that we like, which are different from the zero value of an
	// `http.Transport`.
	transport := http.DefaultTransport.(*http.Transport).Clone()
	transport.TLSClientConfig = tlsConfig
	// The default transport already sets this field, but it isn't
	// documented that it will always be set. Set it again to be sure,
	// because Unbound will reject non-HTTP/2 DoH requests.
	transport.ForceAttemptHTTP2 = true
	client = &dohExchanger{
		clk: clk,
		hc: http.Client{
			Timeout:   readTimeout,
			Transport: transport,
		},
		userAgent: userAgent,
	}
	// Per-attempt query latency, labeled by query type, rcode, and resolver IP.
	queryTime := prometheus.NewHistogramVec(
		prometheus.HistogramOpts{
			Name:    "dns_query_time",
			Help:    "Time taken to perform a DNS query",
			Buckets: metrics.InternetFacingBuckets,
		},
		[]string{"qtype", "result", "resolver"},
	)
	// End-to-end lookup latency, including all retries.
	totalLookupTime := prometheus.NewHistogramVec(
		prometheus.HistogramOpts{
			Name:    "dns_total_lookup_time",
			Help:    "Time taken to perform a DNS lookup, including all retried queries",
			Buckets: metrics.InternetFacingBuckets,
		},
		[]string{"qtype", "result", "retries", "resolver"},
	)
	timeoutCounter := prometheus.NewCounterVec(
		prometheus.CounterOpts{
			Name: "dns_timeout",
			Help: "Counter of various types of DNS query timeouts",
		},
		[]string{"qtype", "type", "resolver", "isTLD"},
	)
	idMismatchCounter := prometheus.NewCounterVec(
		prometheus.CounterOpts{
			Name: "dns_id_mismatch",
			Help: "Counter of DNS ErrId errors sliced by query type and resolver",
		},
		[]string{"qtype", "resolver"},
	)
	stats.MustRegister(queryTime, totalLookupTime, timeoutCounter, idMismatchCounter)
	return &impl{
		dnsClient:                client,
		servers:                  servers,
		allowRestrictedAddresses: false,
		maxTries:                 maxTries,
		clk:                      clk,
		queryTime:                queryTime,
		totalLookupTime:          totalLookupTime,
		timeoutCounter:           timeoutCounter,
		idMismatchCounter:        idMismatchCounter,
		log:                      log,
	}
}
// NewTest constructs a new DNS resolver object that utilizes the
// provided list of DNS servers for resolution and will allow loopback addresses.
// This constructor should *only* be called from tests (unit or integration).
//
// It delegates to New and then flips allowRestrictedAddresses, so lookup
// results containing reserved/loopback IPs are not filtered out.
func NewTest(
	readTimeout time.Duration,
	servers ServerProvider,
	stats prometheus.Registerer,
	clk clock.Clock,
	maxTries int,
	userAgent string,
	log blog.Logger,
	tlsConfig *tls.Config,
) Client {
	resolver := New(readTimeout, servers, stats, clk, maxTries, userAgent, log, tlsConfig)
	resolver.(*impl).allowRestrictedAddresses = true
	return resolver
}
// exchangeOne performs a single DNS exchange with a randomly chosen server
// out of the server list, returning the response, time, and error (if any).
// We assume that the upstream resolver requests and validates DNSSEC records
// itself.
//
// Temporary *url.Error failures are retried up to maxTries total attempts,
// rotating to the next server in the list on each retry. The returned
// `resolver` is the host:port of the last server tried.
func (dnsClient *impl) exchangeOne(ctx context.Context, hostname string, qtype uint16) (resp *dns.Msg, resolver string, err error) {
	m := new(dns.Msg)
	// Set question type
	m.SetQuestion(dns.Fqdn(hostname), qtype)
	// Set the AD bit in the query header so that the resolver knows that
	// we are interested in this bit in the response header. If this isn't
	// set the AD bit in the response is useless (RFC 6840 Section 5.7).
	// This has no security implications, it simply allows us to gather
	// metrics about the percentage of responses that are secured with
	// DNSSEC.
	m.AuthenticatedData = true
	// Tell the resolver that we're willing to receive responses up to 4096 bytes.
	// This happens sometimes when there are a very large number of CAA records
	// present.
	m.SetEdns0(4096, false)
	servers, err := dnsClient.servers.Addrs()
	if err != nil {
		return nil, "", fmt.Errorf("failed to list DNS servers: %w", err)
	}
	// Addrs() is expected to return servers in varying order, so always
	// starting from index 0 still spreads load across calls.
	chosenServerIndex := 0
	chosenServer := servers[chosenServerIndex]
	resolver = chosenServer
	// Strip off the IP address part of the server address because
	// we talk to the same server on multiple ports, and don't want
	// to blow up the cardinality.
	chosenServerIP, _, err := net.SplitHostPort(chosenServer)
	if err != nil {
		return
	}
	start := dnsClient.clk.Now()
	client := dnsClient.dnsClient
	qtypeStr := dns.TypeToString[qtype]
	tries := 1
	// Record the total lookup time (including all retries) once, on the way out.
	defer func() {
		result := "failed"
		if resp != nil {
			result = dns.RcodeToString[resp.Rcode]
		}
		dnsClient.totalLookupTime.With(prometheus.Labels{
			"qtype":    qtypeStr,
			"result":   result,
			"retries":  strconv.Itoa(tries),
			"resolver": chosenServerIP,
		}).Observe(dnsClient.clk.Since(start).Seconds())
	}()
	for {
		// Buffered (size 1) so the query goroutine can always complete its
		// send and exit, even if we return early on context cancellation.
		ch := make(chan dnsResp, 1)
		// Strip off the IP address part of the server address because
		// we talk to the same server on multiple ports, and don't want
		// to blow up the cardinality.
		// Note: validateServerAddress() has already checked net.SplitHostPort()
		// and ensures that chosenServer can't be a bare port, e.g. ":1337"
		chosenServerIP, _, err = net.SplitHostPort(chosenServer)
		if err != nil {
			return
		}
		go func() {
			rsp, rtt, err := client.Exchange(m, chosenServer)
			result := "failed"
			if rsp != nil {
				result = dns.RcodeToString[rsp.Rcode]
			}
			if err != nil {
				logDNSError(dnsClient.log, chosenServer, hostname, m, rsp, err)
				if err == dns.ErrId {
					dnsClient.idMismatchCounter.With(prometheus.Labels{
						"qtype":    qtypeStr,
						"resolver": chosenServerIP,
					}).Inc()
				}
			}
			// Per-attempt latency metric, regardless of outcome.
			dnsClient.queryTime.With(prometheus.Labels{
				"qtype":    qtypeStr,
				"result":   result,
				"resolver": chosenServerIP,
			}).Observe(rtt.Seconds())
			ch <- dnsResp{m: rsp, err: err}
		}()
		select {
		case <-ctx.Done():
			// Classify the context error for the timeout metric before
			// propagating it to the caller.
			if ctx.Err() == context.DeadlineExceeded {
				dnsClient.timeoutCounter.With(prometheus.Labels{
					"qtype":    qtypeStr,
					"type":     "deadline exceeded",
					"resolver": chosenServerIP,
					"isTLD":    isTLD(hostname),
				}).Inc()
			} else if ctx.Err() == context.Canceled {
				dnsClient.timeoutCounter.With(prometheus.Labels{
					"qtype":    qtypeStr,
					"type":     "canceled",
					"resolver": chosenServerIP,
					"isTLD":    isTLD(hostname),
				}).Inc()
			} else {
				dnsClient.timeoutCounter.With(prometheus.Labels{
					"qtype":    qtypeStr,
					"type":     "unknown",
					"resolver": chosenServerIP,
				}).Inc()
			}
			err = ctx.Err()
			return
		case r := <-ch:
			if r.err != nil {
				var isRetryable bool
				// According to the http package documentation, retryable
				// errors emitted by the http package are of type *url.Error.
				var urlErr *url.Error
				isRetryable = errors.As(r.err, &urlErr) && urlErr.Temporary()
				hasRetriesLeft := tries < dnsClient.maxTries
				if isRetryable && hasRetriesLeft {
					tries++
					// Chose a new server to retry the query with by incrementing the
					// chosen server index modulo the number of servers. This ensures that
					// if one dns server isn't available we retry with the next in the
					// list.
					chosenServerIndex = (chosenServerIndex + 1) % len(servers)
					chosenServer = servers[chosenServerIndex]
					resolver = chosenServer
					continue
				} else if isRetryable && !hasRetriesLeft {
					dnsClient.timeoutCounter.With(prometheus.Labels{
						"qtype":    qtypeStr,
						"type":     "out of retries",
						"resolver": chosenServerIP,
						"isTLD":    isTLD(hostname),
					}).Inc()
				}
			}
			resp, err = r.m, r.err
			return
		}
	}
}
// isTLD returns a simplified view of whether something is a TLD: does it
// have any dots in it? The answer is rendered as the string "true" or
// "false" because it is meant solely for use as a Prometheus label value.
func isTLD(hostname string) string {
	if !strings.Contains(hostname, ".") {
		return "true"
	}
	return "false"
}
// dnsResp pairs a DNS response message with the error from the exchange,
// for delivery over a channel from the query goroutine in exchangeOne.
type dnsResp struct {
	m   *dns.Msg
	err error
}
// LookupTXT sends a DNS query to find all TXT records associated with the
// provided hostname. It returns the joined character-strings of each TXT
// answer along with the resolver address that served the query.
func (dnsClient *impl) LookupTXT(ctx context.Context, hostname string) ([]string, ResolverAddrs, error) {
	qtype := dns.TypeTXT
	resp, resolver, err := dnsClient.exchangeOne(ctx, hostname, qtype)
	wrapped := wrapErr(qtype, hostname, resp, err)
	if wrapped != nil {
		return nil, ResolverAddrs{resolver}, wrapped
	}
	// Collect every TXT answer, joining its character-strings into one value.
	var txt []string
	for _, rr := range resp.Answer {
		if rr.Header().Rrtype != qtype {
			continue
		}
		txtRec, ok := rr.(*dns.TXT)
		if !ok {
			continue
		}
		txt = append(txt, strings.Join(txtRec.Txt, ""))
	}
	return txt, ResolverAddrs{resolver}, err
}
// lookupIP performs a single A or AAAA query (per ipType) for hostname and
// returns the raw answer RRs. The resolver address is prefixed with "A:" or
// "AAAA:" so LookupHost callers can tell which query used which resolver.
func (dnsClient *impl) lookupIP(ctx context.Context, hostname string, ipType uint16) ([]dns.RR, string, error) {
	resp, resolver, err := dnsClient.exchangeOne(ctx, hostname, ipType)
	// Tag the resolver with the query type; leave it empty if no resolver
	// was chosen (e.g. listing the server addresses failed).
	switch ipType {
	case dns.TypeA:
		if resolver != "" {
			resolver = "A:" + resolver
		}
	case dns.TypeAAAA:
		if resolver != "" {
			resolver = "AAAA:" + resolver
		}
	}
	errWrap := wrapErr(ipType, hostname, resp, err)
	if errWrap != nil {
		return nil, resolver, errWrap
	}
	return resp.Answer, resolver, nil
}
// LookupHost sends a DNS query to find all A and AAAA records associated with
// the provided hostname. This method assumes that the external resolver will
// chase CNAME/DNAME aliases and return relevant records. It will retry
// requests in the case of temporary network errors. It returns an error if
// both the A and AAAA lookups fail or are empty, but succeeds otherwise.
func (dnsClient *impl) LookupHost(ctx context.Context, hostname string) ([]netip.Addr, ResolverAddrs, error) {
	var recordsA, recordsAAAA []dns.RR
	var errA, errAAAA error
	var resolverA, resolverAAAA string
	// Issue the A and AAAA queries concurrently; each goroutine writes only
	// its own result variables, so no extra locking is needed.
	var wg sync.WaitGroup
	wg.Add(1)
	go func() {
		defer wg.Done()
		recordsA, resolverA, errA = dnsClient.lookupIP(ctx, hostname, dns.TypeA)
	}()
	wg.Add(1)
	go func() {
		defer wg.Done()
		recordsAAAA, resolverAAAA, errAAAA = dnsClient.lookupIP(ctx, hostname, dns.TypeAAAA)
	}()
	wg.Wait()
	// Drop empty resolver entries (set when a lookup never chose a server).
	resolvers := ResolverAddrs{resolverA, resolverAAAA}
	resolvers = slices.DeleteFunc(resolvers, func(a string) bool {
		return a == ""
	})
	var addrsA []netip.Addr
	if errA == nil {
		for _, answer := range recordsA {
			if answer.Header().Rrtype == dns.TypeA {
				a, ok := answer.(*dns.A)
				if ok && a.A.To4() != nil {
					// NOTE(review): a.A may still be a 16-byte slice here, in
					// which case AddrFromSlice yields an IPv4-mapped IPv6
					// address — TODO confirm the library normalizes to 4 bytes.
					netIP, ok := netip.AddrFromSlice(a.A)
					// Skip reserved addresses unless explicitly allowed (tests).
					if ok && (iana.IsReservedAddr(netIP) == nil || dnsClient.allowRestrictedAddresses) {
						addrsA = append(addrsA, netIP)
					}
				}
			}
		}
		if len(addrsA) == 0 {
			errA = fmt.Errorf("no valid A records found for %s", hostname)
		}
	}
	var addrsAAAA []netip.Addr
	if errAAAA == nil {
		for _, answer := range recordsAAAA {
			if answer.Header().Rrtype == dns.TypeAAAA {
				aaaa, ok := answer.(*dns.AAAA)
				if ok && aaaa.AAAA.To16() != nil {
					netIP, ok := netip.AddrFromSlice(aaaa.AAAA)
					if ok && (iana.IsReservedAddr(netIP) == nil || dnsClient.allowRestrictedAddresses) {
						addrsAAAA = append(addrsAAAA, netIP)
					}
				}
			}
		}
		if len(addrsAAAA) == 0 {
			errAAAA = fmt.Errorf("no valid AAAA records found for %s", hostname)
		}
	}
	if errA != nil && errAAAA != nil {
		// Construct a new error from both underlying errors. We can only use %w for
		// one of them, because the go error unwrapping protocol doesn't support
		// branching. We don't use ProblemDetails and SubProblemDetails here, because
		// this error will get wrapped in a DNSError and further munged by higher
		// layers in the stack.
		return nil, resolvers, fmt.Errorf("%w; %s", errA, errAAAA)
	}
	return append(addrsA, addrsAAAA...), resolvers, nil
}
// LookupCAA sends a DNS query to find all CAA records associated with
// the provided hostname and the complete dig-style RR `response`. This
// response is quite verbose, however it's only populated when the CAA
// response is non-empty.
func (dnsClient *impl) LookupCAA(ctx context.Context, hostname string) ([]*dns.CAA, string, ResolverAddrs, error) {
	dnsType := dns.TypeCAA
	r, resolver, err := dnsClient.exchangeOne(ctx, hostname, dnsType)
	// Special case: when checking CAA for non-TLD names, treat NXDOMAIN as a
	// successful response containing an empty set of records. This can come up in
	// situations where records were provisioned for validation (e.g. TXT records
	// for DNS-01 challenge) and then removed after validation but before CAA
	// rechecking. But allow NXDOMAIN for TLDs to fall through to the error code
	// below, so we don't issue for gTLDs that have been removed by ICANN.
	if err == nil && r.Rcode == dns.RcodeNameError && strings.Contains(hostname, ".") {
		return nil, "", ResolverAddrs{resolver}, nil
	}
	errWrap := wrapErr(dnsType, hostname, r, err)
	if errWrap != nil {
		return nil, "", ResolverAddrs{resolver}, errWrap
	}
	// Collect only the CAA records from the answer section.
	var CAAs []*dns.CAA
	for _, answer := range r.Answer {
		if caaR, ok := answer.(*dns.CAA); ok {
			CAAs = append(CAAs, caaR)
		}
	}
	// Only populate the verbose dig-style dump when records were found.
	var response string
	if len(CAAs) > 0 {
		response = r.String()
	}
	return CAAs, response, ResolverAddrs{resolver}, nil
}
// logDNSError logs the provided err result from making a query for hostname to
// the chosenServer. If the err is a `dns.ErrId` instance then the Base64
// encoded bytes of the query (and if not-nil, the response) in wire format
// is logged as well. This function is called from exchangeOne only for the case
// where an error occurs querying a hostname that indicates a problem between
// the VA and the chosenServer.
func logDNSError(
	logger blog.Logger,
	chosenServer string,
	hostname string,
	msg, resp *dns.Msg,
	underlying error) {
	// We don't expect logDNSError to be called with a nil msg or err but
	// if it happens return early. We allow resp to be nil.
	if msg == nil || len(msg.Question) == 0 || underlying == nil {
		return
	}
	queryType := dns.TypeToString[msg.Question[0].Qtype]
	// If the error indicates there was a query/response ID mismatch then we want
	// to log more detail.
	// NOTE(review): this is a sentinel == comparison; a wrapped dns.ErrId
	// would not match — errors.Is would be more tolerant. TODO confirm
	// callers never wrap before calling.
	if underlying == dns.ErrId {
		packedMsgBytes, err := msg.Pack()
		if err != nil {
			logger.Errf("logDNSError failed to pack msg: %v", err)
			return
		}
		encodedMsg := base64.StdEncoding.EncodeToString(packedMsgBytes)
		var encodedResp string
		var respQname string
		if resp != nil {
			packedRespBytes, err := resp.Pack()
			if err != nil {
				logger.Errf("logDNSError failed to pack resp: %v", err)
				return
			}
			encodedResp = base64.StdEncoding.EncodeToString(packedRespBytes)
			// Surface the first answer's name so operators can see which
			// query the mismatched response actually answered.
			if len(resp.Answer) > 0 && resp.Answer[0].Header() != nil {
				respQname = resp.Answer[0].Header().Name
			}
		}
		logger.Infof(
			"logDNSError ID mismatch chosenServer=[%s] hostname=[%s] respHostname=[%s] queryType=[%s] msg=[%s] resp=[%s] err=[%s]",
			chosenServer,
			hostname,
			respQname,
			queryType,
			encodedMsg,
			encodedResp,
			underlying)
	} else {
		// Otherwise log a general DNS error
		logger.Infof("logDNSError chosenServer=[%s] hostname=[%s] queryType=[%s] err=[%s]",
			chosenServer,
			hostname,
			queryType,
			underlying)
	}
}
// dohExchanger implements the exchanger interface over DNS-over-HTTPS
// (RFC 8484 POST wire format).
type dohExchanger struct {
	clk       clock.Clock
	hc        http.Client // HTTP client with read timeout and TLS config applied by New
	userAgent string      // optional User-Agent header for outbound queries
}
// Exchange sends a DoH query to the provided DoH server and returns the
// unpacked response along with the elapsed time, measured from just before
// the HTTP request is issued (packing time is excluded).
func (d *dohExchanger) Exchange(query *dns.Msg, server string) (*dns.Msg, time.Duration, error) {
	q, err := query.Pack()
	if err != nil {
		return nil, 0, err
	}

	// The default Unbound URL template. Named queryURL rather than url so it
	// doesn't shadow the imported net/url package.
	queryURL := fmt.Sprintf("https://%s/dns-query", server)
	req, err := http.NewRequest("POST", queryURL, strings.NewReader(string(q)))
	if err != nil {
		return nil, 0, err
	}
	req.Header.Set("Content-Type", "application/dns-message")
	req.Header.Set("Accept", "application/dns-message")
	if len(d.userAgent) > 0 {
		req.Header.Set("User-Agent", d.userAgent)
	}

	start := d.clk.Now()
	resp, err := d.hc.Do(req)
	if err != nil {
		return nil, d.clk.Since(start), err
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		return nil, d.clk.Since(start), fmt.Errorf("doh: http status %d", resp.StatusCode)
	}

	// Read the whole wire-format response, then unpack it.
	b, err := io.ReadAll(resp.Body)
	if err != nil {
		return nil, d.clk.Since(start), fmt.Errorf("doh: reading response body: %w", err)
	}
	response := new(dns.Msg)
	err = response.Unpack(b)
	if err != nil {
		return nil, d.clk.Since(start), fmt.Errorf("doh: unpacking response: %w", err)
	}

	return response, d.clk.Since(start), nil
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/letsencrypt/boulder/bdns/servers.go | third-party/github.com/letsencrypt/boulder/bdns/servers.go | package bdns
import (
"context"
"errors"
"fmt"
"math/rand/v2"
"net"
"net/netip"
"strconv"
"sync"
"time"
"github.com/miekg/dns"
"github.com/prometheus/client_golang/prometheus"
"github.com/letsencrypt/boulder/cmd"
)
// ServerProvider represents a type which can provide a list of addresses for
// the bdns to use as DNS resolvers. Different implementations may provide
// different strategies for providing addresses, and may provide different kinds
// of addresses (e.g. host:port combos vs IP addresses).
type ServerProvider interface {
	// Addrs returns the current list of resolver addresses.
	Addrs() ([]string, error)
	// Stop shuts down any background refresh the provider runs.
	Stop()
}

// staticProvider stores a list of host:port combos, and provides that whole
// list in randomized order when asked for addresses. This replicates the old
// behavior of the bdns.impl's servers field.
type staticProvider struct {
	servers []string // validated host:port resolver addresses
}

// Compile-time check that staticProvider satisfies ServerProvider.
var _ ServerProvider = &staticProvider{}
// validateServerAddress ensures that a given server address is formatted in
// such a way that it can be dialed. The provided server address must include a
// host/IP and port separated by colon. Additionally, if the host is a literal
// IPv6 address, it must be enclosed in square brackets.
// (https://golang.org/src/net/dial.go?s=9833:9881#L281)
func validateServerAddress(address string) error {
	// Ensure the host and port portions of `address` can be split.
	host, port, err := net.SplitHostPort(address)
	if err != nil {
		return err
	}
	// Ensure `address` contains both a `host` and `port` portion.
	if host == "" || port == "" {
		return errors.New("port cannot be missing")
	}
	// Ensure the `port` portion of `address` is a valid port.
	portNum, err := strconv.Atoi(port)
	if err != nil {
		return fmt.Errorf("parsing port number: %s", err)
	}
	if portNum <= 0 || portNum > 65535 {
		// Port 0 is rejected by the check above, so the valid range starts
		// at 1 (the previous message incorrectly claimed 0 was allowed).
		return errors.New("port must be an integer between 1 - 65535")
	}
	// Ensure the `host` portion of `address` is a valid FQDN or IP address.
	_, err = netip.ParseAddr(host)
	// NOTE(review): dns.Fqdn appends a trailing dot, so dns.IsFqdn(dns.Fqdn(host))
	// appears to be true for any non-empty host, making the branch below
	// unreachable — confirm upstream intent before tightening.
	FQDN := dns.IsFqdn(dns.Fqdn(host))
	if err != nil && !FQDN {
		return errors.New("host is not an FQDN or IP address")
	}
	return nil
}
// NewStaticProvider builds a staticProvider from the given host:port strings,
// validating each entry up front. It returns an error naming the first
// address that fails validation.
func NewStaticProvider(servers []string) (*staticProvider, error) {
	var validated []string
	for _, addr := range servers {
		if err := validateServerAddress(addr); err != nil {
			return nil, fmt.Errorf("server address %q invalid: %s", addr, err)
		}
		validated = append(validated, addr)
	}
	return &staticProvider{servers: validated}, nil
}
// Addrs returns the configured servers in a freshly randomized order, or an
// error when no servers were configured.
func (sp *staticProvider) Addrs() ([]string, error) {
	if len(sp.servers) == 0 {
		return nil, fmt.Errorf("no servers configured")
	}
	// Build a new slice ordered by a random permutation of the indices.
	shuffled := make([]string, len(sp.servers))
	for dst, src := range rand.Perm(len(sp.servers)) {
		shuffled[dst] = sp.servers[src]
	}
	return shuffled, nil
}
func (sp *staticProvider) Stop() {}
// dynamicProvider uses DNS to look up the set of IP addresses which correspond
// to its single host. It returns this list in random order when asked for
// addresses, and refreshes it regularly using a goroutine started by its
// constructor.
type dynamicProvider struct {
	// dnsAuthority is the single <hostname|IPv4|[IPv6]>:<port> of the DNS
	// server to be used for resolution of DNS backends. If the address contains
	// a hostname it will be resolved via the system DNS. If the port is left
	// unspecified it will default to '53'. If this field is left unspecified
	// the system DNS will be used for resolution of DNS backends.
	dnsAuthority string
	// service is the service name to look up SRV records for within the domain.
	// If this field is left unspecified 'dns' will be used as the service name.
	service string
	// proto is the IP protocol (tcp or udp) to look up SRV records for.
	proto string
	// domain is the name to look up SRV records within.
	domain string
	// A map of IP addresses (results of A record lookups for SRV Targets) to
	// ports (Port fields in SRV records) associated with those addresses.
	addrs map[string][]uint16
	// cancel is closed by Stop() to tell the run() goroutine to exit.
	cancel chan interface{}
	// mu guards addrs: update() replaces the map under the write lock and
	// Addrs() reads it under the read lock.
	mu sync.RWMutex
	// refresh is the interval between SRV re-resolutions in run().
	refresh time.Duration
	// updateCounter counts update() attempts, labeled by "success".
	updateCounter *prometheus.CounterVec
}
// ParseTarget takes the user input target string and default port, returns
// formatted host and port info. If target doesn't specify a port, set the port
// to be the defaultPort. If target is in IPv6 format and host-name is enclosed
// in square brackets, brackets are stripped when setting the host.
//
// Examples:
//   - target: "www.google.com" defaultPort: "443" returns host: "www.google.com", port: "443"
//   - target: "ipv4-host:80" defaultPort: "443" returns host: "ipv4-host", port: "80"
//   - target: "[ipv6-host]" defaultPort: "443" returns host: "ipv6-host", port: "443"
//   - target: ":80" defaultPort: "443" returns host: "localhost", port: "80"
//
// This function is copied from:
// https://github.com/grpc/grpc-go/blob/master/internal/resolver/dns/dns_resolver.go
// It has been minimally modified to fit our code style.
func ParseTarget(target, defaultPort string) (host, port string, err error) {
	if target == "" {
		return "", "", errors.New("missing address")
	}
	// A bare IPv4 or bracket-less IPv6 literal carries no port of its own,
	// so pair it with the default.
	if net.ParseIP(target) != nil {
		return target, defaultPort, nil
	}
	host, port, err = net.SplitHostPort(target)
	if err == nil {
		switch {
		case port == "":
			// A trailing separator with no port (e.g. "[::1]:") is invalid.
			return "", "", errors.New("missing port after port-separator colon")
		case host == "":
			// Keep consistent with net.Dial(): ":80" means the local system.
			return "localhost", port, nil
		default:
			// Fully specified, e.g. ipv4-host:port, [ipv6-host]:port, name:port.
			return host, port, nil
		}
	}
	// No port was present; retry the split with the default port appended.
	host, port, err = net.SplitHostPort(target + ":" + defaultPort)
	if err != nil {
		return "", "", fmt.Errorf("invalid target address %v, error info: %v", target, err)
	}
	return host, port, nil
}
var _ ServerProvider = &dynamicProvider{}
// StartDynamicProvider constructs a new dynamicProvider and starts its
// auto-update goroutine. The auto-update process queries DNS for SRV records
// at refresh intervals and uses the resulting IP/port combos to populate the
// list returned by Addrs. The update process ignores the Priority and Weight
// attributes of the SRV records.
//
// `proto` is the IP protocol (tcp or udp) to look up SRV records for.
func StartDynamicProvider(c *cmd.DNSProvider, refresh time.Duration, proto string) (*dynamicProvider, error) {
	if c.SRVLookup.Domain == "" {
		return nil, fmt.Errorf("'domain' cannot be empty")
	}
	service := c.SRVLookup.Service
	if service == "" {
		// Default to "dns" if no service is specified. This is the default
		// service name for DNS servers.
		service = "dns"
	}
	// Normalize the configured authority to host:port (defaulting the port to
	// 53) and make sure the result is dialable.
	host, port, err := ParseTarget(c.DNSAuthority, "53")
	if err != nil {
		return nil, err
	}
	dnsAuthority := net.JoinHostPort(host, port)
	err = validateServerAddress(dnsAuthority)
	if err != nil {
		return nil, err
	}
	// NOTE(review): updateCounter is created but not registered with any
	// prometheus.Registerer here — confirm registration happens elsewhere.
	dp := dynamicProvider{
		dnsAuthority: dnsAuthority,
		service:      service,
		proto:        proto,
		domain:       c.SRVLookup.Domain,
		addrs:        make(map[string][]uint16),
		cancel:       make(chan interface{}),
		refresh:      refresh,
		updateCounter: prometheus.NewCounterVec(
			prometheus.CounterOpts{
				Name: "dns_update",
				Help: "Counter of attempts to update a dynamic provider",
			},
			[]string{"success"},
		),
	}
	// Update once immediately, so we can know whether that was successful, then
	// kick off the long-running update goroutine.
	err = dp.update()
	if err != nil {
		return nil, fmt.Errorf("failed to start dynamic provider: %w", err)
	}
	go dp.run()
	return &dp, nil
}
// run loops forever, calling dp.update() every dp.refresh interval. Does not
// halt until the dp.cancel channel is closed, so should be run in a goroutine.
// Each update attempt increments dp.updateCounter with a success/failure label.
func (dp *dynamicProvider) run() {
	t := time.NewTicker(dp.refresh)
	// Stop the ticker when this goroutine exits so its resources are released
	// promptly rather than lingering after Stop() is called.
	defer t.Stop()
	for {
		select {
		case <-t.C:
			err := dp.update()
			if err != nil {
				dp.updateCounter.With(prometheus.Labels{
					"success": "false",
				}).Inc()
				continue
			}
			dp.updateCounter.With(prometheus.Labels{
				"success": "true",
			}).Inc()
		case <-dp.cancel:
			return
		}
	}
}
// update performs the SRV and A record queries necessary to map the given DNS
// domain name to a set of cacheable IP addresses and ports, and stores the
// results in dp.addrs.
func (dp *dynamicProvider) update() error {
	// Bound the whole refresh to half the refresh interval. (Presumably this
	// is to keep one update from overlapping the next tick — confirm against
	// the run() loop's intent.)
	ctx, cancel := context.WithTimeout(context.Background(), dp.refresh/2)
	defer cancel()
	// Custom resolver that always dials dp.dnsAuthority, regardless of the
	// address the Go resolver would otherwise choose.
	resolver := &net.Resolver{
		PreferGo: true,
		Dial: func(ctx context.Context, network, address string) (net.Conn, error) {
			d := &net.Dialer{}
			return d.DialContext(ctx, network, dp.dnsAuthority)
		},
	}
	// RFC 2782 formatted SRV record being queried e.g. "_service._proto.name."
	// Note: `record` is only used in error messages below; the lookup itself
	// takes service/proto/domain as separate arguments.
	record := fmt.Sprintf("_%s._%s.%s.", dp.service, dp.proto, dp.domain)
	_, srvs, err := resolver.LookupSRV(ctx, dp.service, dp.proto, dp.domain)
	if err != nil {
		return fmt.Errorf("during SRV lookup of %q: %w", record, err)
	}
	if len(srvs) == 0 {
		return fmt.Errorf("SRV lookup of %q returned 0 results", record)
	}
	// Build the replacement map fully before taking the lock, so a failed
	// lookup leaves the previous addrs intact.
	addrPorts := make(map[string][]uint16)
	for _, srv := range srvs {
		addrs, err := resolver.LookupHost(ctx, srv.Target)
		if err != nil {
			return fmt.Errorf("during A/AAAA lookup of target %q from SRV record %q: %w", srv.Target, record, err)
		}
		for _, addr := range addrs {
			joinedHostPort := net.JoinHostPort(addr, fmt.Sprint(srv.Port))
			err := validateServerAddress(joinedHostPort)
			if err != nil {
				return fmt.Errorf("invalid addr %q from SRV record %q: %w", joinedHostPort, record, err)
			}
			addrPorts[addr] = append(addrPorts[addr], srv.Port)
		}
	}
	// Swap in the fresh map under the write lock; Addrs() reads under RLock.
	dp.mu.Lock()
	dp.addrs = addrPorts
	dp.mu.Unlock()
	return nil
}
// Addrs returns a shuffled list of IP/port pairs, with the guarantee that no
// two IP/port pairs will share the same IP.
func (dp *dynamicProvider) Addrs() ([]string, error) {
	var combined []string
	dp.mu.RLock()
	for ip, ports := range dp.addrs {
		// Pick one of this IP's ports at random so each IP appears once.
		chosen := ports[rand.IntN(len(ports))]
		combined = append(combined, net.JoinHostPort(ip, fmt.Sprint(chosen)))
	}
	dp.mu.RUnlock()
	rand.Shuffle(len(combined), func(a, b int) {
		combined[a], combined[b] = combined[b], combined[a]
	})
	return combined, nil
}
// Stop tells the background update goroutine to cease. It does not wait for
// confirmation that it has done so. Calling Stop more than once would close
// an already-closed channel and panic, so it must be called at most once.
func (dp *dynamicProvider) Stop() {
	close(dp.cancel)
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/letsencrypt/boulder/bdns/problem.go | third-party/github.com/letsencrypt/boulder/bdns/problem.go | package bdns
import (
"context"
"errors"
"fmt"
"net"
"net/url"
"github.com/miekg/dns"
)
// Error wraps a DNS error with various relevant information
type Error struct {
	// recordType is the DNS RR type that was being queried (e.g. dns.TypeA).
	recordType uint16
	// hostname is the name that was being looked up.
	hostname string
	// Exactly one of rCode or underlying should be set.
	underlying error
	rCode      int
	// Optional: If the resolver returned extended error information, it will be stored here.
	// https://www.rfc-editor.org/rfc/rfc8914
	extended *dns.EDNS0_EDE
}
// extendedDNSError returns non-nil if the input message contained an OPT RR
// with an EDE option. https://www.rfc-editor.org/rfc/rfc8914.
func extendedDNSError(msg *dns.Msg) *dns.EDNS0_EDE {
	opt := msg.IsEdns0()
	if opt == nil {
		// No OPT record at all, so no extended error can be present.
		return nil
	}
	for _, o := range opt.Option {
		if ede, ok := o.(*dns.EDNS0_EDE); ok {
			return ede
		}
	}
	return nil
}
// wrapErr returns a non-nil error if err is non-nil or if resp.Rcode is not
// dns.RcodeSuccess. The error includes appropriate details about the DNS
// query that failed, including any RFC 8914 extended error information the
// resolver attached to a non-success response.
func wrapErr(queryType uint16, hostname string, resp *dns.Msg, err error) error {
	switch {
	case err != nil:
		// Transport-level failure: there is no response to inspect.
		return Error{
			recordType: queryType,
			hostname:   hostname,
			underlying: err,
			extended:   nil,
		}
	case resp.Rcode != dns.RcodeSuccess:
		// The exchange completed but the server reported a DNS-level error.
		return Error{
			recordType: queryType,
			hostname:   hostname,
			rCode:      resp.Rcode,
			underlying: nil,
			extended:   extendedDNSError(resp),
		}
	default:
		return nil
	}
}
// A copy of miekg/dns's mapping of error codes to strings. We tweak it slightly so all DNSSEC-related
// errors say "DNSSEC" at the beginning.
// https://pkg.go.dev/github.com/miekg/dns#ExtendedErrorCodeToString
// Also note that not all of these codes can currently be emitted by Unbound. See Unbound's
// announcement post for EDE: https://blog.nlnetlabs.nl/extended-dns-error-support-for-unbound/
// Keys are RFC 8914 INFO-CODE values; the strings are used verbatim in
// Error.Error's output.
var extendedErrorCodeToString = map[uint16]string{
	dns.ExtendedErrorCodeOther:                      "Other",
	dns.ExtendedErrorCodeUnsupportedDNSKEYAlgorithm: "DNSSEC: Unsupported DNSKEY Algorithm",
	dns.ExtendedErrorCodeUnsupportedDSDigestType:    "DNSSEC: Unsupported DS Digest Type",
	dns.ExtendedErrorCodeStaleAnswer:                "Stale Answer",
	dns.ExtendedErrorCodeForgedAnswer:               "Forged Answer",
	dns.ExtendedErrorCodeDNSSECIndeterminate:        "DNSSEC: Indeterminate",
	dns.ExtendedErrorCodeDNSBogus:                   "DNSSEC: Bogus",
	dns.ExtendedErrorCodeSignatureExpired:           "DNSSEC: Signature Expired",
	dns.ExtendedErrorCodeSignatureNotYetValid:       "DNSSEC: Signature Not Yet Valid",
	dns.ExtendedErrorCodeDNSKEYMissing:              "DNSSEC: DNSKEY Missing",
	dns.ExtendedErrorCodeRRSIGsMissing:              "DNSSEC: RRSIGs Missing",
	dns.ExtendedErrorCodeNoZoneKeyBitSet:            "DNSSEC: No Zone Key Bit Set",
	dns.ExtendedErrorCodeNSECMissing:                "DNSSEC: NSEC Missing",
	dns.ExtendedErrorCodeCachedError:                "Cached Error",
	dns.ExtendedErrorCodeNotReady:                   "Not Ready",
	dns.ExtendedErrorCodeBlocked:                    "Blocked",
	dns.ExtendedErrorCodeCensored:                   "Censored",
	dns.ExtendedErrorCodeFiltered:                   "Filtered",
	dns.ExtendedErrorCodeProhibited:                 "Prohibited",
	dns.ExtendedErrorCodeStaleNXDOMAINAnswer:        "Stale NXDOMAIN Answer",
	dns.ExtendedErrorCodeNotAuthoritative:           "Not Authoritative",
	dns.ExtendedErrorCodeNotSupported:               "Not Supported",
	dns.ExtendedErrorCodeNoReachableAuthority:       "No Reachable Authority",
	dns.ExtendedErrorCodeNetworkError:               "Network Error between Resolver and Authority",
	dns.ExtendedErrorCodeInvalidData:                "Invalid Data",
}
// Error renders the DNS failure as a human-readable "DNS problem" message.
// The detail text is chosen in priority order from the underlying transport
// error (timeout, cancellation, network failure), then the DNS RCODE, then a
// generic server-failure fallback. If the resolver supplied an RFC 8914
// extended error, that summary is used instead of the RCODE-based text.
func (d Error) Error() string {
	var detail, additional string
	if d.underlying != nil {
		var netErr *net.OpError
		var urlErr *url.Error
		if errors.As(d.underlying, &netErr) {
			if netErr.Timeout() {
				detail = detailDNSTimeout
			} else {
				detail = detailDNSNetFailure
			}
			// Note: we check d.underlying here even though `Timeout()` does this because the call to `netErr.Timeout()` above only
			// happens for `*net.OpError` underlying types!
		} else if errors.As(d.underlying, &urlErr) && urlErr.Timeout() {
			// For DOH queries, we can get back a `*url.Error` that wraps the unexported type
			// `http.httpError`. Unfortunately `http.httpError` doesn't wrap any errors (like
			// context.DeadlineExceeded), we can't check for that; instead we need to call Timeout().
			detail = detailDNSTimeout
		} else if errors.Is(d.underlying, context.DeadlineExceeded) {
			detail = detailDNSTimeout
		} else if errors.Is(d.underlying, context.Canceled) {
			detail = detailCanceled
		} else {
			detail = detailServerFailure
		}
	} else if d.rCode != dns.RcodeSuccess {
		detail = dns.RcodeToString[d.rCode]
		// Some RCODEs get an extra friendly explanation appended.
		if explanation, ok := rcodeExplanations[d.rCode]; ok {
			additional = " - " + explanation
		}
	} else {
		detail = detailServerFailure
	}
	// Without extended error info, report the classified detail text.
	if d.extended == nil {
		return fmt.Sprintf("DNS problem: %s looking up %s for %s%s", detail,
			dns.TypeToString[d.recordType], d.hostname, additional)
	}
	// With extended error info, the EDE summary replaces the detail text.
	summary := extendedErrorCodeToString[d.extended.InfoCode]
	if summary == "" {
		summary = fmt.Sprintf("Unknown Extended DNS Error code %d", d.extended.InfoCode)
	}
	result := fmt.Sprintf("DNS problem: looking up %s for %s: %s",
		dns.TypeToString[d.recordType], d.hostname, summary)
	if d.extended.ExtraText != "" {
		result = result + ": " + d.extended.ExtraText
	}
	return result
}
// Detail strings interpolated into Error.Error's "DNS problem" messages.
const detailDNSTimeout = "query timed out"
const detailCanceled = "query timed out (and was canceled)"
const detailDNSNetFailure = "networking error"
const detailServerFailure = "server failure at resolver"
// rcodeExplanations provide additional friendly explanatory text to be included in DNS
// error messages, for select inscrutable RCODEs. The text is appended to the
// message after " - " by Error.Error.
var rcodeExplanations = map[int]string{
	dns.RcodeNameError:     "check that a DNS record exists for this domain",
	dns.RcodeServerFailure: "the domain's nameservers may be malfunctioning",
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/letsencrypt/boulder/bdns/servers_test.go | third-party/github.com/letsencrypt/boulder/bdns/servers_test.go | package bdns
import (
"testing"
"github.com/letsencrypt/boulder/test"
)
// Test_validateServerAddress table-tests validateServerAddress across IPv4,
// IPv6, bare-hostname, and FQDN hosts, each with valid and invalid ports.
// Only the error/no-error outcome is asserted; the specific error text is not
// part of the contract under test.
func Test_validateServerAddress(t *testing.T) {
	type args struct {
		server string
	}
	tests := []struct {
		name    string
		args    args
		wantErr bool
	}{
		// ipv4 cases
		{"ipv4 with port", args{"1.1.1.1:53"}, false},
		// sad path
		{"ipv4 without port", args{"1.1.1.1"}, true},
		{"ipv4 port num missing", args{"1.1.1.1:"}, true},
		{"ipv4 string for port", args{"1.1.1.1:foo"}, true},
		{"ipv4 port out of range high", args{"1.1.1.1:65536"}, true},
		{"ipv4 port out of range low", args{"1.1.1.1:0"}, true},
		// ipv6 cases
		{"ipv6 with port", args{"[2606:4700:4700::1111]:53"}, false},
		// sad path
		{"ipv6 sans brackets", args{"2606:4700:4700::1111:53"}, true},
		{"ipv6 without port", args{"[2606:4700:4700::1111]"}, true},
		{"ipv6 port num missing", args{"[2606:4700:4700::1111]:"}, true},
		{"ipv6 string for port", args{"[2606:4700:4700::1111]:foo"}, true},
		{"ipv6 port out of range high", args{"[2606:4700:4700::1111]:65536"}, true},
		{"ipv6 port out of range low", args{"[2606:4700:4700::1111]:0"}, true},
		// hostname cases
		{"hostname with port", args{"foo:53"}, false},
		// sad path
		{"hostname without port", args{"foo"}, true},
		{"hostname port num missing", args{"foo:"}, true},
		{"hostname string for port", args{"foo:bar"}, true},
		{"hostname port out of range high", args{"foo:65536"}, true},
		{"hostname port out of range low", args{"foo:0"}, true},
		// fqdn cases
		{"fqdn with port", args{"bar.foo.baz:53"}, false},
		// sad path
		{"fqdn without port", args{"bar.foo.baz"}, true},
		{"fqdn port num missing", args{"bar.foo.baz:"}, true},
		{"fqdn string for port", args{"bar.foo.baz:bar"}, true},
		{"fqdn port out of range high", args{"bar.foo.baz:65536"}, true},
		{"fqdn port out of range low", args{"bar.foo.baz:0"}, true},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			err := validateServerAddress(tt.args.server)
			if (err != nil) != tt.wantErr {
				t.Errorf("formatServer() error = %v, wantErr %v", err, tt.wantErr)
				return
			}
		})
	}
}
// Test_resolveDNSAuthority table-tests ParseTarget's normalization of a DNS
// authority string with a default port of "53".
func Test_resolveDNSAuthority(t *testing.T) {
	type want struct {
		host string
		port string
	}
	cases := []struct {
		name    string
		target  string
		want    want
		wantErr bool
	}{
		{"IP4 with port", "10.10.10.10:53", want{"10.10.10.10", "53"}, false},
		{"IP4 without port", "10.10.10.10", want{"10.10.10.10", "53"}, false},
		{"IP6 with port and brackets", "[2606:4700:4700::1111]:53", want{"2606:4700:4700::1111", "53"}, false},
		{"IP6 without port", "2606:4700:4700::1111", want{"2606:4700:4700::1111", "53"}, false},
		{"IP6 with brackets without port", "[2606:4700:4700::1111]", want{"2606:4700:4700::1111", "53"}, false},
		{"hostname with port", "localhost:53", want{"localhost", "53"}, false},
		{"hostname without port", "localhost", want{"localhost", "53"}, false},
		{"only port", ":53", want{"localhost", "53"}, false},
		{"hostname with no port after colon", "localhost:", want{"", ""}, true},
		{"IP4 with no port after colon", "10.10.10.10:", want{"", ""}, true},
		{"IP6 with no port after colon", "[2606:4700:4700::1111]:", want{"", ""}, true},
		{"no hostname or port", "", want{"", ""}, true},
		{"invalid addr", "foo:bar:baz", want{"", ""}, true},
	}
	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {
			host, port, err := ParseTarget(tc.target, "53")
			test.AssertEquals(t, host, tc.want.host)
			test.AssertEquals(t, port, tc.want.port)
			if !tc.wantErr {
				test.AssertNotError(t, err, "unexpected error")
			} else {
				test.AssertError(t, err, "expected error")
			}
		})
	}
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/letsencrypt/boulder/bdns/problem_test.go | third-party/github.com/letsencrypt/boulder/bdns/problem_test.go | package bdns
import (
"context"
"errors"
"net"
"net/url"
"testing"
"github.com/letsencrypt/boulder/test"
"github.com/miekg/dns"
)
// TestError exercises Error.Error's message rendering. Each Error literal is
// positional: {recordType, hostname, underlying, rCode, extended}; an rCode
// of -1 marks "not set" alongside a non-nil underlying error.
func TestError(t *testing.T) {
	testCases := []struct {
		err      error
		expected string
	}{
		{
			&Error{dns.TypeA, "hostname", makeTimeoutError(), -1, nil},
			"DNS problem: query timed out looking up A for hostname",
		}, {
			&Error{dns.TypeMX, "hostname", &net.OpError{Err: errors.New("some net error")}, -1, nil},
			"DNS problem: networking error looking up MX for hostname",
		}, {
			&Error{dns.TypeTXT, "hostname", nil, dns.RcodeNameError, nil},
			"DNS problem: NXDOMAIN looking up TXT for hostname - check that a DNS record exists for this domain",
		}, {
			&Error{dns.TypeTXT, "hostname", context.DeadlineExceeded, -1, nil},
			"DNS problem: query timed out looking up TXT for hostname",
		}, {
			&Error{dns.TypeTXT, "hostname", context.Canceled, -1, nil},
			"DNS problem: query timed out (and was canceled) looking up TXT for hostname",
		}, {
			&Error{dns.TypeCAA, "hostname", nil, dns.RcodeServerFailure, nil},
			"DNS problem: SERVFAIL looking up CAA for hostname - the domain's nameservers may be malfunctioning",
		}, {
			&Error{dns.TypeA, "hostname", nil, dns.RcodeServerFailure, &dns.EDNS0_EDE{InfoCode: 1, ExtraText: "oh no"}},
			"DNS problem: looking up A for hostname: DNSSEC: Unsupported DNSKEY Algorithm: oh no",
		}, {
			&Error{dns.TypeA, "hostname", nil, dns.RcodeServerFailure, &dns.EDNS0_EDE{InfoCode: 6, ExtraText: ""}},
			"DNS problem: looking up A for hostname: DNSSEC: Bogus",
		}, {
			&Error{dns.TypeA, "hostname", nil, dns.RcodeServerFailure, &dns.EDNS0_EDE{InfoCode: 1337, ExtraText: "mysterious"}},
			"DNS problem: looking up A for hostname: Unknown Extended DNS Error code 1337: mysterious",
		}, {
			&Error{dns.TypeCAA, "hostname", nil, dns.RcodeServerFailure, nil},
			"DNS problem: SERVFAIL looking up CAA for hostname - the domain's nameservers may be malfunctioning",
		}, {
			&Error{dns.TypeCAA, "hostname", nil, dns.RcodeServerFailure, nil},
			"DNS problem: SERVFAIL looking up CAA for hostname - the domain's nameservers may be malfunctioning",
		}, {
			&Error{dns.TypeA, "hostname", nil, dns.RcodeFormatError, nil},
			"DNS problem: FORMERR looking up A for hostname",
		}, {
			&Error{dns.TypeA, "hostname", &url.Error{Op: "GET", URL: "https://example.com/", Err: dohTimeoutError{}}, -1, nil},
			"DNS problem: query timed out looking up A for hostname",
		},
	}
	for _, tc := range testCases {
		if tc.err.Error() != tc.expected {
			t.Errorf("got %q, expected %q", tc.err.Error(), tc.expected)
		}
	}
}
// dohTimeoutError is a stub error that reports itself as a timeout, used to
// simulate the timeout errors that a DoH HTTP request can wrap in *url.Error.
type dohTimeoutError struct{}

func (dohTimeoutError) Error() string {
	return "doh no"
}

// Timeout always reports true so the error is classified as a timeout.
func (dohTimeoutError) Timeout() bool {
	return true
}
// TestWrapErr verifies that wrapErr returns nil only for a successful rcode
// with no transport error, and a non-nil error otherwise.
func TestWrapErr(t *testing.T) {
	okMsg := &dns.Msg{MsgHdr: dns.MsgHdr{Rcode: dns.RcodeSuccess}}
	refusedMsg := &dns.Msg{MsgHdr: dns.MsgHdr{Rcode: dns.RcodeRefused}}

	// Successful rcode, no transport error: nil.
	test.AssertNotError(t, wrapErr(dns.TypeA, "hostname", okMsg, nil), "expected success")
	// Non-success rcode: error.
	test.AssertError(t, wrapErr(dns.TypeA, "hostname", refusedMsg, nil), "expected error")
	// Transport error wins even with a successful rcode.
	test.AssertError(t, wrapErr(dns.TypeA, "hostname", okMsg, errors.New("oh no")), "expected error")
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/letsencrypt/boulder/ctpolicy/ctpolicy_test.go | third-party/github.com/letsencrypt/boulder/ctpolicy/ctpolicy_test.go | package ctpolicy
import (
"bytes"
"context"
"errors"
"strings"
"testing"
"time"
"github.com/jmhodges/clock"
"github.com/prometheus/client_golang/prometheus"
"google.golang.org/grpc"
"github.com/letsencrypt/boulder/core"
"github.com/letsencrypt/boulder/ctpolicy/loglist"
berrors "github.com/letsencrypt/boulder/errors"
blog "github.com/letsencrypt/boulder/log"
"github.com/letsencrypt/boulder/metrics"
pubpb "github.com/letsencrypt/boulder/publisher/proto"
"github.com/letsencrypt/boulder/test"
)
// mockPub is a publisher stub whose submissions always succeed, returning a
// one-byte SCT.
type mockPub struct{}

func (mp *mockPub) SubmitToSingleCTWithResult(_ context.Context, _ *pubpb.Request, _ ...grpc.CallOption) (*pubpb.Result, error) {
	return &pubpb.Result{Sct: []byte{0}}, nil
}

// mockFailPub is a publisher stub whose submissions always fail.
type mockFailPub struct{}

func (mp *mockFailPub) SubmitToSingleCTWithResult(_ context.Context, _ *pubpb.Request, _ ...grpc.CallOption) (*pubpb.Result, error) {
	return nil, errors.New("BAD")
}

// mockSlowPub is a publisher stub that blocks until the request context is
// done, then fails.
type mockSlowPub struct{}

func (mp *mockSlowPub) SubmitToSingleCTWithResult(ctx context.Context, _ *pubpb.Request, _ ...grpc.CallOption) (*pubpb.Result, error) {
	<-ctx.Done()
	return nil, errors.New("timed out")
}
// TestGetSCTs table-tests CTPolicy.GetSCTs against publishers that always
// succeed, always fail, or block until the context expires, asserting either
// the returned SCTs or the error text and berrors type.
func TestGetSCTs(t *testing.T) {
	// A context that is already past its deadline, for the timeout case.
	expired, cancel := context.WithDeadline(context.Background(), time.Now())
	defer cancel()
	missingSCTErr := berrors.MissingSCTs
	testCases := []struct {
		name       string
		mock       pubpb.PublisherClient
		logs       loglist.List
		ctx        context.Context
		result     core.SCTDERs
		expectErr  string
		berrorType *berrors.ErrorType
	}{
		{
			name: "basic success case",
			mock: &mockPub{},
			logs: loglist.List{
				{Name: "LogA1", Operator: "OperA", Url: "UrlA1", Key: []byte("KeyA1")},
				{Name: "LogA2", Operator: "OperA", Url: "UrlA2", Key: []byte("KeyA2")},
				{Name: "LogB1", Operator: "OperB", Url: "UrlB1", Key: []byte("KeyB1")},
				{Name: "LogC1", Operator: "OperC", Url: "UrlC1", Key: []byte("KeyC1")},
			},
			ctx:    context.Background(),
			result: core.SCTDERs{[]byte{0}, []byte{0}},
		},
		{
			name: "basic failure case",
			mock: &mockFailPub{},
			logs: loglist.List{
				{Name: "LogA1", Operator: "OperA", Url: "UrlA1", Key: []byte("KeyA1")},
				{Name: "LogA2", Operator: "OperA", Url: "UrlA2", Key: []byte("KeyA2")},
				{Name: "LogB1", Operator: "OperB", Url: "UrlB1", Key: []byte("KeyB1")},
				{Name: "LogC1", Operator: "OperC", Url: "UrlC1", Key: []byte("KeyC1")},
			},
			ctx:        context.Background(),
			expectErr:  "failed to get 2 SCTs, got 4 error(s)",
			berrorType: &missingSCTErr,
		},
		{
			name: "parent context timeout failure case",
			mock: &mockSlowPub{},
			logs: loglist.List{
				{Name: "LogA1", Operator: "OperA", Url: "UrlA1", Key: []byte("KeyA1")},
				{Name: "LogA2", Operator: "OperA", Url: "UrlA2", Key: []byte("KeyA2")},
				{Name: "LogB1", Operator: "OperB", Url: "UrlB1", Key: []byte("KeyB1")},
				{Name: "LogC1", Operator: "OperC", Url: "UrlC1", Key: []byte("KeyC1")},
			},
			ctx:        expired,
			expectErr:  "failed to get 2 SCTs before ctx finished",
			berrorType: &missingSCTErr,
		},
	}
	for _, tc := range testCases {
		t.Run(tc.name, func(t *testing.T) {
			ctp := New(tc.mock, tc.logs, nil, nil, 0, blog.NewMock(), metrics.NoopRegisterer)
			ret, err := ctp.GetSCTs(tc.ctx, []byte{0}, time.Time{})
			// Each case asserts either the result or the error, not both.
			if tc.result != nil {
				test.AssertDeepEquals(t, ret, tc.result)
			} else if tc.expectErr != "" {
				if !strings.Contains(err.Error(), tc.expectErr) {
					t.Errorf("Error %q did not match expected %q", err, tc.expectErr)
				}
				if tc.berrorType != nil {
					test.AssertErrorIs(t, err, *tc.berrorType)
				}
			}
		})
	}
}
// mockFailOnePub is a publisher stub that fails submissions to exactly one
// log URL and succeeds (with a one-byte SCT) for every other log.
type mockFailOnePub struct {
	badURL string
}

func (mp *mockFailOnePub) SubmitToSingleCTWithResult(_ context.Context, req *pubpb.Request, _ ...grpc.CallOption) (*pubpb.Result, error) {
	if req.LogURL != mp.badURL {
		return &pubpb.Result{Sct: []byte{0}}, nil
	}
	return nil, errors.New("BAD")
}
// TestGetSCTsMetrics checks that when one log fails, the winner counter is
// incremented once for each log that actually supplied an SCT.
func TestGetSCTsMetrics(t *testing.T) {
	logs := loglist.List{
		{Name: "LogA1", Operator: "OperA", Url: "UrlA1", Key: []byte("KeyA1")},
		{Name: "LogB1", Operator: "OperB", Url: "UrlB1", Key: []byte("KeyB1")},
		{Name: "LogC1", Operator: "OperC", Url: "UrlC1", Key: []byte("KeyC1")},
	}
	ctp := New(&mockFailOnePub{badURL: "UrlA1"}, logs, nil, nil, 0, blog.NewMock(), metrics.NoopRegisterer)
	_, err := ctp.GetSCTs(context.Background(), []byte{0}, time.Time{})
	test.AssertNotError(t, err, "GetSCTs failed")
	for _, url := range []string{"UrlB1", "UrlC1"} {
		test.AssertMetricWithLabelsEquals(t, ctp.winnerCounter, prometheus.Labels{"url": url, "result": succeeded}, 1)
	}
}
// TestGetSCTsFailMetrics checks that the winner counter records a failure
// both when a log rejects the submission outright and when the parent
// context expires before enough SCTs arrive.
func TestGetSCTsFailMetrics(t *testing.T) {
	// Ensure the proper metrics are incremented when GetSCTs fails.
	ctp := New(&mockFailOnePub{badURL: "UrlA1"}, loglist.List{
		{Name: "LogA1", Operator: "OperA", Url: "UrlA1", Key: []byte("KeyA1")},
	}, nil, nil, 0, blog.NewMock(), metrics.NoopRegisterer)
	_, err := ctp.GetSCTs(context.Background(), []byte{0}, time.Time{})
	test.AssertError(t, err, "GetSCTs should have failed")
	test.AssertErrorIs(t, err, berrors.MissingSCTs)
	test.AssertMetricWithLabelsEquals(t, ctp.winnerCounter, prometheus.Labels{"url": "UrlA1", "result": failed}, 1)
	// Ensure the proper metrics are incremented when GetSCTs times out.
	ctx, cancel := context.WithTimeout(context.Background(), 100*time.Millisecond)
	defer cancel()
	ctp = New(&mockSlowPub{}, loglist.List{
		{Name: "LogA1", Operator: "OperA", Url: "UrlA1", Key: []byte("KeyA1")},
	}, nil, nil, 0, blog.NewMock(), metrics.NoopRegisterer)
	_, err = ctp.GetSCTs(ctx, []byte{0}, time.Time{})
	test.AssertError(t, err, "GetSCTs should have timed out")
	test.AssertErrorIs(t, err, berrors.MissingSCTs)
	test.AssertContains(t, err.Error(), context.DeadlineExceeded.Error())
	test.AssertMetricWithLabelsEquals(t, ctp.winnerCounter, prometheus.Labels{"url": "UrlA1", "result": failed}, 1)
}
// TestLogListMetrics checks that New populates the shard-expiry gauge with
// each log's EndExclusive expressed as seconds since the fake clock's epoch.
func TestLogListMetrics(t *testing.T) {
	fc := clock.NewFake()
	tomorrow := fc.Now().Add(24 * time.Hour)
	nextWeek := fc.Now().Add(7 * 24 * time.Hour)

	// Multiple operator groups with configured logs.
	ctp := New(&mockPub{}, loglist.List{
		{Name: "LogA1", Operator: "OperA", Url: "UrlA1", Key: []byte("KeyA1"), EndExclusive: tomorrow},
		{Name: "LogA2", Operator: "OperA", Url: "UrlA2", Key: []byte("KeyA2"), EndExclusive: nextWeek},
		{Name: "LogB1", Operator: "OperB", Url: "UrlB1", Key: []byte("KeyB1"), EndExclusive: tomorrow},
	}, nil, nil, 0, blog.NewMock(), metrics.NoopRegisterer)

	test.AssertMetricWithLabelsEquals(t, ctp.shardExpiryGauge, prometheus.Labels{"operator": "OperA", "logID": "LogA1"}, 86400)
	test.AssertMetricWithLabelsEquals(t, ctp.shardExpiryGauge, prometheus.Labels{"operator": "OperA", "logID": "LogA2"}, 604800)
	test.AssertMetricWithLabelsEquals(t, ctp.shardExpiryGauge, prometheus.Labels{"operator": "OperB", "logID": "LogB1"}, 86400)
}
// TestCompliantSet table-tests compliantSet's selection of a policy-compliant
// pair of SCTs: it must return either nil or exactly two SCTs from distinct
// operators, at most one of which comes from a tiled log.
func TestCompliantSet(t *testing.T) {
	for _, tc := range []struct {
		name    string
		results []result
		want    core.SCTDERs
	}{
		{
			name:    "nil input",
			results: nil,
			want:    nil,
		},
		{
			name:    "zero length input",
			results: []result{},
			want:    nil,
		},
		{
			name: "only one result",
			results: []result{
				{log: loglist.Log{Operator: "A", Tiled: false}, sct: []byte("sct1")},
			},
			want: nil,
		},
		{
			name: "only one good result",
			results: []result{
				{log: loglist.Log{Operator: "A", Tiled: false}, sct: []byte("sct1")},
				{log: loglist.Log{Operator: "B", Tiled: false}, err: errors.New("oops")},
			},
			want: nil,
		},
		{
			name: "only one operator",
			results: []result{
				{log: loglist.Log{Operator: "A", Tiled: false}, sct: []byte("sct1")},
				{log: loglist.Log{Operator: "A", Tiled: false}, sct: []byte("sct2")},
			},
			want: nil,
		},
		{
			name: "all tiled",
			results: []result{
				{log: loglist.Log{Operator: "A", Tiled: true}, sct: []byte("sct1")},
				{log: loglist.Log{Operator: "B", Tiled: true}, sct: []byte("sct2")},
			},
			want: nil,
		},
		{
			name: "happy path",
			results: []result{
				{log: loglist.Log{Operator: "A", Tiled: false}, err: errors.New("oops")},
				{log: loglist.Log{Operator: "A", Tiled: true}, sct: []byte("sct2")},
				{log: loglist.Log{Operator: "A", Tiled: false}, sct: []byte("sct3")},
				{log: loglist.Log{Operator: "B", Tiled: false}, err: errors.New("oops")},
				{log: loglist.Log{Operator: "B", Tiled: true}, sct: []byte("sct4")},
				{log: loglist.Log{Operator: "B", Tiled: false}, sct: []byte("sct6")},
				{log: loglist.Log{Operator: "C", Tiled: false}, err: errors.New("oops")},
				{log: loglist.Log{Operator: "C", Tiled: true}, sct: []byte("sct8")},
				{log: loglist.Log{Operator: "C", Tiled: false}, sct: []byte("sct9")},
			},
			// The second and sixth results should be picked, because first and fourth
			// are skipped for being errors, and fifth is skipped for also being tiled.
			want: core.SCTDERs{[]byte("sct2"), []byte("sct6")},
		},
	} {
		t.Run(tc.name, func(t *testing.T) {
			got := compliantSet(tc.results)
			if len(got) != len(tc.want) {
				t.Fatalf("compliantSet(%#v) returned %d SCTs, but want %d", tc.results, len(got), len(tc.want))
			}
			// Order matters: the chosen SCTs must match position by position.
			for i, sct := range tc.want {
				if !bytes.Equal(got[i], sct) {
					t.Errorf("compliantSet(%#v) returned unexpected SCT at index %d", tc.results, i)
				}
			}
		})
	}
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/letsencrypt/boulder/ctpolicy/ctpolicy.go | third-party/github.com/letsencrypt/boulder/ctpolicy/ctpolicy.go | package ctpolicy
import (
"context"
"encoding/base64"
"fmt"
"strings"
"time"
"github.com/prometheus/client_golang/prometheus"
"github.com/letsencrypt/boulder/core"
"github.com/letsencrypt/boulder/ctpolicy/loglist"
berrors "github.com/letsencrypt/boulder/errors"
blog "github.com/letsencrypt/boulder/log"
pubpb "github.com/letsencrypt/boulder/publisher/proto"
)
// Label values for the "result" dimension of the sct_winner metric.
const (
	succeeded = "succeeded"
	failed    = "failed"
)
// CTPolicy is used to hold information about SCTs required from various
// groupings
type CTPolicy struct {
	pub       pubpb.PublisherClient // gRPC client used to submit (pre)certs to logs
	sctLogs   loglist.List          // logs we submit precerts to in order to collect SCTs
	infoLogs  loglist.List          // best-effort targets for informational precert submission
	finalLogs loglist.List          // best-effort targets for final-cert submission
	stagger   time.Duration         // delay between submissions to successive SCT logs
	log       blog.Logger
	// winnerCounter counts, per log URL, how often submission succeeded or failed.
	winnerCounter *prometheus.CounterVec
	// shardExpiryGauge exports each SCT log shard's EndExclusive as Unix seconds.
	shardExpiryGauge *prometheus.GaugeVec
}
// New creates a new CTPolicy struct, registering Prometheus metrics for SCT
// submission winners and CT shard expirations. For each log in sctLogs, the
// shard-expiry gauge is set to the log's EndExclusive as Unix seconds, or to
// 0 for logs with no temporal interval.
func New(pub pubpb.PublisherClient, sctLogs loglist.List, infoLogs loglist.List, finalLogs loglist.List, stagger time.Duration, log blog.Logger, stats prometheus.Registerer) *CTPolicy {
	winnerCounter := prometheus.NewCounterVec(
		prometheus.CounterOpts{
			Name: "sct_winner",
			Help: "Counter of logs which are selected for sct submission, by log URL and result (succeeded or failed).",
		},
		[]string{"url", "result"},
	)
	stats.MustRegister(winnerCounter)

	shardExpiryGauge := prometheus.NewGaugeVec(
		prometheus.GaugeOpts{
			Name: "ct_shard_expiration_seconds",
			Help: "CT shard end_exclusive field expressed as Unix epoch time, by operator and logID.",
		},
		[]string{"operator", "logID"},
	)
	stats.MustRegister(shardExpiryGauge)

	// Use `l` for the loop variable so we don't shadow the `log blog.Logger`
	// parameter above (the original code reused the name `log` here).
	for _, l := range sctLogs {
		if l.EndExclusive.IsZero() {
			// Handles the case for non-temporally sharded logs too.
			shardExpiryGauge.WithLabelValues(l.Operator, l.Name).Set(float64(0))
		} else {
			shardExpiryGauge.WithLabelValues(l.Operator, l.Name).Set(float64(l.EndExclusive.Unix()))
		}
	}

	return &CTPolicy{
		pub:              pub,
		sctLogs:          sctLogs,
		infoLogs:         infoLogs,
		finalLogs:        finalLogs,
		stagger:          stagger,
		log:              log,
		winnerCounter:    winnerCounter,
		shardExpiryGauge: shardExpiryGauge,
	}
}
// result carries the outcome of a single CT log submission: the log that was
// contacted, and either the SCT it returned or the error encountered.
type result struct {
	log loglist.Log
	sct []byte
	err error
}
// GetSCTs retrieves exactly two SCTs from the total collection of configured
// log groups, with at most one SCT coming from each group. It expects that all
// logs run by a single operator (e.g. Google) are in the same group, to
// guarantee that SCTs from logs in different groups do not end up coming from
// the same operator. As such, it enforces Google's current CT Policy, which
// requires that certs have two SCTs from logs run by different operators.
func (ctp *CTPolicy) GetSCTs(ctx context.Context, cert core.CertDER, expiration time.Time) (core.SCTDERs, error) {
	// We'll cancel this sub-context when we have the two SCTs we need, to cause
	// any other ongoing submission attempts to quit.
	subCtx, cancel := context.WithCancel(ctx)
	defer cancel()

	// This closure will be called in parallel once for each log.
	getOne := func(i int, l loglist.Log) ([]byte, error) {
		// Sleep a little bit to stagger our requests to the later logs. Use `i-1`
		// to compute the stagger duration so that the first two logs (indices 0
		// and 1) get negative or zero (i.e. instant) sleep durations. If the
		// context gets cancelled (most likely because we got enough SCTs from other
		// logs already) before the sleep is complete, quit instead.
		select {
		case <-subCtx.Done():
			return nil, subCtx.Err()
		case <-time.After(time.Duration(i-1) * ctp.stagger):
		}

		// NOTE(review): this deliberately(?) passes the parent ctx, not subCtx,
		// so an in-flight submission is not torn down the instant two SCTs are
		// collected — confirm this is the intended lifetime.
		sct, err := ctp.pub.SubmitToSingleCTWithResult(ctx, &pubpb.Request{
			LogURL:       l.Url,
			LogPublicKey: base64.StdEncoding.EncodeToString(l.Key),
			Der:          cert,
			Kind:         pubpb.SubmissionType_sct,
		})
		if err != nil {
			return nil, fmt.Errorf("ct submission to %q (%q) failed: %w", l.Name, l.Url, err)
		}

		return sct.Sct, nil
	}

	// Identify the set of candidate logs whose temporal interval includes this
	// cert's expiry. Randomize the order of the logs so that we're not always
	// trying to submit to the same two.
	logs := ctp.sctLogs.ForTime(expiration).Permute()

	// Kick off a collection of goroutines to try to submit the precert to each
	// log. Ensure that the results channel has a buffer equal to the number of
	// goroutines we're kicking off, so that they're all guaranteed to be able to
	// write to it and exit without blocking and leaking.
	resChan := make(chan result, len(logs))
	for i, log := range logs {
		go func(i int, l loglist.Log) {
			sctDER, err := getOne(i, l)
			resChan <- result{log: l, sct: sctDER, err: err}
		}(i, log)
	}

	go ctp.submitPrecertInformational(cert, expiration)

	// Finally, collect SCTs and/or errors from our results channel. We know that
	// we can collect len(logs) results from the channel because every goroutine
	// is guaranteed to write one result (either sct or error) to the channel.
	results := make([]result, 0)
	errs := make([]string, 0)
	for range len(logs) {
		res := <-resChan
		if res.err != nil {
			errs = append(errs, res.err.Error())
			ctp.winnerCounter.WithLabelValues(res.log.Url, failed).Inc()
			continue
		}
		results = append(results, res)
		ctp.winnerCounter.WithLabelValues(res.log.Url, succeeded).Inc()
		// After each success, check whether we already hold a policy-compliant
		// pair; if so, return early. The deferred cancel() then signals the
		// remaining getOne goroutines to quit.
		scts := compliantSet(results)
		if scts != nil {
			return scts, nil
		}
	}

	// If we made it to the end of that loop, that means we never got two SCTs
	// to return. Error out instead.
	if ctx.Err() != nil {
		// We timed out (the calling function returned and canceled our context),
		// thereby causing all of our getOne sub-goroutines to be cancelled.
		return nil, berrors.MissingSCTsError("failed to get 2 SCTs before ctx finished: %s", ctx.Err())
	}
	return nil, berrors.MissingSCTsError("failed to get 2 SCTs, got %d error(s): %s", len(errs), strings.Join(errs, "; "))
}
// compliantSet returns a slice of SCTs which complies with all relevant CT Log
// Policy requirements, namely that the set of SCTs:
//   - contain at least two SCTs, which
//   - come from logs run by at least two different operators, and
//   - contain at least one RFC6962-compliant (i.e. non-static/tiled) log.
//
// If no such set of SCTs exists, returns nil.
func compliantSet(results []result) core.SCTDERs {
	// Examine every ordered pair of successful results and return the first
	// compliant pair found.
	for i := range results {
		a := results[i]
		if a.err != nil {
			continue
		}
		for j := range results {
			b := results[j]
			if b.err != nil {
				continue
			}
			// The two SCTs must come from different operators. (This also
			// prevents pairing a result with itself.)
			if a.log.Operator == b.log.Operator {
				continue
			}
			// At least one SCT must come from a non-tiled log.
			if a.log.Tiled && b.log.Tiled {
				continue
			}
			return core.SCTDERs{a.sct, b.sct}
		}
	}
	return nil
}
// submitAllBestEffort submits the given certificate or precertificate to every
// log ("informational" for precerts, "final" for certs) configured in the
// policy whose temporal interval covers the given expiry. It neither waits for
// these submissions to complete, nor tracks their success.
func (ctp *CTPolicy) submitAllBestEffort(blob core.CertDER, kind pubpb.SubmissionType, expiry time.Time) {
	logs := ctp.finalLogs
	if kind == pubpb.SubmissionType_info {
		logs = ctp.infoLogs
	}

	for _, log := range logs {
		// Skip logs whose temporal shard does not include the expiry. A zero
		// StartInclusive/EndExclusive is treated as an open bound, matching
		// the semantics of List.ForTime. (The previous check skipped every
		// log with a zero EndExclusive, because the zero time is Before any
		// real expiry, so open-ended logs were never submitted to.)
		if log.StartInclusive.After(expiry) {
			continue
		}
		if !log.EndExclusive.IsZero() && !log.EndExclusive.After(expiry) {
			continue
		}

		go func(log loglist.Log) {
			_, err := ctp.pub.SubmitToSingleCTWithResult(
				context.Background(),
				&pubpb.Request{
					LogURL:       log.Url,
					LogPublicKey: base64.StdEncoding.EncodeToString(log.Key),
					Der:          blob,
					Kind:         kind,
				},
			)
			if err != nil {
				ctp.log.Warningf("ct submission of cert to log %q failed: %s", log.Url, err)
			}
		}(log)
	}
}
// submitPrecertInformational submits precertificates to any configured
// "informational" logs, but does not care about success or returned SCTs.
// It is a thin wrapper around submitAllBestEffort with kind=info.
func (ctp *CTPolicy) submitPrecertInformational(cert core.CertDER, expiration time.Time) {
	ctp.submitAllBestEffort(cert, pubpb.SubmissionType_info, expiration)
}
// SubmitFinalCert submits finalized certificates created from precertificates
// to any configured "final" logs, but does not care about success.
// It is a thin wrapper around submitAllBestEffort with kind=final.
func (ctp *CTPolicy) SubmitFinalCert(cert core.CertDER, expiration time.Time) {
	ctp.submitAllBestEffort(cert, pubpb.SubmissionType_final, expiration)
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/letsencrypt/boulder/ctpolicy/loglist/lintlist.go | third-party/github.com/letsencrypt/boulder/ctpolicy/loglist/lintlist.go | package loglist
import "sync"
// lintlist is the package-global cache populated exactly once by InitLintList
// and read by GetLintList. The embedded sync.Once guards the single write.
var lintlist struct {
	sync.Once
	list List  // the Validation-purpose log list, set on success
	err  error // any error from loading/filtering the list, set on failure
}
// InitLintList creates and stores a loglist intended for linting (i.e. with
// purpose Validation). We have to store this in a global because the zlint
// framework doesn't (yet) support configuration, so the e_scts_from_same_operator
// lint cannot load a log list on its own. Instead, we have the CA call this
// initialization function at startup, and have the lint call the getter below
// to get access to the cached list.
func InitLintList(path string) error {
	lintlist.Do(func() {
		// Load the list from disk, then narrow it to Validation-purpose logs.
		list, err := New(path)
		if err == nil {
			list, err = list.forPurpose(Validation)
		}
		if err != nil {
			lintlist.err = err
			return
		}
		lintlist.list = list
	})
	return lintlist.err
}
// GetLintList returns the log list initialized by InitLintList. This must
// only be called after InitLintList has been called on the same (or parent)
// goroutine.
func GetLintList() List {
	// No locking needed here: the sync.Once inside InitLintList guarantees
	// the list is fully written before any (correctly-ordered) read.
	return lintlist.list
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/letsencrypt/boulder/ctpolicy/loglist/loglist.go | third-party/github.com/letsencrypt/boulder/ctpolicy/loglist/loglist.go | package loglist
import (
_ "embed"
"encoding/base64"
"errors"
"fmt"
"math/rand/v2"
"os"
"slices"
"time"
"github.com/google/certificate-transparency-go/loglist3"
)
// purpose is the use to which a log list will be put. This type exists to allow
// the following consts to be declared for use by LogList consumers.
type purpose string
// Issuance means that the new log list should only contain Usable logs, which
// can issue SCTs that will be trusted by all Chrome clients.
const Issuance purpose = "scts"
// Informational means that the new log list can contain Usable, Qualified, and
// Pending logs, which will all accept submissions but not necessarily be
// trusted by Chrome clients.
const Informational purpose = "info"
// Validation means that the new log list should only contain Usable and
// Readonly logs, whose SCTs will be trusted by all Chrome clients but aren't
// necessarily still issuing SCTs today.
const Validation purpose = "lint"
// List represents a list of logs arranged by the "v3" schema as published by
// Chrome: https://www.gstatic.com/ct/log_list/v3/log_list_schema.json
type List []Log
// Log represents a single log run by an operator. It contains just the info
// necessary to determine whether we want to submit to that log, and how to
// do so.
type Log struct {
	Operator       string             // name of the organization running the log
	Name           string             // human-readable log description
	Id             string             // base64-encoded log ID
	Key            []byte             // log public key
	Url            string             // submission URL
	StartInclusive time.Time          // temporal shard start; zero means open-ended
	EndExclusive   time.Time          // temporal shard end; zero means open-ended
	State          loglist3.LogStatus // log state from the upstream log list
	Tiled          bool               // true for static/tiled logs (see newHelper)
}
// usableForPurpose returns true if the log state is acceptable for the given
// log list purpose, and false otherwise.
func usableForPurpose(s loglist3.LogStatus, p purpose) bool {
	switch p {
	case Issuance:
		// Only fully usable logs may receive issuance submissions.
		return s == loglist3.UsableLogStatus
	case Informational:
		switch s {
		case loglist3.UsableLogStatus, loglist3.QualifiedLogStatus, loglist3.PendingLogStatus:
			return true
		}
	case Validation:
		switch s {
		case loglist3.UsableLogStatus, loglist3.ReadOnlyLogStatus:
			return true
		}
	}
	return false
}
// New returns a LogList of all operators and all logs parsed from the file at
// the given path. The file must conform to the JSON Schema published by Google:
// https://www.gstatic.com/ct/log_list/v3/log_list_schema.json
func New(path string) (List, error) {
	contents, err := os.ReadFile(path)
	if err != nil {
		return nil, fmt.Errorf("failed to read CT Log List: %w", err)
	}
	return newHelper(contents)
}
// newHelper is a helper to allow the core logic of `New()` to be unit tested
// without having to write files to disk.
func newHelper(file []byte) (List, error) {
	parsed, err := loglist3.NewFromJSON(file)
	if err != nil {
		return nil, fmt.Errorf("failed to parse CT Log List: %w", err)
	}

	list := make(List, 0)
	for _, op := range parsed.Operators {
		// Classic (RFC 6962) logs.
		for _, l := range op.Logs {
			entry := Log{
				Operator: op.Name,
				Name:     l.Description,
				Id:       base64.StdEncoding.EncodeToString(l.LogID),
				Key:      l.Key,
				Url:      l.URL,
				State:    l.State.LogStatus(),
				Tiled:    false,
			}
			if ti := l.TemporalInterval; ti != nil {
				entry.StartInclusive = ti.StartInclusive
				entry.EndExclusive = ti.EndExclusive
			}
			list = append(list, entry)
		}
		// Static/tiled logs; note these use SubmissionURL rather than URL.
		for _, l := range op.TiledLogs {
			entry := Log{
				Operator: op.Name,
				Name:     l.Description,
				Id:       base64.StdEncoding.EncodeToString(l.LogID),
				Key:      l.Key,
				Url:      l.SubmissionURL,
				State:    l.State.LogStatus(),
				Tiled:    true,
			}
			if ti := l.TemporalInterval; ti != nil {
				entry.StartInclusive = ti.StartInclusive
				entry.EndExclusive = ti.EndExclusive
			}
			list = append(list, entry)
		}
	}
	return list, nil
}
// SubsetForPurpose returns a new log list containing only those logs whose
// names match those in the given list, and whose state is acceptable for the
// given purpose. It returns an error if any of the given names are not found
// in the starting list, or if the resulting list is too small to satisfy the
// Chrome "two operators" policy.
func (ll List) SubsetForPurpose(names []string, p purpose) (List, error) {
	// First narrow by name, then filter by state; forPurpose also enforces
	// the two-operator policy where applicable.
	sub, err := ll.subset(names)
	if err != nil {
		return nil, err
	}
	return sub.forPurpose(p)
}
// subset returns a new log list containing only those logs whose names match
// those in the given list. It returns an error if any of the given names are
// not found.
func (ll List) subset(names []string) (List, error) {
	res := make(List, 0)
	for _, name := range names {
		// Each requested name must match exactly one log in the source list.
		matches := 0
		for _, l := range ll {
			if l.Name != name {
				continue
			}
			matches++
			if matches > 1 {
				return nil, fmt.Errorf("found multiple logs matching name %q", name)
			}
			res = append(res, l)
		}
		if matches == 0 {
			return nil, fmt.Errorf("no log found matching name %q", name)
		}
	}
	return res, nil
}
// forPurpose returns a new log list containing only those logs whose states are
// acceptable for the given purpose. It returns an error if the purpose is
// Issuance or Validation and the set of remaining logs is too small to satisfy
// the Google "two operators" log policy.
func (ll List) forPurpose(p purpose) (List, error) {
	res := make(List, 0)
	// Track distinct operators so we can enforce the two-operator policy.
	operators := make(map[string]struct{})
	for _, l := range ll {
		if usableForPurpose(l.State, p) {
			res = append(res, l)
			operators[l.Operator] = struct{}{}
		}
	}
	if p != Informational && len(operators) < 2 {
		return nil, errors.New("log list does not have enough groups to satisfy Chrome policy")
	}
	return res, nil
}
// ForTime returns a new log list containing only those logs whose temporal
// intervals include the given certificate expiration timestamp. A zero
// StartInclusive or EndExclusive is treated as an open bound.
func (ll List) ForTime(expiry time.Time) List {
	res := slices.Clone(ll)
	return slices.DeleteFunc(res, func(l Log) bool {
		// !After == Equal-or-Before, so "started" covers the inclusive start.
		started := l.StartInclusive.IsZero() || !l.StartInclusive.After(expiry)
		notEnded := l.EndExclusive.IsZero() || l.EndExclusive.After(expiry)
		// Delete logs whose interval excludes the expiry.
		return !started || !notEnded
	})
}
// Permute returns a new log list containing the exact same logs, but in a
// randomly-shuffled order. The receiver is not modified.
func (ll List) Permute() List {
	shuffled := make(List, len(ll))
	copy(shuffled, ll)
	rand.Shuffle(len(shuffled), func(a, b int) {
		shuffled[a], shuffled[b] = shuffled[b], shuffled[a]
	})
	return shuffled
}
// GetByID returns the Log matching the given ID, or an error if no such
// log can be found.
func (ll List) GetByID(logID string) (Log, error) {
	idx := slices.IndexFunc(ll, func(l Log) bool { return l.Id == logID })
	if idx < 0 {
		return Log{}, fmt.Errorf("no log with ID %q found", logID)
	}
	return ll[idx], nil
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/letsencrypt/boulder/ctpolicy/loglist/loglist_test.go | third-party/github.com/letsencrypt/boulder/ctpolicy/loglist/loglist_test.go | package loglist
import (
"testing"
"time"
"github.com/google/certificate-transparency-go/loglist3"
"github.com/jmhodges/clock"
"github.com/letsencrypt/boulder/test"
)
// TestNew is currently empty: New's parsing logic is exercised via newHelper.
// TODO(review): add coverage for the file-reading path (missing file, bad JSON).
func TestNew(t *testing.T) {
}
// TestSubset covers subset's edge cases (nil/empty/unknown names) and its
// normal path, including that results follow the order of the requested
// names, not the order of the source list.
func TestSubset(t *testing.T) {
	input := List{
		Log{Name: "Log A1"},
		Log{Name: "Log A2"},
		Log{Name: "Log B1"},
		Log{Name: "Log B2"},
		Log{Name: "Log C1"},
		Log{Name: "Log C2"},
	}

	// nil and empty name slices yield an empty (but non-error) result.
	actual, err := input.subset(nil)
	test.AssertNotError(t, err, "nil names should not error")
	test.AssertEquals(t, len(actual), 0)

	actual, err = input.subset([]string{})
	test.AssertNotError(t, err, "empty names should not error")
	test.AssertEquals(t, len(actual), 0)

	// A name not present in the list is an error.
	actual, err = input.subset([]string{"Other Log"})
	test.AssertError(t, err, "wrong name should result in error")
	test.AssertEquals(t, len(actual), 0)

	// Result order follows the requested-names order.
	expected := List{
		Log{Name: "Log B1"},
		Log{Name: "Log A1"},
		Log{Name: "Log A2"},
	}
	actual, err = input.subset([]string{"Log B1", "Log A1", "Log A2"})
	test.AssertNotError(t, err, "normal usage should not error")
	test.AssertDeepEquals(t, actual, expected)
}
// TestForPurpose checks that forPurpose filters logs by state for each of the
// three purposes, and errors when fewer than two operators remain for
// Issuance (the two-operator policy does not apply to Informational).
func TestForPurpose(t *testing.T) {
	input := List{
		Log{Name: "Log A1", Operator: "A", State: loglist3.UsableLogStatus},
		Log{Name: "Log A2", Operator: "A", State: loglist3.RejectedLogStatus},
		Log{Name: "Log B1", Operator: "B", State: loglist3.UsableLogStatus},
		Log{Name: "Log B2", Operator: "B", State: loglist3.RetiredLogStatus},
		Log{Name: "Log C1", Operator: "C", State: loglist3.PendingLogStatus},
		Log{Name: "Log C2", Operator: "C", State: loglist3.ReadOnlyLogStatus},
	}
	// Issuance keeps only Usable logs.
	expected := List{
		Log{Name: "Log A1", Operator: "A", State: loglist3.UsableLogStatus},
		Log{Name: "Log B1", Operator: "B", State: loglist3.UsableLogStatus},
	}
	actual, err := input.forPurpose(Issuance)
	test.AssertNotError(t, err, "should have two acceptable logs")
	test.AssertDeepEquals(t, actual, expected)

	// With only one Usable operator remaining, Issuance must fail.
	input = List{
		Log{Name: "Log A1", Operator: "A", State: loglist3.UsableLogStatus},
		Log{Name: "Log A2", Operator: "A", State: loglist3.RejectedLogStatus},
		Log{Name: "Log B1", Operator: "B", State: loglist3.QualifiedLogStatus},
		Log{Name: "Log B2", Operator: "B", State: loglist3.RetiredLogStatus},
		Log{Name: "Log C1", Operator: "C", State: loglist3.PendingLogStatus},
		Log{Name: "Log C2", Operator: "C", State: loglist3.ReadOnlyLogStatus},
	}
	_, err = input.forPurpose(Issuance)
	test.AssertError(t, err, "should only have one acceptable log")

	// Validation additionally accepts ReadOnly logs.
	expected = List{
		Log{Name: "Log A1", Operator: "A", State: loglist3.UsableLogStatus},
		Log{Name: "Log C2", Operator: "C", State: loglist3.ReadOnlyLogStatus},
	}
	actual, err = input.forPurpose(Validation)
	test.AssertNotError(t, err, "should have two acceptable logs")
	test.AssertDeepEquals(t, actual, expected)

	// Informational accepts Usable, Qualified, and Pending logs.
	expected = List{
		Log{Name: "Log A1", Operator: "A", State: loglist3.UsableLogStatus},
		Log{Name: "Log B1", Operator: "B", State: loglist3.QualifiedLogStatus},
		Log{Name: "Log C1", Operator: "C", State: loglist3.PendingLogStatus},
	}
	actual, err = input.forPurpose(Informational)
	test.AssertNotError(t, err, "should have three acceptable logs")
	test.AssertDeepEquals(t, actual, expected)
}
// TestForTime checks ForTime's interval filtering: start bounds are
// inclusive, end bounds are exclusive, and zero times act as open bounds.
func TestForTime(t *testing.T) {
	fc := clock.NewFake()
	fc.Set(time.Now())

	input := List{
		Log{Name: "Fully Bound", StartInclusive: fc.Now().Add(-time.Hour), EndExclusive: fc.Now().Add(time.Hour)},
		Log{Name: "Open End", StartInclusive: fc.Now().Add(-time.Hour)},
		Log{Name: "Open Start", EndExclusive: fc.Now().Add(time.Hour)},
		Log{Name: "Fully Open"},
	}

	// A time strictly inside every interval keeps all logs.
	expected := List{
		Log{Name: "Fully Bound", StartInclusive: fc.Now().Add(-time.Hour), EndExclusive: fc.Now().Add(time.Hour)},
		Log{Name: "Open End", StartInclusive: fc.Now().Add(-time.Hour)},
		Log{Name: "Open Start", EndExclusive: fc.Now().Add(time.Hour)},
		Log{Name: "Fully Open"},
	}
	actual := input.ForTime(fc.Now())
	test.AssertDeepEquals(t, actual, expected)

	// Exactly at the start bound: start is inclusive, so all logs remain.
	expected = List{
		Log{Name: "Fully Bound", StartInclusive: fc.Now().Add(-time.Hour), EndExclusive: fc.Now().Add(time.Hour)},
		Log{Name: "Open End", StartInclusive: fc.Now().Add(-time.Hour)},
		Log{Name: "Open Start", EndExclusive: fc.Now().Add(time.Hour)},
		Log{Name: "Fully Open"},
	}
	actual = input.ForTime(fc.Now().Add(-time.Hour))
	test.AssertDeepEquals(t, actual, expected)

	// Before every start bound: only logs with an open start remain.
	expected = List{
		Log{Name: "Open Start", EndExclusive: fc.Now().Add(time.Hour)},
		Log{Name: "Fully Open"},
	}
	actual = input.ForTime(fc.Now().Add(-2 * time.Hour))
	test.AssertDeepEquals(t, actual, expected)

	// Exactly at the end bound: end is exclusive, so bounded-end logs drop out.
	expected = List{
		Log{Name: "Open End", StartInclusive: fc.Now().Add(-time.Hour)},
		Log{Name: "Fully Open"},
	}
	actual = input.ForTime(fc.Now().Add(time.Hour))
	test.AssertDeepEquals(t, actual, expected)
}
// TestPermute checks that Permute actually shuffles: over many permutations,
// every log should be observed at more than one index.
//
// The previous version incremented per-index counters and then checked for
// count == 0, but map entries only exist once incremented, so that check was
// vacuous and the test could never fail.
func TestPermute(t *testing.T) {
	input := List{
		Log{Name: "Log A1"},
		Log{Name: "Log A2"},
		Log{Name: "Log B1"},
		Log{Name: "Log B2"},
		Log{Name: "Log C1"},
		Log{Name: "Log C2"},
	}

	// foundIndices[name][index] counts how often each log landed at each index.
	foundIndices := make(map[string]map[int]int)
	for _, log := range input {
		foundIndices[log.Name] = make(map[int]int)
	}

	for range 100 {
		actual := input.Permute()
		if len(actual) != len(input) {
			t.Fatalf("Permute returned %d logs, want %d", len(actual), len(input))
		}
		for index, log := range actual {
			foundIndices[log.Name][index]++
		}
	}

	// With 100 random permutations of 6 elements, the probability of any log
	// landing at the same index every time is vanishingly small.
	for name, counts := range foundIndices {
		if len(counts) <= 1 {
			t.Errorf("Log %s always appeared at the same index; Permute does not appear to shuffle", name)
		}
	}
}
// TestGetByID covers both the found and not-found paths of GetByID.
func TestGetByID(t *testing.T) {
	input := List{
		Log{Name: "Log A1", Id: "ID A1"},
		Log{Name: "Log B1", Id: "ID B1"},
	}

	expected := Log{Name: "Log A1", Id: "ID A1"}
	actual, err := input.GetByID("ID A1")
	test.AssertNotError(t, err, "should have found log")
	test.AssertDeepEquals(t, actual, expected)

	_, err = input.GetByID("Other ID")
	test.AssertError(t, err, "should not have found log")
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/letsencrypt/boulder/ctpolicy/ctconfig/ctconfig.go | third-party/github.com/letsencrypt/boulder/ctpolicy/ctconfig/ctconfig.go | package ctconfig
import (
"github.com/letsencrypt/boulder/config"
)
// CTConfig is the top-level config object expected to be embedded in an
// executable's JSON config struct.
type CTConfig struct {
	// Stagger is duration (e.g. "200ms") indicating how long to wait for a log
	// from one operator group to accept a certificate before attempting
	// submission to a log run by a different operator instead.
	Stagger config.Duration
	// LogListFile is a path to a JSON log list file. The file must match Chrome's
	// schema: https://www.gstatic.com/ct/log_list/v3/log_list_schema.json
	LogListFile string `validate:"required"`
	// SCTLogs is a list of CT log names to submit precerts to in order to get SCTs.
	SCTLogs []string `validate:"min=1,dive,required"`
	// InfoLogs is a list of CT log names to submit precerts to on a best-effort
	// basis. Logs are included here for the sake of wider distribution of our
	// precerts, and to exercise logs that are in the qualification process.
	InfoLogs []string
	// FinalLogs is a list of CT log names to submit final certificates to.
	// This may include duplicates from the lists above, to submit both precerts
	// and final certs to the same log.
	FinalLogs []string
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/letsencrypt/boulder/cmd/clock_generic.go | third-party/github.com/letsencrypt/boulder/cmd/clock_generic.go | //go:build !integration
package cmd
import "github.com/jmhodges/clock"
// Clock functions similarly to clock.New(), but the returned value can be
// changed using the FAKECLOCK environment variable if the 'integration' build
// flag is set.
//
// This is the non-integration build (see the build constraint above), so it
// always returns the default real-system Clock.
func Clock() clock.Clock {
	return clock.New()
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/letsencrypt/boulder/cmd/registry.go | third-party/github.com/letsencrypt/boulder/cmd/registry.go | package cmd
import (
"fmt"
"reflect"
"sort"
"sync"
"github.com/letsencrypt/validator/v10"
)
// ConfigValidator pairs a component's config struct with any custom validation
// functions (keyed by validator tag name) used when validating it.
type ConfigValidator struct {
	Config     interface{}
	Validators map[string]validator.Func
}
// registry is the global, mutex-guarded store of subcommands and their
// optional config validators, populated via RegisterCommand. The maps are
// allocated lazily on first registration.
var registry struct {
	sync.Mutex
	commands map[string]func()
	configs  map[string]*ConfigValidator
}
// RegisterCommand registers a subcommand and its corresponding config
// validator. The provided func() is called when the subcommand is invoked on
// the command line. The ConfigValidator is optional and used to validate the
// config file for the subcommand. Registering the same name twice (for either
// the command or its validator) panics.
func RegisterCommand(name string, f func(), cv *ConfigValidator) {
	registry.Lock()
	defer registry.Unlock()
	// Lazily allocate the command map on first registration.
	if registry.commands == nil {
		registry.commands = make(map[string]func())
	}
	if registry.commands[name] != nil {
		panic(fmt.Sprintf("command %q was registered twice", name))
	}
	registry.commands[name] = f
	// The validator is optional.
	if cv == nil {
		return
	}
	if registry.configs == nil {
		registry.configs = make(map[string]*ConfigValidator)
	}
	if registry.configs[name] != nil {
		panic(fmt.Sprintf("config validator for command %q was registered twice", name))
	}
	registry.configs[name] = cv
}
// LookupCommand returns the function registered for the named subcommand, or
// nil if no such command was registered.
func LookupCommand(name string) func() {
	registry.Lock()
	defer registry.Unlock()
	return registry.commands[name]
}
// AvailableCommands returns the names of all registered subcommands, sorted
// alphabetically.
func AvailableCommands() []string {
	registry.Lock()
	defer registry.Unlock()
	var avail []string
	for name := range registry.commands {
		avail = append(avail, name)
	}
	sort.Strings(avail)
	return avail
}
// LookupConfigValidator constructs an instance of the *ConfigValidator for the
// given Boulder component name. If no *ConfigValidator was registered, nil is
// returned.
func LookupConfigValidator(name string) *ConfigValidator {
	registry.Lock()
	defer registry.Unlock()
	cv := registry.configs[name]
	if cv == nil {
		return nil
	}
	// Create a fresh copy of the config struct so that we can validate it
	// multiple times without mutating the registry's copy. (The local was
	// previously named "copy", shadowing the builtin of the same name.)
	cfg := reflect.New(reflect.ValueOf(cv.Config).Elem().Type()).Interface()
	return &ConfigValidator{
		Config:     cfg,
		Validators: cv.Validators,
	}
}
// AvailableConfigValidators returns a list of Boulder component names for which
// a *ConfigValidator has been registered, sorted alphabetically.
func AvailableConfigValidators() []string {
	registry.Lock()
	defer registry.Unlock()
	var avail []string
	for name := range registry.configs {
		avail = append(avail, name)
	}
	sort.Strings(avail)
	return avail
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/letsencrypt/boulder/cmd/config.go | third-party/github.com/letsencrypt/boulder/cmd/config.go | package cmd
import (
"crypto/tls"
"crypto/x509"
"encoding/hex"
"errors"
"fmt"
"net"
"os"
"strings"
"github.com/prometheus/client_golang/prometheus"
"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
"google.golang.org/grpc/resolver"
"github.com/letsencrypt/boulder/config"
"github.com/letsencrypt/boulder/core"
"github.com/letsencrypt/boulder/identifier"
)
// PasswordConfig contains a path to a file containing a password.
type PasswordConfig struct {
PasswordFile string `validate:"required"`
}
// Pass returns a password, extracted from the PasswordConfig's PasswordFile
func (pc *PasswordConfig) Pass() (string, error) {
	// An empty path means no password was configured; treat it as optional
	// for backwards compatibility.
	if pc.PasswordFile == "" {
		return "", nil
	}
	raw, err := os.ReadFile(pc.PasswordFile)
	if err != nil {
		return "", err
	}
	// Strip trailing newlines commonly left by editors.
	return strings.TrimRight(string(raw), "\n"), nil
}
// ServiceConfig contains config items that are common to all our services, to
// be embedded in other config structs.
type ServiceConfig struct {
// DebugAddr is the address to run the /debug handlers on.
DebugAddr string `validate:"omitempty,hostname_port"`
GRPC *GRPCServerConfig
TLS TLSConfig
// HealthCheckInterval is the duration between deep health checks of the
// service. Defaults to 5 seconds.
HealthCheckInterval config.Duration `validate:"-"`
}
// DBConfig defines how to connect to a database. The connect string is
// stored in a file separate from the config, because it can contain a password,
// which we want to keep out of configs.
type DBConfig struct {
// A file containing a connect URL for the DB.
DBConnectFile string `validate:"required"`
// MaxOpenConns sets the maximum number of open connections to the
// database. If MaxIdleConns is greater than 0 and MaxOpenConns is
// less than MaxIdleConns, then MaxIdleConns will be reduced to
// match the new MaxOpenConns limit. If n < 0, then there is no
// limit on the number of open connections.
MaxOpenConns int `validate:"min=-1"`
// MaxIdleConns sets the maximum number of connections in the idle
// connection pool. If MaxOpenConns is greater than 0 but less than
// MaxIdleConns, then MaxIdleConns will be reduced to match the
// MaxOpenConns limit. If n < 0, no idle connections are retained.
MaxIdleConns int `validate:"min=-1"`
// ConnMaxLifetime sets the maximum amount of time a connection may
// be reused. Expired connections may be closed lazily before reuse.
// If d < 0, connections are not closed due to a connection's age.
ConnMaxLifetime config.Duration `validate:"-"`
// ConnMaxIdleTime sets the maximum amount of time a connection may
// be idle. Expired connections may be closed lazily before reuse.
// If d < 0, connections are not closed due to a connection's idle
// time.
ConnMaxIdleTime config.Duration `validate:"-"`
}
// URL returns the DBConnect URL represented by this DBConfig object, loading it
// from the file on disk. Leading and trailing whitespace is stripped.
func (d *DBConfig) URL() (string, error) {
	// On a read error this returns the trimmed (likely empty) partial contents
	// alongside the error; callers must check err before using the string.
	url, err := os.ReadFile(d.DBConnectFile)
	return strings.TrimSpace(string(url)), err
}
// SMTPConfig is deprecated.
// TODO(#8199): Delete this when it is removed from bad-key-revoker's config.
type SMTPConfig struct {
PasswordConfig
Server string `validate:"required"`
Port string `validate:"required,numeric,min=1,max=65535"`
Username string `validate:"required"`
}
// PAConfig specifies how a policy authority should connect to its
// database, what policies it should enforce, and what challenges
// it should offer.
type PAConfig struct {
DBConfig `validate:"-"`
Challenges map[core.AcmeChallenge]bool `validate:"omitempty,dive,keys,oneof=http-01 dns-01 tls-alpn-01,endkeys"`
Identifiers map[identifier.IdentifierType]bool `validate:"omitempty,dive,keys,oneof=dns ip,endkeys"`
}
// CheckChallenges checks whether the list of challenges in the PA config
// actually contains valid challenge names
func (pc PAConfig) CheckChallenges() error {
	// An empty map would disable all challenges, which is never intended.
	if len(pc.Challenges) == 0 {
		return errors.New("empty challenges map in the Policy Authority config is not allowed")
	}
	for challenge := range pc.Challenges {
		if !challenge.IsValid() {
			return fmt.Errorf("invalid challenge in PA config: %s", challenge)
		}
	}
	return nil
}
// CheckIdentifiers checks whether the list of identifiers in the PA config
// actually contains valid identifier type names
func (pc PAConfig) CheckIdentifiers() error {
	// Unlike Challenges, an empty Identifiers map is permitted.
	for ident := range pc.Identifiers {
		if !ident.IsValid() {
			return fmt.Errorf("invalid identifier type in PA config: %s", ident)
		}
	}
	return nil
}
// HostnamePolicyConfig specifies a file from which to load a policy regarding
// what hostnames to issue for.
type HostnamePolicyConfig struct {
HostnamePolicyFile string `validate:"required"`
}
// TLSConfig represents certificates and a key for authenticated TLS.
type TLSConfig struct {
CertFile string `validate:"required"`
KeyFile string `validate:"required"`
// The CACertFile file may contain any number of root certificates and will
// be deduplicated internally.
CACertFile string `validate:"required"`
}
// Load reads and parses the certificates and key listed in the TLSConfig, and
// returns a *tls.Config suitable for either client or server use. The
// CACertFile file may contain any number of root certificates and will be
// deduplicated internally. Prometheus metrics for various certificate fields
// will be exported.
func (t *TLSConfig) Load(scope prometheus.Registerer) (*tls.Config, error) {
if t == nil {
return nil, fmt.Errorf("nil TLS section in config")
}
if t.CertFile == "" {
return nil, fmt.Errorf("nil CertFile in TLSConfig")
}
if t.KeyFile == "" {
return nil, fmt.Errorf("nil KeyFile in TLSConfig")
}
if t.CACertFile == "" {
return nil, fmt.Errorf("nil CACertFile in TLSConfig")
}
caCertBytes, err := os.ReadFile(t.CACertFile)
if err != nil {
return nil, fmt.Errorf("reading CA cert from %q: %s", t.CACertFile, err)
}
rootCAs := x509.NewCertPool()
if ok := rootCAs.AppendCertsFromPEM(caCertBytes); !ok {
return nil, fmt.Errorf("parsing CA certs from %s failed", t.CACertFile)
}
cert, err := tls.LoadX509KeyPair(t.CertFile, t.KeyFile)
if err != nil {
return nil, fmt.Errorf("loading key pair from %q and %q: %s",
t.CertFile, t.KeyFile, err)
}
tlsNotBefore := prometheus.NewGaugeVec(
prometheus.GaugeOpts{
Name: "tlsconfig_notbefore_seconds",
Help: "TLS certificate NotBefore field expressed as Unix epoch time",
},
[]string{"serial"})
err = scope.Register(tlsNotBefore)
if err != nil {
are := prometheus.AlreadyRegisteredError{}
if errors.As(err, &are) {
tlsNotBefore = are.ExistingCollector.(*prometheus.GaugeVec)
} else {
return nil, err
}
}
tlsNotAfter := prometheus.NewGaugeVec(
prometheus.GaugeOpts{
Name: "tlsconfig_notafter_seconds",
Help: "TLS certificate NotAfter field expressed as Unix epoch time",
},
[]string{"serial"})
err = scope.Register(tlsNotAfter)
if err != nil {
are := prometheus.AlreadyRegisteredError{}
if errors.As(err, &are) {
tlsNotAfter = are.ExistingCollector.(*prometheus.GaugeVec)
} else {
return nil, err
}
}
leaf, err := x509.ParseCertificate(cert.Certificate[0])
if err != nil {
return nil, err
}
serial := leaf.SerialNumber.String()
tlsNotBefore.WithLabelValues(serial).Set(float64(leaf.NotBefore.Unix()))
tlsNotAfter.WithLabelValues(serial).Set(float64(leaf.NotAfter.Unix()))
return &tls.Config{
RootCAs: rootCAs,
ClientCAs: rootCAs,
ClientAuth: tls.RequireAndVerifyClientCert,
Certificates: []tls.Certificate{cert},
// Set the only acceptable TLS to v1.3.
MinVersion: tls.VersionTLS13,
}, nil
}
// SyslogConfig defines the config for syslogging.
// 3 means "error", 4 means "warning", 6 is "info" and 7 is "debug".
// Configuring a given level causes all messages at that level and below to
// be logged.
type SyslogConfig struct {
	// StdoutLevel is the maximum level to emit on stdout/stderr.
	// When absent or zero, this causes no logs to be emitted on stdout/stderr.
	// Errors and warnings will be emitted on stderr if the configured level
	// allows.
	StdoutLevel int `validate:"min=-1,max=7"`
	// SyslogLevel is the maximum level to send to syslog.
	// When absent or zero, this defaults to logging all messages of level 6
	// or below. To disable syslog logging entirely, set this to -1.
	SyslogLevel int `validate:"min=-1,max=7"`
}
// ServiceDomain contains the service and domain name the gRPC or bdns provider
// will use to construct a SRV DNS query to lookup backends.
type ServiceDomain struct {
	// Service is the service name to be used for SRV lookups. For example: if
	// the record is 'foo.service.consul', then the Service is 'foo'.
	Service string `validate:"required"`
	// Domain is the domain name to be used for SRV lookups. For example: if the
	// record is 'foo.service.consul', then the Domain is 'service.consul'.
	Domain string `validate:"required"`
}
// GRPCClientConfig contains the information necessary to set up a gRPC client
// connection. The following field combinations are allowed:
//
// ServerIPAddresses, [Timeout]
// ServerAddress, DNSAuthority, [Timeout], [HostOverride]
// SRVLookup, DNSAuthority, [Timeout], [HostOverride], [SRVResolver]
// SRVLookups, DNSAuthority, [Timeout], [HostOverride], [SRVResolver]
type GRPCClientConfig struct {
	// DNSAuthority is a single <hostname|IPv4|[IPv6]>:<port> of the DNS server
	// to be used for resolution of gRPC backends. If the address contains a
	// hostname the gRPC client will resolve it via the system DNS. If the
	// address contains a port, the client will use it directly, otherwise port
	// 53 is used.
	DNSAuthority string `validate:"required_with=SRVLookup SRVLookups,omitempty,ip|hostname|hostname_port"`
	// SRVLookup contains the service and domain name the gRPC client will use
	// to construct a SRV DNS query to lookup backends. For example: if the
	// resource record is 'foo.service.consul', then the 'Service' is 'foo' and
	// the 'Domain' is 'service.consul'. The expected dNSName to be
	// authenticated in the server certificate would be 'foo.service.consul'.
	//
	// Note: The 'proto' field of the SRV record MUST contain 'tcp' and the
	// 'port' field MUST be a valid port. In a Consul configuration file you
	// would specify 'foo.service.consul' as:
	//
	// services {
	//   id      = "some-unique-id-1"
	//   name    = "foo"
	//   address = "10.77.77.77"
	//   port    = 8080
	//   tags    = ["tcp"]
	// }
	// services {
	//   id      = "some-unique-id-2"
	//   name    = "foo"
	//   address = "10.77.77.77"
	//   port    = 8180
	//   tags    = ["tcp"]
	// }
	//
	// If you've added the above to your Consul configuration file (and reloaded
	// Consul) then you should be able to resolve the following dig query:
	//
	// $ dig @10.77.77.10 -t SRV _foo._tcp.service.consul +short
	// 1 1 8080 0a585858.addr.dc1.consul.
	// 1 1 8180 0a4d4d4d.addr.dc1.consul.
	SRVLookup *ServiceDomain `validate:"required_without_all=SRVLookups ServerAddress ServerIPAddresses"`
	// SRVLookups allows you to pass multiple SRV records to the gRPC client.
	// The gRPC client will resolve each SRV record and use the results to
	// construct a list of backends to connect to. For more details, see the
	// documentation for the SRVLookup field. Note: while you can pass multiple
	// targets to the gRPC client using this field, all of the targets will use
	// the same HostOverride and TLS configuration.
	SRVLookups []*ServiceDomain `validate:"required_without_all=SRVLookup ServerAddress ServerIPAddresses"`
	// SRVResolver is an optional override to indicate that a specific
	// implementation of the SRV resolver should be used. The default is 'srv'.
	// For more details, see the documentation in:
	// grpc/internal/resolver/dns/dns_resolver.go.
	SRVResolver string `validate:"excluded_with=ServerAddress ServerIPAddresses,isdefault|oneof=srv nonce-srv"`
	// ServerAddress is a single <hostname|IPv4|[IPv6]>:<port> or `:<port>` that
	// the gRPC client will, if necessary, resolve via DNS and then connect to.
	// If the address provided is 'foo.service.consul:8080' then the dNSName to
	// be authenticated in the server certificate would be 'foo.service.consul'.
	//
	// In a Consul configuration file you would specify 'foo.service.consul' as:
	//
	// services {
	//   id      = "some-unique-id-1"
	//   name    = "foo"
	//   address = "10.77.77.77"
	// }
	// services {
	//   id      = "some-unique-id-2"
	//   name    = "foo"
	//   address = "10.88.88.88"
	// }
	//
	// If you've added the above to your Consul configuration file (and reloaded
	// Consul) then you should be able to resolve the following dig query:
	//
	// $ dig A @10.77.77.10 foo.service.consul +short
	// 10.77.77.77
	// 10.88.88.88
	ServerAddress string `validate:"required_without_all=ServerIPAddresses SRVLookup SRVLookups,omitempty,hostname_port"`
	// ServerIPAddresses is a comma separated list of IP addresses, in the
	// format `<IPv4|[IPv6]>:<port>` or `:<port>`, that the gRPC client will
	// connect to. If the addresses provided are ["10.77.77.77", "10.88.88.88"]
	// then the iPAddress' to be authenticated in the server certificate would
	// be '10.77.77.77' and '10.88.88.88'.
	ServerIPAddresses []string `validate:"required_without_all=ServerAddress SRVLookup SRVLookups,omitempty,dive,hostname_port"`
	// HostOverride is an optional override for the dNSName the client will
	// verify in the certificate presented by the server.
	HostOverride string `validate:"excluded_with=ServerIPAddresses,omitempty,hostname"`
	// Timeout is the per-RPC timeout applied by the client; zero means no
	// client-side timeout is configured here.
	Timeout config.Duration
	// NoWaitForReady turns off our (current) default of setting grpc.WaitForReady(true).
	// This means if all of a GRPC client's backends are down, it will error immediately.
	// The current default, grpc.WaitForReady(true), means that if all of a GRPC client's
	// backends are down, it will wait until either one becomes available or the RPC
	// times out.
	NoWaitForReady bool
}
// MakeTargetAndHostOverride constructs the target URI that the gRPC client will
// connect to and the hostname (only for 'ServerAddress' and 'SRVLookup') that
// will be validated during the mTLS handshake. An error is returned if the
// provided configuration is invalid.
func (c *GRPCClientConfig) MakeTargetAndHostOverride() (string, string, error) {
	// orOverride returns the configured HostOverride when present, otherwise
	// the supplied fallback name.
	orOverride := func(fallback string) string {
		if c.HostOverride != "" {
			return c.HostOverride
		}
		return fallback
	}

	switch {
	case c.ServerAddress != "":
		if c.ServerIPAddresses != nil || c.SRVLookup != nil {
			return "", "", errors.New(
				"both 'serverAddress' and 'serverIPAddresses' or 'SRVLookup' in gRPC client config. Only one should be provided",
			)
		}
		// Lookup backends using DNS A records.
		host, _, err := net.SplitHostPort(c.ServerAddress)
		if err != nil {
			return "", "", err
		}
		return fmt.Sprintf("dns://%s/%s", c.DNSAuthority, c.ServerAddress), orOverride(host), nil

	case c.SRVLookup != nil:
		if c.DNSAuthority == "" {
			return "", "", errors.New("field 'dnsAuthority' is required in gRPC client config with SRVLookup")
		}
		scheme, err := c.makeSRVScheme()
		if err != nil {
			return "", "", err
		}
		if c.ServerIPAddresses != nil {
			return "", "", errors.New(
				"both 'SRVLookup' and 'serverIPAddresses' in gRPC client config. Only one should be provided",
			)
		}
		// Lookup backends using DNS SRV records.
		name := c.SRVLookup.Service + "." + c.SRVLookup.Domain
		return fmt.Sprintf("%s://%s/%s", scheme, c.DNSAuthority, name), orOverride(name), nil

	case c.SRVLookups != nil:
		if c.DNSAuthority == "" {
			return "", "", errors.New("field 'dnsAuthority' is required in gRPC client config with SRVLookups")
		}
		scheme, err := c.makeSRVScheme()
		if err != nil {
			return "", "", err
		}
		if c.ServerIPAddresses != nil {
			return "", "", errors.New(
				"both 'SRVLookups' and 'serverIPAddresses' in gRPC client config. Only one should be provided",
			)
		}
		// Lookup backends using multiple DNS SRV records. Note: unlike the
		// single-SRVLookup case, there is no target-derived fallback for the
		// host override here.
		names := make([]string, 0, len(c.SRVLookups))
		for _, sd := range c.SRVLookups {
			names = append(names, sd.Service+"."+sd.Domain)
		}
		return fmt.Sprintf("%s://%s/%s", scheme, c.DNSAuthority, strings.Join(names, ",")), orOverride(""), nil

	default:
		if c.ServerIPAddresses == nil {
			return "", "", errors.New(
				"neither 'serverAddress', 'SRVLookup', 'SRVLookups' nor 'serverIPAddresses' in gRPC client config. One should be provided",
			)
		}
		// Specify backends as a static list of IP addresses.
		return "static:///" + strings.Join(c.ServerIPAddresses, ","), "", nil
	}
}
// makeSRVScheme returns the scheme to use for SRV lookups. If the SRVResolver
// field is empty, it returns "srv". Otherwise it checks that the specified
// SRVResolver is registered with the gRPC runtime and returns it.
func (c *GRPCClientConfig) makeSRVScheme() (string, error) {
	if c.SRVResolver == "" {
		return "srv", nil
	}
	if resolver.Get(c.SRVResolver) == nil {
		return "", fmt.Errorf("resolver %q is not registered", c.SRVResolver)
	}
	return c.SRVResolver, nil
}
// GRPCServerConfig contains the information needed to start a gRPC server.
type GRPCServerConfig struct {
	// Address is the <host>:<port> the server listens on; optional.
	Address string `json:"address" validate:"omitempty,hostname_port"`
	// Services is a map of service names to configuration specific to that service.
	// These service names must match the service names advertised by gRPC itself,
	// which are identical to the names set in our gRPC .proto files prefixed by
	// the package names set in those files (e.g. "ca.CertificateAuthority").
	Services map[string]*GRPCServiceConfig `json:"services" validate:"required,dive,required"`
	// MaxConnectionAge specifies how long a connection may live before the server sends a GoAway to the
	// client. Because gRPC connections re-resolve DNS after a connection close,
	// this controls how long it takes before a client learns about changes to its
	// backends.
	// https://pkg.go.dev/google.golang.org/grpc/keepalive#ServerParameters
	MaxConnectionAge config.Duration `validate:"required"`
}
// GRPCServiceConfig contains the information needed to configure a gRPC service.
type GRPCServiceConfig struct {
	// ClientNames is the list of accepted gRPC client certificate SANs.
	// Connections from clients not in this list will be rejected by the
	// upstream listener, and RPCs from unlisted clients will be denied by the
	// server interceptor. At least one name is required.
	ClientNames []string `json:"clientNames" validate:"min=1,dive,hostname,required"`
}
// OpenTelemetryConfig configures tracing via OpenTelemetry.
// To enable tracing, set a nonzero SampleRatio and configure an Endpoint.
type OpenTelemetryConfig struct {
	// Endpoint to connect to with the OTLP protocol over gRPC.
	// It should be of the form "localhost:4317".
	//
	// It always connects over plaintext, and so is only intended to connect
	// to a local OpenTelemetry collector. This should not be used over an
	// insecure network.
	Endpoint string
	// SampleRatio is the ratio of new traces to head sample.
	// This only affects new traces without a parent with its own sampling
	// decision; otherwise the parent's sampling decision is used.
	//
	// Set to something between 0 and 1, where 1 is sampling all traces.
	// This is primarily meant as a pressure relief if the Endpoint we connect to
	// is being overloaded, and we otherwise handle sampling in the collectors.
	// See otel trace.ParentBased and trace.TraceIDRatioBased for details.
	SampleRatio float64
}
// OpenTelemetryHTTPConfig configures the otelhttp server tracing.
type OpenTelemetryHTTPConfig struct {
	// TrustIncomingSpans should only be set true if there's a trusted service
	// connecting to Boulder, such as a load balancer that's tracing-aware.
	// If false, the default, incoming traces won't be set as the parent.
	// See otelhttp.WithPublicEndpoint.
	TrustIncomingSpans bool
}
// Options returns the otelhttp options for this configuration. They can be
// passed to otelhttp.NewHandler or Boulder's wrapper, measured_http.New.
func (c *OpenTelemetryHTTPConfig) Options() []otelhttp.Option {
	if c.TrustIncomingSpans {
		// Trusted upstream: let incoming trace context become the parent.
		return nil
	}
	return []otelhttp.Option{otelhttp.WithPublicEndpoint()}
}
// DNSProvider contains the configuration for a DNS provider in the bdns package
// which supports dynamic reloading of its backends.
type DNSProvider struct {
	// DNSAuthority is the single <hostname|IPv4|[IPv6]>:<port> of the DNS
	// server to be used for resolution of DNS backends. If the address contains
	// a hostname it will be resolved via the system DNS. If the port is left
	// unspecified it will default to '53'. If this field is left unspecified
	// the system DNS will be used for resolution of DNS backends.
	DNSAuthority string `validate:"required,ip|hostname|hostname_port"`
	// SRVLookup contains the service and domain name used to construct a SRV
	// DNS query to lookup DNS backends. 'Domain' is required. 'Service' is
	// optional and will be defaulted to 'dns' if left unspecified.
	//
	// Usage: If the resource record is 'unbound.service.consul', then the
	// 'Service' is 'unbound' and the 'Domain' is 'service.consul'. The expected
	// dNSName to be authenticated in the server certificate would be
	// 'unbound.service.consul'. The 'proto' field of the SRV record MUST
	// contain 'udp' and the 'port' field MUST be a valid port. In a Consul
	// configuration file you would specify 'unbound.service.consul' as:
	//
	// services {
	//   id      = "unbound-1" // Must be unique
	//   name    = "unbound"
	//   address = "10.77.77.77"
	//   port    = 8053
	//   tags    = ["udp"]
	// }
	//
	// services {
	//   id      = "unbound-2" // Must be unique
	//   name    = "unbound"
	//   address = "10.77.77.77"
	//   port    = 8153
	//   tags    = ["udp"]
	// }
	//
	// If you've added the above to your Consul configuration file (and reloaded
	// Consul) then you should be able to resolve the following dig query:
	//
	// $ dig @10.77.77.10 -t SRV _unbound._udp.service.consul +short
	// 1 1 8053 0a4d4d4d.addr.dc1.consul.
	// 1 1 8153 0a4d4d4d.addr.dc1.consul.
	SRVLookup ServiceDomain `validate:"required"`
}
// HMACKeyConfig specifies a path to a file containing a hexadecimal-encoded
// HMAC key. The key must represent exactly 256 bits (32 bytes) of random data
// to be suitable for use as a 256-bit hashing key (e.g., the output of `openssl
// rand -hex 32`).
type HMACKeyConfig struct {
	// KeyFile is the path to the hex-encoded key file; required.
	KeyFile string `validate:"required"`
}
// Load reads the HMAC key from the file, decodes it from hexadecimal, ensures
// it represents exactly 256 bits (32 bytes), and returns it as a byte slice.
func (hc *HMACKeyConfig) Load() ([]byte, error) {
	raw, err := os.ReadFile(hc.KeyFile)
	if err != nil {
		return nil, err
	}
	// Tolerate surrounding whitespace (e.g. a trailing newline) in the file.
	key, err := hex.DecodeString(strings.TrimSpace(string(raw)))
	if err != nil {
		return nil, fmt.Errorf("invalid hexadecimal encoding: %w", err)
	}
	if len(key) != 32 {
		return nil, fmt.Errorf(
			"validating HMAC key, must be exactly 256 bits (32 bytes) after decoding, got %d",
			len(key),
		)
	}
	return key, nil
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/letsencrypt/boulder/cmd/shell.go | third-party/github.com/letsencrypt/boulder/cmd/shell.go | // Package cmd provides utilities that underlie the specific commands.
package cmd
import (
"context"
"encoding/json"
"errors"
"expvar"
"fmt"
"io"
"log"
"log/syslog"
"net/http"
"net/http/pprof"
"os"
"os/signal"
"runtime"
"runtime/debug"
"strings"
"syscall"
"time"
"github.com/go-logr/stdr"
"github.com/go-sql-driver/mysql"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/collectors"
"github.com/prometheus/client_golang/prometheus/promhttp"
"github.com/redis/go-redis/v9"
"go.opentelemetry.io/otel"
"go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc"
"go.opentelemetry.io/otel/propagation"
"go.opentelemetry.io/otel/sdk/resource"
"go.opentelemetry.io/otel/sdk/trace"
semconv "go.opentelemetry.io/otel/semconv/v1.30.0"
"google.golang.org/grpc/grpclog"
"github.com/letsencrypt/boulder/config"
"github.com/letsencrypt/boulder/core"
blog "github.com/letsencrypt/boulder/log"
"github.com/letsencrypt/boulder/strictyaml"
"github.com/letsencrypt/validator/v10"
)
// Because we don't know when this init will be called with respect to
// flag.Parse() and other flag definitions, we can't rely on the regular
// flag mechanism. But this one is fine.
func init() {
	for _, arg := range os.Args {
		switch arg {
		case "--version", "-version":
			fmt.Println(VersionString())
			os.Exit(0)
		}
	}
}
// mysqlLogger implements the mysql.Logger interface.
type mysqlLogger struct {
	blog.Logger
}

// Print forwards mysql driver log output to the audit log, tagged "[mysql]".
func (m mysqlLogger) Print(v ...interface{}) {
	msg := fmt.Sprint(v...)
	m.AuditErrf("[mysql] %s", msg)
}
// grpcLogger implements the grpclog.LoggerV2 interface.
type grpcLogger struct {
	blog.Logger
}

// noisyGRPCWarnings lists substrings of known-noisy gRPC warning messages
// which we deliberately drop rather than log.
var noisyGRPCWarnings = []string{
	// See https://github.com/letsencrypt/boulder/issues/4628
	`ccResolverWrapper: error parsing service config: no JSON service config provided`,
	// See https://github.com/letsencrypt/boulder/issues/4379
	`Server.processUnaryRPC failed to write status: connection error: desc = "transport is closing"`,
}

// Ensure that fatal logs exit, because we use neither the gRPC default logger
// nor the stdlib default logger, both of which would call os.Exit(1) for us.
func (gl grpcLogger) Fatal(args ...interface{}) {
	gl.Error(args...)
	os.Exit(1)
}

func (gl grpcLogger) Fatalf(format string, args ...interface{}) {
	gl.Errorf(format, args...)
	os.Exit(1)
}

func (gl grpcLogger) Fatalln(args ...interface{}) {
	gl.Errorln(args...)
	os.Exit(1)
}

// Treat all gRPC error logs as potential audit events.
func (gl grpcLogger) Error(args ...interface{}) {
	gl.Logger.AuditErr(fmt.Sprint(args...))
}

func (gl grpcLogger) Errorf(format string, args ...interface{}) {
	gl.Logger.AuditErrf(format, args...)
}

func (gl grpcLogger) Errorln(args ...interface{}) {
	gl.Logger.AuditErr(fmt.Sprintln(args...))
}

// Pass through most Warnings, but filter out a few noisy ones.
func (gl grpcLogger) Warning(args ...interface{}) {
	gl.Logger.Warning(fmt.Sprint(args...))
}

func (gl grpcLogger) Warningf(format string, args ...interface{}) {
	gl.Logger.Warningf(format, args...)
}

func (gl grpcLogger) Warningln(args ...interface{}) {
	msg := fmt.Sprintln(args...)
	for _, noisy := range noisyGRPCWarnings {
		if strings.Contains(msg, noisy) {
			return
		}
	}
	// Since we've already formatted the message, just pass through to .Warning()
	gl.Logger.Warning(msg)
}

// Don't log any INFO-level gRPC stuff. In practice this is all noise, like
// failed TXT lookups for service discovery (we only use A records).
func (gl grpcLogger) Info(args ...interface{})                 {}
func (gl grpcLogger) Infof(format string, args ...interface{}) {}
func (gl grpcLogger) Infoln(args ...interface{})               {}

// V returns true if the verbosity level l is less than the verbosity we want to
// log at.
func (gl grpcLogger) V(l int) bool {
	// We always return false. This causes gRPC to not log some things which are
	// only logged conditionally if the logLevel is set below a certain value.
	// TODO: Use the wrapped log.Logger.stdoutLevel and log.Logger.syslogLevel
	// to determine a correct return value here.
	return false
}
// promLogger implements the promhttp.Logger interface.
type promLogger struct {
	blog.Logger
}

// Println treats promhttp's error output as potential audit events.
func (pl promLogger) Println(args ...interface{}) {
	pl.AuditErr(fmt.Sprint(args...))
}
// redisLogger adapts blog.Logger to go-redis's internal logging interface.
type redisLogger struct {
	blog.Logger
}

// Printf logs go-redis messages at INFO level; the context is unused.
func (rl redisLogger) Printf(_ context.Context, format string, v ...interface{}) {
	rl.Infof(format, v...)
}
// logWriter implements the io.Writer interface.
type logWriter struct {
	blog.Logger
}

// Write logs each received chunk at INFO level with its trailing newline
// stripped. It reports the full input as consumed: the io.Writer contract
// requires returning n == len(p) when err is nil, and a bare `return` here
// would yield (0, nil), which consumers such as io.Copy treat as a short
// write.
func (lw logWriter) Write(p []byte) (n int, err error) {
	// Lines received by logWriter will always have a trailing newline.
	lw.Logger.Info(strings.Trim(string(p), "\n"))
	return len(p), nil
}
// logOutput implements the log.Logger interface's Output method for use with logr.
type logOutput struct {
	blog.Logger
}

// Output logs the line at INFO level. The calldepth argument is accepted to
// satisfy the interface but is not used, since no file:line info is recorded.
func (l logOutput) Output(calldepth int, logline string) error {
	l.Logger.Info(logline)
	return nil
}
// StatsAndLogging sets up an AuditLogger, Prometheus Registerer, and
// OpenTelemetry tracing. It returns the Registerer and AuditLogger, along
// with a graceful shutdown function to be deferred.
//
// It spawns off an HTTP server on the provided port to report the stats and
// provide pprof profiling handlers.
//
// The constructed AuditLogger is set as the default logger, and the mysql
// and grpc packages are configured to use our logger. This must be called
// before any gRPC code is called, because gRPC's SetLogger doesn't use any
// locking.
//
// This function does not return an error, and will panic on problems.
func StatsAndLogging(logConf SyslogConfig, otConf OpenTelemetryConfig, addr string) (prometheus.Registerer, blog.Logger, func(context.Context)) {
	logger := NewLogger(logConf)
	shutdown := NewOpenTelemetry(otConf, logger)
	return newStatsRegistry(addr, logger), logger, shutdown
}
// NewLogger creates a logger object with the provided settings, sets it as
// the global logger, and returns it.
//
// It also sets the logging systems for various packages we use to go through
// the created logger, and sets up a periodic log event for the current timestamp.
func NewLogger(logConf SyslogConfig) blog.Logger {
	var logger blog.Logger
	if logConf.SyslogLevel >= 0 {
		// Connect to the local syslog daemon using its defaults.
		syslogger, err := syslog.Dial(
			"",
			"",
			syslog.LOG_INFO, // default, not actually used
			core.Command())
		FailOnError(err, "Could not connect to Syslog")
		syslogLevel := int(syslog.LOG_INFO)
		if logConf.SyslogLevel != 0 {
			syslogLevel = logConf.SyslogLevel
		}
		logger, err = blog.New(syslogger, logConf.StdoutLevel, syslogLevel)
		// Note: this guards logger construction, not the syslog connection
		// above, so its failure message must not claim a syslog problem.
		FailOnError(err, "Could not construct logger")
	} else {
		logger = blog.StdoutLogger(logConf.StdoutLevel)
	}
	_ = blog.Set(logger)
	_ = mysql.SetLogger(mysqlLogger{logger})
	grpclog.SetLoggerV2(grpcLogger{logger})
	log.SetOutput(logWriter{logger})
	redis.SetLogger(redisLogger{logger})

	// Periodically log the current timestamp, to ensure syslog timestamps match
	// Boulder's conception of time.
	go func() {
		for {
			time.Sleep(time.Hour)
			logger.Info(fmt.Sprintf("time=%s", time.Now().Format(time.RFC3339Nano)))
		}
	}()
	return logger
}
// newVersionCollector returns a prometheus Collector exporting a constant
// "version" gauge whose labels carry the build ID, build timestamp, and Go
// version this binary was built with.
func newVersionCollector() prometheus.Collector {
	buildTime := core.Unspecified
	// NOTE(review): the guard uses core.GetBuildTime() but the parse below
	// reads core.BuildTime directly — confirm these are always the same value.
	if core.GetBuildTime() != core.Unspecified {
		// core.BuildTime is set by our Makefile using the shell command 'date
		// -u' which outputs in a consistent format across all POSIX systems.
		bt, err := time.Parse(time.UnixDate, core.BuildTime)
		if err != nil {
			// Should never happen unless the Makefile is changed.
			buildTime = "Unparsable"
		} else {
			buildTime = bt.Format(time.RFC3339)
		}
	}
	return prometheus.NewGaugeFunc(
		prometheus.GaugeOpts{
			Name: "version",
			Help: fmt.Sprintf(
				"A metric with a constant value of '1' labeled by the short commit-id (buildId), build timestamp in RFC3339 format (buildTime), and Go release tag like 'go1.3' (goVersion) from which %s was built.",
				core.Command(),
			),
			ConstLabels: prometheus.Labels{
				"buildId":   core.GetBuildID(),
				"buildTime": buildTime,
				"goVersion": runtime.Version(),
			},
		},
		func() float64 { return 1 },
	)
}
// newStatsRegistry builds a fresh Prometheus registry with the standard
// process/Go/version collectors and, when addr is non-empty, starts a debug
// HTTP server exposing /metrics, pprof, and expvar handlers.
func newStatsRegistry(addr string, logger blog.Logger) prometheus.Registerer {
	registry := prometheus.NewRegistry()
	if addr == "" {
		logger.Info("No debug listen address specified")
		return registry
	}
	registry.MustRegister(
		collectors.NewGoCollector(),
		collectors.NewProcessCollector(collectors.ProcessCollectorOpts{}),
		newVersionCollector(),
	)

	mux := http.NewServeMux()
	// Register the available pprof handlers. These are all registered on
	// DefaultServeMux just by importing pprof, but since we eschew
	// DefaultServeMux, we need to explicitly register them on our own mux.
	mux.HandleFunc("/debug/pprof/", pprof.Index)
	mux.HandleFunc("/debug/pprof/profile", pprof.Profile)
	mux.HandleFunc("/debug/pprof/symbol", pprof.Symbol)
	mux.HandleFunc("/debug/pprof/trace", pprof.Trace)
	// These handlers are defined in runtime/pprof instead of net/http/pprof, and
	// have to be accessed through net/http/pprof's Handler func.
	for _, profile := range []string{"goroutine", "block", "heap", "mutex", "threadcreate"} {
		mux.Handle("/debug/pprof/"+profile, pprof.Handler(profile))
	}
	mux.Handle("/debug/vars", expvar.Handler())
	mux.Handle("/metrics", promhttp.HandlerFor(registry, promhttp.HandlerOpts{
		ErrorLog: promLogger{logger},
	}))

	logger.Infof("Debug server listening on %s", addr)
	srv := http.Server{
		Addr:        addr,
		Handler:     mux,
		ReadTimeout: time.Minute,
	}
	go func() {
		listenErr := srv.ListenAndServe()
		if listenErr != nil {
			logger.Errf("unable to boot debug server on %s: %v", addr, listenErr)
			os.Exit(1)
		}
	}()
	return registry
}
// NewOpenTelemetry sets up our OpenTelemetry tracing.
// It returns a graceful shutdown function to be deferred.
func NewOpenTelemetry(config OpenTelemetryConfig, logger blog.Logger) func(ctx context.Context) {
	otel.SetLogger(stdr.New(logOutput{logger}))
	otel.SetErrorHandler(otel.ErrorHandlerFunc(func(err error) { logger.Errf("OpenTelemetry error: %v", err) }))

	resources := resource.NewWithAttributes(
		semconv.SchemaURL,
		semconv.ServiceName(core.Command()),
		semconv.ServiceVersion(core.GetBuildID()),
		semconv.ProcessPID(os.Getpid()),
	)

	opts := []trace.TracerProviderOption{
		trace.WithResource(resources),
		// Use a ParentBased sampler to respect the sample decisions on incoming
		// traces, and TraceIDRatioBased to randomly sample new traces.
		trace.WithSampler(trace.ParentBased(trace.TraceIDRatioBased(config.SampleRatio))),
	}

	if config.Endpoint != "" {
		exporter, err := otlptracegrpc.New(
			context.Background(),
			otlptracegrpc.WithInsecure(),
			otlptracegrpc.WithEndpoint(config.Endpoint))
		// FailOnError is a no-op on a nil error, so no surrounding nil check
		// is needed (the previous `if err != nil` wrapper was redundant).
		FailOnError(err, "Could not create OpenTelemetry OTLP exporter")
		opts = append(opts, trace.WithBatcher(exporter))
	}

	tracerProvider := trace.NewTracerProvider(opts...)
	otel.SetTracerProvider(tracerProvider)
	otel.SetTextMapPropagator(propagation.NewCompositeTextMapPropagator(propagation.TraceContext{}, propagation.Baggage{}))

	// The returned closure flushes and stops the tracer provider; defer it in
	// main so spans are exported before exit.
	return func(ctx context.Context) {
		err := tracerProvider.Shutdown(ctx)
		if err != nil {
			logger.Errf("Error while shutting down OpenTelemetry: %v", err)
		}
	}
}
// AuditPanic catches and logs panics, then exits with exit code 1.
// This method should be called in a defer statement as early as possible.
func AuditPanic() {
	recovered := recover()
	// No panic, no problem
	if recovered == nil {
		return
	}
	// Get the global logger if it's initialized, or create a default one if not.
	// We could wind up creating a default logger if we panic so early in a process'
	// lifetime that we haven't yet parsed the config and created a logger.
	log := blog.Get()
	switch r := recovered.(type) {
	case failure:
		// For the special type `failure`, audit log the message and exit quietly
		log.AuditErr(r.msg)
	default:
		// For all other values passed to `panic`, log them and a stack trace
		log.AuditErrf("Panic caused by err: %s", r)
		log.AuditErrf("Stack Trace (Current goroutine) %s", debug.Stack())
	}
	// Because this function is deferred as early as possible, there's no further defers to run after this one
	// So it is safe to os.Exit to set the exit code and exit without losing any defers we haven't executed.
	os.Exit(1)
}
// failure is a sentinel type that `Fail` passes to `panic` so `AuditPanic` can exit
// quietly and print the msg.
type failure struct {
	// msg is the message to audit-log before exiting.
	msg string
}

// String returns the stored message, so failure values format cleanly via %s.
func (f failure) String() string {
	return f.msg
}
// Fail raises a panic with a special type that causes `AuditPanic` to audit log the provided message
// and then exit nonzero (without printing a stack trace). It never returns.
func Fail(msg string) {
	panic(failure{msg})
}
// FailOnError calls Fail if the provided error is non-nil.
// This is useful for one-line error handling in top-level executables,
// but should generally be avoided in libraries. The message argument is optional.
func FailOnError(err error, msg string) {
	if err == nil {
		return
	}
	text := err.Error()
	if msg != "" {
		text = fmt.Sprintf("%s: %s", msg, err)
	}
	Fail(text)
}
func decodeJSONStrict(in io.Reader, out interface{}) error {
decoder := json.NewDecoder(in)
decoder.DisallowUnknownFields()
return decoder.Decode(out)
}
// ReadConfigFile takes a file path as an argument and attempts to
// unmarshal the content of the file into a struct containing a
// configuration of a boulder component. Any config keys in the JSON
// file which do not correspond to expected keys in the config struct
// will result in errors.
func ReadConfigFile(filename string, out interface{}) error {
	f, err := os.Open(filename)
	if err != nil {
		return err
	}
	defer f.Close()
	return decodeJSONStrict(f, out)
}
// ValidateJSONConfig takes a *ConfigValidator and an io.Reader containing a
// JSON representation of a config. The JSON data is unmarshaled into the
// *ConfigValidator's inner Config and then validated according to the
// 'validate' tags on each field. Callers can use cmd.LookupConfigValidator
// to get a *ConfigValidator for a given Boulder component. This is exported
// for use in SRE CI tooling.
func ValidateJSONConfig(cv *ConfigValidator, in io.Reader) error {
	if cv == nil {
		return errors.New("config validator cannot be nil")
	}

	// Initialize the validator and load any custom tags.
	validate := validator.New()
	for tag, v := range cv.Validators {
		err := validate.RegisterValidation(tag, v)
		if err != nil {
			return err
		}
	}

	// Register custom types for use with existing validation tags.
	validate.RegisterCustomTypeFunc(config.DurationCustomTypeFunc, config.Duration{})

	err := decodeJSONStrict(in, cv.Config)
	if err != nil {
		return err
	}

	err = validate.Struct(cv.Config)
	if err != nil {
		errs, ok := err.(validator.ValidationErrors)
		if !ok || len(errs) == 0 {
			// Not a (non-empty) set of field validation errors. Return the
			// original error rather than silently swallowing it; previously a
			// non-nil error wrapping an empty ValidationErrors would have been
			// dropped and nil returned.
			return err
		}
		allErrs := make([]string, 0, len(errs))
		for _, e := range errs {
			allErrs = append(allErrs, e.Error())
		}
		return errors.New(strings.Join(allErrs, ", "))
	}
	return nil
}
// ValidateYAMLConfig takes a *ConfigValidator and an io.Reader containing a
// YAML representation of a config. The YAML data is unmarshaled into the
// *ConfigValidator's inner Config and then validated according to the
// 'validate' tags on each field. Callers can use cmd.LookupConfigValidator
// to get a *ConfigValidator for a given Boulder component. This is exported
// for use in SRE CI tooling.
func ValidateYAMLConfig(cv *ConfigValidator, in io.Reader) error {
	if cv == nil {
		return errors.New("config validator cannot be nil")
	}

	// Initialize the validator and load any custom tags.
	validate := validator.New()
	for tag, v := range cv.Validators {
		err := validate.RegisterValidation(tag, v)
		if err != nil {
			return err
		}
	}

	// Register custom types for use with existing validation tags.
	validate.RegisterCustomTypeFunc(config.DurationCustomTypeFunc, config.Duration{})

	inBytes, err := io.ReadAll(in)
	if err != nil {
		return err
	}
	err = strictyaml.Unmarshal(inBytes, cv.Config)
	if err != nil {
		return err
	}

	err = validate.Struct(cv.Config)
	if err != nil {
		errs, ok := err.(validator.ValidationErrors)
		if !ok || len(errs) == 0 {
			// Not a (non-empty) set of field validation errors. Return the
			// original error rather than silently swallowing it; previously a
			// non-nil error wrapping an empty ValidationErrors would have been
			// dropped and nil returned.
			return err
		}
		allErrs := make([]string, 0, len(errs))
		for _, e := range errs {
			allErrs = append(allErrs, e.Error())
		}
		return errors.New(strings.Join(allErrs, ", "))
	}
	return nil
}
// VersionString produces a friendly Application version string, combining the
// command name, build metadata baked in by the build system, and the Go
// runtime version.
func VersionString() string {
	return fmt.Sprintf(
		"Versions: %s=(%s %s) Golang=(%s) BuildHost=(%s)",
		core.Command(),
		core.GetBuildID(),
		core.GetBuildTime(),
		runtime.Version(),
		core.GetBuildHost(),
	)
}
// CatchSignals blocks until a SIGTERM, SIGINT, or SIGHUP is received, then
// executes the given callback. The callback should not block, it should simply
// signal other goroutines (particularly the main goroutine) to clean themselves
// up and exit. This function is intended to be called in its own goroutine,
// while the main goroutine waits for an indication that the other goroutines
// have exited cleanly.
func CatchSignals(callback func()) {
	// Block until one of the handled signals arrives, then run the callback.
	WaitForSignal()
	callback()
}
// WaitForSignal blocks until a SIGTERM, SIGINT, or SIGHUP is received. It then
// returns, allowing execution to resume, generally allowing a main() function
// to return and trigger any deferred cleanup functions. This function is
// intended to be called directly from the main goroutine, while a gRPC or HTTP
// server runs in a background goroutine.
func WaitForSignal() {
	// Buffer of one so the signal package never has to drop a notification
	// delivered before we reach the receive below.
	ch := make(chan os.Signal, 1)
	signal.Notify(ch, syscall.SIGTERM, syscall.SIGINT, syscall.SIGHUP)
	<-ch
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/letsencrypt/boulder/cmd/config_test.go | third-party/github.com/letsencrypt/boulder/cmd/config_test.go | package cmd
import (
"crypto/ecdsa"
"crypto/elliptic"
"crypto/rand"
"crypto/x509"
"crypto/x509/pkix"
"encoding/pem"
"math/big"
"os"
"path"
"regexp"
"strings"
"testing"
"time"
"github.com/letsencrypt/boulder/metrics"
"github.com/letsencrypt/boulder/test"
)
// TestDBConfigURL verifies that DBConfig.URL() returns the connection URL
// read from DBConnectFile, and that a trailing newline in the file does not
// leak into the returned URL.
func TestDBConfigURL(t *testing.T) {
	tests := []struct {
		conf     DBConfig
		expected string
	}{
		{
			// Test with one config file that has no trailing newline
			conf:     DBConfig{DBConnectFile: "testdata/test_dburl"},
			expected: "test@tcp(testhost:3306)/testDB?readTimeout=800ms&writeTimeout=800ms",
		},
		{
			// Test with a config file that *has* a trailing newline
			conf:     DBConfig{DBConnectFile: "testdata/test_dburl_newline"},
			expected: "test@tcp(testhost:3306)/testDB?readTimeout=800ms&writeTimeout=800ms",
		},
	}
	for _, tc := range tests {
		url, err := tc.conf.URL()
		test.AssertNotError(t, err, "Failed calling URL() on DBConfig")
		test.AssertEquals(t, url, tc.expected)
	}
}
// TestPasswordConfig verifies that PasswordConfig.Pass() returns the empty
// string for a zero-value config, and the file contents when PasswordFile is
// set.
func TestPasswordConfig(t *testing.T) {
	tests := []struct {
		pc       PasswordConfig
		expected string
	}{
		{pc: PasswordConfig{}, expected: ""},
		{pc: PasswordConfig{PasswordFile: "testdata/test_secret"}, expected: "secret"},
	}
	for _, tc := range tests {
		password, err := tc.pc.Pass()
		test.AssertNotError(t, err, "Failed to retrieve password")
		test.AssertEquals(t, password, tc.expected)
	}
}
// TestTLSConfigLoad generates a throwaway root and intermediate keypair on
// disk, then table-tests TLSConfig.Load against every combination of missing,
// nonexistent, empty (/dev/null), and valid cert/key/CA paths, matching the
// returned error (if any) against a regexp.
func TestTLSConfigLoad(t *testing.T) {
	null := "/dev/null"
	nonExistent := "[nonexistent]"

	tmp := t.TempDir()
	cert := path.Join(tmp, "TestTLSConfigLoad.cert.pem")
	key := path.Join(tmp, "TestTLSConfigLoad.key.pem")
	caCert := path.Join(tmp, "TestTLSConfigLoad.cacert.pem")

	rootKey, err := ecdsa.GenerateKey(elliptic.P224(), rand.Reader)
	test.AssertNotError(t, err, "creating test root key")
	rootTemplate := &x509.Certificate{
		Subject:      pkix.Name{CommonName: "test root"},
		SerialNumber: big.NewInt(12345),
		NotBefore:    time.Now().Add(-24 * time.Hour),
		NotAfter:     time.Now().Add(24 * time.Hour),
		IsCA:         true,
	}
	rootCert, err := x509.CreateCertificate(rand.Reader, rootTemplate, rootTemplate, rootKey.Public(), rootKey)
	test.AssertNotError(t, err, "creating test root cert")
	// Use a plain 0644 permission here. The previous os.ModeAppend argument is
	// a mode *bit*, not a permission: its permission bits are 0000, which can
	// leave the fixture unreadable when the test does not run as root.
	err = os.WriteFile(caCert, pem.EncodeToMemory(&pem.Block{Type: "CERTIFICATE", Bytes: rootCert}), 0644)
	test.AssertNotError(t, err, "writing test root cert to disk")

	intKey, err := ecdsa.GenerateKey(elliptic.P224(), rand.Reader)
	test.AssertNotError(t, err, "creating test intermediate key")
	intKeyBytes, err := x509.MarshalECPrivateKey(intKey)
	test.AssertNotError(t, err, "marshalling test intermediate key")
	err = os.WriteFile(key, pem.EncodeToMemory(&pem.Block{Type: "EC PRIVATE KEY", Bytes: intKeyBytes}), 0644)
	test.AssertNotError(t, err, "writing test intermediate key cert to disk")
	intTemplate := &x509.Certificate{
		Subject:      pkix.Name{CommonName: "test intermediate"},
		SerialNumber: big.NewInt(67890),
		NotBefore:    time.Now().Add(-12 * time.Hour),
		NotAfter:     time.Now().Add(12 * time.Hour),
		IsCA:         true,
	}
	intCert, err := x509.CreateCertificate(rand.Reader, intTemplate, rootTemplate, intKey.Public(), rootKey)
	test.AssertNotError(t, err, "creating test intermediate cert")
	err = os.WriteFile(cert, pem.EncodeToMemory(&pem.Block{Type: "CERTIFICATE", Bytes: intCert}), 0644)
	test.AssertNotError(t, err, "writing test intermediate cert to disk")

	testCases := []struct {
		TLSConfig
		want string // regexp the Load error must match; "" means no error expected
	}{
		{TLSConfig{"", null, null}, "nil CertFile in TLSConfig"},
		{TLSConfig{null, "", null}, "nil KeyFile in TLSConfig"},
		{TLSConfig{null, null, ""}, "nil CACertFile in TLSConfig"},
		{TLSConfig{nonExistent, key, caCert}, "loading key pair.*no such file or directory"},
		{TLSConfig{cert, nonExistent, caCert}, "loading key pair.*no such file or directory"},
		{TLSConfig{cert, key, nonExistent}, "reading CA cert from.*no such file or directory"},
		{TLSConfig{null, key, caCert}, "loading key pair.*failed to find any PEM data"},
		{TLSConfig{cert, null, caCert}, "loading key pair.*failed to find any PEM data"},
		{TLSConfig{cert, key, null}, "parsing CA certs"},
		{TLSConfig{cert, key, caCert}, ""},
	}
	for _, tc := range testCases {
		// Build a readable subtest name out of the three paths.
		title := [3]string{tc.CertFile, tc.KeyFile, tc.CACertFile}
		for i := range title {
			if title[i] == "" {
				title[i] = "nil"
			}
		}
		t.Run(strings.Join(title[:], "_"), func(t *testing.T) {
			_, err := tc.TLSConfig.Load(metrics.NoopRegisterer)
			if err == nil && tc.want == "" {
				return
			}
			if err == nil {
				t.Errorf("got no error")
			}
			if matched, _ := regexp.MatchString(tc.want, err.Error()); !matched {
				t.Errorf("got error %q, wanted %q", err, tc.want)
			}
		})
	}
}
// TestHMACKeyConfigLoad verifies that HMACKeyConfig.Load accepts exactly a
// 256-bit (64 hex character) key file and rejects empty, short, and long keys.
func TestHMACKeyConfigLoad(t *testing.T) {
	t.Parallel()
	tests := []struct {
		name        string
		content     string
		expectedErr bool
	}{
		{
			name:        "Valid key",
			content:     "0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef",
			expectedErr: false,
		},
		{
			name:        "Empty file",
			content:     "",
			expectedErr: true,
		},
		{
			name:        "Just under 256-bit",
			content:     "0123456789abcdef0123456789abcdef0123456789abcdef0123456789ab",
			expectedErr: true,
		},
		{
			name:        "Just over 256-bit",
			content:     "0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef01",
			expectedErr: true,
		},
	}
	for _, tt := range tests {
		tt := tt // shadow for parallel subtests on Go versions before 1.22
		t.Run(tt.name, func(t *testing.T) {
			t.Parallel()
			// t.TempDir is cleaned up automatically, even if the subtest
			// fails, unlike the previous os.CreateTemp + defer os.Remove.
			keyFile := path.Join(t.TempDir(), "hmac.key")
			err := os.WriteFile(keyFile, []byte(tt.content), 0644)
			if err != nil {
				t.Fatalf("failed to write temp key file: %v", err)
			}

			hmacKeyConfig := HMACKeyConfig{KeyFile: keyFile}
			_, err = hmacKeyConfig.Load()
			if (err != nil) != tt.expectedErr {
				t.Errorf("expected error: %v, got: %v", tt.expectedErr, err)
			}
		})
	}
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/letsencrypt/boulder/cmd/clock_integration.go | third-party/github.com/letsencrypt/boulder/cmd/clock_integration.go | //go:build integration
package cmd
import (
"fmt"
"os"
"time"
"github.com/jmhodges/clock"
blog "github.com/letsencrypt/boulder/log"
)
// Clock functions similarly to clock.New(), but the returned value can be
// changed using the FAKECLOCK environment variable if the 'integration' build
// flag is set.
//
// The FAKECLOCK env var is in the time.UnixDate format, returned by `date -d`.
func Clock() clock.Clock {
	tgt := os.Getenv("FAKECLOCK")
	if tgt == "" {
		// No override requested: behave exactly like clock.New().
		return clock.New()
	}
	targetTime, err := time.Parse(time.UnixDate, tgt)
	FailOnError(err, fmt.Sprintf("cmd.Clock: bad format for FAKECLOCK: %v\n", err))
	cl := clock.NewFake()
	cl.Set(targetTime)
	blog.Get().Debugf("Time was set to %v via FAKECLOCK", targetTime)
	return cl
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/letsencrypt/boulder/cmd/shell_test.go | third-party/github.com/letsencrypt/boulder/cmd/shell_test.go | package cmd
import (
"encoding/json"
"fmt"
"log"
"os"
"os/exec"
"runtime"
"strings"
"testing"
"time"
"github.com/prometheus/client_golang/prometheus"
"github.com/letsencrypt/boulder/config"
"github.com/letsencrypt/boulder/core"
blog "github.com/letsencrypt/boulder/log"
"github.com/letsencrypt/boulder/test"
)
// JSON fixtures exercising PAConfig unmarshaling and the
// CheckChallenges/CheckIdentifiers validation paths in TestPAConfigUnmarshal.
var (
	// Enables one known challenge type and both known identifier types.
	validPAConfig = []byte(`{
  "dbConnect": "dummyDBConnect",
  "enforcePolicyWhitelist": false,
  "challenges": { "http-01": true },
  "identifiers": { "dns": true, "ip": true }
}`)
	// Enables challenge/identifier names that Boulder does not recognize.
	invalidPAConfig = []byte(`{
  "dbConnect": "dummyDBConnect",
  "enforcePolicyWhitelist": false,
  "challenges": { "nonsense": true },
  "identifiers": { "openpgp": true }
}`)
	// Omits the challenges and identifiers keys entirely.
	noChallengesIdentsPAConfig = []byte(`{
  "dbConnect": "dummyDBConnect",
  "enforcePolicyWhitelist": false
}`)
	// Provides challenges and identifiers keys with empty maps.
	emptyChallengesIdentsPAConfig = []byte(`{
  "dbConnect": "dummyDBConnect",
  "enforcePolicyWhitelist": false,
  "challenges": {},
  "identifiers": {}
}`)
)
// TestPAConfigUnmarshal checks that the PAConfig fixtures above unmarshal
// cleanly, and that CheckChallenges/CheckIdentifiers accept known names,
// reject unknown names, reject empty/missing challenge maps, and allow
// empty/missing identifier maps.
func TestPAConfigUnmarshal(t *testing.T) {
	var pc1 PAConfig
	err := json.Unmarshal(validPAConfig, &pc1)
	test.AssertNotError(t, err, "Failed to unmarshal PAConfig")
	test.AssertNotError(t, pc1.CheckChallenges(), "Flagged valid challenges as bad")
	test.AssertNotError(t, pc1.CheckIdentifiers(), "Flagged valid identifiers as bad")

	var pc2 PAConfig
	err = json.Unmarshal(invalidPAConfig, &pc2)
	test.AssertNotError(t, err, "Failed to unmarshal PAConfig")
	test.AssertError(t, pc2.CheckChallenges(), "Considered invalid challenges as good")
	test.AssertError(t, pc2.CheckIdentifiers(), "Considered invalid identifiers as good")

	var pc3 PAConfig
	err = json.Unmarshal(noChallengesIdentsPAConfig, &pc3)
	test.AssertNotError(t, err, "Failed to unmarshal PAConfig")
	test.AssertError(t, pc3.CheckChallenges(), "Disallow empty challenges map")
	test.AssertNotError(t, pc3.CheckIdentifiers(), "Disallowed empty identifiers map")

	var pc4 PAConfig
	err = json.Unmarshal(emptyChallengesIdentsPAConfig, &pc4)
	test.AssertNotError(t, err, "Failed to unmarshal PAConfig")
	test.AssertError(t, pc4.CheckChallenges(), "Disallow empty challenges map")
	test.AssertNotError(t, pc4.CheckIdentifiers(), "Disallowed empty identifiers map")
}
// TestMysqlLogger verifies that mysqlLogger.Print forwards its arguments to
// the audit logger with the "[mysql]" prefix, producing exactly one log line
// per call.
func TestMysqlLogger(t *testing.T) {
	log := blog.UseMock()
	mLog := mysqlLogger{log}

	testCases := []struct {
		args     []interface{}
		expected string
	}{
		{
			[]interface{}{nil},
			`ERR: [AUDIT] [mysql] <nil>`,
		},
		{
			[]interface{}{""},
			`ERR: [AUDIT] [mysql] `,
		},
		{
			[]interface{}{"Sup ", 12345, " Sup sup"},
			`ERR: [AUDIT] [mysql] Sup 12345 Sup sup`,
		},
	}

	for _, tc := range testCases {
		// mysqlLogger proxies blog.AuditLogger to provide a Print() method
		mLog.Print(tc.args...)

		logged := log.GetAll()
		// Calling Print should produce the expected output
		test.AssertEquals(t, len(logged), 1)
		test.AssertEquals(t, logged[0], tc.expected)
		log.Clear()
	}
}
// TestCaptureStdlibLog verifies that installing logWriter as the stdlib log
// output redirects standard-library log.Print calls into the blog logger.
// The previous log destination is restored when the test finishes.
func TestCaptureStdlibLog(t *testing.T) {
	logger := blog.UseMock()
	oldDest := log.Writer()
	defer func() {
		log.SetOutput(oldDest)
	}()
	log.SetOutput(logWriter{logger})
	log.Print("thisisatest")
	results := logger.GetAllMatching("thisisatest")
	if len(results) != 1 {
		t.Fatalf("Expected logger to receive 'thisisatest', got: %s",
			strings.Join(logger.GetAllMatching(".*"), "\n"))
	}
}
// TestVersionString checks VersionString output against known build metadata
// injected via the core package's package-level variables.
func TestVersionString(t *testing.T) {
	core.BuildID = "TestBuildID"
	core.BuildTime = "RightNow!"
	core.BuildHost = "Localhost"

	versionStr := VersionString()
	// core.Command() resolves to "cmd.test" when running under `go test`.
	expected := fmt.Sprintf("Versions: cmd.test=(TestBuildID RightNow!) Golang=(%s) BuildHost=(Localhost)", runtime.Version())
	test.AssertEquals(t, versionStr, expected)
}
// TestReadConfigFile verifies that ReadConfigFile errors on a missing path
// and successfully unmarshals a real checked-in config file.
func TestReadConfigFile(t *testing.T) {
	err := ReadConfigFile("", nil)
	test.AssertError(t, err, "ReadConfigFile('') did not error")

	type config struct {
		GRPC *GRPCClientConfig
		TLS  *TLSConfig
	}
	var c config
	err = ReadConfigFile("../test/config/health-checker.json", &c)
	test.AssertNotError(t, err, "ReadConfigFile(../test/config/health-checker.json) errored")
	test.AssertEquals(t, c.GRPC.Timeout.Duration, 1*time.Second)
}
// TestLogWriter verifies that logWriter.Write logs the written bytes at INFO
// level with the trailing newline stripped.
func TestLogWriter(t *testing.T) {
	mock := blog.UseMock()
	lw := logWriter{mock}
	_, _ = lw.Write([]byte("hi\n"))
	lines := mock.GetAllMatching(".*")
	test.AssertEquals(t, len(lines), 1)
	test.AssertEquals(t, lines[0], "INFO: hi")
}
// TestGRPCLoggerWarningFilter verifies that grpcLogger.Warningln passes
// ordinary warnings through but suppresses the known-noisy
// "transport is closing" message emitted during graceful shutdowns.
func TestGRPCLoggerWarningFilter(t *testing.T) {
	m := blog.NewMock()
	l := grpcLogger{m}
	l.Warningln("asdf", "qwer")
	lines := m.GetAllMatching(".*")
	test.AssertEquals(t, len(lines), 1)

	m = blog.NewMock()
	l = grpcLogger{m}
	l.Warningln("Server.processUnaryRPC failed to write status: connection error: desc = \"transport is closing\"")
	lines = m.GetAllMatching(".*")
	test.AssertEquals(t, len(lines), 0)
}
// Test_newVersionCollector checks the labels emitted by the version metric
// collector for each of its three label dimensions: buildTime, buildId, and
// goVersion.
func Test_newVersionCollector(t *testing.T) {
	// 'buildTime'
	core.BuildTime = core.Unspecified
	version := newVersionCollector()
	// Default 'Unspecified' should emit 'Unspecified'.
	test.AssertMetricWithLabelsEquals(t, version, prometheus.Labels{"buildTime": core.Unspecified}, 1)
	// Parsable UnixDate should emit UnixTime.
	now := time.Now().UTC()
	core.BuildTime = now.Format(time.UnixDate)
	version = newVersionCollector()
	test.AssertMetricWithLabelsEquals(t, version, prometheus.Labels{"buildTime": now.Format(time.RFC3339)}, 1)
	// Unparsable timestamp should emit 'Unparsable'.
	core.BuildTime = "outta time"
	version = newVersionCollector()
	test.AssertMetricWithLabelsEquals(t, version, prometheus.Labels{"buildTime": "Unparsable"}, 1)

	// 'buildId'
	expectedBuildID := "TestBuildId"
	core.BuildID = expectedBuildID
	version = newVersionCollector()
	test.AssertMetricWithLabelsEquals(t, version, prometheus.Labels{"buildId": expectedBuildID}, 1)

	// 'goVersion'
	test.AssertMetricWithLabelsEquals(t, version, prometheus.Labels{"goVersion": runtime.Version()}, 1)
}
// loadConfigFile opens the config fixture at path, failing the test
// immediately on error. The caller is responsible for closing the returned
// file.
func loadConfigFile(t *testing.T, path string) *os.File {
	// Mark this as a helper so failures are attributed to the caller's line.
	t.Helper()
	cf, err := os.Open(path)
	if err != nil {
		t.Fatal(err)
	}
	return cf
}
// TestFailedConfigValidation checks that ValidateJSONConfig and
// ValidateYAMLConfig report violations of the 'endswith', 'required', and
// 'lte' validation tags, and surface decode errors for malformed
// config.Duration values.
func TestFailedConfigValidation(t *testing.T) {
	type FooConfig struct {
		VitalValue       string          `yaml:"vitalValue" validate:"required"`
		VoluntarilyVoid  string          `yaml:"voluntarilyVoid"`
		VisciouslyVetted string          `yaml:"visciouslyVetted" validate:"omitempty,endswith=baz"`
		VolatileVagary   config.Duration `yaml:"volatileVagary" validate:"required,lte=120s"`
		VernalVeil       config.Duration `yaml:"vernalVeil" validate:"required"`
	}

	// Violates 'endswith' tag JSON.
	cf := loadConfigFile(t, "testdata/1_missing_endswith.json")
	defer cf.Close()
	err := ValidateJSONConfig(&ConfigValidator{&FooConfig{}, nil}, cf)
	test.AssertError(t, err, "Expected validation error")
	test.AssertContains(t, err.Error(), "'endswith'")

	// Violates 'endswith' tag YAML.
	cf = loadConfigFile(t, "testdata/1_missing_endswith.yaml")
	defer cf.Close()
	err = ValidateYAMLConfig(&ConfigValidator{&FooConfig{}, nil}, cf)
	test.AssertError(t, err, "Expected validation error")
	test.AssertContains(t, err.Error(), "'endswith'")

	// Violates 'required' tag JSON.
	cf = loadConfigFile(t, "testdata/2_missing_required.json")
	defer cf.Close()
	err = ValidateJSONConfig(&ConfigValidator{&FooConfig{}, nil}, cf)
	test.AssertError(t, err, "Expected validation error")
	test.AssertContains(t, err.Error(), "'required'")

	// Violates 'required' tag YAML.
	cf = loadConfigFile(t, "testdata/2_missing_required.yaml")
	defer cf.Close()
	err = ValidateYAMLConfig(&ConfigValidator{&FooConfig{}, nil}, cf)
	test.AssertError(t, err, "Expected validation error")
	test.AssertContains(t, err.Error(), "'required'")

	// Violates 'lte' tag JSON for config.Duration type.
	cf = loadConfigFile(t, "testdata/3_configDuration_too_darn_big.json")
	defer cf.Close()
	err = ValidateJSONConfig(&ConfigValidator{&FooConfig{}, nil}, cf)
	test.AssertError(t, err, "Expected validation error")
	test.AssertContains(t, err.Error(), "'lte'")

	// Violates 'lte' tag JSON for config.Duration type.
	// NOTE(review): this stanza repeats the JSON case above verbatim; every
	// other tag is exercised once for JSON and once for YAML, so a
	// 3_configDuration_too_darn_big.yaml + ValidateYAMLConfig case was likely
	// intended here — confirm the .yaml fixture exists before changing.
	cf = loadConfigFile(t, "testdata/3_configDuration_too_darn_big.json")
	defer cf.Close()
	err = ValidateJSONConfig(&ConfigValidator{&FooConfig{}, nil}, cf)
	test.AssertError(t, err, "Expected validation error")
	test.AssertContains(t, err.Error(), "'lte'")

	// Incorrect value for the config.Duration type.
	cf = loadConfigFile(t, "testdata/4_incorrect_data_for_type.json")
	defer cf.Close()
	err = ValidateJSONConfig(&ConfigValidator{&FooConfig{}, nil}, cf)
	test.AssertError(t, err, "Expected error")
	test.AssertContains(t, err.Error(), "missing unit in duration")

	// Incorrect value for the config.Duration type.
	cf = loadConfigFile(t, "testdata/4_incorrect_data_for_type.yaml")
	defer cf.Close()
	err = ValidateYAMLConfig(&ConfigValidator{&FooConfig{}, nil}, cf)
	test.AssertError(t, err, "Expected error")
	test.AssertContains(t, err.Error(), "missing unit in duration")
}
// TestFailExit re-executes the test binary with TIME_TO_DIE=1 so that the
// child process runs Fail under `defer AuditPanic()`, then asserts the child
// exits non-zero, audit-logs the message, and does not print a stack trace.
func TestFailExit(t *testing.T) {
	// Test that when Fail is called with a `defer AuditPanic()`,
	// the program exits with a non-zero exit code and logs
	// the result (but not stack trace).
	// Inspired by https://go.dev/talks/2014/testing.slide#23
	if os.Getenv("TIME_TO_DIE") == "1" {
		// Child-process branch: actually fail.
		defer AuditPanic()
		Fail("tears in the rain")
		return
	}

	cmd := exec.Command(os.Args[0], "-test.run=TestFailExit")
	cmd.Env = append(os.Environ(), "TIME_TO_DIE=1")
	output, err := cmd.CombinedOutput()
	test.AssertError(t, err, "running a failing program")
	test.AssertContains(t, string(output), "[AUDIT] tears in the rain")
	// "goroutine" usually shows up in stack traces, so we check it
	// to make sure we didn't print a stack trace.
	test.AssertNotContains(t, string(output), "goroutine")
}
// testPanicStackTraceHelper deliberately dereferences a nil pointer so that
// TestPanicStackTrace can observe AuditPanic's handling of a runtime panic.
// Do not "fix" the nil dereference — it is the point of the helper.
func testPanicStackTraceHelper() {
	var x *int
	*x = 1 //nolint: govet // Purposeful nil pointer dereference to trigger a panic
}
// TestPanicStackTrace re-executes the test binary with AT_THE_DISCO=1 so the
// child process panics under `defer AuditPanic()`, then asserts the child
// exits non-zero and that the panic message and a stack trace are printed.
func TestPanicStackTrace(t *testing.T) {
	// Test that when a nil pointer dereference is hit after a
	// `defer AuditPanic()`, the program exits with a non-zero
	// exit code and prints the result (but not stack trace).
	// Inspired by https://go.dev/talks/2014/testing.slide#23
	if os.Getenv("AT_THE_DISCO") == "1" {
		// Child-process branch: actually panic.
		defer AuditPanic()
		testPanicStackTraceHelper()
		return
	}

	cmd := exec.Command(os.Args[0], "-test.run=TestPanicStackTrace")
	cmd.Env = append(os.Environ(), "AT_THE_DISCO=1")
	output, err := cmd.CombinedOutput()
	test.AssertError(t, err, "running a failing program")
	test.AssertContains(t, string(output), "nil pointer dereference")
	test.AssertContains(t, string(output), "Stack Trace")
	test.AssertContains(t, string(output), "cmd/shell_test.go:")
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/letsencrypt/boulder/cmd/boulder-observer/main.go | third-party/github.com/letsencrypt/boulder/cmd/boulder-observer/main.go | package notmain
import (
"flag"
"os"
"github.com/letsencrypt/boulder/cmd"
"github.com/letsencrypt/boulder/observer"
"github.com/letsencrypt/boulder/strictyaml"
)
// main is the entrypoint for boulder-observer: it reads and parses the YAML
// config, applies the optional debug-address override, builds the Observer,
// and runs it until the process exits.
func main() {
	debugAddr := flag.String("debug-addr", "", "Debug server address override")
	configPath := flag.String(
		"config", "config.yml", "Path to boulder-observer configuration file")
	flag.Parse()

	configYAML, err := os.ReadFile(*configPath)
	cmd.FailOnError(err, "failed to read config file")

	// Parse the YAML config file. Check the error immediately: previously the
	// config was mutated with the debug-addr override before the parse error
	// was examined. FailOnError is a no-op on nil, so no wrapping `if` needed.
	var config observer.ObsConf
	err = strictyaml.Unmarshal(configYAML, &config)
	cmd.FailOnError(err, "failed to parse YAML config")

	if *debugAddr != "" {
		config.DebugAddr = *debugAddr
	}

	// Make an `Observer` object.
	observer, err := config.MakeObserver()
	cmd.FailOnError(err, "config failed validation")

	// Start the `Observer` daemon.
	observer.Start()
}
// init registers this binary with the shared cmd dispatcher, along with a
// validator for its ObsConf config type.
func init() {
	cmd.RegisterCommand("boulder-observer", main, &cmd.ConfigValidator{Config: &observer.ObsConf{}})
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/letsencrypt/boulder/cmd/boulder-va/main.go | third-party/github.com/letsencrypt/boulder/cmd/boulder-va/main.go | package notmain
import (
"context"
"flag"
"os"
"time"
"github.com/letsencrypt/boulder/bdns"
"github.com/letsencrypt/boulder/cmd"
"github.com/letsencrypt/boulder/features"
bgrpc "github.com/letsencrypt/boulder/grpc"
"github.com/letsencrypt/boulder/iana"
"github.com/letsencrypt/boulder/va"
vaConfig "github.com/letsencrypt/boulder/va/config"
vapb "github.com/letsencrypt/boulder/va/proto"
)
// RemoteVAGRPCClientConfig contains the information necessary to setup a gRPC
// client connection. The following GRPC client configuration field combinations
// are allowed:
//
// ServerIPAddresses, [Timeout]
// ServerAddress, DNSAuthority, [Timeout], [HostOverride]
// SRVLookup, DNSAuthority, [Timeout], [HostOverride], [SRVResolver]
// SRVLookups, DNSAuthority, [Timeout], [HostOverride], [SRVResolver]
type RemoteVAGRPCClientConfig struct {
	// GRPCClientConfig provides the embedded connection settings (addresses,
	// timeouts, TLS identity) shared by all Boulder gRPC clients.
	cmd.GRPCClientConfig
	// Perspective uniquely identifies the Network Perspective used to
	// perform the validation, as specified in BRs Section 5.4.1,
	// Requirement 2.7 ("Multi-Perspective Issuance Corroboration attempts
	// from each Network Perspective"). It should uniquely identify a group
	// of RVAs deployed in the same datacenter.
	Perspective string `validate:"required"`

	// RIR indicates the Regional Internet Registry where this RVA is
	// located. This field is used to identify the RIR region from which a
	// given validation was performed, as specified in the "Phased
	// Implementation Timeline" in BRs Section 3.2.2.9. It must be one of
	// the following values:
	//   - ARIN
	//   - RIPE
	//   - APNIC
	//   - LACNIC
	//   - AFRINIC
	RIR string `validate:"required,oneof=ARIN RIPE APNIC LACNIC AFRINIC"`
}
// Config is the top-level configuration for the boulder-va binary, decoded
// from its JSON config file.
type Config struct {
	VA struct {
		// Common holds settings shared by all VA deployments (DNS, TLS,
		// gRPC listen address, etc.).
		vaConfig.Common

		// RemoteVAs configures gRPC clients for the remote VAs consulted
		// during multi-perspective validation.
		RemoteVAs []RemoteVAGRPCClientConfig `validate:"omitempty,dive"`

		// Deprecated and ignored
		MaxRemoteValidationFailures int `validate:"omitempty,min=0,required_with=RemoteVAs"`

		Features features.Config
	}

	Syslog        cmd.SyslogConfig
	OpenTelemetry cmd.OpenTelemetryConfig
}
// main is the entrypoint for boulder-va. It reads config, wires up DNS
// resolution, TLS, and remote VA clients, then serves the VA and CAA gRPC
// services until the process is stopped.
func main() {
	grpcAddr := flag.String("addr", "", "gRPC listen address override")
	debugAddr := flag.String("debug-addr", "", "Debug server address override")
	configFile := flag.String("config", "", "File path to the configuration file for this service")
	flag.Parse()
	if *configFile == "" {
		flag.Usage()
		os.Exit(1)
	}

	var c Config
	err := cmd.ReadConfigFile(*configFile, &c)
	cmd.FailOnError(err, "Reading JSON config file into config structure")

	// Apply the command-line overrides and fill in defaults before anything
	// reads the config.
	err = c.VA.SetDefaultsAndValidate(grpcAddr, debugAddr)
	cmd.FailOnError(err, "Setting and validating default config values")
	features.Set(c.VA.Features)

	scope, logger, oTelShutdown := cmd.StatsAndLogging(c.Syslog, c.OpenTelemetry, c.VA.DebugAddr)
	defer oTelShutdown(context.Background())
	logger.Info(cmd.VersionString())
	clk := cmd.Clock()

	// Choose between a fixed resolver list and dynamic (service-discovered)
	// resolvers, depending on config.
	var servers bdns.ServerProvider
	if len(c.VA.DNSStaticResolvers) != 0 {
		servers, err = bdns.NewStaticProvider(c.VA.DNSStaticResolvers)
		cmd.FailOnError(err, "Couldn't start static DNS server resolver")
	} else {
		servers, err = bdns.StartDynamicProvider(c.VA.DNSProvider, 60*time.Second, "tcp")
		cmd.FailOnError(err, "Couldn't start dynamic DNS server resolver")
	}
	defer servers.Stop()

	tlsConfig, err := c.VA.TLS.Load(scope)
	cmd.FailOnError(err, "tlsConfig config")

	// bdns.NewTest permits loopback addresses, for test environments only.
	var resolver bdns.Client
	if !c.VA.DNSAllowLoopbackAddresses {
		resolver = bdns.New(
			c.VA.DNSTimeout.Duration,
			servers,
			scope,
			clk,
			c.VA.DNSTries,
			c.VA.UserAgent,
			logger,
			tlsConfig)
	} else {
		resolver = bdns.NewTest(
			c.VA.DNSTimeout.Duration,
			servers,
			scope,
			clk,
			c.VA.DNSTries,
			c.VA.UserAgent,
			logger,
			tlsConfig)
	}

	// Build a gRPC client for each configured remote VA.
	var remotes []va.RemoteVA
	if len(c.VA.RemoteVAs) > 0 {
		for _, rva := range c.VA.RemoteVAs {
			// Shadow the loop variable so the address taken below is stable
			// (required on Go versions before 1.22).
			rva := rva
			vaConn, err := bgrpc.ClientSetup(&rva.GRPCClientConfig, tlsConfig, scope, clk)
			cmd.FailOnError(err, "Unable to create remote VA client")
			remotes = append(
				remotes,
				va.RemoteVA{
					RemoteClients: va.RemoteClients{
						VAClient:  vapb.NewVAClient(vaConn),
						CAAClient: vapb.NewCAAClient(vaConn),
					},
					Address:     rva.ServerAddress,
					Perspective: rva.Perspective,
					RIR:         rva.RIR,
				},
			)
		}
	}

	vai, err := va.NewValidationAuthorityImpl(
		resolver,
		remotes,
		c.VA.UserAgent,
		c.VA.IssuerDomain,
		scope,
		clk,
		logger,
		c.VA.AccountURIPrefixes,
		va.PrimaryPerspective,
		"",
		iana.IsReservedAddr)
	cmd.FailOnError(err, "Unable to create VA server")

	// Serve both the VA and CAA services from the same implementation;
	// start() blocks until the server stops.
	start, err := bgrpc.NewServer(c.VA.GRPC, logger).Add(
		&vapb.VA_ServiceDesc, vai).Add(
		&vapb.CAA_ServiceDesc, vai).Build(tlsConfig, scope, clk)
	cmd.FailOnError(err, "Unable to setup VA gRPC server")
	cmd.FailOnError(start(), "VA gRPC service failed")
}
// init registers this binary with the shared cmd dispatcher, along with a
// validator for its Config type.
func init() {
	cmd.RegisterCommand("boulder-va", main, &cmd.ConfigValidator{Config: &Config{}})
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/letsencrypt/boulder/cmd/boulder-ra/main_test.go | third-party/github.com/letsencrypt/boulder/cmd/boulder-ra/main_test.go | package notmain
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/letsencrypt/boulder/cmd/boulder-ra/main.go | third-party/github.com/letsencrypt/boulder/cmd/boulder-ra/main.go | package notmain
import (
"context"
"flag"
"os"
akamaipb "github.com/letsencrypt/boulder/akamai/proto"
capb "github.com/letsencrypt/boulder/ca/proto"
"github.com/letsencrypt/boulder/cmd"
"github.com/letsencrypt/boulder/config"
"github.com/letsencrypt/boulder/ctpolicy"
"github.com/letsencrypt/boulder/ctpolicy/ctconfig"
"github.com/letsencrypt/boulder/ctpolicy/loglist"
"github.com/letsencrypt/boulder/features"
"github.com/letsencrypt/boulder/goodkey"
"github.com/letsencrypt/boulder/goodkey/sagoodkey"
bgrpc "github.com/letsencrypt/boulder/grpc"
"github.com/letsencrypt/boulder/issuance"
"github.com/letsencrypt/boulder/policy"
pubpb "github.com/letsencrypt/boulder/publisher/proto"
"github.com/letsencrypt/boulder/ra"
rapb "github.com/letsencrypt/boulder/ra/proto"
"github.com/letsencrypt/boulder/ratelimits"
bredis "github.com/letsencrypt/boulder/redis"
sapb "github.com/letsencrypt/boulder/sa/proto"
"github.com/letsencrypt/boulder/va"
vapb "github.com/letsencrypt/boulder/va/proto"
)
// Config is the top-level configuration for the boulder-ra binary, decoded
// from its JSON config file.
type Config struct {
	RA struct {
		cmd.ServiceConfig
		cmd.HostnamePolicyConfig

		// RateLimitPoliciesFilename is deprecated.
		RateLimitPoliciesFilename string

		// MaxContactsPerRegistration caps the number of contact addresses
		// accepted on a single registration.
		MaxContactsPerRegistration int

		// gRPC client configs for the backend services the RA calls.
		SAService           *cmd.GRPCClientConfig
		VAService           *cmd.GRPCClientConfig
		CAService           *cmd.GRPCClientConfig
		OCSPService         *cmd.GRPCClientConfig
		PublisherService    *cmd.GRPCClientConfig
		AkamaiPurgerService *cmd.GRPCClientConfig

		Limiter struct {
			// Redis contains the configuration necessary to connect to Redis
			// for rate limiting. This field is required to enable rate
			// limiting.
			Redis *bredis.Config `validate:"required_with=Defaults"`

			// Defaults is a path to a YAML file containing default rate limits.
			// See: ratelimits/README.md for details. This field is required to
			// enable rate limiting. If any individual rate limit is not set,
			// that limit will be disabled. Limits passed in this file must be
			// identical to those in the WFE.
			//
			// Note: At this time, only the Failed Authorizations rate limit is
			// necessary in the RA.
			Defaults string `validate:"required_with=Redis"`

			// Overrides is a path to a YAML file containing overrides for the
			// default rate limits. See: ratelimits/README.md for details. If
			// this field is not set, all requesters will be subject to the
			// default rate limits. Overrides passed in this file must be
			// identical to those in the WFE.
			//
			// Note: At this time, only the Failed Authorizations overrides are
			// necessary in the RA.
			Overrides string
		}

		// MaxNames is the maximum number of subjectAltNames in a single cert.
		// The value supplied MUST be greater than 0 and no more than 100. These
		// limits are per section 7.1 of our combined CP/CPS, under "DV-SSL
		// Subscriber Certificate". The value must match the CA and WFE
		// configurations.
		//
		// Deprecated: Set ValidationProfiles[*].MaxNames instead.
		MaxNames int `validate:"omitempty,min=1,max=100"`

		// ValidationProfiles is a map of validation profiles to their
		// respective issuance allow lists. If a profile is not included in this
		// mapping, it cannot be used by any account. If this field is left
		// empty, all profiles are open to all accounts.
		ValidationProfiles map[string]*ra.ValidationProfileConfig `validate:"required"`

		// DefaultProfileName sets the profile to use if one wasn't provided by the
		// client in the new-order request. Must match a configured validation
		// profile or the RA will fail to start. Must match a certificate profile
		// configured in the CA or finalization will fail for orders using this
		// default.
		DefaultProfileName string `validate:"required"`

		// MustStapleAllowList specified the path to a YAML file containing a
		// list of account IDs permitted to request certificates with the OCSP
		// Must-Staple extension.
		//
		// Deprecated: This field no longer has any effect, all Must-Staple requests
		// are rejected.
		// TODO(#8177): Remove this field.
		MustStapleAllowList string `validate:"omitempty"`

		// GoodKey is an embedded config stanza for the goodkey library.
		GoodKey goodkey.Config

		// FinalizeTimeout is how long the RA is willing to wait for the Order
		// finalization process to take. This config parameter only has an effect
		// if the AsyncFinalization feature flag is enabled. Any systems which
		// manage the shutdown of an RA must be willing to wait at least this long
		// after sending the shutdown signal, to allow background goroutines to
		// complete.
		FinalizeTimeout config.Duration `validate:"-"`

		// CTLogs contains groupings of CT logs organized by what organization
		// operates them. When we submit precerts to logs in order to get SCTs, we
		// will submit the cert to one randomly-chosen log from each group, and use
		// the SCTs from the first two groups which reply. This allows us to comply
		// with various CT policies that require (for certs with short lifetimes
		// like ours) two SCTs from logs run by different operators. It also holds
		// a `Stagger` value controlling how long we wait for one operator group
		// to respond before trying a different one.
		CTLogs ctconfig.CTConfig

		// IssuerCerts are paths to all intermediate certificates which may have
		// been used to issue certificates in the last 90 days. These are used to
		// generate OCSP URLs to purge during revocation.
		IssuerCerts []string `validate:"min=1,dive,required"`

		Features features.Config
	}

	PA cmd.PAConfig

	Syslog        cmd.SyslogConfig
	OpenTelemetry cmd.OpenTelemetryConfig
}
// main is the entrypoint for the boulder-ra (Registration Authority) service.
// It loads and validates configuration, dials gRPC clients for the VA, CA,
// OCSP generator, SA, Publisher, and Akamai Purger, builds the CT policy and
// validation profiles, then serves the RA over gRPC until shutdown.
func main() {
	grpcAddr := flag.String("addr", "", "gRPC listen address override")
	debugAddr := flag.String("debug-addr", "", "Debug server address override")
	configFile := flag.String("config", "", "File path to the configuration file for this service")
	flag.Parse()
	if *configFile == "" {
		flag.Usage()
		os.Exit(1)
	}

	var c Config
	err := cmd.ReadConfigFile(*configFile, &c)
	cmd.FailOnError(err, "Reading JSON config file into config structure")

	features.Set(c.RA.Features)

	// Command-line overrides take precedence over the config file.
	if *grpcAddr != "" {
		c.RA.GRPC.Address = *grpcAddr
	}
	if *debugAddr != "" {
		c.RA.DebugAddr = *debugAddr
	}

	scope, logger, oTelShutdown := cmd.StatsAndLogging(c.Syslog, c.OpenTelemetry, c.RA.DebugAddr)
	defer oTelShutdown(context.Background())
	logger.Info(cmd.VersionString())

	// Validate PA config and set defaults if needed
	cmd.FailOnError(c.PA.CheckChallenges(), "Invalid PA configuration")
	cmd.FailOnError(c.PA.CheckIdentifiers(), "Invalid PA configuration")
	pa, err := policy.New(c.PA.Identifiers, c.PA.Challenges, logger)
	cmd.FailOnError(err, "Couldn't create PA")
	if c.RA.HostnamePolicyFile == "" {
		cmd.Fail("HostnamePolicyFile must be provided.")
	}
	err = pa.LoadHostnamePolicyFile(c.RA.HostnamePolicyFile)
	cmd.FailOnError(err, "Couldn't load hostname policy file")

	tlsConfig, err := c.RA.TLS.Load(scope)
	cmd.FailOnError(err, "TLS config")

	clk := cmd.Clock()

	// Dial all gRPC backends; every client shares the same TLS client config.
	// The VA connection serves both validation and CAA-recheck clients.
	vaConn, err := bgrpc.ClientSetup(c.RA.VAService, tlsConfig, scope, clk)
	cmd.FailOnError(err, "Unable to create VA client")
	vac := vapb.NewVAClient(vaConn)
	caaClient := vapb.NewCAAClient(vaConn)

	caConn, err := bgrpc.ClientSetup(c.RA.CAService, tlsConfig, scope, clk)
	cmd.FailOnError(err, "Unable to create CA client")
	cac := capb.NewCertificateAuthorityClient(caConn)

	ocspConn, err := bgrpc.ClientSetup(c.RA.OCSPService, tlsConfig, scope, clk)
	cmd.FailOnError(err, "Unable to create CA OCSP client")
	ocspc := capb.NewOCSPGeneratorClient(ocspConn)

	saConn, err := bgrpc.ClientSetup(c.RA.SAService, tlsConfig, scope, clk)
	cmd.FailOnError(err, "Failed to load credentials and create gRPC connection to SA")
	sac := sapb.NewStorageAuthorityClient(saConn)

	conn, err := bgrpc.ClientSetup(c.RA.PublisherService, tlsConfig, scope, clk)
	cmd.FailOnError(err, "Failed to load credentials and create gRPC connection to Publisher")
	pubc := pubpb.NewPublisherClient(conn)

	apConn, err := bgrpc.ClientSetup(c.RA.AkamaiPurgerService, tlsConfig, scope, clk)
	cmd.FailOnError(err, "Unable to create a Akamai Purger client")
	apc := akamaipb.NewAkamaiPurgerClient(apConn)

	// Load every configured issuer certificate; these are used for generating
	// OCSP URLs to purge on revocation (see IssuerCerts config comment).
	issuerCertPaths := c.RA.IssuerCerts
	issuerCerts := make([]*issuance.Certificate, len(issuerCertPaths))
	for i, issuerCertPath := range issuerCertPaths {
		issuerCerts[i], err = issuance.LoadCertificate(issuerCertPath)
		cmd.FailOnError(err, "Failed to load issuer certificate")
	}

	// Boulder's components assume that there will always be CT logs configured.
	// Issuing a certificate without SCTs embedded is a misissuance event as per
	// our CPS 4.4.2, which declares we will always include at least two SCTs.
	// Exit early if no groups are configured.
	var ctp *ctpolicy.CTPolicy
	if len(c.RA.CTLogs.SCTLogs) <= 0 {
		cmd.Fail("Must configure CTLogs")
	}
	allLogs, err := loglist.New(c.RA.CTLogs.LogListFile)
	cmd.FailOnError(err, "Failed to parse log list")
	sctLogs, err := allLogs.SubsetForPurpose(c.RA.CTLogs.SCTLogs, loglist.Issuance)
	cmd.FailOnError(err, "Failed to load SCT logs")
	infoLogs, err := allLogs.SubsetForPurpose(c.RA.CTLogs.InfoLogs, loglist.Informational)
	cmd.FailOnError(err, "Failed to load informational logs")
	finalLogs, err := allLogs.SubsetForPurpose(c.RA.CTLogs.FinalLogs, loglist.Informational)
	cmd.FailOnError(err, "Failed to load final logs")
	ctp = ctpolicy.New(pubc, sctLogs, infoLogs, finalLogs, c.RA.CTLogs.Stagger.Duration, logger, scope)

	if len(c.RA.ValidationProfiles) == 0 {
		cmd.Fail("At least one profile must be configured")
	}

	// TODO(#7993): Remove this fallback and make ValidationProfile.MaxNames a
	// required config field. We don't do any validation on the value of this
	// top-level MaxNames because that happens inside the call to
	// NewValidationProfiles below.
	for _, pc := range c.RA.ValidationProfiles {
		if pc.MaxNames == 0 {
			pc.MaxNames = c.RA.MaxNames
		}
	}

	validationProfiles, err := ra.NewValidationProfiles(c.RA.DefaultProfileName, c.RA.ValidationProfiles)
	cmd.FailOnError(err, "Failed to load validation profiles")

	// AsyncFinalize without a timeout would let finalization goroutines run
	// unbounded, so require the timeout when the flag is on.
	if features.Get().AsyncFinalize && c.RA.FinalizeTimeout.Duration == 0 {
		cmd.Fail("finalizeTimeout must be supplied when AsyncFinalize feature is enabled")
	}

	kp, err := sagoodkey.NewPolicy(&c.RA.GoodKey, sac.KeyBlocked)
	cmd.FailOnError(err, "Unable to create key policy")

	// Rate limiting is optional: it is only wired up when defaults are
	// configured; otherwise limiter/txnBuilder stay nil.
	var limiter *ratelimits.Limiter
	var txnBuilder *ratelimits.TransactionBuilder
	var limiterRedis *bredis.Ring
	if c.RA.Limiter.Defaults != "" {
		// Setup rate limiting.
		limiterRedis, err = bredis.NewRingFromConfig(*c.RA.Limiter.Redis, scope, logger)
		cmd.FailOnError(err, "Failed to create Redis ring")
		source := ratelimits.NewRedisSource(limiterRedis.Ring, clk, scope)
		limiter, err = ratelimits.NewLimiter(clk, source, scope)
		cmd.FailOnError(err, "Failed to create rate limiter")
		txnBuilder, err = ratelimits.NewTransactionBuilderFromFiles(c.RA.Limiter.Defaults, c.RA.Limiter.Overrides)
		cmd.FailOnError(err, "Failed to create rate limits transaction builder")
	}

	rai := ra.NewRegistrationAuthorityImpl(
		clk,
		logger,
		scope,
		c.RA.MaxContactsPerRegistration,
		kp,
		limiter,
		txnBuilder,
		c.RA.MaxNames,
		validationProfiles,
		pubc,
		c.RA.FinalizeTimeout.Duration,
		ctp,
		apc,
		issuerCerts,
	)
	// Drain waits for in-flight work to finish on shutdown.
	defer rai.Drain()

	// Attach the remaining collaborators after construction.
	rai.PA = pa
	rai.VA = va.RemoteClients{
		VAClient:  vac,
		CAAClient: caaClient,
	}
	rai.CA = cac
	rai.OCSP = ocspc
	rai.SA = sac

	// The RA serves both the RegistrationAuthority and SCTProvider gRPC
	// services from the same implementation. start() blocks until shutdown.
	start, err := bgrpc.NewServer(c.RA.GRPC, logger).Add(
		&rapb.RegistrationAuthority_ServiceDesc, rai).Add(
		&rapb.SCTProvider_ServiceDesc, rai).
		Build(tlsConfig, scope, clk)
	cmd.FailOnError(err, "Unable to setup RA gRPC server")
	cmd.FailOnError(start(), "RA gRPC service failed")
}
// init registers the boulder-ra subcommand and its config validator with the
// cmd registry so the unified boulder binary can dispatch to main above.
func init() {
	cmd.RegisterCommand("boulder-ra", main, &cmd.ConfigValidator{Config: &Config{}})
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/letsencrypt/boulder/cmd/email-exporter/main.go | third-party/github.com/letsencrypt/boulder/cmd/email-exporter/main.go | package notmain
import (
"context"
"flag"
"os"
"github.com/letsencrypt/boulder/cmd"
"github.com/letsencrypt/boulder/email"
emailpb "github.com/letsencrypt/boulder/email/proto"
bgrpc "github.com/letsencrypt/boulder/grpc"
)
// Config holds the configuration for the email-exporter service.
type Config struct {
	EmailExporter struct {
		// ServiceConfig provides the shared gRPC listener, TLS, and debug
		// address settings used by all boulder gRPC services.
		cmd.ServiceConfig
		// PerDayLimit enforces the daily request limit imposed by the Pardot
		// API. The total daily limit, which varies based on the Salesforce
		// Pardot subscription tier, must be distributed among all
		// email-exporter instances. For more information, see:
		// https://developer.salesforce.com/docs/marketing/pardot/guide/overview.html?q=rate+limits#daily-requests-limits
		PerDayLimit float64 `validate:"required,min=1"`
		// MaxConcurrentRequests enforces the concurrent request limit imposed
		// by the Pardot API. This limit must be distributed among all
		// email-exporter instances and be proportional to each instance's
		// PerDayLimit. For example, if the total daily limit is 50,000 and one
		// instance is assigned 40% (20,000 requests), it should also receive
		// 40% of the max concurrent requests (2 out of 5). For more
		// information, see:
		// https://developer.salesforce.com/docs/marketing/pardot/guide/overview.html?q=rate+limits#concurrent-requests
		MaxConcurrentRequests int `validate:"required,min=1,max=5"`
		// PardotBusinessUnit is the Pardot business unit to use.
		PardotBusinessUnit string `validate:"required"`
		// ClientId is the OAuth API client ID provided by Salesforce.
		ClientId cmd.PasswordConfig
		// ClientSecret is the OAuth API client secret provided by Salesforce.
		ClientSecret cmd.PasswordConfig
		// SalesforceBaseURL is the base URL for the Salesforce API. (e.g.,
		// "https://login.salesforce.com")
		SalesforceBaseURL string `validate:"required"`
		// PardotBaseURL is the base URL for the Pardot API. (e.g.,
		// "https://pi.pardot.com")
		PardotBaseURL string `validate:"required"`
		// EmailCacheSize controls how many hashed email addresses are retained
		// in memory to prevent duplicates from being sent to the Pardot API.
		// Each entry consumes ~120 bytes, so 100,000 entries uses around 12 MB
		// of memory. If left unset, no caching is performed.
		EmailCacheSize int `validate:"omitempty,min=1"`
	}
	// Syslog and OpenTelemetry are passed to cmd.StatsAndLogging in main to
	// configure logging and telemetry output.
	Syslog        cmd.SyslogConfig
	OpenTelemetry cmd.OpenTelemetryConfig
}
// main is the entrypoint for the email-exporter service. It loads config,
// builds the Pardot API client (and optional de-duplication cache), starts
// the exporter's background worker, and serves the Exporter gRPC service
// until shutdown.
func main() {
	configFile := flag.String("config", "", "Path to configuration file")
	grpcAddr := flag.String("addr", "", "gRPC listen address override")
	debugAddr := flag.String("debug-addr", "", "Debug server address override")
	flag.Parse()
	if *configFile == "" {
		flag.Usage()
		os.Exit(1)
	}

	var c Config
	err := cmd.ReadConfigFile(*configFile, &c)
	cmd.FailOnError(err, "Reading JSON config file into config structure")

	// Command-line overrides take precedence over the config file.
	if *grpcAddr != "" {
		c.EmailExporter.ServiceConfig.GRPC.Address = *grpcAddr
	}
	if *debugAddr != "" {
		c.EmailExporter.ServiceConfig.DebugAddr = *debugAddr
	}

	scope, logger, oTelShutdown := cmd.StatsAndLogging(c.Syslog, c.OpenTelemetry, c.EmailExporter.ServiceConfig.DebugAddr)
	defer oTelShutdown(context.Background())
	logger.Info(cmd.VersionString())
	clk := cmd.Clock()

	// Resolve the OAuth credentials (PasswordConfig may read them from files).
	clientId, err := c.EmailExporter.ClientId.Pass()
	cmd.FailOnError(err, "Loading clientId")
	clientSecret, err := c.EmailExporter.ClientSecret.Pass()
	cmd.FailOnError(err, "Loading clientSecret")

	// The cache is optional; a nil cache disables de-duplication.
	var cache *email.EmailCache
	if c.EmailExporter.EmailCacheSize > 0 {
		cache = email.NewHashedEmailCache(c.EmailExporter.EmailCacheSize, scope)
	}

	pardotClient, err := email.NewPardotClientImpl(
		clk,
		c.EmailExporter.PardotBusinessUnit,
		clientId,
		clientSecret,
		c.EmailExporter.SalesforceBaseURL,
		c.EmailExporter.PardotBaseURL,
	)
	cmd.FailOnError(err, "Creating Pardot API client")
	exporterServer := email.NewExporterImpl(pardotClient, cache, c.EmailExporter.PerDayLimit, c.EmailExporter.MaxConcurrentRequests, scope, logger)

	tlsConfig, err := c.EmailExporter.TLS.Load(scope)
	cmd.FailOnError(err, "Loading email-exporter TLS config")

	// Run the exporter's background worker until the gRPC server stops; the
	// cancel below stops the worker, and Drain waits for it to finish.
	daemonCtx, shutdownExporterServer := context.WithCancel(context.Background())
	go exporterServer.Start(daemonCtx)

	start, err := bgrpc.NewServer(c.EmailExporter.GRPC, logger).Add(
		&emailpb.Exporter_ServiceDesc, exporterServer).Build(tlsConfig, scope, clk)
	cmd.FailOnError(err, "Configuring email-exporter gRPC server")

	// start() blocks until the server exits; shut the worker down afterward
	// before reporting any server error.
	err = start()
	shutdownExporterServer()
	exporterServer.Drain()
	cmd.FailOnError(err, "email-exporter gRPC service failed to start")
}
// init registers the email-exporter subcommand and its config validator with
// the cmd registry so the unified boulder binary can dispatch to main above.
func init() {
	cmd.RegisterCommand("email-exporter", main, &cmd.ConfigValidator{Config: &Config{}})
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/letsencrypt/boulder/cmd/crl-updater/main.go | third-party/github.com/letsencrypt/boulder/cmd/crl-updater/main.go | package notmain
import (
"context"
"errors"
"flag"
"os"
"time"
capb "github.com/letsencrypt/boulder/ca/proto"
"github.com/letsencrypt/boulder/cmd"
"github.com/letsencrypt/boulder/config"
cspb "github.com/letsencrypt/boulder/crl/storer/proto"
"github.com/letsencrypt/boulder/crl/updater"
"github.com/letsencrypt/boulder/features"
bgrpc "github.com/letsencrypt/boulder/grpc"
"github.com/letsencrypt/boulder/issuance"
sapb "github.com/letsencrypt/boulder/sa/proto"
)
// Config holds the configuration for the crl-updater service.
type Config struct {
	CRLUpdater struct {
		DebugAddr string `validate:"omitempty,hostname_port"`

		// TLS client certificate, private key, and trusted root bundle.
		TLS cmd.TLSConfig

		// gRPC client configs for the SA, the CA's CRL generator, and the
		// crl-storer, respectively.
		SAService           *cmd.GRPCClientConfig
		CRLGeneratorService *cmd.GRPCClientConfig
		CRLStorerService    *cmd.GRPCClientConfig

		// IssuerCerts is a list of paths to issuer certificates on disk. This
		// controls the set of CRLs which will be published by this updater: it will
		// publish one set of NumShards CRL shards for each issuer in this list.
		IssuerCerts []string `validate:"min=1,dive,required"`

		// NumShards is the number of shards into which each issuer's "full and
		// complete" CRL will be split.
		// WARNING: When this number is changed, the "JSON Array of CRL URLs" field
		// in CCADB MUST be updated.
		NumShards int `validate:"min=1"`

		// ShardWidth is the amount of time (width on a timeline) that a single
		// shard should cover. Ideally, NumShards*ShardWidth should be an amount of
		// time noticeably larger than the current longest certificate lifetime,
		// but the updater will continue to work if this is not the case (albeit
		// with more confusing mappings of serials to shards).
		// WARNING: When this number is changed, revocation entries will move
		// between shards.
		ShardWidth config.Duration `validate:"-"`

		// LookbackPeriod is how far back the updater should look for revoked expired
		// certificates. We are required to include every revoked cert in at least
		// one CRL, even if it is revoked seconds before it expires, so this must
		// always be greater than the UpdatePeriod, and should be increased when
		// recovering from an outage to ensure continuity of coverage.
		LookbackPeriod config.Duration `validate:"-"`

		// UpdatePeriod controls how frequently the crl-updater runs and publishes
		// new versions of every CRL shard. The Baseline Requirements, Section 4.9.7:
		// "MUST update and publish a new CRL within twenty‐four (24) hours after
		// recording a Certificate as revoked."
		UpdatePeriod config.Duration

		// UpdateTimeout controls how long a single CRL shard is allowed to attempt
		// to update before being timed out. The total CRL updating process may take
		// significantly longer, since a full update cycle may consist of updating
		// many shards with varying degrees of parallelism. This value must be
		// strictly less than the UpdatePeriod. Defaults to 10 minutes, one order
		// of magnitude greater than our p99 update latency.
		UpdateTimeout config.Duration `validate:"-"`

		// TemporallyShardedSerialPrefixes is a list of prefixes that were used to
		// issue certificates with no CRLDistributionPoints extension, and which are
		// therefore temporally sharded. If it's non-empty, the CRL Updater will
		// require matching serials when querying by temporal shard. When querying
		// by explicit shard, any prefix is allowed.
		//
		// This should be set to the current set of serial prefixes in production.
		// When deploying explicit sharding (i.e. the CRLDistributionPoints extension),
		// the CAs should be configured with a new set of serial prefixes that haven't
		// been used before (and the OCSP Responder config should be updated to
		// recognize the new prefixes as well as the old ones).
		TemporallyShardedSerialPrefixes []string

		// MaxParallelism controls how many workers may be running in parallel.
		// A higher value reduces the total time necessary to update all CRL shards
		// that this updater is responsible for, but also increases the memory used
		// by this updater. Only relevant in -runOnce mode.
		MaxParallelism int `validate:"min=0"`

		// MaxAttempts controls how many times the updater will attempt to generate
		// a single CRL shard. A higher number increases the likelihood of a fully
		// successful run, but also increases the worst-case runtime and db/network
		// load of said run. The default is 1.
		MaxAttempts int `validate:"omitempty,min=1"`

		// ExpiresMargin adds a small increment to the CRL's HTTP Expires time.
		//
		// When uploading a CRL, its Expires field in S3 is set to the expected time
		// the next CRL will be uploaded (by this instance). That allows our CDN
		// instances to cache for that long. However, since the next update might be
		// slow or delayed, we add a margin of error.
		//
		// Tradeoffs: A large ExpiresMargin reduces the chance that a CRL becomes
		// uncacheable and floods S3 with traffic (which might result in 503s while
		// S3 scales out).
		//
		// A small ExpiresMargin means revocations become visible sooner, including
		// admin-invoked revocations that may have a time requirement.
		ExpiresMargin config.Duration

		// CacheControl is a string passed verbatim to the crl-storer to store on
		// the S3 object.
		//
		// Note: if this header contains max-age, it will override
		// Expires. https://www.rfc-editor.org/rfc/rfc9111.html#name-calculating-freshness-lifet
		// Cache-Control: max-age has the disadvantage that it caches for a fixed
		// amount of time, regardless of how close the CRL is to replacement. So
		// if max-age is used, the worst-case time for a revocation to become visible
		// is UpdatePeriod + the value of max age.
		//
		// The stale-if-error and stale-while-revalidate headers may be useful here:
		// https://aws.amazon.com/about-aws/whats-new/2023/05/amazon-cloudfront-stale-while-revalidate-stale-if-error-cache-control-directives/
		//
		// https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Cache-Control
		CacheControl string

		// Features holds boulder feature flags; applied via features.Set in main.
		Features features.Config
	}

	// Syslog and OpenTelemetry are passed to cmd.StatsAndLogging in main to
	// configure logging and telemetry output.
	Syslog        cmd.SyslogConfig
	OpenTelemetry cmd.OpenTelemetryConfig
}
// main is the entrypoint for the crl-updater service. It loads config, dials
// the SA, CRL generator (CA), and CRL storer over gRPC, constructs the
// updater, and then either runs one update cycle (-runOnce) or runs
// continuously until a shutdown signal cancels the context.
func main() {
	configFile := flag.String("config", "", "File path to the configuration file for this service")
	debugAddr := flag.String("debug-addr", "", "Debug server address override")
	runOnce := flag.Bool("runOnce", false, "If true, run once immediately and then exit")
	flag.Parse()
	if *configFile == "" {
		flag.Usage()
		os.Exit(1)
	}

	var c Config
	err := cmd.ReadConfigFile(*configFile, &c)
	cmd.FailOnError(err, "Reading JSON config file into config structure")

	// Command-line override takes precedence over the config file.
	if *debugAddr != "" {
		c.CRLUpdater.DebugAddr = *debugAddr
	}

	features.Set(c.CRLUpdater.Features)

	scope, logger, oTelShutdown := cmd.StatsAndLogging(c.Syslog, c.OpenTelemetry, c.CRLUpdater.DebugAddr)
	defer oTelShutdown(context.Background())
	logger.Info(cmd.VersionString())
	clk := cmd.Clock()

	tlsConfig, err := c.CRLUpdater.TLS.Load(scope)
	cmd.FailOnError(err, "TLS config")

	// Load one issuance certificate per configured path; the updater publishes
	// a full set of shards per issuer.
	issuers := make([]*issuance.Certificate, 0, len(c.CRLUpdater.IssuerCerts))
	for _, filepath := range c.CRLUpdater.IssuerCerts {
		cert, err := issuance.LoadCertificate(filepath)
		cmd.FailOnError(err, "Failed to load issuer cert")
		issuers = append(issuers, cert)
	}

	// Apply defaults for optional durations left unset in the config.
	if c.CRLUpdater.ShardWidth.Duration == 0 {
		c.CRLUpdater.ShardWidth.Duration = 16 * time.Hour
	}
	if c.CRLUpdater.LookbackPeriod.Duration == 0 {
		c.CRLUpdater.LookbackPeriod.Duration = 24 * time.Hour
	}
	if c.CRLUpdater.UpdateTimeout.Duration == 0 {
		c.CRLUpdater.UpdateTimeout.Duration = 10 * time.Minute
	}

	saConn, err := bgrpc.ClientSetup(c.CRLUpdater.SAService, tlsConfig, scope, clk)
	cmd.FailOnError(err, "Failed to load credentials and create gRPC connection to SA")
	sac := sapb.NewStorageAuthorityClient(saConn)

	caConn, err := bgrpc.ClientSetup(c.CRLUpdater.CRLGeneratorService, tlsConfig, scope, clk)
	cmd.FailOnError(err, "Failed to load credentials and create gRPC connection to CRLGenerator")
	cac := capb.NewCRLGeneratorClient(caConn)

	csConn, err := bgrpc.ClientSetup(c.CRLUpdater.CRLStorerService, tlsConfig, scope, clk)
	cmd.FailOnError(err, "Failed to load credentials and create gRPC connection to CRLStorer")
	csc := cspb.NewCRLStorerClient(csConn)

	u, err := updater.NewUpdater(
		issuers,
		c.CRLUpdater.NumShards,
		c.CRLUpdater.ShardWidth.Duration,
		c.CRLUpdater.LookbackPeriod.Duration,
		c.CRLUpdater.UpdatePeriod.Duration,
		c.CRLUpdater.UpdateTimeout.Duration,
		c.CRLUpdater.MaxParallelism,
		c.CRLUpdater.MaxAttempts,
		c.CRLUpdater.CacheControl,
		c.CRLUpdater.ExpiresMargin.Duration,
		c.CRLUpdater.TemporallyShardedSerialPrefixes,
		sac,
		cac,
		csc,
		scope,
		logger,
		clk,
	)
	cmd.FailOnError(err, "Failed to create crl-updater")

	// A signal cancels the context, which both RunOnce and Run observe.
	// context.Canceled is expected on graceful shutdown and is not an error.
	ctx, cancel := context.WithCancel(context.Background())
	go cmd.CatchSignals(cancel)

	if *runOnce {
		err = u.RunOnce(ctx)
		if err != nil && !errors.Is(err, context.Canceled) {
			cmd.FailOnError(err, "")
		}
	} else {
		err = u.Run(ctx)
		if err != nil && !errors.Is(err, context.Canceled) {
			cmd.FailOnError(err, "")
		}
	}
}
// init registers the crl-updater subcommand and its config validator with the
// cmd registry so the unified boulder binary can dispatch to main above.
func init() {
	cmd.RegisterCommand("crl-updater", main, &cmd.ConfigValidator{Config: &Config{}})
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/letsencrypt/boulder/cmd/boulder/main_test.go | third-party/github.com/letsencrypt/boulder/cmd/boulder/main_test.go | package main
import (
"fmt"
"os"
"testing"
"github.com/letsencrypt/boulder/cmd"
"github.com/letsencrypt/boulder/test"
)
// TestConfigValidation checks that each of the components which register a
// validation tagged Config struct at init time can be used to successfully
// validate their corresponding test configuration files.
func TestConfigValidation(t *testing.T) {
	configPath := "../../test/config"
	if os.Getenv("BOULDER_CONFIG_DIR") == "test/config-next" {
		configPath = "../../test/config-next"
	}

	// Components whose config files are not simply "<cmdName>.json". Anything
	// not listed here falls back to that default naming convention.
	specialCases := map[string][]string{
		"boulder-ca":        {"ca.json"},
		"boulder-observer":  {"observer.yml"},
		"boulder-publisher": {"publisher.json"},
		"boulder-ra":        {"ra.json"},
		"boulder-sa":        {"sa.json"},
		"boulder-va":        {"va.json"},
		"remoteva":          {"remoteva-a.json", "remoteva-b.json"},
		"boulder-wfe2":      {"wfe2.json"},
		"sfe":               {"sfe.json"},
		"nonce-service":     {"nonce-a.json", "nonce-b.json"},
	}

	// Map each registered component to the config file paths it must validate.
	components := make(map[string][]string)
	for _, cmdName := range cmd.AvailableConfigValidators() {
		fileNames, ok := specialCases[cmdName]
		if !ok {
			fileNames = []string{cmdName + ".json"}
		}
		components[cmdName] = append(components[cmdName], fileNames...)
	}

	t.Parallel()
	for cmdName, paths := range components {
		for _, path := range paths {
			t.Run(path, func(t *testing.T) {
				err := readAndValidateConfigFile(cmdName, fmt.Sprintf("%s/%s", configPath, path))
				test.AssertNotError(t, err, fmt.Sprintf("Failed to validate config file %q", path))
			})
		}
	}
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/letsencrypt/boulder/cmd/boulder/main.go | third-party/github.com/letsencrypt/boulder/cmd/boulder/main.go | package main
import (
"fmt"
"os"
"strings"
_ "github.com/letsencrypt/boulder/cmd/akamai-purger"
_ "github.com/letsencrypt/boulder/cmd/bad-key-revoker"
_ "github.com/letsencrypt/boulder/cmd/boulder-ca"
_ "github.com/letsencrypt/boulder/cmd/boulder-observer"
_ "github.com/letsencrypt/boulder/cmd/boulder-publisher"
_ "github.com/letsencrypt/boulder/cmd/boulder-ra"
_ "github.com/letsencrypt/boulder/cmd/boulder-sa"
_ "github.com/letsencrypt/boulder/cmd/boulder-va"
_ "github.com/letsencrypt/boulder/cmd/boulder-wfe2"
_ "github.com/letsencrypt/boulder/cmd/cert-checker"
_ "github.com/letsencrypt/boulder/cmd/crl-checker"
_ "github.com/letsencrypt/boulder/cmd/crl-storer"
_ "github.com/letsencrypt/boulder/cmd/crl-updater"
_ "github.com/letsencrypt/boulder/cmd/email-exporter"
_ "github.com/letsencrypt/boulder/cmd/log-validator"
_ "github.com/letsencrypt/boulder/cmd/nonce-service"
_ "github.com/letsencrypt/boulder/cmd/ocsp-responder"
_ "github.com/letsencrypt/boulder/cmd/remoteva"
_ "github.com/letsencrypt/boulder/cmd/reversed-hostname-checker"
_ "github.com/letsencrypt/boulder/cmd/rocsp-tool"
_ "github.com/letsencrypt/boulder/cmd/sfe"
"github.com/letsencrypt/boulder/core"
"github.com/letsencrypt/boulder/cmd"
)
// readAndValidateConfigFile uses the ConfigValidator registered for the given
// command to validate the provided config file. If the command does not have a
// registered ConfigValidator, this function does nothing.
func readAndValidateConfigFile(name, filename string) error {
	validator := cmd.LookupConfigValidator(name)
	if validator == nil {
		// Nothing registered for this command; treat as valid.
		return nil
	}

	f, err := os.Open(filename)
	if err != nil {
		return err
	}
	defer f.Close()

	// Only the boulder-observer uses YAML config files.
	if name == "boulder-observer" {
		return cmd.ValidateYAMLConfig(validator, f)
	}
	return cmd.ValidateJSONConfig(validator, f)
}
// getConfigPath returns the path to the config file if it was provided as a
// command line flag. If the flag was not provided, it returns an empty string.
func getConfigPath() string {
for i := range len(os.Args) {
arg := os.Args[i]
if arg == "--config" || arg == "-config" {
if i+1 < len(os.Args) {
return os.Args[i+1]
}
}
if strings.HasPrefix(arg, "--config=") {
return strings.TrimPrefix(arg, "--config=")
}
if strings.HasPrefix(arg, "-config=") {
return strings.TrimPrefix(arg, "-config=")
}
}
return ""
}
// boulderUsage is the top-level help text printed when no subcommand or a
// help flag is given; core.Command() supplies the invoked binary name.
var boulderUsage = fmt.Sprintf(`Usage: %s <subcommand> [flags]
Each boulder component has its own subcommand. Use --list to see
a list of the available components. Use <subcommand> --help to
see the usage for a specific component.
`,
	core.Command())
// main dispatches to the boulder subcommand named in os.Args[1], after
// handling the top-level --help and --list flags and optionally validating
// the subcommand's config file.
func main() {
	defer cmd.AuditPanic()

	if len(os.Args) < 2 {
		// No arguments passed.
		fmt.Fprint(os.Stderr, boulderUsage)
		return
	}

	switch os.Args[1] {
	case "--help", "-help":
		fmt.Fprint(os.Stderr, boulderUsage)
		return
	case "--list", "-list":
		for _, name := range cmd.AvailableCommands() {
			fmt.Println(name)
		}
		return
	}

	// Strip the subcommand so the component's own flag parsing sees only
	// its flags.
	subcommand := os.Args[1]
	os.Args = os.Args[1:]

	if cfgPath := getConfigPath(); cfgPath != "" {
		// Config flag passed; validate before dispatching.
		if err := readAndValidateConfigFile(subcommand, cfgPath); err != nil {
			fmt.Fprintf(os.Stderr, "Error validating config file %q for command %q: %s\n", cfgPath, subcommand, err)
			os.Exit(1)
		}
	}

	run := cmd.LookupCommand(subcommand)
	if run == nil {
		fmt.Fprintf(os.Stderr, "Unknown subcommand %q.\n", subcommand)
		os.Exit(1)
	}
	run()
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/letsencrypt/boulder/cmd/admin/admin.go | third-party/github.com/letsencrypt/boulder/cmd/admin/admin.go | package main
import (
	"context"
	"errors"
	"fmt"
	"sort"

	"github.com/jmhodges/clock"

	"github.com/letsencrypt/boulder/cmd"
	"github.com/letsencrypt/boulder/db"
	"github.com/letsencrypt/boulder/features"
	bgrpc "github.com/letsencrypt/boulder/grpc"
	blog "github.com/letsencrypt/boulder/log"
	rapb "github.com/letsencrypt/boulder/ra/proto"
	"github.com/letsencrypt/boulder/sa"
	sapb "github.com/letsencrypt/boulder/sa/proto"
)
// admin holds all of the external connections necessary to perform admin
// actions on a boulder deployment.
type admin struct {
	// gRPC clients for the RA, the read-write SA, and the read-only SA.
	// In dry-run mode, rac and sac are replaced by no-op logging stand-ins
	// (dryRunRAC / dryRunSAC) in newAdmin.
	rac   rapb.RegistrationAuthorityClient
	sac   sapb.StorageAuthorityClient
	saroc sapb.StorageAuthorityReadOnlyClient

	// TODO: Remove this and only use sac and saroc to interact with the db.
	// We cannot have true dry-run safety as long as we have a direct dbMap.
	dbMap *db.WrappedMap

	// TODO: Remove this when the dbMap is removed and the dryRunSAC and dryRunRAC
	// handle all dry-run safety.
	dryRun bool

	clk clock.Clock
	log blog.Logger
}
// newAdmin constructs a new admin object on the heap and returns a pointer to
// it. It reads the config file, sets up logging/metrics, and dials the RA and
// SA gRPC backends plus a direct database connection. When dryRun is true,
// the write-capable RA and SA clients are replaced by logging stand-ins so no
// mutations reach the deployment.
func newAdmin(configFile string, dryRun bool) (*admin, error) {
	// Unlike most boulder service constructors, this does all of its own config
	// parsing and dependency setup. If this is broken out into its own package
	// (outside the //cmd/ directory) those pieces of setup should stay behind
	// in //cmd/admin/main.go, to match other boulder services.
	var c Config
	err := cmd.ReadConfigFile(configFile, &c)
	if err != nil {
		return nil, fmt.Errorf("parsing config file: %w", err)
	}

	scope, logger, oTelShutdown := cmd.StatsAndLogging(c.Syslog, c.OpenTelemetry, "")
	// NOTE(review): this defer fires when newAdmin returns, not at process
	// exit, so OpenTelemetry is shut down as soon as construction finishes —
	// confirm this is intended.
	defer oTelShutdown(context.Background())
	logger.Info(cmd.VersionString())

	clk := cmd.Clock()

	features.Set(c.Admin.Features)

	tlsConfig, err := c.Admin.TLS.Load(scope)
	if err != nil {
		return nil, fmt.Errorf("loading TLS config: %w", err)
	}

	// In dry-run mode the RA connection is never dialed; the logging stand-in
	// is used instead.
	var rac rapb.RegistrationAuthorityClient = dryRunRAC{log: logger}
	if !dryRun {
		raConn, err := bgrpc.ClientSetup(c.Admin.RAService, tlsConfig, scope, clk)
		if err != nil {
			return nil, fmt.Errorf("creating RA gRPC client: %w", err)
		}
		rac = rapb.NewRegistrationAuthorityClient(raConn)
	}

	// The SA connection is always dialed: the read-only client is safe in
	// dry-run mode; only the read-write client is swapped for the stand-in.
	saConn, err := bgrpc.ClientSetup(c.Admin.SAService, tlsConfig, scope, clk)
	if err != nil {
		return nil, fmt.Errorf("creating SA gRPC client: %w", err)
	}
	saroc := sapb.NewStorageAuthorityReadOnlyClient(saConn)

	var sac sapb.StorageAuthorityClient = dryRunSAC{log: logger}
	if !dryRun {
		sac = sapb.NewStorageAuthorityClient(saConn)
	}

	dbMap, err := sa.InitWrappedDb(c.Admin.DB, nil, logger)
	if err != nil {
		return nil, fmt.Errorf("creating database connection: %w", err)
	}

	return &admin{
		rac:    rac,
		sac:    sac,
		saroc:  saroc,
		dbMap:  dbMap,
		dryRun: dryRun,
		clk:    clk,
		log:    logger,
	}, nil
}
// findActiveInputMethodFlag returns the single key from setInputs with a value
// of `true`, if exactly one exists. It returns an error when zero flags are
// set, or when more than one is set. In the latter case the offending flags
// are listed in sorted order, so the error message is deterministic despite
// Go's randomized map iteration order.
func findActiveInputMethodFlag(setInputs map[string]bool) (string, error) {
	var activeFlags []string
	for flag, isSet := range setInputs {
		if isSet {
			activeFlags = append(activeFlags, flag)
		}
	}

	if len(activeFlags) == 0 {
		return "", errors.New("at least one input method flag must be specified")
	} else if len(activeFlags) > 1 {
		// Sort so the message (and any test or log matching on it) is stable.
		sort.Strings(activeFlags)
		return "", fmt.Errorf("more than one input method flag specified: %v", activeFlags)
	}

	return activeFlags[0], nil
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/letsencrypt/boulder/cmd/admin/key_test.go | third-party/github.com/letsencrypt/boulder/cmd/admin/key_test.go | package main
import (
"context"
"crypto/ecdsa"
"crypto/elliptic"
"crypto/rand"
"crypto/sha256"
"crypto/x509"
"encoding/hex"
"encoding/pem"
"os"
"os/user"
"path"
"strconv"
"strings"
"testing"
"time"
"github.com/jmhodges/clock"
"google.golang.org/grpc"
"google.golang.org/protobuf/types/known/emptypb"
"github.com/letsencrypt/boulder/core"
blog "github.com/letsencrypt/boulder/log"
"github.com/letsencrypt/boulder/mocks"
sapb "github.com/letsencrypt/boulder/sa/proto"
"github.com/letsencrypt/boulder/test"
)
// TestSPKIHashFromPrivateKey checks that spkiHashFromPrivateKey reads a PEM
// private key from disk and returns the SHA256 digest of its Subject Public
// Key Info, matching core.KeyDigest of the same public key.
func TestSPKIHashFromPrivateKey(t *testing.T) {
	privKey, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
	test.AssertNotError(t, err, "creating test private key")

	keyHash, err := core.KeyDigest(privKey.Public())
	test.AssertNotError(t, err, "computing test SPKI hash")

	keyBytes, err := x509.MarshalPKCS8PrivateKey(privKey)
	test.AssertNotError(t, err, "marshalling test private key bytes")

	keyFile := path.Join(t.TempDir(), "key.pem")
	// MarshalPKCS8PrivateKey produces PKCS#8 bytes, so the correct PEM label
	// is "PRIVATE KEY" (the previous "RSA PRIVATE KEY" label was wrong for
	// this ECDSA key).
	keyPEM := pem.EncodeToMemory(&pem.Block{Type: "PRIVATE KEY", Bytes: keyBytes})
	// Use 0600 for the perm argument: os.ModeAppend is a file-mode flag
	// (1<<30), not a permission, so passing it here created the file with
	// 0000 permission bits, which is unreadable unless the test runs as root.
	err = os.WriteFile(keyFile, keyPEM, 0600)
	test.AssertNotError(t, err, "writing test private key file")

	a := admin{}
	res, err := a.spkiHashFromPrivateKey(keyFile)
	test.AssertNotError(t, err, "")
	test.AssertByteEquals(t, res, keyHash[:])
}
func TestSPKIHashesFromFile(t *testing.T) {
var spkiHexes []string
for i := range 10 {
h := sha256.Sum256([]byte(strconv.Itoa(i)))
spkiHexes = append(spkiHexes, hex.EncodeToString(h[:]))
}
spkiFile := path.Join(t.TempDir(), "spkis.txt")
err := os.WriteFile(spkiFile, []byte(strings.Join(spkiHexes, "\n")), os.ModeAppend)
test.AssertNotError(t, err, "writing test spki file")
a := admin{}
res, err := a.spkiHashesFromFile(spkiFile)
test.AssertNotError(t, err, "")
for i, spkiHash := range res {
test.AssertEquals(t, hex.EncodeToString(spkiHash), spkiHexes[i])
}
}
// The key is the p256 test key from RFC9500
const goodCSR = `
-----BEGIN CERTIFICATE REQUEST-----
MIG6MGICAQAwADBZMBMGByqGSM49AgEGCCqGSM49AwEHA0IABEIlSPiPt4L/teyj
dERSxyoeVY+9b3O+XkjpMjLMRcWxbEzRDEy41bihcTnpSILImSVymTQl9BQZq36Q
pCpJQnKgADAKBggqhkjOPQQDAgNIADBFAiBadw3gvL9IjUfASUTa7MvmkbC4ZCvl
21m1KMwkIx/+CQIhAKvuyfCcdZ0cWJYOXCOb1OavolWHIUzgEpNGUWul6O0s
-----END CERTIFICATE REQUEST-----
`
// TestCSR checks that we get the correct SPKI from a CSR, even if its signature is invalid
func TestCSR(t *testing.T) {
expectedSPKIHash := "b2b04340cfaee616ec9c2c62d261b208e54bb197498df52e8cadede23ac0ba5e"
goodCSRFile := path.Join(t.TempDir(), "good.csr")
err := os.WriteFile(goodCSRFile, []byte(goodCSR), 0600)
test.AssertNotError(t, err, "writing good csr")
a := admin{log: blog.NewMock()}
goodHash, err := a.spkiHashFromCSRPEM(goodCSRFile, true, "")
test.AssertNotError(t, err, "expected to read CSR")
if len(goodHash) != 1 {
t.Fatalf("expected to read 1 SPKI from CSR, read %d", len(goodHash))
}
test.AssertEquals(t, hex.EncodeToString(goodHash[0]), expectedSPKIHash)
// Flip a bit, in the signature, to make a bad CSR:
badCSR := strings.Replace(goodCSR, "Wul6", "Wul7", 1)
csrFile := path.Join(t.TempDir(), "bad.csr")
err = os.WriteFile(csrFile, []byte(badCSR), 0600)
test.AssertNotError(t, err, "writing bad csr")
_, err = a.spkiHashFromCSRPEM(csrFile, true, "")
test.AssertError(t, err, "expected invalid signature")
badHash, err := a.spkiHashFromCSRPEM(csrFile, false, "")
test.AssertNotError(t, err, "expected to read CSR with bad signature")
if len(badHash) != 1 {
t.Fatalf("expected to read 1 SPKI from CSR, read %d", len(badHash))
}
test.AssertEquals(t, hex.EncodeToString(badHash[0]), expectedSPKIHash)
}
// mockSARecordingBlocks is a mock which only implements the AddBlockedKey gRPC
// method.
type mockSARecordingBlocks struct {
sapb.StorageAuthorityClient
blockRequests []*sapb.AddBlockedKeyRequest
}
// AddBlockedKey is a mock which always succeeds and records the request it
// received.
func (msa *mockSARecordingBlocks) AddBlockedKey(ctx context.Context, req *sapb.AddBlockedKeyRequest, _ ...grpc.CallOption) (*emptypb.Empty, error) {
msa.blockRequests = append(msa.blockRequests, req)
return &emptypb.Empty{}, nil
}
func (msa *mockSARecordingBlocks) reset() {
msa.blockRequests = nil
}
type mockSARO struct {
sapb.StorageAuthorityReadOnlyClient
}
func (sa *mockSARO) GetSerialsByKey(ctx context.Context, _ *sapb.SPKIHash, _ ...grpc.CallOption) (grpc.ServerStreamingClient[sapb.Serial], error) {
return &mocks.ServerStreamClient[sapb.Serial]{}, nil
}
func (sa *mockSARO) KeyBlocked(ctx context.Context, req *sapb.SPKIHash, _ ...grpc.CallOption) (*sapb.Exists, error) {
return &sapb.Exists{Exists: false}, nil
}
func TestBlockSPKIHash(t *testing.T) {
fc := clock.NewFake()
fc.Set(time.Now())
log := blog.NewMock()
msa := mockSARecordingBlocks{}
privKey, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
test.AssertNotError(t, err, "creating test private key")
keyHash, err := core.KeyDigest(privKey.Public())
test.AssertNotError(t, err, "computing test SPKI hash")
a := admin{saroc: &mockSARO{}, sac: &msa, clk: fc, log: log}
u := &user.User{}
// A full run should result in one request with the right fields.
msa.reset()
log.Clear()
a.dryRun = false
err = a.blockSPKIHash(context.Background(), keyHash[:], u, "hello world")
test.AssertNotError(t, err, "")
test.AssertEquals(t, len(log.GetAllMatching("Found 0 unexpired certificates")), 1)
test.AssertEquals(t, len(msa.blockRequests), 1)
test.AssertByteEquals(t, msa.blockRequests[0].KeyHash, keyHash[:])
test.AssertContains(t, msa.blockRequests[0].Comment, "hello world")
// A dry-run should result in zero requests and two log lines.
msa.reset()
log.Clear()
a.dryRun = true
a.sac = dryRunSAC{log: log}
err = a.blockSPKIHash(context.Background(), keyHash[:], u, "")
test.AssertNotError(t, err, "")
test.AssertEquals(t, len(log.GetAllMatching("Found 0 unexpired certificates")), 1)
test.AssertEquals(t, len(log.GetAllMatching("dry-run: Block SPKI hash "+hex.EncodeToString(keyHash[:]))), 1)
test.AssertEquals(t, len(msa.blockRequests), 0)
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/letsencrypt/boulder/cmd/admin/cert_test.go | third-party/github.com/letsencrypt/boulder/cmd/admin/cert_test.go | package main
import (
"context"
"crypto/ecdsa"
"crypto/elliptic"
"crypto/rand"
"crypto/x509"
"encoding/pem"
"errors"
"os"
"path"
"reflect"
"slices"
"strings"
"sync"
"testing"
"time"
"github.com/jmhodges/clock"
"google.golang.org/grpc"
"google.golang.org/protobuf/types/known/emptypb"
"github.com/letsencrypt/boulder/core"
corepb "github.com/letsencrypt/boulder/core/proto"
berrors "github.com/letsencrypt/boulder/errors"
blog "github.com/letsencrypt/boulder/log"
"github.com/letsencrypt/boulder/mocks"
rapb "github.com/letsencrypt/boulder/ra/proto"
"github.com/letsencrypt/boulder/revocation"
sapb "github.com/letsencrypt/boulder/sa/proto"
"github.com/letsencrypt/boulder/test"
)
// mockSAWithIncident is a mock which only implements the SerialsForIncident
// gRPC method. It can be initialized with a set of serials for that method
// to return.
type mockSAWithIncident struct {
sapb.StorageAuthorityReadOnlyClient
incidentSerials []string
}
// SerialsForIncident returns a fake gRPC stream client object which itself
// will return the mockSAWithIncident's serials in order.
func (msa *mockSAWithIncident) SerialsForIncident(_ context.Context, _ *sapb.SerialsForIncidentRequest, _ ...grpc.CallOption) (grpc.ServerStreamingClient[sapb.IncidentSerial], error) {
fakeResults := make([]*sapb.IncidentSerial, len(msa.incidentSerials))
for i, serial := range msa.incidentSerials {
fakeResults[i] = &sapb.IncidentSerial{Serial: serial}
}
return &mocks.ServerStreamClient[sapb.IncidentSerial]{Results: fakeResults}, nil
}
func TestSerialsFromIncidentTable(t *testing.T) {
t.Parallel()
serials := []string{"foo", "bar", "baz"}
a := admin{
saroc: &mockSAWithIncident{incidentSerials: serials},
}
res, err := a.serialsFromIncidentTable(context.Background(), "tablename")
test.AssertNotError(t, err, "getting serials from mock SA")
test.AssertDeepEquals(t, res, serials)
}
func TestSerialsFromFile(t *testing.T) {
t.Parallel()
serials := []string{"foo", "bar", "baz"}
serialsFile := path.Join(t.TempDir(), "serials.txt")
err := os.WriteFile(serialsFile, []byte(strings.Join(serials, "\n")), os.ModeAppend)
test.AssertNotError(t, err, "writing temp serials file")
a := admin{}
res, err := a.serialsFromFile(context.Background(), serialsFile)
test.AssertNotError(t, err, "getting serials from file")
test.AssertDeepEquals(t, res, serials)
}
// mockSAWithKey is a mock which only implements the GetSerialsByKey
// gRPC method. It can be initialized with a set of serials for that method
// to return.
type mockSAWithKey struct {
sapb.StorageAuthorityReadOnlyClient
keyHash []byte
serials []string
}
// GetSerialsByKey returns a fake gRPC stream client object which itself
// will return the mockSAWithKey's serials in order.
func (msa *mockSAWithKey) GetSerialsByKey(_ context.Context, req *sapb.SPKIHash, _ ...grpc.CallOption) (grpc.ServerStreamingClient[sapb.Serial], error) {
if !slices.Equal(req.KeyHash, msa.keyHash) {
return &mocks.ServerStreamClient[sapb.Serial]{}, nil
}
fakeResults := make([]*sapb.Serial, len(msa.serials))
for i, serial := range msa.serials {
fakeResults[i] = &sapb.Serial{Serial: serial}
}
return &mocks.ServerStreamClient[sapb.Serial]{Results: fakeResults}, nil
}
func TestSerialsFromPrivateKey(t *testing.T) {
serials := []string{"foo", "bar", "baz"}
fc := clock.NewFake()
fc.Set(time.Now())
privKey, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
test.AssertNotError(t, err, "creating test private key")
keyBytes, err := x509.MarshalPKCS8PrivateKey(privKey)
test.AssertNotError(t, err, "marshalling test private key bytes")
keyFile := path.Join(t.TempDir(), "key.pem")
keyPEM := pem.EncodeToMemory(&pem.Block{Type: "RSA PRIVATE KEY", Bytes: keyBytes})
err = os.WriteFile(keyFile, keyPEM, os.ModeAppend)
test.AssertNotError(t, err, "writing test private key file")
keyHash, err := core.KeyDigest(privKey.Public())
test.AssertNotError(t, err, "computing test SPKI hash")
a := admin{saroc: &mockSAWithKey{keyHash: keyHash[:], serials: serials}}
res, err := a.serialsFromPrivateKey(context.Background(), keyFile)
test.AssertNotError(t, err, "getting serials from keyHashToSerial table")
test.AssertDeepEquals(t, res, serials)
}
// mockSAWithAccount is a mock which only implements the GetSerialsByAccount
// gRPC method. It can be initialized with a set of serials for that method
// to return.
type mockSAWithAccount struct {
sapb.StorageAuthorityReadOnlyClient
regID int64
serials []string
}
func (msa *mockSAWithAccount) GetRegistration(_ context.Context, req *sapb.RegistrationID, _ ...grpc.CallOption) (*corepb.Registration, error) {
if req.Id != msa.regID {
return nil, errors.New("no such reg")
}
return &corepb.Registration{}, nil
}
// GetSerialsByAccount returns a fake gRPC stream client object which itself
// will return the mockSAWithAccount's serials in order.
func (msa *mockSAWithAccount) GetSerialsByAccount(_ context.Context, req *sapb.RegistrationID, _ ...grpc.CallOption) (grpc.ServerStreamingClient[sapb.Serial], error) {
if req.Id != msa.regID {
return &mocks.ServerStreamClient[sapb.Serial]{}, nil
}
fakeResults := make([]*sapb.Serial, len(msa.serials))
for i, serial := range msa.serials {
fakeResults[i] = &sapb.Serial{Serial: serial}
}
return &mocks.ServerStreamClient[sapb.Serial]{Results: fakeResults}, nil
}
func TestSerialsFromRegID(t *testing.T) {
serials := []string{"foo", "bar", "baz"}
a := admin{saroc: &mockSAWithAccount{regID: 123, serials: serials}}
res, err := a.serialsFromRegID(context.Background(), 123)
test.AssertNotError(t, err, "getting serials from serials table")
test.AssertDeepEquals(t, res, serials)
}
// mockRARecordingRevocations is a mock which only implements the
// AdministrativelyRevokeCertificate gRPC method. It can be initialized with
// serials to recognize as already revoked, or to fail.
type mockRARecordingRevocations struct {
rapb.RegistrationAuthorityClient
doomedToFail []string
alreadyRevoked []string
revocationRequests []*rapb.AdministrativelyRevokeCertificateRequest
sync.Mutex
}
// AdministrativelyRevokeCertificate records the request it received on the mock
// RA struct, and succeeds if it doesn't recognize the serial as one it should
// fail for.
func (mra *mockRARecordingRevocations) AdministrativelyRevokeCertificate(_ context.Context, req *rapb.AdministrativelyRevokeCertificateRequest, _ ...grpc.CallOption) (*emptypb.Empty, error) {
mra.Lock()
defer mra.Unlock()
mra.revocationRequests = append(mra.revocationRequests, req)
if slices.Contains(mra.doomedToFail, req.Serial) {
return nil, errors.New("oops")
}
if slices.Contains(mra.alreadyRevoked, req.Serial) {
return nil, berrors.AlreadyRevokedError("too slow")
}
return &emptypb.Empty{}, nil
}
func (mra *mockRARecordingRevocations) reset() {
mra.doomedToFail = nil
mra.alreadyRevoked = nil
mra.revocationRequests = nil
}
func TestRevokeSerials(t *testing.T) {
t.Parallel()
serials := []string{
"2a18592b7f4bf596fb1a1df135567acd825a",
"038c3f6388afb7695dd4d6bbe3d264f1e4e2",
"048c3f6388afb7695dd4d6bbe3d264f1e5e5",
}
mra := mockRARecordingRevocations{}
log := blog.NewMock()
a := admin{rac: &mra, log: log}
assertRequestsContain := func(reqs []*rapb.AdministrativelyRevokeCertificateRequest, code revocation.Reason, skipBlockKey bool) {
t.Helper()
for _, req := range reqs {
test.AssertEquals(t, len(req.Cert), 0)
test.AssertEquals(t, req.Code, int64(code))
test.AssertEquals(t, req.SkipBlockKey, skipBlockKey)
}
}
// Revoking should result in 3 gRPC requests and quiet execution.
mra.reset()
log.Clear()
a.dryRun = false
err := a.revokeSerials(context.Background(), serials, 0, false, 1)
test.AssertEquals(t, len(log.GetAllMatching("invalid serial format")), 0)
test.AssertNotError(t, err, "")
test.AssertEquals(t, len(log.GetAll()), 0)
test.AssertEquals(t, len(mra.revocationRequests), 3)
assertRequestsContain(mra.revocationRequests, 0, false)
// Revoking an already-revoked serial should result in one log line.
mra.reset()
log.Clear()
mra.alreadyRevoked = []string{"048c3f6388afb7695dd4d6bbe3d264f1e5e5"}
err = a.revokeSerials(context.Background(), serials, 0, false, 1)
t.Logf("error: %s", err)
t.Logf("logs: %s", strings.Join(log.GetAll(), ""))
test.AssertError(t, err, "already-revoked should result in error")
test.AssertEquals(t, len(log.GetAllMatching("not revoking")), 1)
test.AssertEquals(t, len(mra.revocationRequests), 3)
assertRequestsContain(mra.revocationRequests, 0, false)
// Revoking a doomed-to-fail serial should also result in one log line.
mra.reset()
log.Clear()
mra.doomedToFail = []string{"048c3f6388afb7695dd4d6bbe3d264f1e5e5"}
err = a.revokeSerials(context.Background(), serials, 0, false, 1)
test.AssertError(t, err, "gRPC error should result in error")
test.AssertEquals(t, len(log.GetAllMatching("failed to revoke")), 1)
test.AssertEquals(t, len(mra.revocationRequests), 3)
assertRequestsContain(mra.revocationRequests, 0, false)
// Revoking with other parameters should get carried through.
mra.reset()
log.Clear()
err = a.revokeSerials(context.Background(), serials, 1, true, 3)
test.AssertNotError(t, err, "")
test.AssertEquals(t, len(mra.revocationRequests), 3)
assertRequestsContain(mra.revocationRequests, 1, true)
// Revoking in dry-run mode should result in no gRPC requests and three logs.
mra.reset()
log.Clear()
a.dryRun = true
a.rac = dryRunRAC{log: log}
err = a.revokeSerials(context.Background(), serials, 0, false, 1)
test.AssertNotError(t, err, "")
test.AssertEquals(t, len(log.GetAllMatching("dry-run:")), 3)
test.AssertEquals(t, len(mra.revocationRequests), 0)
assertRequestsContain(mra.revocationRequests, 0, false)
}
func TestRevokeMalformed(t *testing.T) {
t.Parallel()
mra := mockRARecordingRevocations{}
log := blog.NewMock()
a := &admin{
rac: &mra,
log: log,
dryRun: false,
}
s := subcommandRevokeCert{
crlShard: 623,
}
serial := "0379c3dfdd518be45948f2dbfa6ea3e9b209"
err := s.revokeMalformed(context.Background(), a, []string{serial}, 1)
if err != nil {
t.Errorf("revokedMalformed with crlShard 623: want success, got %s", err)
}
if len(mra.revocationRequests) != 1 {
t.Errorf("revokeMalformed: want 1 revocation request to SA, got %v", mra.revocationRequests)
}
if mra.revocationRequests[0].Serial != serial {
t.Errorf("revokeMalformed: want %s to be revoked, got %s", serial, mra.revocationRequests[0])
}
s = subcommandRevokeCert{
crlShard: 0,
}
err = s.revokeMalformed(context.Background(), a, []string{"038c3f6388afb7695dd4d6bbe3d264f1e4e2"}, 1)
if err == nil {
t.Errorf("revokedMalformed with crlShard 0: want error, got none")
}
s = subcommandRevokeCert{
crlShard: 623,
}
err = s.revokeMalformed(context.Background(), a, []string{"038c3f6388afb7695dd4d6bbe3d264f1e4e2", "28a94f966eae14e525777188512ddf5a0a3b"}, 1)
if err == nil {
t.Errorf("revokedMalformed with multiple serials: want error, got none")
}
}
func TestCleanSerials(t *testing.T) {
input := []string{
"2a:18:59:2b:7f:4b:f5:96:fb:1a:1d:f1:35:56:7a:cd:82:5a",
"03:8c:3f:63:88:af:b7:69:5d:d4:d6:bb:e3:d2:64:f1:e4:e2",
"038c3f6388afb7695dd4d6bbe3d264f1e4e2",
}
expected := []string{
"2a18592b7f4bf596fb1a1df135567acd825a",
"038c3f6388afb7695dd4d6bbe3d264f1e4e2",
"038c3f6388afb7695dd4d6bbe3d264f1e4e2",
}
output, err := cleanSerials(input)
if err != nil {
t.Errorf("cleanSerials(%s): %s, want %s", input, err, expected)
}
if !reflect.DeepEqual(output, expected) {
t.Errorf("cleanSerials(%s)=%s, want %s", input, output, expected)
}
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/letsencrypt/boulder/cmd/admin/unpause_account_test.go | third-party/github.com/letsencrypt/boulder/cmd/admin/unpause_account_test.go | package main
import (
"context"
"errors"
"os"
"path"
"strings"
"testing"
blog "github.com/letsencrypt/boulder/log"
sapb "github.com/letsencrypt/boulder/sa/proto"
"github.com/letsencrypt/boulder/test"
"google.golang.org/grpc"
)
func TestReadingUnpauseAccountsFile(t *testing.T) {
t.Parallel()
testCases := []struct {
name string
data []string
expectedRegIDs int
}{
{
name: "No data in file",
data: nil,
},
{
name: "valid",
data: []string{"1"},
expectedRegIDs: 1,
},
{
name: "valid with duplicates",
data: []string{"1", "2", "1", "3", "3"},
expectedRegIDs: 5,
},
{
name: "valid with empty lines and duplicates",
data: []string{"1", "\n", "6", "6", "6"},
expectedRegIDs: 4,
},
}
for _, testCase := range testCases {
t.Run(testCase.name, func(t *testing.T) {
t.Parallel()
log := blog.NewMock()
a := admin{log: log}
file := path.Join(t.TempDir(), path.Base(t.Name()+".txt"))
err := os.WriteFile(file, []byte(strings.Join(testCase.data, "\n")), os.ModePerm)
test.AssertNotError(t, err, "could not write temporary file")
regIDs, err := a.readUnpauseAccountFile(file)
test.AssertNotError(t, err, "no error expected, but received one")
test.AssertEquals(t, len(regIDs), testCase.expectedRegIDs)
})
}
}
type mockSAUnpause struct {
sapb.StorageAuthorityClient
}
func (msa *mockSAUnpause) UnpauseAccount(ctx context.Context, in *sapb.RegistrationID, _ ...grpc.CallOption) (*sapb.Count, error) {
return &sapb.Count{Count: 1}, nil
}
// mockSAUnpauseBroken is a mock that always returns an error.
type mockSAUnpauseBroken struct {
sapb.StorageAuthorityClient
}
func (msa *mockSAUnpauseBroken) UnpauseAccount(ctx context.Context, in *sapb.RegistrationID, _ ...grpc.CallOption) (*sapb.Count, error) {
return nil, errors.New("oh dear")
}
func TestUnpauseAccounts(t *testing.T) {
t.Parallel()
testCases := []struct {
name string
regIDs []int64
saImpl sapb.StorageAuthorityClient
expectErr bool
expectCounts int
}{
{
name: "no data",
regIDs: nil,
expectErr: true,
},
{
name: "valid single entry",
regIDs: []int64{1},
expectCounts: 1,
},
{
name: "valid single entry but broken SA",
expectErr: true,
saImpl: &mockSAUnpauseBroken{},
regIDs: []int64{1},
},
{
name: "valid multiple entries with duplicates",
regIDs: []int64{1, 1, 2, 3, 4},
expectCounts: 4,
},
}
for _, testCase := range testCases {
t.Run(testCase.name, func(t *testing.T) {
t.Parallel()
log := blog.NewMock()
// Default to a working mock SA implementation
if testCase.saImpl == nil {
testCase.saImpl = &mockSAUnpause{}
}
a := admin{sac: testCase.saImpl, log: log}
counts, err := a.unpauseAccounts(context.Background(), testCase.regIDs, 10)
if testCase.expectErr {
test.AssertError(t, err, "should have errored, but did not")
} else {
test.AssertNotError(t, err, "should not have errored")
test.AssertEquals(t, testCase.expectCounts, len(counts))
}
})
}
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/letsencrypt/boulder/cmd/admin/unpause_account.go | third-party/github.com/letsencrypt/boulder/cmd/admin/unpause_account.go | package main
import (
"bufio"
"context"
"errors"
"flag"
"fmt"
"os"
"slices"
"strconv"
"sync"
"sync/atomic"
sapb "github.com/letsencrypt/boulder/sa/proto"
"github.com/letsencrypt/boulder/unpause"
)
// subcommandUnpauseAccount encapsulates the "admin unpause-account" command.
type subcommandUnpauseAccount struct {
accountID int64
batchFile string
parallelism uint
}
var _ subcommand = (*subcommandUnpauseAccount)(nil)
func (u *subcommandUnpauseAccount) Desc() string {
return "Administratively unpause an account to allow certificate issuance attempts"
}
func (u *subcommandUnpauseAccount) Flags(flag *flag.FlagSet) {
flag.Int64Var(&u.accountID, "account", 0, "A single account ID to unpause")
flag.StringVar(&u.batchFile, "batch-file", "", "Path to a file containing multiple account IDs where each is separated by a newline")
flag.UintVar(&u.parallelism, "parallelism", 10, "The maximum number of concurrent unpause requests to send to the SA (default: 10)")
}
func (u *subcommandUnpauseAccount) Run(ctx context.Context, a *admin) error {
// This is a map of all input-selection flags to whether or not they were set
// to a non-default value. We use this to ensure that exactly one input
// selection flag was given on the command line.
setInputs := map[string]bool{
"-account": u.accountID != 0,
"-batch-file": u.batchFile != "",
}
activeFlag, err := findActiveInputMethodFlag(setInputs)
if err != nil {
return err
}
var regIDs []int64
switch activeFlag {
case "-account":
regIDs = []int64{u.accountID}
case "-batch-file":
regIDs, err = a.readUnpauseAccountFile(u.batchFile)
default:
return errors.New("no recognized input method flag set (this shouldn't happen)")
}
if err != nil {
return fmt.Errorf("collecting serials to revoke: %w", err)
}
_, err = a.unpauseAccounts(ctx, regIDs, u.parallelism)
if err != nil {
return err
}
return nil
}
type unpauseCount struct {
accountID int64
count int64
}
// unpauseAccount concurrently unpauses all identifiers for each account using
// up to `parallelism` workers. It returns a count of the number of identifiers
// unpaused for each account and any accumulated errors.
func (a *admin) unpauseAccounts(ctx context.Context, accountIDs []int64, parallelism uint) ([]unpauseCount, error) {
if len(accountIDs) <= 0 {
return nil, errors.New("no account IDs provided for unpausing")
}
slices.Sort(accountIDs)
accountIDs = slices.Compact(accountIDs)
countChan := make(chan unpauseCount, len(accountIDs))
work := make(chan int64)
var wg sync.WaitGroup
var errCount atomic.Uint64
for i := uint(0); i < parallelism; i++ {
wg.Add(1)
go func() {
defer wg.Done()
for accountID := range work {
totalCount := int64(0)
for {
response, err := a.sac.UnpauseAccount(ctx, &sapb.RegistrationID{Id: accountID})
if err != nil {
errCount.Add(1)
a.log.Errf("error unpausing accountID %d: %v", accountID, err)
break
}
totalCount += response.Count
if response.Count < unpause.RequestLimit {
// All identifiers have been unpaused.
break
}
}
countChan <- unpauseCount{accountID: accountID, count: totalCount}
}
}()
}
go func() {
for _, accountID := range accountIDs {
work <- accountID
}
close(work)
}()
go func() {
wg.Wait()
close(countChan)
}()
var unpauseCounts []unpauseCount
for count := range countChan {
unpauseCounts = append(unpauseCounts, count)
}
if errCount.Load() > 0 {
return unpauseCounts, fmt.Errorf("encountered %d errors while unpausing; see logs above for details", errCount.Load())
}
return unpauseCounts, nil
}
// readUnpauseAccountFile parses the contents of a file containing one account
// ID per into a slice of int64s. It will skip malformed records and continue
// processing until the end of file marker.
func (a *admin) readUnpauseAccountFile(filePath string) ([]int64, error) {
fp, err := os.Open(filePath)
if err != nil {
return nil, fmt.Errorf("opening paused account data file: %w", err)
}
defer fp.Close()
var unpauseAccounts []int64
lineCounter := 0
scanner := bufio.NewScanner(fp)
for scanner.Scan() {
lineCounter++
regID, err := strconv.ParseInt(scanner.Text(), 10, 64)
if err != nil {
a.log.Infof("skipping: malformed account ID entry on line %d\n", lineCounter)
continue
}
unpauseAccounts = append(unpauseAccounts, regID)
}
if err := scanner.Err(); err != nil {
return nil, scanner.Err()
}
return unpauseAccounts, nil
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/letsencrypt/boulder/cmd/admin/pause_identifier.go | third-party/github.com/letsencrypt/boulder/cmd/admin/pause_identifier.go | package main
import (
"context"
"encoding/csv"
"errors"
"flag"
"fmt"
"io"
"os"
"strconv"
"sync"
"sync/atomic"
corepb "github.com/letsencrypt/boulder/core/proto"
"github.com/letsencrypt/boulder/identifier"
sapb "github.com/letsencrypt/boulder/sa/proto"
)
// subcommandPauseIdentifier encapsulates the "admin pause-identifiers" command.
type subcommandPauseIdentifier struct {
batchFile string
parallelism uint
}
var _ subcommand = (*subcommandPauseIdentifier)(nil)
func (p *subcommandPauseIdentifier) Desc() string {
return "Administratively pause an account preventing it from attempting certificate issuance"
}
func (p *subcommandPauseIdentifier) Flags(flag *flag.FlagSet) {
flag.StringVar(&p.batchFile, "batch-file", "", "Path to a CSV file containing (account ID, identifier type, identifier value)")
flag.UintVar(&p.parallelism, "parallelism", 10, "The maximum number of concurrent pause requests to send to the SA (default: 10)")
}
func (p *subcommandPauseIdentifier) Run(ctx context.Context, a *admin) error {
if p.batchFile == "" {
return errors.New("the -batch-file flag is required")
}
idents, err := a.readPausedAccountFile(p.batchFile)
if err != nil {
return err
}
_, err = a.pauseIdentifiers(ctx, idents, p.parallelism)
if err != nil {
return err
}
return nil
}
// pauseIdentifiers concurrently pauses identifiers for each account using up to
// `parallelism` workers. It returns all pause responses and any accumulated
// errors.
func (a *admin) pauseIdentifiers(ctx context.Context, entries []pauseCSVData, parallelism uint) ([]*sapb.PauseIdentifiersResponse, error) {
if len(entries) <= 0 {
return nil, errors.New("cannot pause identifiers because no pauseData was sent")
}
accountToIdents := make(map[int64][]*corepb.Identifier)
for _, entry := range entries {
accountToIdents[entry.accountID] = append(accountToIdents[entry.accountID], &corepb.Identifier{
Type: string(entry.identifierType),
Value: entry.identifierValue,
})
}
var errCount atomic.Uint64
respChan := make(chan *sapb.PauseIdentifiersResponse, len(accountToIdents))
work := make(chan struct {
accountID int64
idents []*corepb.Identifier
}, parallelism)
var wg sync.WaitGroup
for i := uint(0); i < parallelism; i++ {
wg.Add(1)
go func() {
defer wg.Done()
for data := range work {
response, err := a.sac.PauseIdentifiers(ctx, &sapb.PauseRequest{
RegistrationID: data.accountID,
Identifiers: data.idents,
})
if err != nil {
errCount.Add(1)
a.log.Errf("error pausing identifier(s) %q for account %d: %v", data.idents, data.accountID, err)
} else {
respChan <- response
}
}
}()
}
for accountID, idents := range accountToIdents {
work <- struct {
accountID int64
idents []*corepb.Identifier
}{accountID, idents}
}
close(work)
wg.Wait()
close(respChan)
var responses []*sapb.PauseIdentifiersResponse
for response := range respChan {
responses = append(responses, response)
}
if errCount.Load() > 0 {
return responses, fmt.Errorf("encountered %d errors while pausing identifiers; see logs above for details", errCount.Load())
}
return responses, nil
}
// pauseCSVData contains a golang representation of the data loaded in from a
// CSV file for pausing.
type pauseCSVData struct {
accountID int64
identifierType identifier.IdentifierType
identifierValue string
}
// readPausedAccountFile parses the contents of a CSV into a slice of
// `pauseCSVData` objects and returns it or an error. It will skip malformed
// lines and continue processing until either the end of file marker is detected
// or other read error.
func (a *admin) readPausedAccountFile(filePath string) ([]pauseCSVData, error) {
fp, err := os.Open(filePath)
if err != nil {
return nil, fmt.Errorf("opening paused account data file: %w", err)
}
defer fp.Close()
reader := csv.NewReader(fp)
// identifierValue can have 1 or more entries
reader.FieldsPerRecord = -1
reader.TrimLeadingSpace = true
var parsedRecords []pauseCSVData
lineCounter := 0
// Process contents of the CSV file
for {
record, err := reader.Read()
if errors.Is(err, io.EOF) {
break
} else if err != nil {
return nil, err
}
lineCounter++
// We should have strictly 3 fields, note that just commas is considered
// a valid CSV line.
if len(record) != 3 {
a.log.Infof("skipping: malformed line %d, should contain exactly 3 fields\n", lineCounter)
continue
}
recordID := record[0]
accountID, err := strconv.ParseInt(recordID, 10, 64)
if err != nil || accountID == 0 {
a.log.Infof("skipping: malformed accountID entry on line %d\n", lineCounter)
continue
}
// Ensure that an identifier type is present, otherwise skip the line.
if len(record[1]) == 0 {
a.log.Infof("skipping: malformed identifierType entry on line %d\n", lineCounter)
continue
}
if len(record[2]) == 0 {
a.log.Infof("skipping: malformed identifierValue entry on line %d\n", lineCounter)
continue
}
parsedRecord := pauseCSVData{
accountID: accountID,
identifierType: identifier.IdentifierType(record[1]),
identifierValue: record[2],
}
parsedRecords = append(parsedRecords, parsedRecord)
}
a.log.Infof("detected %d valid record(s) from input file\n", len(parsedRecords))
return parsedRecords, nil
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/letsencrypt/boulder/cmd/admin/admin_test.go | third-party/github.com/letsencrypt/boulder/cmd/admin/admin_test.go | package main
import (
"testing"
"github.com/letsencrypt/boulder/test"
)
func Test_findActiveInputMethodFlag(t *testing.T) {
tests := []struct {
name string
setInputs map[string]bool
expected string
wantErr bool
}{
{
name: "No active flags",
setInputs: map[string]bool{
"-private-key": false,
"-spki-file": false,
"-cert-file": false,
},
expected: "",
wantErr: true,
},
{
name: "Multiple active flags",
setInputs: map[string]bool{
"-private-key": true,
"-spki-file": true,
"-cert-file": false,
},
expected: "",
wantErr: true,
},
{
name: "Single active flag",
setInputs: map[string]bool{
"-private-key": true,
"-spki-file": false,
"-cert-file": false,
},
expected: "-private-key",
wantErr: false,
},
}
for _, tc := range tests {
t.Run(tc.name, func(t *testing.T) {
result, err := findActiveInputMethodFlag(tc.setInputs)
if tc.wantErr {
test.AssertError(t, err, "findActiveInputMethodFlag() should have errored")
} else {
test.AssertNotError(t, err, "findActiveInputMethodFlag() should not have errored")
test.AssertEquals(t, result, tc.expected)
}
})
}
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/letsencrypt/boulder/cmd/admin/pause_identifier_test.go | third-party/github.com/letsencrypt/boulder/cmd/admin/pause_identifier_test.go | package main
import (
"context"
"errors"
"os"
"path"
"strings"
"testing"
blog "github.com/letsencrypt/boulder/log"
sapb "github.com/letsencrypt/boulder/sa/proto"
"github.com/letsencrypt/boulder/test"
"google.golang.org/grpc"
)
func TestReadingPauseCSV(t *testing.T) {
t.Parallel()
testCases := []struct {
name string
data []string
expectedRecords int
}{
{
name: "No data in file",
data: nil,
},
{
name: "valid",
data: []string{"1,dns,example.com"},
expectedRecords: 1,
},
{
name: "valid with duplicates",
data: []string{"1,dns,example.com", "2,dns,example.org", "1,dns,example.com", "1,dns,example.net", "3,dns,example.gov", "3,dns,example.gov"},
expectedRecords: 6,
},
{
name: "invalid with multiple domains on the same line",
data: []string{"1,dns,example.com,example.net"},
},
{
name: "invalid just commas",
data: []string{",,,"},
},
{
name: "invalid only contains accountID",
data: []string{"1"},
},
{
name: "invalid only contains accountID and identifierType",
data: []string{"1,dns"},
},
{
name: "invalid missing identifierType",
data: []string{"1,,example.com"},
},
{
name: "invalid accountID isnt an int",
data: []string{"blorple"},
},
}
for _, testCase := range testCases {
t.Run(testCase.name, func(t *testing.T) {
t.Parallel()
log := blog.NewMock()
a := admin{log: log}
csvFile := path.Join(t.TempDir(), path.Base(t.Name()+".csv"))
err := os.WriteFile(csvFile, []byte(strings.Join(testCase.data, "\n")), os.ModePerm)
test.AssertNotError(t, err, "could not write temporary file")
parsedData, err := a.readPausedAccountFile(csvFile)
test.AssertNotError(t, err, "no error expected, but received one")
test.AssertEquals(t, len(parsedData), testCase.expectedRecords)
})
}
}
// mockSAPaused is a mock which always succeeds. It records the PauseRequest it
// received, and returns the number of identifiers as a
// PauseIdentifiersResponse. It does not maintain state of repaused identifiers.
type mockSAPaused struct {
sapb.StorageAuthorityClient
}
func (msa *mockSAPaused) PauseIdentifiers(ctx context.Context, in *sapb.PauseRequest, _ ...grpc.CallOption) (*sapb.PauseIdentifiersResponse, error) {
return &sapb.PauseIdentifiersResponse{Paused: int64(len(in.Identifiers))}, nil
}
// mockSAPausedBroken is a mock which always errors.
type mockSAPausedBroken struct {
sapb.StorageAuthorityClient
}
func (msa *mockSAPausedBroken) PauseIdentifiers(ctx context.Context, in *sapb.PauseRequest, _ ...grpc.CallOption) (*sapb.PauseIdentifiersResponse, error) {
return nil, errors.New("its all jacked up")
}
func TestPauseIdentifiers(t *testing.T) {
t.Parallel()
testCases := []struct {
name string
data []pauseCSVData
saImpl sapb.StorageAuthorityClient
expectRespLen int
expectErr bool
}{
{
name: "no data",
data: nil,
expectErr: true,
},
{
name: "valid single entry",
data: []pauseCSVData{
{
accountID: 1,
identifierType: "dns",
identifierValue: "example.com",
},
},
expectRespLen: 1,
},
{
name: "valid single entry but broken SA",
expectErr: true,
saImpl: &mockSAPausedBroken{},
data: []pauseCSVData{
{
accountID: 1,
identifierType: "dns",
identifierValue: "example.com",
},
},
},
{
name: "valid multiple entries with duplicates",
data: []pauseCSVData{
{
accountID: 1,
identifierType: "dns",
identifierValue: "example.com",
},
{
accountID: 1,
identifierType: "dns",
identifierValue: "example.com",
},
{
accountID: 2,
identifierType: "dns",
identifierValue: "example.org",
},
{
accountID: 3,
identifierType: "dns",
identifierValue: "example.net",
},
{
accountID: 3,
identifierType: "dns",
identifierValue: "example.org",
},
},
expectRespLen: 3,
},
}
for _, testCase := range testCases {
t.Run(testCase.name, func(t *testing.T) {
t.Parallel()
log := blog.NewMock()
// Default to a working mock SA implementation
if testCase.saImpl == nil {
testCase.saImpl = &mockSAPaused{}
}
a := admin{sac: testCase.saImpl, log: log}
responses, err := a.pauseIdentifiers(context.Background(), testCase.data, 10)
if testCase.expectErr {
test.AssertError(t, err, "should have errored, but did not")
} else {
test.AssertNotError(t, err, "should not have errored")
// Batching will consolidate identifiers under the same account.
test.AssertEquals(t, len(responses), testCase.expectRespLen)
}
})
}
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/letsencrypt/boulder/cmd/admin/cert.go | third-party/github.com/letsencrypt/boulder/cmd/admin/cert.go | package main
import (
"bufio"
"context"
"errors"
"flag"
"fmt"
"io"
"os"
"os/user"
"strings"
"sync"
"sync/atomic"
"unicode"
"golang.org/x/crypto/ocsp"
core "github.com/letsencrypt/boulder/core"
berrors "github.com/letsencrypt/boulder/errors"
rapb "github.com/letsencrypt/boulder/ra/proto"
"github.com/letsencrypt/boulder/revocation"
sapb "github.com/letsencrypt/boulder/sa/proto"
)
// subcommandRevokeCert encapsulates the "admin revoke-cert" command. It accepts
// many flags specifying different ways a to-be-revoked certificate can be
// identified. It then gathers the serial numbers of all identified certs, spins
// up a worker pool, and revokes all of those serials individually.
//
// Note that some batch methods (such as -incident-table and -serials-file) can
// result in high memory usage, as this subcommand will gather every serial in
// memory before beginning to revoke any of them. This trades local memory usage
// for shorter database and gRPC query times, so that we don't need massive
// timeouts when collecting serials to revoke.
type subcommandRevokeCert struct {
parallelism uint
reasonStr string
skipBlock bool
malformed bool
serial string
incidentTable string
serialsFile string
privKey string
regID int64
certFile string
crlShard int64
}
var _ subcommand = (*subcommandRevokeCert)(nil)
func (s *subcommandRevokeCert) Desc() string {
return "Revoke one or more certificates"
}
func (s *subcommandRevokeCert) Flags(flag *flag.FlagSet) {
// General flags relevant to all certificate input methods.
flag.UintVar(&s.parallelism, "parallelism", 10, "Number of concurrent workers to use while revoking certs")
flag.StringVar(&s.reasonStr, "reason", "unspecified", "Revocation reason (unspecified, keyCompromise, superseded, cessationOfOperation, or privilegeWithdrawn)")
flag.BoolVar(&s.skipBlock, "skip-block-key", false, "Skip blocking the key, if revoked for keyCompromise - use with extreme caution")
flag.BoolVar(&s.malformed, "malformed", false, "Indicates that the cert cannot be parsed - use with caution")
flag.Int64Var(&s.crlShard, "crl-shard", 0, "For malformed certs, the CRL shard the certificate belongs to")
// Flags specifying the input method for the certificates to be revoked.
flag.StringVar(&s.serial, "serial", "", "Revoke the certificate with this hex serial")
flag.StringVar(&s.incidentTable, "incident-table", "", "Revoke all certificates whose serials are in this table")
flag.StringVar(&s.serialsFile, "serials-file", "", "Revoke all certificates whose hex serials are in this file")
flag.StringVar(&s.privKey, "private-key", "", "Revoke all certificates whose pubkey matches this private key")
flag.Int64Var(&s.regID, "reg-id", 0, "Revoke all certificates issued to this account")
flag.StringVar(&s.certFile, "cert-file", "", "Revoke the single PEM-formatted certificate in this file")
}
func (s *subcommandRevokeCert) Run(ctx context.Context, a *admin) error {
if s.parallelism == 0 {
// Why did they override it to 0, instead of just leaving it the default?
return fmt.Errorf("got unacceptable parallelism %d", s.parallelism)
}
reasonCode := revocation.Reason(-1)
for code := range revocation.AdminAllowedReasons {
if s.reasonStr == revocation.ReasonToString[code] {
reasonCode = code
break
}
}
if reasonCode == revocation.Reason(-1) {
return fmt.Errorf("got unacceptable revocation reason %q", s.reasonStr)
}
if s.skipBlock && reasonCode == ocsp.KeyCompromise {
// We would only add the SPKI hash of the pubkey to the blockedKeys table if
// the revocation reason is keyCompromise.
return errors.New("-skip-block-key only makes sense with -reason=1")
}
if s.malformed && reasonCode == ocsp.KeyCompromise {
// This is because we can't extract and block the pubkey if we can't
// parse the certificate.
return errors.New("cannot revoke malformed certs for reason keyCompromise")
}
// This is a map of all input-selection flags to whether or not they were set
// to a non-default value. We use this to ensure that exactly one input
// selection flag was given on the command line.
setInputs := map[string]bool{
"-serial": s.serial != "",
"-incident-table": s.incidentTable != "",
"-serials-file": s.serialsFile != "",
"-private-key": s.privKey != "",
"-reg-id": s.regID != 0,
"-cert-file": s.certFile != "",
}
activeFlag, err := findActiveInputMethodFlag(setInputs)
if err != nil {
return err
}
var serials []string
switch activeFlag {
case "-serial":
serials, err = []string{s.serial}, nil
case "-incident-table":
serials, err = a.serialsFromIncidentTable(ctx, s.incidentTable)
case "-serials-file":
serials, err = a.serialsFromFile(ctx, s.serialsFile)
case "-private-key":
serials, err = a.serialsFromPrivateKey(ctx, s.privKey)
case "-reg-id":
serials, err = a.serialsFromRegID(ctx, s.regID)
case "-cert-file":
serials, err = a.serialsFromCertPEM(ctx, s.certFile)
default:
return errors.New("no recognized input method flag set (this shouldn't happen)")
}
if err != nil {
return fmt.Errorf("collecting serials to revoke: %w", err)
}
serials, err = cleanSerials(serials)
if err != nil {
return err
}
if len(serials) == 0 {
return errors.New("no serials to revoke found")
}
a.log.Infof("Found %d certificates to revoke", len(serials))
if s.malformed {
return s.revokeMalformed(ctx, a, serials, reasonCode)
}
err = a.revokeSerials(ctx, serials, reasonCode, s.skipBlock, s.parallelism)
if err != nil {
return fmt.Errorf("revoking serials: %w", err)
}
return nil
}
func (s *subcommandRevokeCert) revokeMalformed(ctx context.Context, a *admin, serials []string, reasonCode revocation.Reason) error {
u, err := user.Current()
if err != nil {
return fmt.Errorf("getting admin username: %w", err)
}
if s.crlShard == 0 {
return errors.New("when revoking malformed certificates, a nonzero CRL shard must be specified")
}
if len(serials) > 1 {
return errors.New("when revoking malformed certificates, only one cert at a time is allowed")
}
_, err = a.rac.AdministrativelyRevokeCertificate(
ctx,
&rapb.AdministrativelyRevokeCertificateRequest{
Serial: serials[0],
Code: int64(reasonCode),
AdminName: u.Username,
SkipBlockKey: s.skipBlock,
Malformed: true,
CrlShard: s.crlShard,
},
)
return err
}
func (a *admin) serialsFromIncidentTable(ctx context.Context, tableName string) ([]string, error) {
stream, err := a.saroc.SerialsForIncident(ctx, &sapb.SerialsForIncidentRequest{IncidentTable: tableName})
if err != nil {
return nil, fmt.Errorf("setting up stream of serials from incident table %q: %s", tableName, err)
}
var serials []string
for {
is, err := stream.Recv()
if err != nil {
if err == io.EOF {
break
}
return nil, fmt.Errorf("streaming serials from incident table %q: %s", tableName, err)
}
serials = append(serials, is.Serial)
}
return serials, nil
}
func (a *admin) serialsFromFile(_ context.Context, filePath string) ([]string, error) {
file, err := os.Open(filePath)
if err != nil {
return nil, fmt.Errorf("opening serials file: %w", err)
}
var serials []string
scanner := bufio.NewScanner(file)
for scanner.Scan() {
serial := scanner.Text()
if serial == "" {
continue
}
serials = append(serials, serial)
}
return serials, nil
}
func (a *admin) serialsFromPrivateKey(ctx context.Context, privkeyFile string) ([]string, error) {
spkiHash, err := a.spkiHashFromPrivateKey(privkeyFile)
if err != nil {
return nil, err
}
stream, err := a.saroc.GetSerialsByKey(ctx, &sapb.SPKIHash{KeyHash: spkiHash})
if err != nil {
return nil, fmt.Errorf("setting up stream of serials from SA: %s", err)
}
var serials []string
for {
serial, err := stream.Recv()
if err != nil {
if err == io.EOF {
break
}
return nil, fmt.Errorf("streaming serials from SA: %s", err)
}
serials = append(serials, serial.Serial)
}
return serials, nil
}
func (a *admin) serialsFromRegID(ctx context.Context, regID int64) ([]string, error) {
_, err := a.saroc.GetRegistration(ctx, &sapb.RegistrationID{Id: regID})
if err != nil {
return nil, fmt.Errorf("couldn't confirm regID exists: %w", err)
}
stream, err := a.saroc.GetSerialsByAccount(ctx, &sapb.RegistrationID{Id: regID})
if err != nil {
return nil, fmt.Errorf("setting up stream of serials from SA: %s", err)
}
var serials []string
for {
serial, err := stream.Recv()
if err != nil {
if err == io.EOF {
break
}
return nil, fmt.Errorf("streaming serials from SA: %s", err)
}
serials = append(serials, serial.Serial)
}
return serials, nil
}
func (a *admin) serialsFromCertPEM(_ context.Context, filename string) ([]string, error) {
cert, err := core.LoadCert(filename)
if err != nil {
return nil, fmt.Errorf("loading certificate pem: %w", err)
}
return []string{core.SerialToString(cert.SerialNumber)}, nil
}
// cleanSerials removes non-alphanumeric characters from the serials and checks
// that all resulting serials are valid (hex encoded, and the correct length).
func cleanSerials(serials []string) ([]string, error) {
serialStrip := func(r rune) rune {
switch {
case unicode.IsLetter(r):
return r
case unicode.IsDigit(r):
return r
}
return rune(-1)
}
var ret []string
for _, s := range serials {
cleaned := strings.Map(serialStrip, s)
if !core.ValidSerial(cleaned) {
return nil, fmt.Errorf("cleaned serial %q is not valid", cleaned)
}
ret = append(ret, cleaned)
}
return ret, nil
}
func (a *admin) revokeSerials(ctx context.Context, serials []string, reason revocation.Reason, skipBlockKey bool, parallelism uint) error {
u, err := user.Current()
if err != nil {
return fmt.Errorf("getting admin username: %w", err)
}
var errCount atomic.Uint64
wg := new(sync.WaitGroup)
work := make(chan string, parallelism)
for i := uint(0); i < parallelism; i++ {
wg.Add(1)
go func() {
defer wg.Done()
for serial := range work {
_, err := a.rac.AdministrativelyRevokeCertificate(
ctx,
&rapb.AdministrativelyRevokeCertificateRequest{
Serial: serial,
Code: int64(reason),
AdminName: u.Username,
SkipBlockKey: skipBlockKey,
// This is a well-formed certificate so send CrlShard 0
// to let the RA figure out the right shard from the cert.
Malformed: false,
CrlShard: 0,
},
)
if err != nil {
errCount.Add(1)
if errors.Is(err, berrors.AlreadyRevoked) {
a.log.Errf("not revoking %q: already revoked", serial)
} else {
a.log.Errf("failed to revoke %q: %s", serial, err)
}
}
}
}()
}
for _, serial := range serials {
work <- serial
}
close(work)
wg.Wait()
if errCount.Load() > 0 {
return fmt.Errorf("encountered %d errors while revoking certs; see logs above for details", errCount.Load())
}
return nil
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/letsencrypt/boulder/cmd/admin/dryrun.go | third-party/github.com/letsencrypt/boulder/cmd/admin/dryrun.go | package main
import (
"context"
"google.golang.org/grpc"
"google.golang.org/protobuf/encoding/prototext"
"google.golang.org/protobuf/types/known/emptypb"
blog "github.com/letsencrypt/boulder/log"
rapb "github.com/letsencrypt/boulder/ra/proto"
sapb "github.com/letsencrypt/boulder/sa/proto"
)
type dryRunRAC struct {
rapb.RegistrationAuthorityClient
log blog.Logger
}
func (d dryRunRAC) AdministrativelyRevokeCertificate(_ context.Context, req *rapb.AdministrativelyRevokeCertificateRequest, _ ...grpc.CallOption) (*emptypb.Empty, error) {
b, err := prototext.Marshal(req)
if err != nil {
return nil, err
}
d.log.Infof("dry-run: %#v", string(b))
return &emptypb.Empty{}, nil
}
type dryRunSAC struct {
sapb.StorageAuthorityClient
log blog.Logger
}
func (d dryRunSAC) AddBlockedKey(_ context.Context, req *sapb.AddBlockedKeyRequest, _ ...grpc.CallOption) (*emptypb.Empty, error) {
d.log.Infof("dry-run: Block SPKI hash %x by %s %s", req.KeyHash, req.Comment, req.Source)
return &emptypb.Empty{}, nil
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/letsencrypt/boulder/cmd/admin/key.go | third-party/github.com/letsencrypt/boulder/cmd/admin/key.go | package main
import (
"bufio"
"context"
"crypto/x509"
"encoding/hex"
"encoding/pem"
"errors"
"flag"
"fmt"
"io"
"os"
"os/user"
"sync"
"sync/atomic"
"google.golang.org/protobuf/types/known/timestamppb"
"github.com/letsencrypt/boulder/core"
berrors "github.com/letsencrypt/boulder/errors"
"github.com/letsencrypt/boulder/privatekey"
sapb "github.com/letsencrypt/boulder/sa/proto"
)
// subcommandBlockKey encapsulates the "admin block-key" command.
type subcommandBlockKey struct {
parallelism uint
comment string
privKey string
spkiFile string
certFile string
csrFile string
csrFileExpectedCN string
checkSignature bool
}
var _ subcommand = (*subcommandBlockKey)(nil)
func (s *subcommandBlockKey) Desc() string {
return "Block a keypair from any future issuance"
}
func (s *subcommandBlockKey) Flags(flag *flag.FlagSet) {
// General flags relevant to all key input methods.
flag.UintVar(&s.parallelism, "parallelism", 10, "Number of concurrent workers to use while blocking keys")
flag.StringVar(&s.comment, "comment", "", "Additional context to add to database comment column")
// Flags specifying the input method for the keys to be blocked.
flag.StringVar(&s.privKey, "private-key", "", "Block issuance for the pubkey corresponding to this private key")
flag.StringVar(&s.spkiFile, "spki-file", "", "Block issuance for all keys listed in this file as SHA256 hashes of SPKI, hex encoded, one per line")
flag.StringVar(&s.certFile, "cert-file", "", "Block issuance for the public key of the single PEM-formatted certificate in this file")
flag.StringVar(&s.csrFile, "csr-file", "", "Block issuance for the public key of the single PEM-formatted CSR in this file")
flag.StringVar(&s.csrFileExpectedCN, "csr-file-expected-cn", "The key that signed this CSR has been publicly disclosed. It should not be used for any purpose.", "The Subject CN of a CSR will be verified to match this before blocking")
flag.BoolVar(&s.checkSignature, "check-signature", true, "Check self-signature of CSR before revoking")
}
func (s *subcommandBlockKey) Run(ctx context.Context, a *admin) error {
// This is a map of all input-selection flags to whether or not they were set
// to a non-default value. We use this to ensure that exactly one input
// selection flag was given on the command line.
setInputs := map[string]bool{
"-private-key": s.privKey != "",
"-spki-file": s.spkiFile != "",
"-cert-file": s.certFile != "",
"-csr-file": s.csrFile != "",
}
activeFlag, err := findActiveInputMethodFlag(setInputs)
if err != nil {
return err
}
var spkiHashes [][]byte
switch activeFlag {
case "-private-key":
var spkiHash []byte
spkiHash, err = a.spkiHashFromPrivateKey(s.privKey)
spkiHashes = [][]byte{spkiHash}
case "-spki-file":
spkiHashes, err = a.spkiHashesFromFile(s.spkiFile)
case "-cert-file":
spkiHashes, err = a.spkiHashesFromCertPEM(s.certFile)
case "-csr-file":
spkiHashes, err = a.spkiHashFromCSRPEM(s.csrFile, s.checkSignature, s.csrFileExpectedCN)
default:
return errors.New("no recognized input method flag set (this shouldn't happen)")
}
if err != nil {
return fmt.Errorf("collecting spki hashes to block: %w", err)
}
err = a.blockSPKIHashes(ctx, spkiHashes, s.comment, s.parallelism)
if err != nil {
return err
}
return nil
}
func (a *admin) spkiHashFromPrivateKey(keyFile string) ([]byte, error) {
_, publicKey, err := privatekey.Load(keyFile)
if err != nil {
return nil, fmt.Errorf("loading private key file: %w", err)
}
spkiHash, err := core.KeyDigest(publicKey)
if err != nil {
return nil, fmt.Errorf("computing SPKI hash: %w", err)
}
return spkiHash[:], nil
}
func (a *admin) spkiHashesFromFile(filePath string) ([][]byte, error) {
file, err := os.Open(filePath)
if err != nil {
return nil, fmt.Errorf("opening spki hashes file: %w", err)
}
var spkiHashes [][]byte
scanner := bufio.NewScanner(file)
for scanner.Scan() {
spkiHex := scanner.Text()
if spkiHex == "" {
continue
}
spkiHash, err := hex.DecodeString(spkiHex)
if err != nil {
return nil, fmt.Errorf("decoding hex spki hash %q: %w", spkiHex, err)
}
if len(spkiHash) != 32 {
return nil, fmt.Errorf("got spki hash of unexpected length: %q (%d)", spkiHex, len(spkiHash))
}
spkiHashes = append(spkiHashes, spkiHash)
}
return spkiHashes, nil
}
func (a *admin) spkiHashesFromCertPEM(filename string) ([][]byte, error) {
cert, err := core.LoadCert(filename)
if err != nil {
return nil, fmt.Errorf("loading certificate pem: %w", err)
}
spkiHash, err := core.KeyDigest(cert.PublicKey)
if err != nil {
return nil, fmt.Errorf("computing SPKI hash: %w", err)
}
return [][]byte{spkiHash[:]}, nil
}
func (a *admin) spkiHashFromCSRPEM(filename string, checkSignature bool, expectedCN string) ([][]byte, error) {
csrFile, err := os.ReadFile(filename)
if err != nil {
return nil, fmt.Errorf("reading CSR file %q: %w", filename, err)
}
data, _ := pem.Decode(csrFile)
if data == nil {
return nil, fmt.Errorf("no PEM data found in %q", filename)
}
a.log.AuditInfof("Parsing key to block from CSR PEM: %x", data)
csr, err := x509.ParseCertificateRequest(data.Bytes)
if err != nil {
return nil, fmt.Errorf("parsing CSR %q: %w", filename, err)
}
if checkSignature {
err = csr.CheckSignature()
if err != nil {
return nil, fmt.Errorf("checking CSR signature: %w", err)
}
}
if csr.Subject.CommonName != expectedCN {
return nil, fmt.Errorf("Got CSR CommonName %q, expected %q", csr.Subject.CommonName, expectedCN)
}
spkiHash, err := core.KeyDigest(csr.PublicKey)
if err != nil {
return nil, fmt.Errorf("computing SPKI hash: %w", err)
}
return [][]byte{spkiHash[:]}, nil
}
func (a *admin) blockSPKIHashes(ctx context.Context, spkiHashes [][]byte, comment string, parallelism uint) error {
u, err := user.Current()
if err != nil {
return fmt.Errorf("getting admin username: %w", err)
}
var errCount atomic.Uint64
wg := new(sync.WaitGroup)
work := make(chan []byte, parallelism)
for i := uint(0); i < parallelism; i++ {
wg.Add(1)
go func() {
defer wg.Done()
for spkiHash := range work {
err = a.blockSPKIHash(ctx, spkiHash, u, comment)
if err != nil {
errCount.Add(1)
if errors.Is(err, berrors.AlreadyRevoked) {
a.log.Errf("not blocking %x: already blocked", spkiHash)
} else {
a.log.Errf("failed to block %x: %s", spkiHash, err)
}
}
}
}()
}
for _, spkiHash := range spkiHashes {
work <- spkiHash
}
close(work)
wg.Wait()
if errCount.Load() > 0 {
return fmt.Errorf("encountered %d errors while revoking certs; see logs above for details", errCount.Load())
}
return nil
}
func (a *admin) blockSPKIHash(ctx context.Context, spkiHash []byte, u *user.User, comment string) error {
exists, err := a.saroc.KeyBlocked(ctx, &sapb.SPKIHash{KeyHash: spkiHash})
if err != nil {
return fmt.Errorf("checking if key is already blocked: %w", err)
}
if exists.Exists {
return berrors.AlreadyRevokedError("the provided key already exists in the 'blockedKeys' table")
}
stream, err := a.saroc.GetSerialsByKey(ctx, &sapb.SPKIHash{KeyHash: spkiHash})
if err != nil {
return fmt.Errorf("setting up stream of serials from SA: %s", err)
}
var count int
for {
_, err := stream.Recv()
if err != nil {
if err == io.EOF {
break
}
return fmt.Errorf("streaming serials from SA: %s", err)
}
count++
}
a.log.Infof("Found %d unexpired certificates matching the provided key", count)
_, err = a.sac.AddBlockedKey(ctx, &sapb.AddBlockedKeyRequest{
KeyHash: spkiHash[:],
Added: timestamppb.New(a.clk.Now()),
Source: "admin-revoker",
Comment: fmt.Sprintf("%s: %s", u.Username, comment),
RevokedBy: 0,
})
if err != nil {
return fmt.Errorf("blocking key: %w", err)
}
return nil
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/letsencrypt/boulder/cmd/admin/main.go | third-party/github.com/letsencrypt/boulder/cmd/admin/main.go | // Package main provides the "admin" tool, which can perform various
// administrative actions (such as revoking certificates) against a Boulder
// deployment.
//
// Run "admin -h" for a list of flags and subcommands.
//
// Note that the admin tool runs in "dry-run" mode *by default*. All commands
// which mutate the database (either directly or via gRPC requests) will refuse
// to do so, and instead print log lines representing the work they would do,
// unless the "-dry-run=false" flag is passed.
package main
import (
"context"
"flag"
"fmt"
"os"
"strings"
"github.com/letsencrypt/boulder/cmd"
"github.com/letsencrypt/boulder/features"
)
type Config struct {
Admin struct {
// DB controls the admin tool's direct connection to the database.
DB cmd.DBConfig
// TLS controls the TLS client the admin tool uses for gRPC connections.
TLS cmd.TLSConfig
RAService *cmd.GRPCClientConfig
SAService *cmd.GRPCClientConfig
Features features.Config
}
Syslog cmd.SyslogConfig
OpenTelemetry cmd.OpenTelemetryConfig
}
// subcommand specifies the set of methods that a struct must implement to be
// usable as an admin subcommand.
type subcommand interface {
// Desc should return a short (one-sentence) description of the subcommand for
// use in help/usage strings.
Desc() string
// Flags should register command line flags on the provided flagset. These
// should use the "TypeVar" methods on the provided flagset, targeting fields
// on the subcommand struct, so that the results of command line parsing can
// be used by other methods on the struct.
Flags(*flag.FlagSet)
// Run should do all of the subcommand's heavy lifting, with behavior gated on
// the subcommand struct's member fields which have been populated from the
// command line. The provided admin object can be used for access to external
// services like the RA, SA, and configured logger.
Run(context.Context, *admin) error
}
// main is the entry-point for the admin tool. We do not include admin in the
// suite of tools which are subcommands of the "boulder" binary, since it
// should be small and portable and standalone.
func main() {
// Do setup as similarly as possible to all other boulder services, including
// config parsing and stats and logging setup. However, the one downside of
// not being bundled with the boulder binary is that we don't get config
// validation for free.
defer cmd.AuditPanic()
// This is the registry of all subcommands that the admin tool can run.
subcommands := map[string]subcommand{
"revoke-cert": &subcommandRevokeCert{},
"block-key": &subcommandBlockKey{},
"pause-identifier": &subcommandPauseIdentifier{},
"unpause-account": &subcommandUnpauseAccount{},
}
defaultUsage := flag.Usage
flag.Usage = func() {
defaultUsage()
fmt.Printf("\nSubcommands:\n")
for name, command := range subcommands {
fmt.Printf(" %s\n", name)
fmt.Printf("\t%s\n", command.Desc())
}
fmt.Print("\nYou can run \"admin <subcommand> -help\" to get usage for that subcommand.\n")
}
// Start by parsing just the global flags before we get to the subcommand, if
// they're present.
configFile := flag.String("config", "", "Path to the configuration file for this service (required)")
dryRun := flag.Bool("dry-run", true, "Print actions instead of mutating the database")
flag.Parse()
// Figure out which subcommand they want us to run.
unparsedArgs := flag.Args()
if len(unparsedArgs) == 0 {
flag.Usage()
os.Exit(1)
}
subcommand, ok := subcommands[unparsedArgs[0]]
if !ok {
flag.Usage()
os.Exit(1)
}
// Then parse the rest of the args according to the selected subcommand's
// flags, and allow the global flags to be placed after the subcommand name.
subflags := flag.NewFlagSet(unparsedArgs[0], flag.ExitOnError)
subcommand.Flags(subflags)
flag.VisitAll(func(f *flag.Flag) {
// For each flag registered at the global/package level, also register it on
// the subflags FlagSet. The `f.Value` here is a pointer to the same var
// that the original global flag would populate, so the same variable can
// be set either way.
subflags.Var(f.Value, f.Name, f.Usage)
})
_ = subflags.Parse(unparsedArgs[1:])
// With the flags all parsed, now we can parse our config and set up our admin
// object.
if *configFile == "" {
flag.Usage()
os.Exit(1)
}
a, err := newAdmin(*configFile, *dryRun)
cmd.FailOnError(err, "creating admin object")
// Finally, run the selected subcommand.
if a.dryRun {
a.log.AuditInfof("admin tool executing a dry-run with the following arguments: %q", strings.Join(os.Args, " "))
} else {
a.log.AuditInfof("admin tool executing with the following arguments: %q", strings.Join(os.Args, " "))
}
err = subcommand.Run(context.Background(), a)
cmd.FailOnError(err, "executing subcommand")
if a.dryRun {
a.log.AuditInfof("admin tool has successfully completed executing a dry-run with the following arguments: %q", strings.Join(os.Args, " "))
a.log.Info("Dry run complete. Pass -dry-run=false to mutate the database.")
} else {
a.log.AuditInfof("admin tool has successfully completed executing with the following arguments: %q", strings.Join(os.Args, " "))
}
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/letsencrypt/boulder/cmd/boulder-publisher/main_test.go | third-party/github.com/letsencrypt/boulder/cmd/boulder-publisher/main_test.go | package notmain
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/letsencrypt/boulder/cmd/boulder-publisher/main.go | third-party/github.com/letsencrypt/boulder/cmd/boulder-publisher/main.go | package notmain
import (
"context"
"flag"
"fmt"
"os"
"runtime"
ct "github.com/google/certificate-transparency-go"
"github.com/letsencrypt/boulder/cmd"
"github.com/letsencrypt/boulder/features"
bgrpc "github.com/letsencrypt/boulder/grpc"
"github.com/letsencrypt/boulder/issuance"
"github.com/letsencrypt/boulder/publisher"
pubpb "github.com/letsencrypt/boulder/publisher/proto"
)
type Config struct {
Publisher struct {
cmd.ServiceConfig
Features features.Config
// If this is non-zero, profile blocking events such that one even is
// sampled every N nanoseconds.
// https://golang.org/pkg/runtime/#SetBlockProfileRate
BlockProfileRate int
UserAgent string
// Chains is a list of lists of certificate filenames. Each inner list is
// a chain, starting with the issuing intermediate, followed by one or
// more additional certificates, up to and including a root.
Chains [][]string `validate:"min=1,dive,min=2,dive,required"`
}
Syslog cmd.SyslogConfig
OpenTelemetry cmd.OpenTelemetryConfig
}
func main() {
grpcAddr := flag.String("addr", "", "gRPC listen address override")
debugAddr := flag.String("debug-addr", "", "Debug server address override")
configFile := flag.String("config", "", "File path to the configuration file for this service")
flag.Parse()
if *configFile == "" {
flag.Usage()
os.Exit(1)
}
var c Config
err := cmd.ReadConfigFile(*configFile, &c)
cmd.FailOnError(err, "Reading JSON config file into config structure")
features.Set(c.Publisher.Features)
runtime.SetBlockProfileRate(c.Publisher.BlockProfileRate)
if *grpcAddr != "" {
c.Publisher.GRPC.Address = *grpcAddr
}
if *debugAddr != "" {
c.Publisher.DebugAddr = *debugAddr
}
if c.Publisher.UserAgent == "" {
c.Publisher.UserAgent = "certificate-transparency-go/1.0"
}
scope, logger, oTelShutdown := cmd.StatsAndLogging(c.Syslog, c.OpenTelemetry, c.Publisher.DebugAddr)
defer oTelShutdown(context.Background())
logger.Info(cmd.VersionString())
if c.Publisher.Chains == nil {
logger.AuditErr("No chain files provided")
os.Exit(1)
}
bundles := make(map[issuance.NameID][]ct.ASN1Cert)
for _, files := range c.Publisher.Chains {
chain, err := issuance.LoadChain(files)
cmd.FailOnError(err, "failed to load chain.")
issuer := chain[0]
id := issuer.NameID()
if _, exists := bundles[id]; exists {
cmd.Fail(fmt.Sprintf("Got multiple chains configured for issuer %q", issuer.Subject.CommonName))
}
bundles[id] = publisher.GetCTBundleForChain(chain)
}
tlsConfig, err := c.Publisher.TLS.Load(scope)
cmd.FailOnError(err, "TLS config")
clk := cmd.Clock()
pubi := publisher.New(bundles, c.Publisher.UserAgent, logger, scope)
start, err := bgrpc.NewServer(c.Publisher.GRPC, logger).Add(
&pubpb.Publisher_ServiceDesc, pubi).Build(tlsConfig, scope, clk)
cmd.FailOnError(err, "Unable to setup Publisher gRPC server")
cmd.FailOnError(start(), "Publisher gRPC service failed")
}
func init() {
cmd.RegisterCommand("boulder-publisher", main, &cmd.ConfigValidator{Config: &Config{}})
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/letsencrypt/boulder/cmd/boulder-sa/main_test.go | third-party/github.com/letsencrypt/boulder/cmd/boulder-sa/main_test.go | package notmain
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/letsencrypt/boulder/cmd/boulder-sa/main.go | third-party/github.com/letsencrypt/boulder/cmd/boulder-sa/main.go | package notmain
import (
"context"
"flag"
"os"
"github.com/letsencrypt/boulder/cmd"
"github.com/letsencrypt/boulder/config"
"github.com/letsencrypt/boulder/features"
bgrpc "github.com/letsencrypt/boulder/grpc"
"github.com/letsencrypt/boulder/sa"
sapb "github.com/letsencrypt/boulder/sa/proto"
)
// Config is the JSON configuration schema for the boulder-sa binary.
type Config struct {
	SA struct {
		cmd.ServiceConfig
		// DB is the primary (read-write) database connection.
		DB cmd.DBConfig
		// ReadOnlyDB, when non-zero, backs read-only queries; otherwise
		// reads fall back to the primary DB (see main).
		ReadOnlyDB cmd.DBConfig `validate:"-"`
		// IncidentsDB, when non-zero, backs incident-table lookups;
		// otherwise it also falls back to the primary DB.
		IncidentsDB cmd.DBConfig `validate:"-"`

		Features features.Config

		// Max simultaneous SQL queries caused by a single RPC.
		ParallelismPerRPC int `validate:"omitempty,min=1"`

		// LagFactor is how long to sleep before retrying a read request that may
		// have failed solely due to replication lag.
		LagFactor config.Duration `validate:"-"`
	}

	Syslog        cmd.SyslogConfig
	OpenTelemetry cmd.OpenTelemetryConfig
}
// main wires up and runs the Storage Authority (SA) gRPC service: it reads
// config, connects to the primary / read-only / incidents databases, builds
// the read-only and read-write SA implementations, and serves both until the
// gRPC server exits.
func main() {
	grpcAddr := flag.String("addr", "", "gRPC listen address override")
	debugAddr := flag.String("debug-addr", "", "Debug server address override")
	configFile := flag.String("config", "", "File path to the configuration file for this service")
	flag.Parse()
	if *configFile == "" {
		flag.Usage()
		os.Exit(1)
	}

	var c Config
	err := cmd.ReadConfigFile(*configFile, &c)
	cmd.FailOnError(err, "Reading JSON config file into config structure")

	features.Set(c.SA.Features)

	// Command-line overrides take precedence over the config file.
	if *grpcAddr != "" {
		c.SA.GRPC.Address = *grpcAddr
	}
	if *debugAddr != "" {
		c.SA.DebugAddr = *debugAddr
	}

	scope, logger, oTelShutdown := cmd.StatsAndLogging(c.Syslog, c.OpenTelemetry, c.SA.DebugAddr)
	defer oTelShutdown(context.Background())
	logger.Info(cmd.VersionString())

	dbMap, err := sa.InitWrappedDb(c.SA.DB, scope, logger)
	cmd.FailOnError(err, "While initializing dbMap")

	// The read-only and incidents connections are optional; each falls back
	// to the primary connection when its config section is left zero.
	dbReadOnlyMap := dbMap
	if c.SA.ReadOnlyDB != (cmd.DBConfig{}) {
		dbReadOnlyMap, err = sa.InitWrappedDb(c.SA.ReadOnlyDB, scope, logger)
		cmd.FailOnError(err, "While initializing dbReadOnlyMap")
	}

	dbIncidentsMap := dbMap
	if c.SA.IncidentsDB != (cmd.DBConfig{}) {
		dbIncidentsMap, err = sa.InitWrappedDb(c.SA.IncidentsDB, scope, logger)
		cmd.FailOnError(err, "While initializing dbIncidentsMap")
	}

	clk := cmd.Clock()

	// Guard against a missing/zero config value: at least one query per RPC.
	parallel := c.SA.ParallelismPerRPC
	if parallel < 1 {
		parallel = 1
	}

	tls, err := c.SA.TLS.Load(scope)
	cmd.FailOnError(err, "TLS config")

	saroi, err := sa.NewSQLStorageAuthorityRO(
		dbReadOnlyMap, dbIncidentsMap, scope, parallel, c.SA.LagFactor.Duration, clk, logger)
	cmd.FailOnError(err, "Failed to create read-only SA impl")

	// The read-write SA wraps the read-only implementation, adding mutating
	// operations against the primary DB.
	sai, err := sa.NewSQLStorageAuthorityWrapping(saroi, dbMap, scope)
	cmd.FailOnError(err, "Failed to create SA impl")

	// Both service descriptors are served from the same gRPC server.
	start, err := bgrpc.NewServer(c.SA.GRPC, logger).WithCheckInterval(c.SA.HealthCheckInterval.Duration).Add(
		&sapb.StorageAuthorityReadOnly_ServiceDesc, saroi).Add(
		&sapb.StorageAuthority_ServiceDesc, sai).Build(
		tls, scope, clk)
	cmd.FailOnError(err, "Unable to setup SA gRPC server")

	cmd.FailOnError(start(), "SA gRPC service failed")
}
// init registers the boulder-sa entry point and its config schema with the
// shared boulder command dispatcher.
func init() {
	cmd.RegisterCommand("boulder-sa", main, &cmd.ConfigValidator{Config: &Config{}})
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/letsencrypt/boulder/cmd/rocsp-tool/inflight.go | third-party/github.com/letsencrypt/boulder/cmd/rocsp-tool/inflight.go | package notmain
import "sync"
// inflight tracks the set of certificateStatus IDs that have been handed off
// for signing but whose results have not yet come back. It is safe for
// concurrent use via the embedded RWMutex.
type inflight struct {
	sync.RWMutex
	items map[int64]struct{}
}

// newInflight returns an empty, ready-to-use inflight set.
func newInflight() *inflight {
	return &inflight{
		items: make(map[int64]struct{}),
	}
}

// add records n as inflight.
func (i *inflight) add(n int64) {
	i.Lock()
	defer i.Unlock()
	i.items[n] = struct{}{}
}

// remove marks n as no longer inflight.
func (i *inflight) remove(n int64) {
	i.Lock()
	defer i.Unlock()
	delete(i.items, n)
}

// len reports how many IDs are currently inflight.
func (i *inflight) len() int {
	i.RLock()
	defer i.RUnlock()
	return len(i.items)
}

// min returns the numerically smallest key inflight. If nothing is inflight,
// it returns 0. Note: this takes O(n) time in the number of keys and should
// be called rarely.
//
// Fixed: the previous implementation used 0 as an "unset" sentinel
// (`if min == 0 { min = k }` inside the loop), so when 0 was itself a
// tracked key the result depended on map iteration order and could be
// wrong. An explicit first-iteration flag makes the result deterministic.
func (i *inflight) min() int64 {
	i.RLock()
	defer i.RUnlock()
	var min int64
	first := true
	for k := range i.items {
		if first || k < min {
			min = k
			first = false
		}
	}
	return min
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/letsencrypt/boulder/cmd/rocsp-tool/client.go | third-party/github.com/letsencrypt/boulder/cmd/rocsp-tool/client.go | package notmain
import (
"context"
"fmt"
"math/rand/v2"
"os"
"sync/atomic"
"time"
"github.com/jmhodges/clock"
"golang.org/x/crypto/ocsp"
"google.golang.org/protobuf/types/known/timestamppb"
capb "github.com/letsencrypt/boulder/ca/proto"
"github.com/letsencrypt/boulder/core"
"github.com/letsencrypt/boulder/db"
blog "github.com/letsencrypt/boulder/log"
"github.com/letsencrypt/boulder/rocsp"
"github.com/letsencrypt/boulder/sa"
"github.com/letsencrypt/boulder/test/ocsp/helper"
)
// client bundles the handles the rocsp-tool subcommands need: a read-write
// Redis client, an optional DB connection (only set for load-from-db), a
// gRPC client for OCSP signing, a clock, the DB scan batch size, and a
// logger.
type client struct {
	redis         *rocsp.RWClient
	db            *db.WrappedMap // optional
	ocspGenerator capb.OCSPGeneratorClient
	clk           clock.Clock
	scanBatchSize int
	logger        blog.Logger
}
// processResult represents the result of attempting to sign and store status
// for a single certificateStatus ID. If `err` is non-nil, it indicates the
// attempt failed.
type processResult struct {
	id  int64 // certificateStatus row ID this result refers to
	err error // nil on success
}
// getStartingID picks the certificateStatus primary-key ID to start a scan
// from, such that the scan covers at least all currently-valid certificates.
func getStartingID(ctx context.Context, clk clock.Clock, db *db.WrappedMap) (int64, error) {
	// To scan the DB efficiently, we want to select only currently-valid certificates. There's a
	// handy expires index, but for selecting a large set of rows, using the primary key will be
	// more efficient. So first we find a good id to start with, then scan from there. Note: since
	// AUTO_INCREMENT can skip around a bit, we add padding to ensure we get all currently-valid
	// certificates.
	startTime := clk.Now().Add(-24 * time.Hour)
	// minID is a pointer so a SQL NULL (no matching rows) scans as nil
	// instead of producing an error.
	var minID *int64
	err := db.QueryRowContext(
		ctx,
		"SELECT MIN(id) FROM certificateStatus WHERE notAfter >= ?",
		startTime,
	).Scan(&minID)
	if err != nil {
		return 0, fmt.Errorf("selecting minID: %w", err)
	}
	if minID == nil {
		return 0, fmt.Errorf("no entries in certificateStatus (where notAfter >= %s)", startTime)
	}
	return *minID, nil
}
// loadFromDB scans certificateStatus rows for currently-valid certificates,
// has the CA sign a fresh OCSP response for each, and stores the results in
// Redis. A startFromID of 0 means "choose a starting ID automatically" via
// getStartingID. It returns an error if scanning could not start or if any
// IDs are still marked inflight after the pipeline drains.
func (cl *client) loadFromDB(ctx context.Context, speed ProcessingSpeed, startFromID int64) error {
	prevID := startFromID
	var err error
	if prevID == 0 {
		prevID, err = getStartingID(ctx, cl.clk, cl.db)
		if err != nil {
			return fmt.Errorf("getting starting ID: %w", err)
		}
	}

	// Find the current maximum id in certificateStatus. We do this because the table is always
	// growing. If we scanned until we saw a batch with no rows, we would scan forever.
	var maxID *int64
	err = cl.db.QueryRowContext(
		ctx,
		"SELECT MAX(id) FROM certificateStatus",
	).Scan(&maxID)
	if err != nil {
		return fmt.Errorf("selecting maxID: %w", err)
	}
	if maxID == nil {
		return fmt.Errorf("no entries in certificateStatus")
	}

	// Limit the rate of reading rows.
	// NOTE(review): assumes speed.RowsPerSecond > 0 (main2 defaults it to
	// 2000); a zero here would divide by zero into +Inf — confirm callers
	// always pass a defaulted ProcessingSpeed.
	frequency := time.Duration(float64(time.Second) / float64(time.Duration(speed.RowsPerSecond)))
	// a set of all inflight certificate statuses, indexed by their `ID`.
	inflightIDs := newInflight()
	statusesToSign := cl.scanFromDB(ctx, prevID, *maxID, frequency, inflightIDs)

	// Start the signer pool; the last signer to finish closes `results`,
	// which terminates the collection loop below.
	results := make(chan processResult, speed.ParallelSigns)
	var runningSigners int32
	for range speed.ParallelSigns {
		atomic.AddInt32(&runningSigners, 1)
		go cl.signAndStoreResponses(ctx, statusesToSign, results, &runningSigners)
	}

	var successCount, errorCount int64
	for result := range results {
		inflightIDs.remove(result.id)
		if result.err != nil {
			errorCount++
			// Probabilistically sample error logging so a high error
			// volume doesn't flood the log.
			if errorCount < 10 ||
				(errorCount < 1000 && rand.IntN(1000) < 100) ||
				(errorCount < 100000 && rand.IntN(1000) < 10) ||
				(rand.IntN(1000) < 1) {
				cl.logger.Errf("error: %s", result.err)
			}
		} else {
			successCount++
		}
		// Progress logging, sampled with the same decaying scheme.
		total := successCount + errorCount
		if total < 10 ||
			(total < 1000 && rand.IntN(1000) < 100) ||
			(total < 100000 && rand.IntN(1000) < 10) ||
			(rand.IntN(1000) < 1) {
			cl.logger.Infof("stored %d responses, %d errors", successCount, errorCount)
		}
	}
	cl.logger.Infof("done. processed %d successes and %d errors\n", successCount, errorCount)

	// Every scanned ID should have produced exactly one result.
	if inflightIDs.len() != 0 {
		return fmt.Errorf("inflightIDs non-empty! has %d items, lowest %d", inflightIDs.len(), inflightIDs.min())
	}

	return nil
}
// scanFromDB scans certificateStatus rows from the DB, starting with `minID`, and writes them to
// its output channel at a maximum frequency of `frequency`. When it's read all available rows, it
// closes its output channel and exits.
// If there is an error, it logs the error, closes its output channel, and exits.
//
// NOTE(review): scanFromDBOneBatch returns -1 together with an error, so a
// failed batch resets currentMin to -1 and the next iteration rescans from
// the beginning of the table; a persistent error therefore loops
// indefinitely (the comment above says "exits", the code does not) — confirm
// this retry behavior is intended.
func (cl *client) scanFromDB(ctx context.Context, prevID int64, maxID int64, frequency time.Duration, inflightIDs *inflight) <-chan *sa.CertStatusMetadata {
	statusesToSign := make(chan *sa.CertStatusMetadata)
	go func() {
		defer close(statusesToSign)
		var err error
		currentMin := prevID
		for currentMin < maxID {
			currentMin, err = cl.scanFromDBOneBatch(ctx, currentMin, frequency, statusesToSign, inflightIDs)
			if err != nil {
				cl.logger.Infof("error scanning rows: %s", err)
			}
		}
	}()
	return statusesToSign
}
// scanFromDBOneBatch scans up to `cl.scanBatchSize` rows from certificateStatus, in order, and
// writes them to `output`. When done, it returns the highest `id` it saw during the scan.
// We do this in batches because if we tried to scan the whole table in a single query, MariaDB
// would terminate the query after a certain amount of data transferred.
// On any error it returns -1 and the error.
func (cl *client) scanFromDBOneBatch(ctx context.Context, prevID int64, frequency time.Duration, output chan<- *sa.CertStatusMetadata, inflightIDs *inflight) (int64, error) {
	// Paces emission at one row per `frequency`. Stop the ticker on return:
	// previously it was never stopped, leaving one active ticker firing for
	// the rest of the process per batch scanned.
	rowTicker := time.NewTicker(frequency)
	defer rowTicker.Stop()

	clauses := "WHERE id > ? ORDER BY id LIMIT ?"
	params := []interface{}{prevID, cl.scanBatchSize}

	selector, err := db.NewMappedSelector[sa.CertStatusMetadata](cl.db)
	if err != nil {
		return -1, fmt.Errorf("initializing db map: %w", err)
	}

	rows, err := selector.QueryContext(ctx, clauses, params...)
	if err != nil {
		return -1, fmt.Errorf("scanning certificateStatus: %w", err)
	}

	var scanned int
	var previousID int64
	err = rows.ForEach(func(row *sa.CertStatusMetadata) error {
		// Rate limit: wait for the next tick before emitting a row.
		<-rowTicker.C
		status, err := rows.Get()
		if err != nil {
			return fmt.Errorf("scanning row %d (previous ID %d): %w", scanned, previousID, err)
		}
		scanned++
		inflightIDs.add(status.ID)
		// Emit a log line every 100000 rows. For our current ~215M rows, that
		// will emit about 2150 log lines. This probably strikes a good balance
		// between too spammy and having a reasonably frequent checkpoint.
		if scanned%100000 == 0 {
			cl.logger.Infof("scanned %d certificateStatus rows. minimum inflight ID %d", scanned, inflightIDs.min())
		}
		output <- status
		previousID = status.ID
		return nil
	})
	if err != nil {
		return -1, err
	}
	return previousID, nil
}
// signAndStoreResponses consumes cert statuses on its input channel and writes them to its output
// channel. Before returning, it atomically decrements the provided runningSigners int. If the
// result is 0, indicating this was the last running signer, it closes its output channel.
func (cl *client) signAndStoreResponses(ctx context.Context, input <-chan *sa.CertStatusMetadata, output chan processResult, runningSigners *int32) {
	// Last signer out closes the results channel so the collector loop in
	// loadFromDB can terminate.
	defer func() {
		if atomic.AddInt32(runningSigners, -1) <= 0 {
			close(output)
		}
	}()
	for status := range input {
		// Ask the CA to produce a fresh response for this certificate.
		ocspReq := &capb.GenerateOCSPRequest{
			Serial:    status.Serial,
			IssuerID:  status.IssuerID,
			Status:    string(status.Status),
			Reason:    int32(status.RevokedReason), //nolint: gosec // Revocation reasons are guaranteed to be small, no risk of overflow.
			RevokedAt: timestamppb.New(status.RevokedDate),
		}
		result, err := cl.ocspGenerator.GenerateOCSP(ctx, ocspReq)
		if err != nil {
			output <- processResult{id: status.ID, err: err}
			continue
		}
		// Parse (nil issuer: no signature verification here) to sanity-check
		// the bytes before storing them.
		resp, err := ocsp.ParseResponse(result.Response, nil)
		if err != nil {
			output <- processResult{id: status.ID, err: err}
			continue
		}
		// Exactly one processResult is emitted per consumed status.
		err = cl.redis.StoreResponse(ctx, resp)
		if err != nil {
			output <- processResult{id: status.ID, err: err}
		} else {
			output <- processResult{id: status.ID, err: nil}
		}
	}
}
// expiredError signals that an OCSP response's NextUpdate is already in the
// past; ago records how long ago it expired.
type expiredError struct {
	serial string
	ago    time.Duration
}

// Error formats the expiry notice, satisfying the error interface.
func (e expiredError) Error() string {
	msg := fmt.Sprintf("response for %s expired %s ago", e.serial, e.ago)
	return msg
}
// storeResponsesFromFiles reads each named file as a DER OCSP response and
// writes it to Redis via storeResponse, stopping at the first failure.
func (cl *client) storeResponsesFromFiles(ctx context.Context, files []string) error {
	for _, name := range files {
		der, err := os.ReadFile(name)
		if err != nil {
			return fmt.Errorf("reading response file %q: %w", name, err)
		}
		if err := cl.storeResponse(ctx, der); err != nil {
			return err
		}
	}
	return nil
}
// storeResponse parses respBytes as a DER OCSP response, rejects it with an
// expiredError if its NextUpdate has already passed, stores it in Redis, and
// then reads it back and logs the round-tripped response as a sanity check.
func (cl *client) storeResponse(ctx context.Context, respBytes []byte) error {
	// nil issuer: parse without verifying the signer.
	resp, err := ocsp.ParseResponse(respBytes, nil)
	if err != nil {
		return fmt.Errorf("parsing response: %w", err)
	}

	serial := core.SerialToString(resp.SerialNumber)

	// Refuse to store responses that are already past their NextUpdate.
	if resp.NextUpdate.Before(cl.clk.Now()) {
		return expiredError{
			serial: serial,
			ago:    cl.clk.Now().Sub(resp.NextUpdate),
		}
	}

	cl.logger.Infof("storing response for %s, generated %s, ttl %g hours",
		serial,
		resp.ThisUpdate,
		time.Until(resp.NextUpdate).Hours(),
	)

	err = cl.redis.StoreResponse(ctx, resp)
	if err != nil {
		return fmt.Errorf("storing response: %w", err)
	}

	// Read-back verification: fetch and re-parse what was just written.
	retrievedResponse, err := cl.redis.GetResponse(ctx, serial)
	if err != nil {
		return fmt.Errorf("getting response: %w", err)
	}

	parsedRetrievedResponse, err := ocsp.ParseResponse(retrievedResponse, nil)
	if err != nil {
		return fmt.Errorf("parsing retrieved response: %w", err)
	}

	cl.logger.Infof("retrieved %s", helper.PrettyResponse(parsedRetrievedResponse))
	return nil
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/letsencrypt/boulder/cmd/rocsp-tool/client_test.go | third-party/github.com/letsencrypt/boulder/cmd/rocsp-tool/client_test.go | package notmain
import (
"context"
"fmt"
"math/big"
"testing"
"time"
"github.com/jmhodges/clock"
"github.com/redis/go-redis/v9"
"golang.org/x/crypto/ocsp"
"google.golang.org/grpc"
capb "github.com/letsencrypt/boulder/ca/proto"
"github.com/letsencrypt/boulder/cmd"
"github.com/letsencrypt/boulder/core"
"github.com/letsencrypt/boulder/db"
blog "github.com/letsencrypt/boulder/log"
"github.com/letsencrypt/boulder/metrics"
"github.com/letsencrypt/boulder/rocsp"
"github.com/letsencrypt/boulder/sa"
"github.com/letsencrypt/boulder/test"
"github.com/letsencrypt/boulder/test/vars"
)
// makeClient builds a read-write ROCSP Redis client pointed at the two test
// Redis shards, plus the fake clock it uses.
// NOTE(review): the addresses, username, and password are fixtures for the
// boulder test environment, not real credentials.
func makeClient() (*rocsp.RWClient, clock.Clock) {
	CACertFile := "../../test/certs/ipki/minica.pem"
	CertFile := "../../test/certs/ipki/localhost/cert.pem"
	KeyFile := "../../test/certs/ipki/localhost/key.pem"
	tlsConfig := cmd.TLSConfig{
		CACertFile: CACertFile,
		CertFile:   CertFile,
		KeyFile:    KeyFile,
	}
	tlsConfig2, err := tlsConfig.Load(metrics.NoopRegisterer)
	if err != nil {
		panic(err)
	}

	rdb := redis.NewRing(&redis.RingOptions{
		Addrs: map[string]string{
			"shard1": "10.77.77.2:4218",
			"shard2": "10.77.77.3:4218",
		},
		Username:  "unittest-rw",
		Password:  "824968fa490f4ecec1e52d5e34916bdb60d45f8d",
		TLSConfig: tlsConfig2,
	})
	clk := clock.NewFake()
	return rocsp.NewWritingClient(rdb, 500*time.Millisecond, clk, metrics.NoopRegisterer), clk
}

// insertCertificateStatus inserts a minimal "good" certificateStatus row
// with the given serial, notAfter, and ocspLastUpdated, and returns its
// auto-increment ID.
func insertCertificateStatus(t *testing.T, dbMap db.Executor, serial string, notAfter, ocspLastUpdated time.Time) int64 {
	result, err := dbMap.ExecContext(context.Background(),
		`INSERT INTO certificateStatus
	(serial, notAfter, status, ocspLastUpdated, revokedDate, revokedReason, lastExpirationNagSent, issuerID)
	VALUES (?, ?, ?, ?, ?, ?, ?, ?)`,
		serial,
		notAfter,
		core.OCSPStatusGood,
		ocspLastUpdated,
		time.Time{},
		0,
		time.Time{},
		99)
	test.AssertNotError(t, err, "inserting certificate status")
	id, err := result.LastInsertId()
	test.AssertNotError(t, err, "getting last insert ID")
	return id
}
// TestGetStartingID checks that getStartingID skips rows whose certificates
// fall outside the 24-hour lookback window once the fake clock advances.
func TestGetStartingID(t *testing.T) {
	clk := clock.NewFake()
	dbMap, err := sa.DBMapForTest(vars.DBConnSAFullPerms)
	test.AssertNotError(t, err, "failed setting up db client")
	defer test.ResetBoulderTestDatabase(t)()

	// First row expires 12h from now, second 36h from now.
	firstID := insertCertificateStatus(t, dbMap, "1337", clk.Now().Add(12*time.Hour), time.Time{})
	secondID := insertCertificateStatus(t, dbMap, "1338", clk.Now().Add(36*time.Hour), time.Time{})
	t.Logf("first ID %d, second ID %d", firstID, secondID)

	// After sleeping 48h, the 24h-padded startTime is t+24h, so only the
	// second row (notAfter t+36h) qualifies.
	clk.Sleep(48 * time.Hour)

	startingID, err := getStartingID(context.Background(), clk, dbMap)
	test.AssertNotError(t, err, "getting starting ID")
	test.AssertEquals(t, startingID, secondID)
}

// TestStoreResponse signs a fresh OCSP response with the test hierarchy and
// round-trips it through client.storeResponse (store + read-back).
func TestStoreResponse(t *testing.T) {
	redisClient, clk := makeClient()

	issuer, err := core.LoadCert("../../test/hierarchy/int-e1.cert.pem")
	test.AssertNotError(t, err, "loading int-e1")

	issuerKey, err := test.LoadSigner("../../test/hierarchy/int-e1.key.pem")
	test.AssertNotError(t, err, "loading int-e1 key ")

	// The response is valid for one hour of fake-clock time, so it passes
	// storeResponse's expiry check.
	response, err := ocsp.CreateResponse(issuer, issuer, ocsp.Response{
		SerialNumber: big.NewInt(1337),
		Status:       0,
		ThisUpdate:   clk.Now(),
		NextUpdate:   clk.Now().Add(time.Hour),
	}, issuerKey)
	test.AssertNotError(t, err, "creating OCSP response")

	cl := client{
		redis:         redisClient,
		db:            nil,
		ocspGenerator: nil,
		clk:           clk,
		logger:        blog.NewMock(),
	}

	err = cl.storeResponse(context.Background(), response)
	test.AssertNotError(t, err, "storing response")
}

// mockOCSPGenerator satisfies capb.OCSPGeneratorClient, returning the same
// fixed byte string for every request.
type mockOCSPGenerator struct{}

func (mog mockOCSPGenerator) GenerateOCSP(ctx context.Context, in *capb.GenerateOCSPRequest, opts ...grpc.CallOption) (*capb.OCSPResponse, error) {
	return &capb.OCSPResponse{
		Response: []byte("phthpbt"),
	}, nil
}
// TestLoadFromDB exercises the load-from-db pipeline end to end against the
// test database and test Redis ring: it inserts 100 unexpired
// certificateStatus rows and runs loadFromDB over them with a mock OCSP
// generator.
func TestLoadFromDB(t *testing.T) {
	redisClient, clk := makeClient()

	dbMap, err := sa.DBMapForTest(vars.DBConnSA)
	if err != nil {
		t.Fatalf("Failed to create dbMap: %s", err)
	}
	// ResetBoulderTestDatabase truncates the DB now and returns the cleanup
	// function, which we defer. (Previously the call itself was deferred —
	// `defer test.ResetBoulderTestDatabase(t)` without the trailing `()`, as
	// the other tests here use — so the reset ran at test exit and the
	// returned cleanup function never ran.)
	defer test.ResetBoulderTestDatabase(t)()

	// insertCertificateStatus reports failures through t itself; the old
	// `if err != nil` here only re-checked the stale DBMapForTest error.
	for i := range 100 {
		insertCertificateStatus(t, dbMap, fmt.Sprintf("%036x", i), clk.Now().Add(200*time.Hour), clk.Now())
	}

	rocspToolClient := client{
		redis:         redisClient,
		db:            dbMap,
		ocspGenerator: mockOCSPGenerator{},
		clk:           clk,
		scanBatchSize: 10,
		logger:        blog.NewMock(),
	}

	speed := ProcessingSpeed{
		RowsPerSecond: 10000,
		ParallelSigns: 100,
	}

	err = rocspToolClient.loadFromDB(context.Background(), speed, 0)
	if err != nil {
		t.Fatalf("loading from DB: %s", err)
	}
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/letsencrypt/boulder/cmd/rocsp-tool/inflight_test.go | third-party/github.com/letsencrypt/boulder/cmd/rocsp-tool/inflight_test.go | package notmain
import (
"testing"
"github.com/letsencrypt/boulder/test"
)
// TestInflight walks an inflight set through add/remove cycles, checking
// len and min after every transition.
func TestInflight(t *testing.T) {
	set := newInflight()

	// A fresh set is empty; min reports the 0 sentinel.
	test.AssertEquals(t, set.len(), 0)
	test.AssertEquals(t, set.min(), int64(0))

	// One element in, then out again.
	set.add(1337)
	test.AssertEquals(t, set.len(), 1)
	test.AssertEquals(t, set.min(), int64(1337))
	set.remove(1337)
	test.AssertEquals(t, set.len(), 0)
	test.AssertEquals(t, set.min(), int64(0))

	// Several elements: min finds the smallest regardless of insert order.
	set.add(7341)
	set.add(3317)
	set.add(1337)
	test.AssertEquals(t, set.len(), 3)
	test.AssertEquals(t, set.min(), int64(1337))
	set.remove(3317)
	set.remove(1337)
	set.remove(7341)
	test.AssertEquals(t, set.len(), 0)
	test.AssertEquals(t, set.min(), int64(0))
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/letsencrypt/boulder/cmd/rocsp-tool/main.go | third-party/github.com/letsencrypt/boulder/cmd/rocsp-tool/main.go | package notmain
import (
"context"
"encoding/base64"
"encoding/pem"
"flag"
"fmt"
"os"
"strings"
"github.com/jmhodges/clock"
"github.com/prometheus/client_golang/prometheus"
"golang.org/x/crypto/ocsp"
capb "github.com/letsencrypt/boulder/ca/proto"
"github.com/letsencrypt/boulder/cmd"
"github.com/letsencrypt/boulder/db"
bgrpc "github.com/letsencrypt/boulder/grpc"
"github.com/letsencrypt/boulder/metrics"
rocsp_config "github.com/letsencrypt/boulder/rocsp/config"
"github.com/letsencrypt/boulder/sa"
"github.com/letsencrypt/boulder/test/ocsp/helper"
)
// Config is the JSON configuration schema for rocsp-tool.
type Config struct {
	ROCSPTool struct {
		DebugAddr string `validate:"omitempty,hostname_port"`
		Redis     rocsp_config.RedisConfig

		// If using load-from-db, this provides credentials to connect to the DB
		// and the CA. Otherwise, it's optional.
		LoadFromDB *LoadFromDBConfig
	}
	Syslog        cmd.SyslogConfig
	OpenTelemetry cmd.OpenTelemetryConfig
}

// LoadFromDBConfig provides the credentials and configuration needed to load
// data from the certificateStatuses table in the DB and get it signed.
type LoadFromDBConfig struct {
	// Credentials to connect to the DB.
	DB cmd.DBConfig
	// Credentials to request OCSP signatures from the CA.
	GRPCTLS cmd.TLSConfig
	// Timeouts and hostnames for the CA.
	OCSPGeneratorService cmd.GRPCClientConfig
	// How fast to process rows.
	Speed ProcessingSpeed
}

// ProcessingSpeed tunes the load-from-db pipeline. Zero values are replaced
// with the documented defaults by the setDefault calls in main2.
type ProcessingSpeed struct {
	// If using load-from-db, this limits how many items per second we
	// scan from the DB. We might go slower than this depending on how fast
	// we read rows from the DB, but we won't go faster. Defaults to 2000.
	RowsPerSecond int `validate:"min=0"`
	// If using load-from-db, this controls how many parallel requests to
	// boulder-ca for OCSP signing we can make. Defaults to 100.
	ParallelSigns int `validate:"min=0"`
	// If using load-from-db, the LIMIT on our scanning queries. We have to
	// apply a limit because MariaDB will cut off our response at some
	// threshold of total bytes transferred (1 GB by default). Defaults to 10000.
	ScanBatchSize int `validate:"min=0"`
}
// init registers the rocsp-tool entry point and its config schema with the
// shared boulder command dispatcher.
func init() {
	cmd.RegisterCommand("rocsp-tool", main, &cmd.ConfigValidator{Config: &Config{}})
}
func main() {
err := main2()
if err != nil {
cmd.FailOnError(err, "")
}
}
// startFromID holds the -start-from-id flag value; it is read by the
// load-from-db subcommand closure.
var startFromID *int64

// main2 does the real work: parse flags and config, set up logging and the
// Redis client, optionally (for load-from-db) connect to the DB and CA, then
// dispatch to the subcommand named by the first positional argument. Errors
// are returned rather than exiting so main can handle exit in one place.
func main2() error {
	debugAddr := flag.String("debug-addr", "", "Debug server address override")
	configFile := flag.String("config", "", "File path to the configuration file for this service")
	startFromID = flag.Int64("start-from-id", 0, "For load-from-db, the first ID in the certificateStatus table to scan")
	flag.Usage = helpExit
	flag.Parse()
	// A config file and at least one positional arg (the subcommand) are required.
	if *configFile == "" || len(flag.Args()) < 1 {
		helpExit()
	}

	var conf Config
	err := cmd.ReadConfigFile(*configFile, &conf)
	if err != nil {
		return fmt.Errorf("reading JSON config file: %w", err)
	}

	// The command-line override takes precedence over the config file.
	if *debugAddr != "" {
		conf.ROCSPTool.DebugAddr = *debugAddr
	}

	_, logger, oTelShutdown := cmd.StatsAndLogging(conf.Syslog, conf.OpenTelemetry, conf.ROCSPTool.DebugAddr)
	defer oTelShutdown(context.Background())
	logger.Info(cmd.VersionString())

	clk := cmd.Clock()

	redisClient, err := rocsp_config.MakeClient(&conf.ROCSPTool.Redis, clk, metrics.NoopRegisterer)
	if err != nil {
		return fmt.Errorf("making client: %w", err)
	}

	// The DB and CA connections are only configured (and only needed) for
	// load-from-db; otherwise these stay nil/zero.
	var db *db.WrappedMap
	var ocspGenerator capb.OCSPGeneratorClient
	var scanBatchSize int
	if conf.ROCSPTool.LoadFromDB != nil {
		lfd := conf.ROCSPTool.LoadFromDB
		db, err = sa.InitWrappedDb(lfd.DB, nil, logger)
		if err != nil {
			return fmt.Errorf("connecting to DB: %w", err)
		}

		ocspGenerator, err = configureOCSPGenerator(lfd.GRPCTLS,
			lfd.OCSPGeneratorService, clk, metrics.NoopRegisterer)
		if err != nil {
			return fmt.Errorf("configuring gRPC to CA: %w", err)
		}
		// Fill in documented defaults for any zero speed settings.
		setDefault(&lfd.Speed.RowsPerSecond, 2000)
		setDefault(&lfd.Speed.ParallelSigns, 100)
		setDefault(&lfd.Speed.ScanBatchSize, 10000)
		scanBatchSize = lfd.Speed.ScanBatchSize
	}

	ctx := context.Background()
	cl := client{
		redis:         redisClient,
		db:            db,
		ocspGenerator: ocspGenerator,
		clk:           clk,
		scanBatchSize: scanBatchSize,
		logger:        logger,
	}

	// Dispatch on the first positional argument; the rest go to the subcommand.
	for _, sc := range subCommands {
		if flag.Arg(0) == sc.name {
			return sc.cmd(ctx, cl, conf, flag.Args()[1:])
		}
	}
	fmt.Fprintf(os.Stderr, "unrecognized subcommand %q\n", flag.Arg(0))
	helpExit()
	return nil
}

// subCommand represents a single subcommand. `name` is the name used to invoke it, and `help` is
// its help text.
type subCommand struct {
	name string
	help string
	cmd  func(context.Context, client, Config, []string) error
}
var (
	// Store reads each file named in args as a DER OCSP response and writes
	// it to Redis.
	Store = subCommand{"store", "for each filename on command line, read the file as an OCSP response and store it in Redis",
		func(ctx context.Context, cl client, _ Config, args []string) error {
			// Use the args the dispatcher passed in (flag.Args()[1:] — see
			// main2) instead of re-reading global flag state, and return
			// the error directly instead of the previous no-op wrapper.
			return cl.storeResponsesFromFiles(ctx, args)
		},
	}
	// Get fetches and pretty-prints the stored response for each serial in args.
	Get = subCommand{
		"get",
		"for each serial on command line, fetch that serial's response and pretty-print it",
		func(ctx context.Context, cl client, _ Config, args []string) error {
			for _, serial := range args {
				resp, err := cl.redis.GetResponse(ctx, serial)
				if err != nil {
					return err
				}
				parsed, err := ocsp.ParseResponse(resp, nil)
				if err != nil {
					// A parse failure is reported but doesn't abort the
					// remaining serials.
					fmt.Fprintf(os.Stderr, "parsing error on %x: %s", resp, err)
					continue
				}
				fmt.Printf("%s\n", helper.PrettyResponse(parsed))
			}
			return nil
		},
	}
	// GetPEM fetches each serial's response and prints it PEM-encoded.
	GetPEM = subCommand{"get-pem", "for each serial on command line, fetch that serial's response and print it PEM-encoded",
		func(ctx context.Context, cl client, _ Config, args []string) error {
			for _, serial := range args {
				resp, err := cl.redis.GetResponse(ctx, serial)
				if err != nil {
					return err
				}
				block := pem.Block{
					Bytes: resp,
					Type:  "OCSP RESPONSE",
				}
				err = pem.Encode(os.Stdout, &block)
				if err != nil {
					return err
				}
			}
			return nil
		},
	}
	// LoadFromDB scans the DB for unexpired certificates, has the CA sign
	// fresh OCSP responses for them, and stores the results in Redis.
	LoadFromDB = subCommand{"load-from-db", "scan the database for all OCSP entries for unexpired certificates, and store in Redis",
		func(ctx context.Context, cl client, c Config, args []string) error {
			if c.ROCSPTool.LoadFromDB == nil {
				return fmt.Errorf("config field LoadFromDB was missing")
			}
			err := cl.loadFromDB(ctx, c.ROCSPTool.LoadFromDB.Speed, *startFromID)
			if err != nil {
				return fmt.Errorf("loading OCSP responses from DB: %w", err)
			}
			return nil
		},
	}
	// ScanResponses iterates every OCSP entry in Redis, printing each serial
	// with its base64-encoded response body.
	ScanResponses = subCommand{"scan-responses", "scan Redis for OCSP response entries. For each entry, print the serial and base64-encoded response",
		func(ctx context.Context, cl client, _ Config, args []string) error {
			results := cl.redis.ScanResponses(ctx, "*")
			for r := range results {
				if r.Err != nil {
					return r.Err
				}
				fmt.Printf("%s: %s\n", r.Serial, base64.StdEncoding.EncodeToString(r.Body))
			}
			return nil
		},
	}
)
// subCommands is the dispatch table searched by main2; its order is also the
// order shown by helpExit.
var subCommands = []subCommand{
	Store, Get, GetPEM, LoadFromDB, ScanResponses,
}
// helpExit writes the usage summary — the subcommand list followed by the
// flag defaults — to stderr, then terminates the process with exit code 1.
func helpExit() {
	names := make([]string, 0, len(subCommands))
	helpStrings := make([]string, 0, len(subCommands))
	for _, sub := range subCommands {
		names = append(names, sub.name)
		helpStrings = append(helpStrings, fmt.Sprintf(" %s -- %s", sub.name, sub.help))
	}
	// Usage line with the subcommand names, then one help line per subcommand.
	fmt.Fprintf(os.Stderr, "Usage: %s [%s] --config path/to/config.json\n", os.Args[0], strings.Join(names, "|"))
	os.Stderr.Write([]byte(strings.Join(helpStrings, "\n")))
	fmt.Fprintln(os.Stderr)
	fmt.Fprintln(os.Stderr)
	flag.PrintDefaults()
	os.Exit(1)
}
// configureOCSPGenerator loads TLS credentials, dials the CA over gRPC, and
// returns a client for its OCSP generator service.
func configureOCSPGenerator(tlsConf cmd.TLSConfig, grpcConf cmd.GRPCClientConfig, clk clock.Clock, scope prometheus.Registerer) (capb.OCSPGeneratorClient, error) {
	tlsConfig, err := tlsConf.Load(scope)
	if err != nil {
		return nil, fmt.Errorf("loading TLS config: %w", err)
	}
	caConn, err := bgrpc.ClientSetup(&grpcConf, tlsConfig, scope, clk)
	if err != nil {
		// Return the error like the TLS branch above, rather than exiting
		// the process via cmd.FailOnError; the caller (main2) reports it.
		return nil, fmt.Errorf("creating gRPC connection to CA: %w", err)
	}
	return capb.NewOCSPGeneratorClient(caConn), nil
}
// setDefault overwrites *target with def when *target still holds the zero
// value; a non-zero *target is left untouched.
func setDefault(target *int, def int) {
	if *target != 0 {
		return
	}
	*target = def
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/letsencrypt/boulder/cmd/bad-key-revoker/main_test.go | third-party/github.com/letsencrypt/boulder/cmd/bad-key-revoker/main_test.go | package notmain
import (
"context"
"crypto/rand"
"fmt"
"sync"
"testing"
"time"
"github.com/jmhodges/clock"
"github.com/prometheus/client_golang/prometheus"
"google.golang.org/grpc"
"google.golang.org/protobuf/types/known/emptypb"
"github.com/letsencrypt/boulder/core"
"github.com/letsencrypt/boulder/db"
blog "github.com/letsencrypt/boulder/log"
rapb "github.com/letsencrypt/boulder/ra/proto"
"github.com/letsencrypt/boulder/sa"
"github.com/letsencrypt/boulder/test"
"github.com/letsencrypt/boulder/test/vars"
)
// randHash returns 32 cryptographically random bytes, failing the test on
// any rand error.
func randHash(t *testing.T) []byte {
	t.Helper()
	buf := make([]byte, 32)
	_, err := rand.Read(buf)
	test.AssertNotError(t, err, "failed to read rand")
	return buf
}
// insertBlockedRow adds a blockedKeys row for hash, revoked by account `by`,
// with extantCertificatesChecked set according to `checked`.
func insertBlockedRow(t *testing.T, dbMap *db.WrappedMap, fc clock.Clock, hash []byte, by int64, checked bool) {
	t.Helper()
	_, err := dbMap.ExecContext(context.Background(), `INSERT INTO blockedKeys
	(keyHash, added, source, revokedBy, extantCertificatesChecked)
	VALUES
	(?, ?, ?, ?, ?)`,
		hash,
		fc.Now(),
		1, // source: arbitrary fixed value for test rows
		by,
		checked,
	)
	test.AssertNotError(t, err, "failed to add test row")
}

// TestSelectUncheckedRows covers countUncheckedKeys and selectUncheckedKey:
// already-checked rows are ignored, an empty result yields sql.ErrNoRows,
// and with two unchecked rows the one inserted first (hashB) is returned.
func TestSelectUncheckedRows(t *testing.T) {
	ctx := context.Background()

	dbMap, err := sa.DBMapForTest(vars.DBConnSAFullPerms)
	test.AssertNotError(t, err, "failed setting up db client")
	defer test.ResetBoulderTestDatabase(t)()

	fc := clock.NewFake()

	bkr := &badKeyRevoker{
		dbMap:  dbMap,
		logger: blog.NewMock(),
		clk:    fc,
	}

	// Only hashA exists and it is already checked: count is 0 and selection
	// reports no rows.
	hashA, hashB, hashC := randHash(t), randHash(t), randHash(t)
	insertBlockedRow(t, dbMap, fc, hashA, 1, true)
	count, err := bkr.countUncheckedKeys(ctx)
	test.AssertNotError(t, err, "countUncheckedKeys failed")
	test.AssertEquals(t, count, 0)
	_, err = bkr.selectUncheckedKey(ctx)
	test.AssertError(t, err, "selectUncheckedKey didn't fail with no rows to process")
	test.Assert(t, db.IsNoRows(err), "returned error is not sql.ErrNoRows")

	// Two unchecked rows: count sees both; selection returns hashB.
	insertBlockedRow(t, dbMap, fc, hashB, 1, false)
	insertBlockedRow(t, dbMap, fc, hashC, 1, false)
	count, err = bkr.countUncheckedKeys(ctx)
	test.AssertNotError(t, err, "countUncheckedKeys failed")
	test.AssertEquals(t, count, 2)
	row, err := bkr.selectUncheckedKey(ctx)
	test.AssertNotError(t, err, "selectUncheckKey failed")
	test.AssertByteEquals(t, row.KeyHash, hashB)
	test.AssertEquals(t, row.RevokedBy, int64(1))
}

// insertRegistration inserts a minimal valid registrations row with a random
// jwk_sha256 and returns its auto-increment ID.
func insertRegistration(t *testing.T, dbMap *db.WrappedMap, fc clock.Clock) int64 {
	t.Helper()
	jwkHash := make([]byte, 32)
	_, err := rand.Read(jwkHash)
	test.AssertNotError(t, err, "failed to read rand")
	res, err := dbMap.ExecContext(
		context.Background(),
		"INSERT INTO registrations (jwk, jwk_sha256, agreement, createdAt, status, LockCol) VALUES (?, ?, ?, ?, ?, ?)",
		[]byte{},
		fmt.Sprintf("%x", jwkHash),
		"yes",
		fc.Now(),
		string(core.StatusValid),
		0,
	)
	test.AssertNotError(t, err, "failed to insert test registrations row")
	regID, err := res.LastInsertId()
	test.AssertNotError(t, err, "failed to get registration ID")
	return regID
}

// ExpiredStatus makes insertCert call sites self-documenting; see the
// Expired/Unexpired constants below.
type ExpiredStatus bool

const (
	Expired   = ExpiredStatus(true)
	Unexpired = ExpiredStatus(false)
	// Readable aliases for the OCSP statuses insertCert accepts.
	Revoked   = core.OCSPStatusRevoked
	Unrevoked = core.OCSPStatusGood
)
// insertGoodCert inserts an unexpired, unrevoked certificate for keyHash.
func insertGoodCert(t *testing.T, dbMap *db.WrappedMap, fc clock.Clock, keyHash []byte, serial string, regID int64) {
	insertCert(t, dbMap, fc, keyHash, serial, regID, Unexpired, Unrevoked)
}

// insertCert populates the four tables bad-key-revoker joins across for one
// certificate: keyHashToSerial, certificateStatus, precertificates, and
// certificates.
func insertCert(t *testing.T, dbMap *db.WrappedMap, fc clock.Clock, keyHash []byte, serial string, regID int64, expiredStatus ExpiredStatus, status core.OCSPStatus) {
	t.Helper()
	ctx := context.Background()

	// Expired rows get notAfter == now; live rows get just under 90 days.
	expiresOffset := 0 * time.Second
	if !expiredStatus {
		expiresOffset = 90*24*time.Hour - 1*time.Second // 90 days exclusive
	}

	// INSERT IGNORE: inserting the same (keyHash, serial) pair twice is
	// silently skipped, letting tests insert duplicates.
	_, err := dbMap.ExecContext(
		ctx,
		`INSERT IGNORE INTO keyHashToSerial
	(keyHash, certNotAfter, certSerial) VALUES
	(?, ?, ?)`,
		keyHash,
		fc.Now().Add(expiresOffset),
		serial,
	)
	test.AssertNotError(t, err, "failed to insert test keyHashToSerial row")

	_, err = dbMap.ExecContext(
		ctx,
		"INSERT INTO certificateStatus (serial, status, isExpired, ocspLastUpdated, revokedDate, revokedReason, lastExpirationNagSent) VALUES (?, ?, ?, ?, ?, ?, ?)",
		serial,
		status,
		expiredStatus,
		fc.Now(),
		time.Time{},
		0,
		time.Time{},
	)
	test.AssertNotError(t, err, "failed to insert test certificateStatus row")

	_, err = dbMap.ExecContext(
		ctx,
		"INSERT INTO precertificates (serial, registrationID, der, issued, expires) VALUES (?, ?, ?, ?, ?)",
		serial,
		regID,
		[]byte{1, 2, 3},
		fc.Now(),
		fc.Now().Add(expiresOffset),
	)
	// Fixed copy-paste: this failure message previously said
	// "certificateStatus row" even though this insert targets precertificates.
	test.AssertNotError(t, err, "failed to insert test precertificates row")

	_, err = dbMap.ExecContext(
		ctx,
		"INSERT INTO certificates (serial, registrationID, der, digest, issued, expires) VALUES (?, ?, ?, ?, ?, ?)",
		serial,
		regID,
		[]byte{1, 2, 3},
		[]byte{},
		fc.Now(),
		fc.Now().Add(expiresOffset),
	)
	test.AssertNotError(t, err, "failed to insert test certificates row")
}

// Test that we produce an error when a serial from the keyHashToSerial table
// does not have a corresponding entry in the certificateStatus and
// precertificates table.
func TestFindUnrevokedNoRows(t *testing.T) {
	ctx := context.Background()

	dbMap, err := sa.DBMapForTest(vars.DBConnSAFullPerms)
	test.AssertNotError(t, err, "failed setting up db client")
	defer test.ResetBoulderTestDatabase(t)()

	fc := clock.NewFake()

	// Insert only the keyHashToSerial row; the joined tables stay empty.
	hashA := randHash(t)
	_, err = dbMap.ExecContext(
		ctx,
		"INSERT INTO keyHashToSerial (keyHash, certNotAfter, certSerial) VALUES (?, ?, ?)",
		hashA,
		fc.Now().Add(90*24*time.Hour-1*time.Second), // 90 days exclusive
		"zz",
	)
	test.AssertNotError(t, err, "failed to insert test keyHashToSerial row")

	bkr := &badKeyRevoker{dbMap: dbMap, serialBatchSize: 1, maxRevocations: 10, clk: fc}

	_, err = bkr.findUnrevoked(ctx, uncheckedBlockedKey{KeyHash: hashA})
	test.Assert(t, db.IsNoRows(err), "expected NoRows error")
}
func TestFindUnrevoked(t *testing.T) {
ctx := context.Background()
dbMap, err := sa.DBMapForTest(vars.DBConnSAFullPerms)
test.AssertNotError(t, err, "failed setting up db client")
defer test.ResetBoulderTestDatabase(t)()
fc := clock.NewFake()
regID := insertRegistration(t, dbMap, fc)
bkr := &badKeyRevoker{dbMap: dbMap, serialBatchSize: 1, maxRevocations: 10, clk: fc}
hashA := randHash(t)
// insert valid, unexpired
insertCert(t, dbMap, fc, hashA, "ff", regID, Unexpired, Unrevoked)
// insert valid, unexpired, duplicate
insertCert(t, dbMap, fc, hashA, "ff", regID, Unexpired, Unrevoked)
// insert valid, expired
insertCert(t, dbMap, fc, hashA, "ee", regID, Expired, Unrevoked)
// insert revoked
insertCert(t, dbMap, fc, hashA, "dd", regID, Unexpired, Revoked)
rows, err := bkr.findUnrevoked(ctx, uncheckedBlockedKey{KeyHash: hashA})
test.AssertNotError(t, err, "findUnrevoked failed")
test.AssertEquals(t, len(rows), 1)
test.AssertEquals(t, rows[0].Serial, "ff")
test.AssertEquals(t, rows[0].RegistrationID, int64(1))
test.AssertByteEquals(t, rows[0].DER, []byte{1, 2, 3})
bkr.maxRevocations = 0
_, err = bkr.findUnrevoked(ctx, uncheckedBlockedKey{KeyHash: hashA})
test.AssertError(t, err, "findUnrevoked didn't fail with 0 maxRevocations")
test.AssertEquals(t, err.Error(), fmt.Sprintf("too many certificates to revoke associated with %x: got 1, max 0", hashA))
}
type mockRevoker struct {
revoked int
mu sync.Mutex
}
func (mr *mockRevoker) AdministrativelyRevokeCertificate(ctx context.Context, in *rapb.AdministrativelyRevokeCertificateRequest, _ ...grpc.CallOption) (*emptypb.Empty, error) {
mr.mu.Lock()
defer mr.mu.Unlock()
mr.revoked++
return nil, nil
}
func TestRevokeCerts(t *testing.T) {
dbMap, err := sa.DBMapForTest(vars.DBConnSAFullPerms)
test.AssertNotError(t, err, "failed setting up db client")
defer test.ResetBoulderTestDatabase(t)()
fc := clock.NewFake()
mr := &mockRevoker{}
bkr := &badKeyRevoker{dbMap: dbMap, raClient: mr, clk: fc}
err = bkr.revokeCerts([]unrevokedCertificate{
{ID: 0, Serial: "ff"},
{ID: 1, Serial: "ee"},
})
test.AssertNotError(t, err, "revokeCerts failed")
test.AssertEquals(t, mr.revoked, 2)
}
func TestCertificateAbsent(t *testing.T) {
ctx := context.Background()
dbMap, err := sa.DBMapForTest(vars.DBConnSAFullPerms)
test.AssertNotError(t, err, "failed setting up db client")
defer test.ResetBoulderTestDatabase(t)()
fc := clock.NewFake()
// populate DB with all the test data
regIDA := insertRegistration(t, dbMap, fc)
hashA := randHash(t)
insertBlockedRow(t, dbMap, fc, hashA, regIDA, false)
// Add an entry to keyHashToSerial but not to certificateStatus or certificate
// status, and expect an error.
_, err = dbMap.ExecContext(
ctx,
"INSERT INTO keyHashToSerial (keyHash, certNotAfter, certSerial) VALUES (?, ?, ?)",
hashA,
fc.Now().Add(90*24*time.Hour-1*time.Second), // 90 days exclusive
"ffaaee",
)
test.AssertNotError(t, err, "failed to insert test keyHashToSerial row")
bkr := &badKeyRevoker{
dbMap: dbMap,
maxRevocations: 1,
serialBatchSize: 1,
raClient: &mockRevoker{},
logger: blog.NewMock(),
clk: fc,
}
_, err = bkr.invoke(ctx)
test.AssertError(t, err, "expected error when row in keyHashToSerial didn't have a matching cert")
}
func TestInvoke(t *testing.T) {
ctx := context.Background()
dbMap, err := sa.DBMapForTest(vars.DBConnSAFullPerms)
test.AssertNotError(t, err, "failed setting up db client")
defer test.ResetBoulderTestDatabase(t)()
fc := clock.NewFake()
mr := &mockRevoker{}
bkr := &badKeyRevoker{
dbMap: dbMap,
maxRevocations: 10,
serialBatchSize: 1,
raClient: mr,
logger: blog.NewMock(),
clk: fc,
}
// populate DB with all the test data
regIDA := insertRegistration(t, dbMap, fc)
regIDB := insertRegistration(t, dbMap, fc)
regIDC := insertRegistration(t, dbMap, fc)
regIDD := insertRegistration(t, dbMap, fc)
hashA := randHash(t)
insertBlockedRow(t, dbMap, fc, hashA, regIDC, false)
insertGoodCert(t, dbMap, fc, hashA, "ff", regIDA)
insertGoodCert(t, dbMap, fc, hashA, "ee", regIDB)
insertGoodCert(t, dbMap, fc, hashA, "dd", regIDC)
insertGoodCert(t, dbMap, fc, hashA, "cc", regIDD)
noWork, err := bkr.invoke(ctx)
test.AssertNotError(t, err, "invoke failed")
test.AssertEquals(t, noWork, false)
test.AssertEquals(t, mr.revoked, 4)
test.AssertMetricWithLabelsEquals(t, keysToProcess, prometheus.Labels{}, 1)
var checked struct {
ExtantCertificatesChecked bool
}
err = dbMap.SelectOne(ctx, &checked, "SELECT extantCertificatesChecked FROM blockedKeys WHERE keyHash = ?", hashA)
test.AssertNotError(t, err, "failed to select row from blockedKeys")
test.AssertEquals(t, checked.ExtantCertificatesChecked, true)
// add a row with no associated valid certificates
hashB := randHash(t)
insertBlockedRow(t, dbMap, fc, hashB, regIDC, false)
insertCert(t, dbMap, fc, hashB, "bb", regIDA, Expired, Revoked)
noWork, err = bkr.invoke(ctx)
test.AssertNotError(t, err, "invoke failed")
test.AssertEquals(t, noWork, false)
checked.ExtantCertificatesChecked = false
err = dbMap.SelectOne(ctx, &checked, "SELECT extantCertificatesChecked FROM blockedKeys WHERE keyHash = ?", hashB)
test.AssertNotError(t, err, "failed to select row from blockedKeys")
test.AssertEquals(t, checked.ExtantCertificatesChecked, true)
noWork, err = bkr.invoke(ctx)
test.AssertNotError(t, err, "invoke failed")
test.AssertEquals(t, noWork, true)
}
func TestInvokeRevokerHasNoExtantCerts(t *testing.T) {
// This test checks that when the user who revoked the initial
// certificate that added the row to blockedKeys doesn't have any
// extant certificates themselves their contact email is still
// resolved and we avoid sending any emails to accounts that
// share the same email.
dbMap, err := sa.DBMapForTest(vars.DBConnSAFullPerms)
test.AssertNotError(t, err, "failed setting up db client")
defer test.ResetBoulderTestDatabase(t)()
fc := clock.NewFake()
mr := &mockRevoker{}
bkr := &badKeyRevoker{dbMap: dbMap,
maxRevocations: 10,
serialBatchSize: 1,
raClient: mr,
logger: blog.NewMock(),
clk: fc,
}
// populate DB with all the test data
regIDA := insertRegistration(t, dbMap, fc)
regIDB := insertRegistration(t, dbMap, fc)
regIDC := insertRegistration(t, dbMap, fc)
hashA := randHash(t)
insertBlockedRow(t, dbMap, fc, hashA, regIDA, false)
insertGoodCert(t, dbMap, fc, hashA, "ee", regIDB)
insertGoodCert(t, dbMap, fc, hashA, "dd", regIDB)
insertGoodCert(t, dbMap, fc, hashA, "cc", regIDC)
insertGoodCert(t, dbMap, fc, hashA, "bb", regIDC)
noWork, err := bkr.invoke(context.Background())
test.AssertNotError(t, err, "invoke failed")
test.AssertEquals(t, noWork, false)
test.AssertEquals(t, mr.revoked, 4)
}
func TestBackoffPolicy(t *testing.T) {
fc := clock.NewFake()
mocklog := blog.NewMock()
bkr := &badKeyRevoker{
clk: fc,
backoffIntervalMax: time.Second * 60,
backoffIntervalBase: time.Second * 1,
backoffFactor: 1.3,
logger: mocklog,
}
// Backoff once. Check to make sure the backoff is logged.
bkr.backoff()
resultLog := mocklog.GetAllMatching("INFO: backoff trying again in")
if len(resultLog) == 0 {
t.Fatalf("no backoff loglines found")
}
// Make sure `backoffReset` resets the ticker.
bkr.backoffReset()
test.AssertEquals(t, bkr.backoffTicker, 0)
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/letsencrypt/boulder/cmd/bad-key-revoker/main.go | third-party/github.com/letsencrypt/boulder/cmd/bad-key-revoker/main.go | package notmain
import (
"context"
"flag"
"fmt"
"os"
"time"
"github.com/jmhodges/clock"
"github.com/prometheus/client_golang/prometheus"
"golang.org/x/crypto/ocsp"
"google.golang.org/grpc"
"google.golang.org/protobuf/types/known/emptypb"
"github.com/letsencrypt/boulder/cmd"
"github.com/letsencrypt/boulder/config"
"github.com/letsencrypt/boulder/core"
"github.com/letsencrypt/boulder/db"
bgrpc "github.com/letsencrypt/boulder/grpc"
blog "github.com/letsencrypt/boulder/log"
rapb "github.com/letsencrypt/boulder/ra/proto"
"github.com/letsencrypt/boulder/sa"
)
const blockedKeysGaugeLimit = 1000
var keysToProcess = prometheus.NewGauge(prometheus.GaugeOpts{
Name: "bad_keys_to_process",
Help: fmt.Sprintf("A gauge of blockedKeys rows to process (max: %d)", blockedKeysGaugeLimit),
})
var keysProcessed = prometheus.NewCounterVec(prometheus.CounterOpts{
Name: "bad_keys_processed",
Help: "A counter of blockedKeys rows processed labelled by processing state",
}, []string{"state"})
var certsRevoked = prometheus.NewCounter(prometheus.CounterOpts{
Name: "bad_keys_certs_revoked",
Help: "A counter of certificates associated with rows in blockedKeys that have been revoked",
})
// revoker is an interface used to reduce the scope of a RA gRPC client
// to only the single method we need to use, this makes testing significantly
// simpler
type revoker interface {
AdministrativelyRevokeCertificate(ctx context.Context, in *rapb.AdministrativelyRevokeCertificateRequest, opts ...grpc.CallOption) (*emptypb.Empty, error)
}
type badKeyRevoker struct {
dbMap *db.WrappedMap
maxRevocations int
serialBatchSize int
raClient revoker
logger blog.Logger
clk clock.Clock
backoffIntervalBase time.Duration
backoffIntervalMax time.Duration
backoffFactor float64
backoffTicker int
}
// uncheckedBlockedKey represents a row in the blockedKeys table
type uncheckedBlockedKey struct {
KeyHash []byte
RevokedBy int64
}
func (ubk uncheckedBlockedKey) String() string {
return fmt.Sprintf("[revokedBy: %d, keyHash: %x]",
ubk.RevokedBy, ubk.KeyHash)
}
func (bkr *badKeyRevoker) countUncheckedKeys(ctx context.Context) (int, error) {
var count int
err := bkr.dbMap.SelectOne(
ctx,
&count,
`SELECT COUNT(*)
FROM (SELECT 1 FROM blockedKeys
WHERE extantCertificatesChecked = false
LIMIT ?) AS a`,
blockedKeysGaugeLimit,
)
return count, err
}
func (bkr *badKeyRevoker) selectUncheckedKey(ctx context.Context) (uncheckedBlockedKey, error) {
var row uncheckedBlockedKey
err := bkr.dbMap.SelectOne(
ctx,
&row,
`SELECT keyHash, revokedBy
FROM blockedKeys
WHERE extantCertificatesChecked = false
LIMIT 1`,
)
return row, err
}
// unrevokedCertificate represents a yet to be revoked certificate
type unrevokedCertificate struct {
ID int
Serial string
DER []byte
RegistrationID int64
Status core.OCSPStatus
IsExpired bool
}
func (uc unrevokedCertificate) String() string {
return fmt.Sprintf("id=%d serial=%s regID=%d status=%s expired=%t",
uc.ID, uc.Serial, uc.RegistrationID, uc.Status, uc.IsExpired)
}
// findUnrevoked looks for all unexpired, currently valid certificates which have a specific SPKI hash,
// by looking first at the keyHashToSerial table and then the certificateStatus and certificates tables.
// If the number of certificates it finds is larger than bkr.maxRevocations it'll error out.
func (bkr *badKeyRevoker) findUnrevoked(ctx context.Context, unchecked uncheckedBlockedKey) ([]unrevokedCertificate, error) {
var unrevokedCerts []unrevokedCertificate
initialID := 0
for {
var batch []struct {
ID int
CertSerial string
}
_, err := bkr.dbMap.Select(
ctx,
&batch,
"SELECT id, certSerial FROM keyHashToSerial WHERE keyHash = ? AND id > ? AND certNotAfter > ? ORDER BY id LIMIT ?",
unchecked.KeyHash,
initialID,
bkr.clk.Now(),
bkr.serialBatchSize,
)
if err != nil {
return nil, err
}
if len(batch) == 0 {
break
}
initialID = batch[len(batch)-1].ID
for _, serial := range batch {
var unrevokedCert unrevokedCertificate
// NOTE: This has a `LIMIT 1` because the certificateStatus and precertificates
// tables do not have a UNIQUE KEY on serial (for partitioning reasons). So it's
// possible we could get multiple results for a single serial number, but they
// would be duplicates.
err = bkr.dbMap.SelectOne(
ctx,
&unrevokedCert,
`SELECT cs.id, cs.serial, c.registrationID, c.der, cs.status, cs.isExpired
FROM certificateStatus AS cs
JOIN precertificates AS c
ON cs.serial = c.serial
WHERE cs.serial = ?
LIMIT 1`,
serial.CertSerial,
)
if err != nil {
return nil, err
}
if unrevokedCert.IsExpired || unrevokedCert.Status == core.OCSPStatusRevoked {
continue
}
unrevokedCerts = append(unrevokedCerts, unrevokedCert)
}
}
if len(unrevokedCerts) > bkr.maxRevocations {
return nil, fmt.Errorf("too many certificates to revoke associated with %x: got %d, max %d", unchecked.KeyHash, len(unrevokedCerts), bkr.maxRevocations)
}
return unrevokedCerts, nil
}
// markRowChecked updates a row in the blockedKeys table to mark a keyHash
// as having been checked for extant unrevoked certificates.
func (bkr *badKeyRevoker) markRowChecked(ctx context.Context, unchecked uncheckedBlockedKey) error {
_, err := bkr.dbMap.ExecContext(ctx, "UPDATE blockedKeys SET extantCertificatesChecked = true WHERE keyHash = ?", unchecked.KeyHash)
return err
}
// revokeCerts revokes all the provided certificates. It uses reason
// keyCompromise and includes note indicating that they were revoked by
// bad-key-revoker.
func (bkr *badKeyRevoker) revokeCerts(certs []unrevokedCertificate) error {
for _, cert := range certs {
_, err := bkr.raClient.AdministrativelyRevokeCertificate(context.Background(), &rapb.AdministrativelyRevokeCertificateRequest{
Cert: cert.DER,
Serial: cert.Serial,
Code: int64(ocsp.KeyCompromise),
AdminName: "bad-key-revoker",
})
if err != nil {
return err
}
certsRevoked.Inc()
}
return nil
}
// invoke exits early and returns true if there is no work to be done.
// Otherwise, it processes a single key in the blockedKeys table and returns false.
func (bkr *badKeyRevoker) invoke(ctx context.Context) (bool, error) {
// Gather a count of rows to be processed.
uncheckedCount, err := bkr.countUncheckedKeys(ctx)
if err != nil {
return false, err
}
// Set the gauge to the number of rows to be processed (max:
// blockedKeysGaugeLimit).
keysToProcess.Set(float64(uncheckedCount))
if uncheckedCount >= blockedKeysGaugeLimit {
bkr.logger.AuditInfof("found >= %d unchecked blocked keys left to process", uncheckedCount)
} else {
bkr.logger.AuditInfof("found %d unchecked blocked keys left to process", uncheckedCount)
}
// select a row to process
unchecked, err := bkr.selectUncheckedKey(ctx)
if err != nil {
if db.IsNoRows(err) {
return true, nil
}
return false, err
}
bkr.logger.AuditInfo(fmt.Sprintf("found unchecked block key to work on: %s", unchecked))
// select all unrevoked, unexpired serials associated with the blocked key hash
unrevokedCerts, err := bkr.findUnrevoked(ctx, unchecked)
if err != nil {
bkr.logger.AuditInfo(fmt.Sprintf("finding unrevoked certificates related to %s: %s",
unchecked, err))
return false, err
}
if len(unrevokedCerts) == 0 {
bkr.logger.AuditInfo(fmt.Sprintf("found no certificates that need revoking related to %s, marking row as checked", unchecked))
// mark row as checked
err = bkr.markRowChecked(ctx, unchecked)
if err != nil {
return false, err
}
return false, nil
}
var serials []string
for _, cert := range unrevokedCerts {
serials = append(serials, cert.Serial)
}
bkr.logger.AuditInfo(fmt.Sprintf("revoking serials %v for key with hash %x", serials, unchecked.KeyHash))
// revoke each certificate
err = bkr.revokeCerts(unrevokedCerts)
if err != nil {
return false, err
}
// mark the key as checked
err = bkr.markRowChecked(ctx, unchecked)
if err != nil {
return false, err
}
return false, nil
}
type Config struct {
BadKeyRevoker struct {
DB cmd.DBConfig
DebugAddr string `validate:"omitempty,hostname_port"`
TLS cmd.TLSConfig
RAService *cmd.GRPCClientConfig
// MaximumRevocations specifies the maximum number of certificates associated with
// a key hash that bad-key-revoker will attempt to revoke. If the number of certificates
// is higher than MaximumRevocations bad-key-revoker will error out and refuse to
// progress until this is addressed.
MaximumRevocations int `validate:"gte=0"`
// FindCertificatesBatchSize specifies the maximum number of serials to select from the
// keyHashToSerial table at once
FindCertificatesBatchSize int `validate:"required"`
// Interval specifies the minimum duration bad-key-revoker
// should sleep between attempting to find blockedKeys rows to
// process when there is an error or no work to do.
Interval config.Duration `validate:"-"`
// BackoffIntervalMax specifies a maximum duration the backoff
// algorithm will wait before retrying in the event of error
// or no work to do.
BackoffIntervalMax config.Duration `validate:"-"`
// Deprecated: the bad-key-revoker no longer sends emails; we use ARI.
// TODO(#8199): Remove this config stanza entirely.
Mailer struct {
cmd.SMTPConfig `validate:"-"`
SMTPTrustedRootFile string
From string
EmailSubject string
EmailTemplate string
}
}
Syslog cmd.SyslogConfig
OpenTelemetry cmd.OpenTelemetryConfig
}
func main() {
debugAddr := flag.String("debug-addr", "", "Debug server address override")
configPath := flag.String("config", "", "File path to the configuration file for this service")
flag.Parse()
if *configPath == "" {
flag.Usage()
os.Exit(1)
}
var config Config
err := cmd.ReadConfigFile(*configPath, &config)
cmd.FailOnError(err, "Failed reading config file")
if *debugAddr != "" {
config.BadKeyRevoker.DebugAddr = *debugAddr
}
scope, logger, oTelShutdown := cmd.StatsAndLogging(config.Syslog, config.OpenTelemetry, config.BadKeyRevoker.DebugAddr)
defer oTelShutdown(context.Background())
logger.Info(cmd.VersionString())
clk := cmd.Clock()
scope.MustRegister(keysProcessed)
scope.MustRegister(certsRevoked)
dbMap, err := sa.InitWrappedDb(config.BadKeyRevoker.DB, scope, logger)
cmd.FailOnError(err, "While initializing dbMap")
tlsConfig, err := config.BadKeyRevoker.TLS.Load(scope)
cmd.FailOnError(err, "TLS config")
conn, err := bgrpc.ClientSetup(config.BadKeyRevoker.RAService, tlsConfig, scope, clk)
cmd.FailOnError(err, "Failed to load credentials and create gRPC connection to RA")
rac := rapb.NewRegistrationAuthorityClient(conn)
bkr := &badKeyRevoker{
dbMap: dbMap,
maxRevocations: config.BadKeyRevoker.MaximumRevocations,
serialBatchSize: config.BadKeyRevoker.FindCertificatesBatchSize,
raClient: rac,
logger: logger,
clk: clk,
backoffIntervalMax: config.BadKeyRevoker.BackoffIntervalMax.Duration,
backoffIntervalBase: config.BadKeyRevoker.Interval.Duration,
backoffFactor: 1.3,
}
// If `BackoffIntervalMax` was not set via the config, set it to 60
// seconds. This will avoid a tight loop on error but not be an
// excessive delay if the config value was not deliberately set.
if bkr.backoffIntervalMax == 0 {
bkr.backoffIntervalMax = time.Second * 60
}
// If `Interval` was not set via the config then set
// `bkr.backoffIntervalBase` to a default 1 second.
if bkr.backoffIntervalBase == 0 {
bkr.backoffIntervalBase = time.Second
}
// Run bad-key-revoker in a loop. Backoff if no work or errors.
for {
noWork, err := bkr.invoke(context.Background())
if err != nil {
keysProcessed.WithLabelValues("error").Inc()
logger.AuditErrf("failed to process blockedKeys row: %s", err)
// Calculate and sleep for a backoff interval
bkr.backoff()
continue
}
if noWork {
logger.Info("no work to do")
// Calculate and sleep for a backoff interval
bkr.backoff()
} else {
keysProcessed.WithLabelValues("success").Inc()
// Successfully processed, reset backoff.
bkr.backoffReset()
}
}
}
// backoff increments the backoffTicker, calls core.RetryBackoff to
// calculate a new backoff duration, then logs the backoff and sleeps for
// the calculated duration.
func (bkr *badKeyRevoker) backoff() {
bkr.backoffTicker++
backoffDur := core.RetryBackoff(
bkr.backoffTicker,
bkr.backoffIntervalBase,
bkr.backoffIntervalMax,
bkr.backoffFactor,
)
bkr.logger.Infof("backoff trying again in %.2f seconds", backoffDur.Seconds())
bkr.clk.Sleep(backoffDur)
}
// reset sets the backoff ticker and duration to zero.
func (bkr *badKeyRevoker) backoffReset() {
bkr.backoffTicker = 0
}
func init() {
cmd.RegisterCommand("bad-key-revoker", main, &cmd.ConfigValidator{Config: &Config{}})
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/letsencrypt/boulder/cmd/nonce-service/main.go | third-party/github.com/letsencrypt/boulder/cmd/nonce-service/main.go | package notmain
import (
"context"
"flag"
"fmt"
"net"
"net/netip"
"os"
"github.com/letsencrypt/boulder/cmd"
bgrpc "github.com/letsencrypt/boulder/grpc"
"github.com/letsencrypt/boulder/nonce"
noncepb "github.com/letsencrypt/boulder/nonce/proto"
)
type Config struct {
NonceService struct {
cmd.ServiceConfig
MaxUsed int
// NonceHMACKey is a path to a file containing an HMAC key which is a
// secret used for deriving the prefix of each nonce instance. It should
// contain 256 bits (32 bytes) of random data to be suitable as an
// HMAC-SHA256 key (e.g. the output of `openssl rand -hex 32`). In a
// multi-DC deployment this value should be the same across all
// boulder-wfe and nonce-service instances.
NonceHMACKey cmd.HMACKeyConfig `validate:"required"`
Syslog cmd.SyslogConfig
OpenTelemetry cmd.OpenTelemetryConfig
}
}
func derivePrefix(key []byte, grpcAddr string) (string, error) {
host, port, err := net.SplitHostPort(grpcAddr)
if err != nil {
return "", fmt.Errorf("parsing gRPC listen address: %w", err)
}
if host == "" {
return "", fmt.Errorf("nonce service gRPC address must include an IP address: got %q", grpcAddr)
}
if host != "" && port != "" {
hostIP, err := netip.ParseAddr(host)
if err != nil {
return "", fmt.Errorf("gRPC address host part was not an IP address")
}
if hostIP.IsUnspecified() {
return "", fmt.Errorf("nonce service gRPC address must be a specific IP address: got %q", grpcAddr)
}
}
return nonce.DerivePrefix(grpcAddr, key), nil
}
func main() {
grpcAddr := flag.String("addr", "", "gRPC listen address override. Also used to derive the nonce prefix.")
debugAddr := flag.String("debug-addr", "", "Debug server address override")
configFile := flag.String("config", "", "File path to the configuration file for this service")
flag.Parse()
if *configFile == "" {
flag.Usage()
os.Exit(1)
}
var c Config
err := cmd.ReadConfigFile(*configFile, &c)
cmd.FailOnError(err, "Reading JSON config file into config structure")
if *grpcAddr != "" {
c.NonceService.GRPC.Address = *grpcAddr
}
if *debugAddr != "" {
c.NonceService.DebugAddr = *debugAddr
}
key, err := c.NonceService.NonceHMACKey.Load()
cmd.FailOnError(err, "Failed to load nonceHMACKey file.")
noncePrefix, err := derivePrefix(key, c.NonceService.GRPC.Address)
cmd.FailOnError(err, "Failed to derive nonce prefix")
scope, logger, oTelShutdown := cmd.StatsAndLogging(c.NonceService.Syslog, c.NonceService.OpenTelemetry, c.NonceService.DebugAddr)
defer oTelShutdown(context.Background())
logger.Info(cmd.VersionString())
ns, err := nonce.NewNonceService(scope, c.NonceService.MaxUsed, noncePrefix)
cmd.FailOnError(err, "Failed to initialize nonce service")
tlsConfig, err := c.NonceService.TLS.Load(scope)
cmd.FailOnError(err, "tlsConfig config")
nonceServer := nonce.NewServer(ns)
start, err := bgrpc.NewServer(c.NonceService.GRPC, logger).Add(
&noncepb.NonceService_ServiceDesc, nonceServer).Build(tlsConfig, scope, cmd.Clock())
cmd.FailOnError(err, "Unable to setup nonce service gRPC server")
cmd.FailOnError(start(), "Nonce service gRPC server failed")
}
func init() {
cmd.RegisterCommand("nonce-service", main, &cmd.ConfigValidator{Config: &Config{}})
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/letsencrypt/boulder/cmd/sfe/main.go | third-party/github.com/letsencrypt/boulder/cmd/sfe/main.go | package notmain
import (
"context"
"flag"
"net/http"
"os"
"github.com/letsencrypt/boulder/cmd"
"github.com/letsencrypt/boulder/config"
"github.com/letsencrypt/boulder/features"
bgrpc "github.com/letsencrypt/boulder/grpc"
rapb "github.com/letsencrypt/boulder/ra/proto"
sapb "github.com/letsencrypt/boulder/sa/proto"
"github.com/letsencrypt/boulder/sfe"
"github.com/letsencrypt/boulder/web"
)
type Config struct {
SFE struct {
DebugAddr string `validate:"omitempty,hostname_port"`
// ListenAddress is the address:port on which to listen for incoming
// HTTP requests. Defaults to ":80".
ListenAddress string `validate:"omitempty,hostname_port"`
// Timeout is the per-request overall timeout. This should be slightly
// lower than the upstream's timeout when making requests to this service.
Timeout config.Duration `validate:"-"`
// ShutdownStopTimeout determines the maximum amount of time to wait
// for extant request handlers to complete before exiting. It should be
// greater than Timeout.
ShutdownStopTimeout config.Duration
TLS cmd.TLSConfig
RAService *cmd.GRPCClientConfig
SAService *cmd.GRPCClientConfig
// UnpauseHMACKey validates incoming JWT signatures at the unpause
// endpoint. This key must be the same as the one configured for all
// WFEs. This field is required to enable the pausing feature.
UnpauseHMACKey cmd.HMACKeyConfig
Features features.Config
}
Syslog cmd.SyslogConfig
OpenTelemetry cmd.OpenTelemetryConfig
// OpenTelemetryHTTPConfig configures tracing on incoming HTTP requests
OpenTelemetryHTTPConfig cmd.OpenTelemetryHTTPConfig
}
func main() {
listenAddr := flag.String("addr", "", "HTTP listen address override")
debugAddr := flag.String("debug-addr", "", "Debug server address override")
configFile := flag.String("config", "", "File path to the configuration file for this service")
flag.Parse()
if *configFile == "" {
flag.Usage()
os.Exit(1)
}
var c Config
err := cmd.ReadConfigFile(*configFile, &c)
cmd.FailOnError(err, "Reading JSON config file into config structure")
features.Set(c.SFE.Features)
if *listenAddr != "" {
c.SFE.ListenAddress = *listenAddr
}
if c.SFE.ListenAddress == "" {
cmd.Fail("HTTP listen address is not configured")
}
if *debugAddr != "" {
c.SFE.DebugAddr = *debugAddr
}
stats, logger, oTelShutdown := cmd.StatsAndLogging(c.Syslog, c.OpenTelemetry, c.SFE.DebugAddr)
logger.Info(cmd.VersionString())
clk := cmd.Clock()
unpauseHMACKey, err := c.SFE.UnpauseHMACKey.Load()
cmd.FailOnError(err, "Failed to load unpauseHMACKey")
tlsConfig, err := c.SFE.TLS.Load(stats)
cmd.FailOnError(err, "TLS config")
raConn, err := bgrpc.ClientSetup(c.SFE.RAService, tlsConfig, stats, clk)
cmd.FailOnError(err, "Failed to load credentials and create gRPC connection to RA")
rac := rapb.NewRegistrationAuthorityClient(raConn)
saConn, err := bgrpc.ClientSetup(c.SFE.SAService, tlsConfig, stats, clk)
cmd.FailOnError(err, "Failed to load credentials and create gRPC connection to SA")
sac := sapb.NewStorageAuthorityReadOnlyClient(saConn)
sfei, err := sfe.NewSelfServiceFrontEndImpl(
stats,
clk,
logger,
c.SFE.Timeout.Duration,
rac,
sac,
unpauseHMACKey,
)
cmd.FailOnError(err, "Unable to create SFE")
logger.Infof("Server running, listening on %s....", c.SFE.ListenAddress)
handler := sfei.Handler(stats, c.OpenTelemetryHTTPConfig.Options()...)
srv := web.NewServer(c.SFE.ListenAddress, handler, logger)
go func() {
err := srv.ListenAndServe()
if err != nil && err != http.ErrServerClosed {
cmd.FailOnError(err, "Running HTTP server")
}
}()
// When main is ready to exit (because it has received a shutdown signal),
// gracefully shutdown the servers. Calling these shutdown functions causes
// ListenAndServe() and ListenAndServeTLS() to immediately return, then waits
// for any lingering connection-handling goroutines to finish their work.
defer func() {
ctx, cancel := context.WithTimeout(context.Background(), c.SFE.ShutdownStopTimeout.Duration)
defer cancel()
_ = srv.Shutdown(ctx)
oTelShutdown(ctx)
}()
cmd.WaitForSignal()
}
func init() {
cmd.RegisterCommand("sfe", main, &cmd.ConfigValidator{Config: &Config{}})
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
cli/cli | https://github.com/cli/cli/blob/c534a758887878331dda780aeb696b113f37b4ab/third-party/github.com/letsencrypt/boulder/cmd/ceremony/key_test.go | third-party/github.com/letsencrypt/boulder/cmd/ceremony/key_test.go | package main
import (
"crypto"
"crypto/ecdsa"
"crypto/elliptic"
"crypto/rand"
"crypto/rsa"
"crypto/x509"
"encoding/pem"
"math/big"
"os"
"path"
"strings"
"testing"
"github.com/letsencrypt/boulder/pkcs11helpers"
"github.com/letsencrypt/boulder/test"
"github.com/miekg/pkcs11"
)
func setupCtx() pkcs11helpers.MockCtx {
return pkcs11helpers.MockCtx{
GenerateKeyPairFunc: func(pkcs11.SessionHandle, []*pkcs11.Mechanism, []*pkcs11.Attribute, []*pkcs11.Attribute) (pkcs11.ObjectHandle, pkcs11.ObjectHandle, error) {
return 0, 0, nil
},
SignInitFunc: func(pkcs11.SessionHandle, []*pkcs11.Mechanism, pkcs11.ObjectHandle) error {
return nil
},
GenerateRandomFunc: func(pkcs11.SessionHandle, int) ([]byte, error) {
return []byte{1, 2, 3}, nil
},
FindObjectsInitFunc: func(pkcs11.SessionHandle, []*pkcs11.Attribute) error {
return nil
},
FindObjectsFunc: func(pkcs11.SessionHandle, int) ([]pkcs11.ObjectHandle, bool, error) {
return nil, false, nil
},
FindObjectsFinalFunc: func(pkcs11.SessionHandle) error {
return nil
},
}
}
func TestGenerateKeyRSA(t *testing.T) {
tmp := t.TempDir()
ctx := setupCtx()
rsaPriv, err := rsa.GenerateKey(rand.Reader, 1024)
test.AssertNotError(t, err, "Failed to generate a test RSA key")
ctx.GetAttributeValueFunc = func(pkcs11.SessionHandle, pkcs11.ObjectHandle, []*pkcs11.Attribute) ([]*pkcs11.Attribute, error) {
return []*pkcs11.Attribute{
pkcs11.NewAttribute(pkcs11.CKA_PUBLIC_EXPONENT, big.NewInt(int64(rsaPriv.E)).Bytes()),
pkcs11.NewAttribute(pkcs11.CKA_MODULUS, rsaPriv.N.Bytes()),
}, nil
}
ctx.SignFunc = func(_ pkcs11.SessionHandle, msg []byte) ([]byte, error) {
// Chop of the hash identifier and feed back into rsa.SignPKCS1v15
return rsa.SignPKCS1v15(rand.Reader, rsaPriv, crypto.SHA256, msg[19:])
}
s := &pkcs11helpers.Session{Module: &ctx, Session: 0}
keyPath := path.Join(tmp, "test-rsa-key.pem")
keyInfo, err := generateKey(s, "", keyPath, keyGenConfig{
Type: "rsa",
RSAModLength: 1024,
})
test.AssertNotError(t, err, "Failed to generate RSA key")
diskKeyBytes, err := os.ReadFile(keyPath)
test.AssertNotError(t, err, "Failed to load key from disk")
block, _ := pem.Decode(diskKeyBytes)
diskKey, err := x509.ParsePKIXPublicKey(block.Bytes)
test.AssertNotError(t, err, "Failed to parse disk key")
test.AssertDeepEquals(t, diskKey, keyInfo.key)
}
func setECGenerateFuncs(ctx *pkcs11helpers.MockCtx) {
ecPriv, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
if err != nil {
panic(err)
}
ctx.GetAttributeValueFunc = func(pkcs11.SessionHandle, pkcs11.ObjectHandle, []*pkcs11.Attribute) ([]*pkcs11.Attribute, error) {
return []*pkcs11.Attribute{
pkcs11.NewAttribute(pkcs11.CKA_EC_PARAMS, []byte{6, 8, 42, 134, 72, 206, 61, 3, 1, 7}),
pkcs11.NewAttribute(pkcs11.CKA_EC_POINT, elliptic.Marshal(elliptic.P256(), ecPriv.X, ecPriv.Y)),
}, nil
}
ctx.SignFunc = func(_ pkcs11.SessionHandle, msg []byte) ([]byte, error) {
return ecPKCS11Sign(ecPriv, msg)
}
}
func TestGenerateKeyEC(t *testing.T) {
tmp := t.TempDir()
ctx := setupCtx()
setECGenerateFuncs(&ctx)
keyPath := path.Join(tmp, "test-ecdsa-key.pem")
s := &pkcs11helpers.Session{Module: &ctx, Session: 0}
keyInfo, err := generateKey(s, "", keyPath, keyGenConfig{
Type: "ecdsa",
ECDSACurve: "P-256",
})
test.AssertNotError(t, err, "Failed to generate ECDSA key")
diskKeyBytes, err := os.ReadFile(keyPath)
test.AssertNotError(t, err, "Failed to load key from disk")
block, _ := pem.Decode(diskKeyBytes)
diskKey, err := x509.ParsePKIXPublicKey(block.Bytes)
test.AssertNotError(t, err, "Failed to parse disk key")
test.AssertDeepEquals(t, diskKey, keyInfo.key)
}
func setFindObjectsFuncs(label string, ctx *pkcs11helpers.MockCtx) {
var objectsFound []pkcs11.ObjectHandle
ctx.FindObjectsInitFunc = func(_ pkcs11.SessionHandle, template []*pkcs11.Attribute) error {
for _, attr := range template {
if attr.Type == pkcs11.CKA_LABEL && string(attr.Value) == label {
objectsFound = []pkcs11.ObjectHandle{1}
}
}
return nil
}
ctx.FindObjectsFunc = func(pkcs11.SessionHandle, int) ([]pkcs11.ObjectHandle, bool, error) {
return objectsFound, false, nil
}
ctx.FindObjectsFinalFunc = func(pkcs11.SessionHandle) error {
objectsFound = nil
return nil
}
}
func TestGenerateKeySlotHasSomethingWithLabel(t *testing.T) {
tmp := t.TempDir()
ctx := setupCtx()
label := "someLabel"
setFindObjectsFuncs(label, &ctx)
keyPath := path.Join(tmp, "should-not-exist.pem")
s := &pkcs11helpers.Session{Module: &ctx, Session: 0}
_, err := generateKey(s, label, keyPath, keyGenConfig{
Type: "ecdsa",
ECDSACurve: "P-256",
})
test.AssertError(t, err, "expected failure for a slot with an object already in it")
test.Assert(t, strings.HasPrefix(err.Error(), "expected no preexisting objects with label"), "wrong error")
}
func TestGenerateKeySlotHasSomethingWithDifferentLabel(t *testing.T) {
tmp := t.TempDir()
ctx := setupCtx()
setECGenerateFuncs(&ctx)
setFindObjectsFuncs("someLabel", &ctx)
keyPath := path.Join(tmp, "should-not-exist.pem")
s := &pkcs11helpers.Session{Module: &ctx, Session: 0}
_, err := generateKey(s, "someOtherLabel", keyPath, keyGenConfig{
Type: "ecdsa",
ECDSACurve: "P-256",
})
test.AssertNotError(t, err, "expected success even though there was an object with a different label")
}
| go | MIT | c534a758887878331dda780aeb696b113f37b4ab | 2026-01-07T08:35:47.579368Z | false |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.